VOL-1917 move to Go modules (go mod)

Change-Id: Ia8de8bd8a4f4d908cc1fa745f6f4ec949629017b
diff --git a/vendor/github.com/fullstorydev/grpcurl/mk-test-files.sh b/vendor/github.com/fullstorydev/grpcurl/mk-test-files.sh
old mode 100755
new mode 100644
diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore
deleted file mode 100644
index e256a31..0000000
--- a/vendor/github.com/ghodss/yaml/.gitignore
+++ /dev/null
@@ -1,20 +0,0 @@
-# OSX leaves these everywhere on SMB shares
-._*
-
-# Eclipse files
-.classpath
-.project
-.settings/**
-
-# Emacs save files
-*~
-
-# Vim-related files
-[._]*.s[a-w][a-z]
-[._]s[a-w][a-z]
-*.un~
-Session.vim
-.netrwhist
-
-# Go test binaries
-*.test
diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml
deleted file mode 100644
index 0e9d6ed..0000000
--- a/vendor/github.com/ghodss/yaml/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-language: go
-go:
-  - 1.3
-  - 1.4
-script:
-  - go test
-  - go build
diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE
deleted file mode 100644
index 7805d36..0000000
--- a/vendor/github.com/ghodss/yaml/LICENSE
+++ /dev/null
@@ -1,50 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Sam Ghods
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-Copyright (c) 2012 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md
deleted file mode 100644
index 0200f75..0000000
--- a/vendor/github.com/ghodss/yaml/README.md
+++ /dev/null
@@ -1,121 +0,0 @@
-# YAML marshaling and unmarshaling support for Go
-
-[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
-
-## Introduction
-
-A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
-
-In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
-
-## Compatibility
-
-This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
-
-## Caveats
-
-**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
-
-```
-BAD:
-	exampleKey: !!binary gIGC
-
-GOOD:
-	exampleKey: gIGC
-... and decode the base64 data in your code.
-```
-
-**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys.
-
-## Installation and usage
-
-To install, run:
-
-```
-$ go get github.com/ghodss/yaml
-```
-
-And import using:
-
-```
-import "github.com/ghodss/yaml"
-```
-
-Usage is very similar to the JSON library:
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/ghodss/yaml"
-)
-
-type Person struct {
-	Name string `json:"name"` // Affects YAML field names too.
-	Age  int    `json:"age"`
-}
-
-func main() {
-	// Marshal a Person struct to YAML.
-	p := Person{"John", 30}
-	y, err := yaml.Marshal(p)
-	if err != nil {
-		fmt.Printf("err: %v\n", err)
-		return
-	}
-	fmt.Println(string(y))
-	/* Output:
-	age: 30
-	name: John
-	*/
-
-	// Unmarshal the YAML back into a Person struct.
-	var p2 Person
-	err = yaml.Unmarshal(y, &p2)
-	if err != nil {
-		fmt.Printf("err: %v\n", err)
-		return
-	}
-	fmt.Println(p2)
-	/* Output:
-	{John 30}
-	*/
-}
-```
-
-`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
-
-```go
-package main
-
-import (
-	"fmt"
-
-	"github.com/ghodss/yaml"
-)
-
-func main() {
-	j := []byte(`{"name": "John", "age": 30}`)
-	y, err := yaml.JSONToYAML(j)
-	if err != nil {
-		fmt.Printf("err: %v\n", err)
-		return
-	}
-	fmt.Println(string(y))
-	/* Output:
-	name: John
-	age: 30
-	*/
-	j2, err := yaml.YAMLToJSON(y)
-	if err != nil {
-		fmt.Printf("err: %v\n", err)
-		return
-	}
-	fmt.Println(string(j2))
-	/* Output:
-	{"age":30,"name":"John"}
-	*/
-}
-```
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
deleted file mode 100644
index 5860074..0000000
--- a/vendor/github.com/ghodss/yaml/fields.go
+++ /dev/null
@@ -1,501 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package yaml
-
-import (
-	"bytes"
-	"encoding"
-	"encoding/json"
-	"reflect"
-	"sort"
-	"strings"
-	"sync"
-	"unicode"
-	"unicode/utf8"
-)
-
-// indirect walks down v allocating pointers as needed,
-// until it gets to a non-pointer.
-// if it encounters an Unmarshaler, indirect stops and returns that.
-// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
-func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
-	// If v is a named type and is addressable,
-	// start with its address, so that if the type has pointer methods,
-	// we find them.
-	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
-		v = v.Addr()
-	}
-	for {
-		// Load value from interface, but only if the result will be
-		// usefully addressable.
-		if v.Kind() == reflect.Interface && !v.IsNil() {
-			e := v.Elem()
-			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
-				v = e
-				continue
-			}
-		}
-
-		if v.Kind() != reflect.Ptr {
-			break
-		}
-
-		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
-			break
-		}
-		if v.IsNil() {
-			if v.CanSet() {
-				v.Set(reflect.New(v.Type().Elem()))
-			} else {
-				v = reflect.New(v.Type().Elem())
-			}
-		}
-		if v.Type().NumMethod() > 0 {
-			if u, ok := v.Interface().(json.Unmarshaler); ok {
-				return u, nil, reflect.Value{}
-			}
-			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
-				return nil, u, reflect.Value{}
-			}
-		}
-		v = v.Elem()
-	}
-	return nil, nil, v
-}
-
-// A field represents a single field found in a struct.
-type field struct {
-	name      string
-	nameBytes []byte                 // []byte(name)
-	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
-
-	tag       bool
-	index     []int
-	typ       reflect.Type
-	omitEmpty bool
-	quoted    bool
-}
-
-func fillField(f field) field {
-	f.nameBytes = []byte(f.name)
-	f.equalFold = foldFunc(f.nameBytes)
-	return f
-}
-
-// byName sorts field by name, breaking ties with depth,
-// then breaking ties with "name came from json tag", then
-// breaking ties with index sequence.
-type byName []field
-
-func (x byName) Len() int { return len(x) }
-
-func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byName) Less(i, j int) bool {
-	if x[i].name != x[j].name {
-		return x[i].name < x[j].name
-	}
-	if len(x[i].index) != len(x[j].index) {
-		return len(x[i].index) < len(x[j].index)
-	}
-	if x[i].tag != x[j].tag {
-		return x[i].tag
-	}
-	return byIndex(x).Less(i, j)
-}
-
-// byIndex sorts field by index sequence.
-type byIndex []field
-
-func (x byIndex) Len() int { return len(x) }
-
-func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byIndex) Less(i, j int) bool {
-	for k, xik := range x[i].index {
-		if k >= len(x[j].index) {
-			return false
-		}
-		if xik != x[j].index[k] {
-			return xik < x[j].index[k]
-		}
-	}
-	return len(x[i].index) < len(x[j].index)
-}
-
-// typeFields returns a list of fields that JSON should recognize for the given type.
-// The algorithm is breadth-first search over the set of structs to include - the top struct
-// and then any reachable anonymous structs.
-func typeFields(t reflect.Type) []field {
-	// Anonymous fields to explore at the current level and the next.
-	current := []field{}
-	next := []field{{typ: t}}
-
-	// Count of queued names for current level and the next.
-	count := map[reflect.Type]int{}
-	nextCount := map[reflect.Type]int{}
-
-	// Types already visited at an earlier level.
-	visited := map[reflect.Type]bool{}
-
-	// Fields found.
-	var fields []field
-
-	for len(next) > 0 {
-		current, next = next, current[:0]
-		count, nextCount = nextCount, map[reflect.Type]int{}
-
-		for _, f := range current {
-			if visited[f.typ] {
-				continue
-			}
-			visited[f.typ] = true
-
-			// Scan f.typ for fields to include.
-			for i := 0; i < f.typ.NumField(); i++ {
-				sf := f.typ.Field(i)
-				if sf.PkgPath != "" { // unexported
-					continue
-				}
-				tag := sf.Tag.Get("json")
-				if tag == "-" {
-					continue
-				}
-				name, opts := parseTag(tag)
-				if !isValidTag(name) {
-					name = ""
-				}
-				index := make([]int, len(f.index)+1)
-				copy(index, f.index)
-				index[len(f.index)] = i
-
-				ft := sf.Type
-				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
-					// Follow pointer.
-					ft = ft.Elem()
-				}
-
-				// Record found field and index sequence.
-				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
-					tagged := name != ""
-					if name == "" {
-						name = sf.Name
-					}
-					fields = append(fields, fillField(field{
-						name:      name,
-						tag:       tagged,
-						index:     index,
-						typ:       ft,
-						omitEmpty: opts.Contains("omitempty"),
-						quoted:    opts.Contains("string"),
-					}))
-					if count[f.typ] > 1 {
-						// If there were multiple instances, add a second,
-						// so that the annihilation code will see a duplicate.
-						// It only cares about the distinction between 1 or 2,
-						// so don't bother generating any more copies.
-						fields = append(fields, fields[len(fields)-1])
-					}
-					continue
-				}
-
-				// Record new anonymous struct to explore in next round.
-				nextCount[ft]++
-				if nextCount[ft] == 1 {
-					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
-				}
-			}
-		}
-	}
-
-	sort.Sort(byName(fields))
-
-	// Delete all fields that are hidden by the Go rules for embedded fields,
-	// except that fields with JSON tags are promoted.
-
-	// The fields are sorted in primary order of name, secondary order
-	// of field index length. Loop over names; for each name, delete
-	// hidden fields by choosing the one dominant field that survives.
-	out := fields[:0]
-	for advance, i := 0, 0; i < len(fields); i += advance {
-		// One iteration per name.
-		// Find the sequence of fields with the name of this first field.
-		fi := fields[i]
-		name := fi.name
-		for advance = 1; i+advance < len(fields); advance++ {
-			fj := fields[i+advance]
-			if fj.name != name {
-				break
-			}
-		}
-		if advance == 1 { // Only one field with this name
-			out = append(out, fi)
-			continue
-		}
-		dominant, ok := dominantField(fields[i : i+advance])
-		if ok {
-			out = append(out, dominant)
-		}
-	}
-
-	fields = out
-	sort.Sort(byIndex(fields))
-
-	return fields
-}
-
-// dominantField looks through the fields, all of which are known to
-// have the same name, to find the single field that dominates the
-// others using Go's embedding rules, modified by the presence of
-// JSON tags. If there are multiple top-level fields, the boolean
-// will be false: This condition is an error in Go and we skip all
-// the fields.
-func dominantField(fields []field) (field, bool) {
-	// The fields are sorted in increasing index-length order. The winner
-	// must therefore be one with the shortest index length. Drop all
-	// longer entries, which is easy: just truncate the slice.
-	length := len(fields[0].index)
-	tagged := -1 // Index of first tagged field.
-	for i, f := range fields {
-		if len(f.index) > length {
-			fields = fields[:i]
-			break
-		}
-		if f.tag {
-			if tagged >= 0 {
-				// Multiple tagged fields at the same level: conflict.
-				// Return no field.
-				return field{}, false
-			}
-			tagged = i
-		}
-	}
-	if tagged >= 0 {
-		return fields[tagged], true
-	}
-	// All remaining fields have the same length. If there's more than one,
-	// we have a conflict (two fields named "X" at the same level) and we
-	// return no field.
-	if len(fields) > 1 {
-		return field{}, false
-	}
-	return fields[0], true
-}
-
-var fieldCache struct {
-	sync.RWMutex
-	m map[reflect.Type][]field
-}
-
-// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
-func cachedTypeFields(t reflect.Type) []field {
-	fieldCache.RLock()
-	f := fieldCache.m[t]
-	fieldCache.RUnlock()
-	if f != nil {
-		return f
-	}
-
-	// Compute fields without lock.
-	// Might duplicate effort but won't hold other computations back.
-	f = typeFields(t)
-	if f == nil {
-		f = []field{}
-	}
-
-	fieldCache.Lock()
-	if fieldCache.m == nil {
-		fieldCache.m = map[reflect.Type][]field{}
-	}
-	fieldCache.m[t] = f
-	fieldCache.Unlock()
-	return f
-}
-
-func isValidTag(s string) bool {
-	if s == "" {
-		return false
-	}
-	for _, c := range s {
-		switch {
-		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
-			// Backslash and quote chars are reserved, but
-			// otherwise any punctuation chars are allowed
-			// in a tag name.
-		default:
-			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
-				return false
-			}
-		}
-	}
-	return true
-}
-
-const (
-	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
-	kelvin       = '\u212a'
-	smallLongEss = '\u017f'
-)
-
-// foldFunc returns one of four different case folding equivalence
-// functions, from most general (and slow) to fastest:
-//
-// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
-// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
-// 3) asciiEqualFold, no special, but includes non-letters (including _)
-// 4) simpleLetterEqualFold, no specials, no non-letters.
-//
-// The letters S and K are special because they map to 3 runes, not just 2:
-//  * S maps to s and to U+017F 'ſ' Latin small letter long s
-//  * k maps to K and to U+212A 'K' Kelvin sign
-// See http://play.golang.org/p/tTxjOc0OGo
-//
-// The returned function is specialized for matching against s and
-// should only be given s. It's not curried for performance reasons.
-func foldFunc(s []byte) func(s, t []byte) bool {
-	nonLetter := false
-	special := false // special letter
-	for _, b := range s {
-		if b >= utf8.RuneSelf {
-			return bytes.EqualFold
-		}
-		upper := b & caseMask
-		if upper < 'A' || upper > 'Z' {
-			nonLetter = true
-		} else if upper == 'K' || upper == 'S' {
-			// See above for why these letters are special.
-			special = true
-		}
-	}
-	if special {
-		return equalFoldRight
-	}
-	if nonLetter {
-		return asciiEqualFold
-	}
-	return simpleLetterEqualFold
-}
-
-// equalFoldRight is a specialization of bytes.EqualFold when s is
-// known to be all ASCII (including punctuation), but contains an 's',
-// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
-// See comments on foldFunc.
-func equalFoldRight(s, t []byte) bool {
-	for _, sb := range s {
-		if len(t) == 0 {
-			return false
-		}
-		tb := t[0]
-		if tb < utf8.RuneSelf {
-			if sb != tb {
-				sbUpper := sb & caseMask
-				if 'A' <= sbUpper && sbUpper <= 'Z' {
-					if sbUpper != tb&caseMask {
-						return false
-					}
-				} else {
-					return false
-				}
-			}
-			t = t[1:]
-			continue
-		}
-		// sb is ASCII and t is not. t must be either kelvin
-		// sign or long s; sb must be s, S, k, or K.
-		tr, size := utf8.DecodeRune(t)
-		switch sb {
-		case 's', 'S':
-			if tr != smallLongEss {
-				return false
-			}
-		case 'k', 'K':
-			if tr != kelvin {
-				return false
-			}
-		default:
-			return false
-		}
-		t = t[size:]
-
-	}
-	if len(t) > 0 {
-		return false
-	}
-	return true
-}
-
-// asciiEqualFold is a specialization of bytes.EqualFold for use when
-// s is all ASCII (but may contain non-letters) and contains no
-// special-folding letters.
-// See comments on foldFunc.
-func asciiEqualFold(s, t []byte) bool {
-	if len(s) != len(t) {
-		return false
-	}
-	for i, sb := range s {
-		tb := t[i]
-		if sb == tb {
-			continue
-		}
-		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
-			if sb&caseMask != tb&caseMask {
-				return false
-			}
-		} else {
-			return false
-		}
-	}
-	return true
-}
-
-// simpleLetterEqualFold is a specialization of bytes.EqualFold for
-// use when s is all ASCII letters (no underscores, etc) and also
-// doesn't contain 'k', 'K', 's', or 'S'.
-// See comments on foldFunc.
-func simpleLetterEqualFold(s, t []byte) bool {
-	if len(s) != len(t) {
-		return false
-	}
-	for i, b := range s {
-		if b&caseMask != t[i]&caseMask {
-			return false
-		}
-	}
-	return true
-}
-
-// tagOptions is the string following a comma in a struct field's "json"
-// tag, or the empty string. It does not include the leading comma.
-type tagOptions string
-
-// parseTag splits a struct field's json tag into its name and
-// comma-separated options.
-func parseTag(tag string) (string, tagOptions) {
-	if idx := strings.Index(tag, ","); idx != -1 {
-		return tag[:idx], tagOptions(tag[idx+1:])
-	}
-	return tag, tagOptions("")
-}
-
-// Contains reports whether a comma-separated list of options
-// contains a particular substr flag. substr must be surrounded by a
-// string boundary or commas.
-func (o tagOptions) Contains(optionName string) bool {
-	if len(o) == 0 {
-		return false
-	}
-	s := string(o)
-	for s != "" {
-		var next string
-		i := strings.Index(s, ",")
-		if i >= 0 {
-			s, next = s[:i], s[i+1:]
-		}
-		if s == optionName {
-			return true
-		}
-		s = next
-	}
-	return false
-}
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
deleted file mode 100644
index 4fb4054..0000000
--- a/vendor/github.com/ghodss/yaml/yaml.go
+++ /dev/null
@@ -1,277 +0,0 @@
-package yaml
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"reflect"
-	"strconv"
-
-	"gopkg.in/yaml.v2"
-)
-
-// Marshals the object into JSON then converts JSON to YAML and returns the
-// YAML.
-func Marshal(o interface{}) ([]byte, error) {
-	j, err := json.Marshal(o)
-	if err != nil {
-		return nil, fmt.Errorf("error marshaling into JSON: %v", err)
-	}
-
-	y, err := JSONToYAML(j)
-	if err != nil {
-		return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
-	}
-
-	return y, nil
-}
-
-// Converts YAML to JSON then uses JSON to unmarshal into an object.
-func Unmarshal(y []byte, o interface{}) error {
-	vo := reflect.ValueOf(o)
-	j, err := yamlToJSON(y, &vo)
-	if err != nil {
-		return fmt.Errorf("error converting YAML to JSON: %v", err)
-	}
-
-	err = json.Unmarshal(j, o)
-	if err != nil {
-		return fmt.Errorf("error unmarshaling JSON: %v", err)
-	}
-
-	return nil
-}
-
-// Convert JSON to YAML.
-func JSONToYAML(j []byte) ([]byte, error) {
-	// Convert the JSON to an object.
-	var jsonObj interface{}
-	// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
-	// Go JSON library doesn't try to pick the right number type (int, float,
-	// etc.) when unmarshalling to interface{}, it just picks float64
-	// universally. go-yaml does go through the effort of picking the right
-	// number type, so we can preserve number type throughout this process.
-	err := yaml.Unmarshal(j, &jsonObj)
-	if err != nil {
-		return nil, err
-	}
-
-	// Marshal this object into YAML.
-	return yaml.Marshal(jsonObj)
-}
-
-// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
-// this method should be a no-op.
-//
-// Things YAML can do that are not supported by JSON:
-// * In YAML you can have binary and null keys in your maps. These are invalid
-//   in JSON. (int and float keys are converted to strings.)
-// * Binary data in YAML with the !!binary tag is not supported. If you want to
-//   use binary data with this library, encode the data as base64 as usual but do
-//   not use the !!binary tag in your YAML. This will ensure the original base64
-//   encoded data makes it all the way through to the JSON.
-func YAMLToJSON(y []byte) ([]byte, error) {
-	return yamlToJSON(y, nil)
-}
-
-func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
-	// Convert the YAML to an object.
-	var yamlObj interface{}
-	err := yaml.Unmarshal(y, &yamlObj)
-	if err != nil {
-		return nil, err
-	}
-
-	// YAML objects are not completely compatible with JSON objects (e.g. you
-	// can have non-string keys in YAML). So, convert the YAML-compatible object
-	// to a JSON-compatible object, failing with an error if irrecoverable
-	// incompatibilties happen along the way.
-	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
-	if err != nil {
-		return nil, err
-	}
-
-	// Convert this object to JSON and return the data.
-	return json.Marshal(jsonObj)
-}
-
-func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
-	var err error
-
-	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
-	// interface). We pass decodingNull as false because we're not actually
-	// decoding into the value, we're just checking if the ultimate target is a
-	// string.
-	if jsonTarget != nil {
-		ju, tu, pv := indirect(*jsonTarget, false)
-		// We have a JSON or Text Umarshaler at this level, so we can't be trying
-		// to decode into a string.
-		if ju != nil || tu != nil {
-			jsonTarget = nil
-		} else {
-			jsonTarget = &pv
-		}
-	}
-
-	// If yamlObj is a number or a boolean, check if jsonTarget is a string -
-	// if so, coerce.  Else return normal.
-	// If yamlObj is a map or array, find the field that each key is
-	// unmarshaling to, and when you recurse pass the reflect.Value for that
-	// field back into this function.
-	switch typedYAMLObj := yamlObj.(type) {
-	case map[interface{}]interface{}:
-		// JSON does not support arbitrary keys in a map, so we must convert
-		// these keys to strings.
-		//
-		// From my reading of go-yaml v2 (specifically the resolve function),
-		// keys can only have the types string, int, int64, float64, binary
-		// (unsupported), or null (unsupported).
-		strMap := make(map[string]interface{})
-		for k, v := range typedYAMLObj {
-			// Resolve the key to a string first.
-			var keyString string
-			switch typedKey := k.(type) {
-			case string:
-				keyString = typedKey
-			case int:
-				keyString = strconv.Itoa(typedKey)
-			case int64:
-				// go-yaml will only return an int64 as a key if the system
-				// architecture is 32-bit and the key's value is between 32-bit
-				// and 64-bit. Otherwise the key type will simply be int.
-				keyString = strconv.FormatInt(typedKey, 10)
-			case float64:
-				// Stolen from go-yaml to use the same conversion to string as
-				// the go-yaml library uses to convert float to string when
-				// Marshaling.
-				s := strconv.FormatFloat(typedKey, 'g', -1, 32)
-				switch s {
-				case "+Inf":
-					s = ".inf"
-				case "-Inf":
-					s = "-.inf"
-				case "NaN":
-					s = ".nan"
-				}
-				keyString = s
-			case bool:
-				if typedKey {
-					keyString = "true"
-				} else {
-					keyString = "false"
-				}
-			default:
-				return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
-					reflect.TypeOf(k), k, v)
-			}
-
-			// jsonTarget should be a struct or a map. If it's a struct, find
-			// the field it's going to map to and pass its reflect.Value. If
-			// it's a map, find the element type of the map and pass the
-			// reflect.Value created from that type. If it's neither, just pass
-			// nil - JSON conversion will error for us if it's a real issue.
-			if jsonTarget != nil {
-				t := *jsonTarget
-				if t.Kind() == reflect.Struct {
-					keyBytes := []byte(keyString)
-					// Find the field that the JSON library would use.
-					var f *field
-					fields := cachedTypeFields(t.Type())
-					for i := range fields {
-						ff := &fields[i]
-						if bytes.Equal(ff.nameBytes, keyBytes) {
-							f = ff
-							break
-						}
-						// Do case-insensitive comparison.
-						if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
-							f = ff
-						}
-					}
-					if f != nil {
-						// Find the reflect.Value of the most preferential
-						// struct field.
-						jtf := t.Field(f.index[0])
-						strMap[keyString], err = convertToJSONableObject(v, &jtf)
-						if err != nil {
-							return nil, err
-						}
-						continue
-					}
-				} else if t.Kind() == reflect.Map {
-					// Create a zero value of the map's element type to use as
-					// the JSON target.
-					jtv := reflect.Zero(t.Type().Elem())
-					strMap[keyString], err = convertToJSONableObject(v, &jtv)
-					if err != nil {
-						return nil, err
-					}
-					continue
-				}
-			}
-			strMap[keyString], err = convertToJSONableObject(v, nil)
-			if err != nil {
-				return nil, err
-			}
-		}
-		return strMap, nil
-	case []interface{}:
-		// We need to recurse into arrays in case there are any
-		// map[interface{}]interface{}'s inside and to convert any
-		// numbers to strings.
-
-		// If jsonTarget is a slice (which it really should be), find the
-		// thing it's going to map to. If it's not a slice, just pass nil
-		// - JSON conversion will error for us if it's a real issue.
-		var jsonSliceElemValue *reflect.Value
-		if jsonTarget != nil {
-			t := *jsonTarget
-			if t.Kind() == reflect.Slice {
-				// By default slices point to nil, but we need a reflect.Value
-				// pointing to a value of the slice type, so we create one here.
-				ev := reflect.Indirect(reflect.New(t.Type().Elem()))
-				jsonSliceElemValue = &ev
-			}
-		}
-
-		// Make and use a new array.
-		arr := make([]interface{}, len(typedYAMLObj))
-		for i, v := range typedYAMLObj {
-			arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
-			if err != nil {
-				return nil, err
-			}
-		}
-		return arr, nil
-	default:
-		// If the target type is a string and the YAML type is a number,
-		// convert the YAML type to a string.
-		if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
-			// Based on my reading of go-yaml, it may return int, int64,
-			// float64, or uint64.
-			var s string
-			switch typedVal := typedYAMLObj.(type) {
-			case int:
-				s = strconv.FormatInt(int64(typedVal), 10)
-			case int64:
-				s = strconv.FormatInt(typedVal, 10)
-			case float64:
-				s = strconv.FormatFloat(typedVal, 'g', -1, 32)
-			case uint64:
-				s = strconv.FormatUint(typedVal, 10)
-			case bool:
-				if typedVal {
-					s = "true"
-				} else {
-					s = "false"
-				}
-			}
-			if len(s) > 0 {
-				yamlObj = interface{}(s)
-			}
-		}
-		return yamlObj, nil
-	}
-
-	return nil, nil
-}
diff --git a/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS b/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS
deleted file mode 100644
index b368efb..0000000
--- a/vendor/github.com/gogo/protobuf/GOLANG_CONTRIBUTORS
+++ /dev/null
@@ -1,5 +0,0 @@
-The contributors to the Go protobuf repository:
-
-# This source code was written by the Go contributors.
-# The master list of contributors is in the main Go distribution,
-# visible at http://tip.golang.org/CONTRIBUTORS.
\ No newline at end of file
diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE
deleted file mode 100644
index 37ec93a..0000000
--- a/vendor/github.com/golang/glog/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by the copyright
-owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all other entities
-that control, are controlled by, or are under common control with that entity.
-For the purposes of this definition, "control" means (i) the power, direct or
-indirect, to cause the direction or management of such entity, whether by
-contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity exercising
-permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications, including
-but not limited to software source code, documentation source, and configuration
-files.
-
-"Object" form shall mean any form resulting from mechanical transformation or
-translation of a Source form, including but not limited to compiled object code,
-generated documentation, and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or Object form, made
-available under the License, as indicated by a copyright notice that is included
-in or attached to the work (an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object form, that
-is based on (or derived from) the Work and for which the editorial revisions,
-annotations, elaborations, or other modifications represent, as a whole, an
-original work of authorship. For the purposes of this License, Derivative Works
-shall not include works that remain separable from, or merely link (or bind by
-name) to the interfaces of, the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including the original version
-of the Work and any modifications or additions to that Work or Derivative Works
-thereof, that is intentionally submitted to Licensor for inclusion in the Work
-by the copyright owner or by an individual or Legal Entity authorized to submit
-on behalf of the copyright owner. For the purposes of this definition,
-"submitted" means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems, and
-issue tracking systems that are managed by, or on behalf of, the Licensor for
-the purpose of discussing and improving the Work, but excluding communication
-that is conspicuously marked or otherwise designated in writing by the copyright
-owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
-of whom a Contribution has been received by Licensor and subsequently
-incorporated within the Work.
-
-2. Grant of Copyright License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the Work and such
-Derivative Works in Source or Object form.
-
-3. Grant of Patent License.
-
-Subject to the terms and conditions of this License, each Contributor hereby
-grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
-irrevocable (except as stated in this section) patent license to make, have
-made, use, offer to sell, sell, import, and otherwise transfer the Work, where
-such license applies only to those patent claims licensable by such Contributor
-that are necessarily infringed by their Contribution(s) alone or by combination
-of their Contribution(s) with the Work to which such Contribution(s) was
-submitted. If You institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work or a
-Contribution incorporated within the Work constitutes direct or contributory
-patent infringement, then any patent licenses granted to You under this License
-for that Work shall terminate as of the date such litigation is filed.
-
-4. Redistribution.
-
-You may reproduce and distribute copies of the Work or Derivative Works thereof
-in any medium, with or without modifications, and in Source or Object form,
-provided that You meet the following conditions:
-
-You must give any other recipients of the Work or Derivative Works a copy of
-this License; and
-You must cause any modified files to carry prominent notices stating that You
-changed the files; and
-You must retain, in the Source form of any Derivative Works that You distribute,
-all copyright, patent, trademark, and attribution notices from the Source form
-of the Work, excluding those notices that do not pertain to any part of the
-Derivative Works; and
-If the Work includes a "NOTICE" text file as part of its distribution, then any
-Derivative Works that You distribute must include a readable copy of the
-attribution notices contained within such NOTICE file, excluding those notices
-that do not pertain to any part of the Derivative Works, in at least one of the
-following places: within a NOTICE text file distributed as part of the
-Derivative Works; within the Source form or documentation, if provided along
-with the Derivative Works; or, within a display generated by the Derivative
-Works, if and wherever such third-party notices normally appear. The contents of
-the NOTICE file are for informational purposes only and do not modify the
-License. You may add Your own attribution notices within Derivative Works that
-You distribute, alongside or as an addendum to the NOTICE text from the Work,
-provided that such additional attribution notices cannot be construed as
-modifying the License.
-You may add Your own copyright statement to Your modifications and may provide
-additional or different license terms and conditions for use, reproduction, or
-distribution of Your modifications, or for any such Derivative Works as a whole,
-provided Your use, reproduction, and distribution of the Work otherwise complies
-with the conditions stated in this License.
-
-5. Submission of Contributions.
-
-Unless You explicitly state otherwise, any Contribution intentionally submitted
-for inclusion in the Work by You to the Licensor shall be under the terms and
-conditions of this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify the terms of
-any separate license agreement you may have executed with Licensor regarding
-such Contributions.
-
-6. Trademarks.
-
-This License does not grant permission to use the trade names, trademarks,
-service marks, or product names of the Licensor, except as required for
-reasonable and customary use in describing the origin of the Work and
-reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty.
-
-Unless required by applicable law or agreed to in writing, Licensor provides the
-Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
-including, without limitation, any warranties or conditions of TITLE,
-NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
-solely responsible for determining the appropriateness of using or
-redistributing the Work and assume any risks associated with Your exercise of
-permissions under this License.
-
-8. Limitation of Liability.
-
-In no event and under no legal theory, whether in tort (including negligence),
-contract, or otherwise, unless required by applicable law (such as deliberate
-and grossly negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special, incidental,
-or consequential damages of any character arising as a result of this License or
-out of the use or inability to use the Work (including but not limited to
-damages for loss of goodwill, work stoppage, computer failure or malfunction, or
-any and all other commercial damages or losses), even if such Contributor has
-been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability.
-
-While redistributing the Work or Derivative Works thereof, You may choose to
-offer, and charge a fee for, acceptance of support, warranty, indemnity, or
-other liability obligations and/or rights consistent with this License. However,
-in accepting such obligations, You may act only on Your own behalf and on Your
-sole responsibility, not on behalf of any other Contributor, and only if You
-agree to indemnify, defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason of your
-accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-APPENDIX: How to apply the Apache License to your work
-
-To apply the Apache License to your work, attach the following boilerplate
-notice, with the fields enclosed by brackets "[]" replaced with your own
-identifying information. (Don't include the brackets!) The text should be
-enclosed in the appropriate comment syntax for the file format. We also
-recommend that a file or class name and description of purpose be included on
-the same "printed page" as the copyright notice for easier identification within
-third-party archives.
-
-   Copyright [yyyy] [name of copyright owner]
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README
deleted file mode 100644
index 387b4eb..0000000
--- a/vendor/github.com/golang/glog/README
+++ /dev/null
@@ -1,44 +0,0 @@
-glog
-====
-
-Leveled execution logs for Go.
-
-This is an efficient pure Go implementation of leveled logs in the
-manner of the open source C++ package
-	https://github.com/google/glog
-
-By binding methods to booleans it is possible to use the log package
-without paying the expense of evaluating the arguments to the log.
-Through the -vmodule flag, the package also provides fine-grained
-control over logging at the file level.
-
-The comment from glog.go introduces the ideas:
-
-	Package glog implements logging analogous to the Google-internal
-	C++ INFO/ERROR/V setup.  It provides functions Info, Warning,
-	Error, Fatal, plus formatting variants such as Infof. It
-	also provides V-style logging controlled by the -v and
-	-vmodule=file=2 flags.
-	
-	Basic examples:
-	
-		glog.Info("Prepare to repel boarders")
-	
-		glog.Fatalf("Initialization failed: %s", err)
-	
-	See the documentation for the V function for an explanation
-	of these examples:
-	
-		if glog.V(2) {
-			glog.Info("Starting transaction...")
-		}
-	
-		glog.V(2).Infoln("Processed", nItems, "elements")
-
-
-The repository contains an open source version of the log package
-used inside Google. The master copy of the source lives inside
-Google, not here. The code in this repo is for export only and is not itself
-under development. Feature requests will be ignored.
-
-Send bug reports to golang-nuts@googlegroups.com.
diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go
deleted file mode 100644
index 54bd7af..0000000
--- a/vendor/github.com/golang/glog/glog.go
+++ /dev/null
@@ -1,1180 +0,0 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
-//
-// Copyright 2013 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
-// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
-// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
-//
-// Basic examples:
-//
-//	glog.Info("Prepare to repel boarders")
-//
-//	glog.Fatalf("Initialization failed: %s", err)
-//
-// See the documentation for the V function for an explanation of these examples:
-//
-//	if glog.V(2) {
-//		glog.Info("Starting transaction...")
-//	}
-//
-//	glog.V(2).Infoln("Processed", nItems, "elements")
-//
-// Log output is buffered and written periodically using Flush. Programs
-// should call Flush before exiting to guarantee all log output is written.
-//
-// By default, all log statements write to files in a temporary directory.
-// This package provides several flags that modify this behavior.
-// As a result, flag.Parse must be called before any logging is done.
-//
-//	-logtostderr=false
-//		Logs are written to standard error instead of to files.
-//	-alsologtostderr=false
-//		Logs are written to standard error as well as to files.
-//	-stderrthreshold=ERROR
-//		Log events at or above this severity are logged to standard
-//		error as well as to files.
-//	-log_dir=""
-//		Log files will be written to this directory instead of the
-//		default temporary directory.
-//
-//	Other flags provide aids to debugging.
-//
-//	-log_backtrace_at=""
-//		When set to a file and line number holding a logging statement,
-//		such as
-//			-log_backtrace_at=gopherflakes.go:234
-//		a stack trace will be written to the Info log whenever execution
-//		hits that statement. (Unlike with -vmodule, the ".go" must be
-//		present.)
-//	-v=0
-//		Enable V-leveled logging at the specified level.
-//	-vmodule=""
-//		The syntax of the argument is a comma-separated list of pattern=N,
-//		where pattern is a literal file name (minus the ".go" suffix) or
-//		"glob" pattern and N is a V level. For instance,
-//			-vmodule=gopher*=3
-//		sets the V level to 3 in all Go files whose names begin "gopher".
-//
-package glog
-
-import (
-	"bufio"
-	"bytes"
-	"errors"
-	"flag"
-	"fmt"
-	"io"
-	stdLog "log"
-	"os"
-	"path/filepath"
-	"runtime"
-	"strconv"
-	"strings"
-	"sync"
-	"sync/atomic"
-	"time"
-)
-
-// severity identifies the sort of log: info, warning etc. It also implements
-// the flag.Value interface. The -stderrthreshold flag is of type severity and
-// should be modified only through the flag.Value interface. The values match
-// the corresponding constants in C++.
-type severity int32 // sync/atomic int32
-
-// These constants identify the log levels in order of increasing severity.
-// A message written to a high-severity log file is also written to each
-// lower-severity log file.
-const (
-	infoLog severity = iota
-	warningLog
-	errorLog
-	fatalLog
-	numSeverity = 4
-)
-
-const severityChar = "IWEF"
-
-var severityName = []string{
-	infoLog:    "INFO",
-	warningLog: "WARNING",
-	errorLog:   "ERROR",
-	fatalLog:   "FATAL",
-}
-
-// get returns the value of the severity.
-func (s *severity) get() severity {
-	return severity(atomic.LoadInt32((*int32)(s)))
-}
-
-// set sets the value of the severity.
-func (s *severity) set(val severity) {
-	atomic.StoreInt32((*int32)(s), int32(val))
-}
-
-// String is part of the flag.Value interface.
-func (s *severity) String() string {
-	return strconv.FormatInt(int64(*s), 10)
-}
-
-// Get is part of the flag.Value interface.
-func (s *severity) Get() interface{} {
-	return *s
-}
-
-// Set is part of the flag.Value interface.
-func (s *severity) Set(value string) error {
-	var threshold severity
-	// Is it a known name?
-	if v, ok := severityByName(value); ok {
-		threshold = v
-	} else {
-		v, err := strconv.Atoi(value)
-		if err != nil {
-			return err
-		}
-		threshold = severity(v)
-	}
-	logging.stderrThreshold.set(threshold)
-	return nil
-}
-
-func severityByName(s string) (severity, bool) {
-	s = strings.ToUpper(s)
-	for i, name := range severityName {
-		if name == s {
-			return severity(i), true
-		}
-	}
-	return 0, false
-}
-
-// OutputStats tracks the number of output lines and bytes written.
-type OutputStats struct {
-	lines int64
-	bytes int64
-}
-
-// Lines returns the number of lines written.
-func (s *OutputStats) Lines() int64 {
-	return atomic.LoadInt64(&s.lines)
-}
-
-// Bytes returns the number of bytes written.
-func (s *OutputStats) Bytes() int64 {
-	return atomic.LoadInt64(&s.bytes)
-}
-
-// Stats tracks the number of lines of output and number of bytes
-// per severity level. Values must be read with atomic.LoadInt64.
-var Stats struct {
-	Info, Warning, Error OutputStats
-}
-
-var severityStats = [numSeverity]*OutputStats{
-	infoLog:    &Stats.Info,
-	warningLog: &Stats.Warning,
-	errorLog:   &Stats.Error,
-}
-
-// Level is exported because it appears in the arguments to V and is
-// the type of the v flag, which can be set programmatically.
-// It's a distinct type because we want to discriminate it from logType.
-// Variables of type level are only changed under logging.mu.
-// The -v flag is read only with atomic ops, so the state of the logging
-// module is consistent.
-
-// Level is treated as a sync/atomic int32.
-
-// Level specifies a level of verbosity for V logs. *Level implements
-// flag.Value; the -v flag is of type Level and should be modified
-// only through the flag.Value interface.
-type Level int32
-
-// get returns the value of the Level.
-func (l *Level) get() Level {
-	return Level(atomic.LoadInt32((*int32)(l)))
-}
-
-// set sets the value of the Level.
-func (l *Level) set(val Level) {
-	atomic.StoreInt32((*int32)(l), int32(val))
-}
-
-// String is part of the flag.Value interface.
-func (l *Level) String() string {
-	return strconv.FormatInt(int64(*l), 10)
-}
-
-// Get is part of the flag.Value interface.
-func (l *Level) Get() interface{} {
-	return *l
-}
-
-// Set is part of the flag.Value interface.
-func (l *Level) Set(value string) error {
-	v, err := strconv.Atoi(value)
-	if err != nil {
-		return err
-	}
-	logging.mu.Lock()
-	defer logging.mu.Unlock()
-	logging.setVState(Level(v), logging.vmodule.filter, false)
-	return nil
-}
-
-// moduleSpec represents the setting of the -vmodule flag.
-type moduleSpec struct {
-	filter []modulePat
-}
-
-// modulePat contains a filter for the -vmodule flag.
-// It holds a verbosity level and a file pattern to match.
-type modulePat struct {
-	pattern string
-	literal bool // The pattern is a literal string
-	level   Level
-}
-
-// match reports whether the file matches the pattern. It uses a string
-// comparison if the pattern contains no metacharacters.
-func (m *modulePat) match(file string) bool {
-	if m.literal {
-		return file == m.pattern
-	}
-	match, _ := filepath.Match(m.pattern, file)
-	return match
-}
-
-func (m *moduleSpec) String() string {
-	// Lock because the type is not atomic. TODO: clean this up.
-	logging.mu.Lock()
-	defer logging.mu.Unlock()
-	var b bytes.Buffer
-	for i, f := range m.filter {
-		if i > 0 {
-			b.WriteRune(',')
-		}
-		fmt.Fprintf(&b, "%s=%d", f.pattern, f.level)
-	}
-	return b.String()
-}
-
-// Get is part of the (Go 1.2)  flag.Getter interface. It always returns nil for this flag type since the
-// struct is not exported.
-func (m *moduleSpec) Get() interface{} {
-	return nil
-}
-
-var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N")
-
-// Syntax: -vmodule=recordio=2,file=1,gfs*=3
-func (m *moduleSpec) Set(value string) error {
-	var filter []modulePat
-	for _, pat := range strings.Split(value, ",") {
-		if len(pat) == 0 {
-			// Empty strings such as from a trailing comma can be ignored.
-			continue
-		}
-		patLev := strings.Split(pat, "=")
-		if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 {
-			return errVmoduleSyntax
-		}
-		pattern := patLev[0]
-		v, err := strconv.Atoi(patLev[1])
-		if err != nil {
-			return errors.New("syntax error: expect comma-separated list of filename=N")
-		}
-		if v < 0 {
-			return errors.New("negative value for vmodule level")
-		}
-		if v == 0 {
-			continue // Ignore. It's harmless but no point in paying the overhead.
-		}
-		// TODO: check syntax of filter?
-		filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)})
-	}
-	logging.mu.Lock()
-	defer logging.mu.Unlock()
-	logging.setVState(logging.verbosity, filter, true)
-	return nil
-}
-
-// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters
-// that require filepath.Match to be called to match the pattern.
-func isLiteral(pattern string) bool {
-	return !strings.ContainsAny(pattern, `\*?[]`)
-}
-
-// traceLocation represents the setting of the -log_backtrace_at flag.
-type traceLocation struct {
-	file string
-	line int
-}
-
-// isSet reports whether the trace location has been specified.
-// logging.mu is held.
-func (t *traceLocation) isSet() bool {
-	return t.line > 0
-}
-
-// match reports whether the specified file and line matches the trace location.
-// The argument file name is the full path, not the basename specified in the flag.
-// logging.mu is held.
-func (t *traceLocation) match(file string, line int) bool {
-	if t.line != line {
-		return false
-	}
-	if i := strings.LastIndex(file, "/"); i >= 0 {
-		file = file[i+1:]
-	}
-	return t.file == file
-}
-
-func (t *traceLocation) String() string {
-	// Lock because the type is not atomic. TODO: clean this up.
-	logging.mu.Lock()
-	defer logging.mu.Unlock()
-	return fmt.Sprintf("%s:%d", t.file, t.line)
-}
-
-// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the
-// struct is not exported
-func (t *traceLocation) Get() interface{} {
-	return nil
-}
-
-var errTraceSyntax = errors.New("syntax error: expect file.go:234")
-
-// Syntax: -log_backtrace_at=gopherflakes.go:234
-// Note that unlike vmodule the file extension is included here.
-func (t *traceLocation) Set(value string) error {
-	if value == "" {
-		// Unset.
-		t.line = 0
-		t.file = ""
-	}
-	fields := strings.Split(value, ":")
-	if len(fields) != 2 {
-		return errTraceSyntax
-	}
-	file, line := fields[0], fields[1]
-	if !strings.Contains(file, ".") {
-		return errTraceSyntax
-	}
-	v, err := strconv.Atoi(line)
-	if err != nil {
-		return errTraceSyntax
-	}
-	if v <= 0 {
-		return errors.New("negative or zero value for level")
-	}
-	logging.mu.Lock()
-	defer logging.mu.Unlock()
-	t.line = v
-	t.file = file
-	return nil
-}
-
-// flushSyncWriter is the interface satisfied by logging destinations.
-type flushSyncWriter interface {
-	Flush() error
-	Sync() error
-	io.Writer
-}
-
-func init() {
-	flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
-	flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
-	flag.Var(&logging.verbosity, "v", "log level for V logs")
-	flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
-	flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
-	flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
-
-	// Default stderrThreshold is ERROR.
-	logging.stderrThreshold = errorLog
-
-	logging.setVState(0, nil, false)
-	go logging.flushDaemon()
-}
-
-// Flush flushes all pending log I/O.
-func Flush() {
-	logging.lockAndFlushAll()
-}
-
-// loggingT collects all the global state of the logging setup.
-type loggingT struct {
-	// Boolean flags. Not handled atomically because the flag.Value interface
-	// does not let us avoid the =true, and that shorthand is necessary for
-	// compatibility. TODO: does this matter enough to fix? Seems unlikely.
-	toStderr     bool // The -logtostderr flag.
-	alsoToStderr bool // The -alsologtostderr flag.
-
-	// Level flag. Handled atomically.
-	stderrThreshold severity // The -stderrthreshold flag.
-
-	// freeList is a list of byte buffers, maintained under freeListMu.
-	freeList *buffer
-	// freeListMu maintains the free list. It is separate from the main mutex
-	// so buffers can be grabbed and printed to without holding the main lock,
-	// for better parallelization.
-	freeListMu sync.Mutex
-
-	// mu protects the remaining elements of this structure and is
-	// used to synchronize logging.
-	mu sync.Mutex
-	// file holds writer for each of the log types.
-	file [numSeverity]flushSyncWriter
-	// pcs is used in V to avoid an allocation when computing the caller's PC.
-	pcs [1]uintptr
-	// vmap is a cache of the V Level for each V() call site, identified by PC.
-	// It is wiped whenever the vmodule flag changes state.
-	vmap map[uintptr]Level
-	// filterLength stores the length of the vmodule filter chain. If greater
-	// than zero, it means vmodule is enabled. It may be read safely
-	// using sync.LoadInt32, but is only modified under mu.
-	filterLength int32
-	// traceLocation is the state of the -log_backtrace_at flag.
-	traceLocation traceLocation
-	// These flags are modified only under lock, although verbosity may be fetched
-	// safely using atomic.LoadInt32.
-	vmodule   moduleSpec // The state of the -vmodule flag.
-	verbosity Level      // V logging level, the value of the -v flag/
-}
-
-// buffer holds a byte Buffer for reuse. The zero value is ready for use.
-type buffer struct {
-	bytes.Buffer
-	tmp  [64]byte // temporary byte array for creating headers.
-	next *buffer
-}
-
-var logging loggingT
-
-// setVState sets a consistent state for V logging.
-// l.mu is held.
-func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) {
-	// Turn verbosity off so V will not fire while we are in transition.
-	logging.verbosity.set(0)
-	// Ditto for filter length.
-	atomic.StoreInt32(&logging.filterLength, 0)
-
-	// Set the new filters and wipe the pc->Level map if the filter has changed.
-	if setFilter {
-		logging.vmodule.filter = filter
-		logging.vmap = make(map[uintptr]Level)
-	}
-
-	// Things are consistent now, so enable filtering and verbosity.
-	// They are enabled in order opposite to that in V.
-	atomic.StoreInt32(&logging.filterLength, int32(len(filter)))
-	logging.verbosity.set(verbosity)
-}
-
-// getBuffer returns a new, ready-to-use buffer.
-func (l *loggingT) getBuffer() *buffer {
-	l.freeListMu.Lock()
-	b := l.freeList
-	if b != nil {
-		l.freeList = b.next
-	}
-	l.freeListMu.Unlock()
-	if b == nil {
-		b = new(buffer)
-	} else {
-		b.next = nil
-		b.Reset()
-	}
-	return b
-}
-
-// putBuffer returns a buffer to the free list.
-func (l *loggingT) putBuffer(b *buffer) {
-	if b.Len() >= 256 {
-		// Let big buffers die a natural death.
-		return
-	}
-	l.freeListMu.Lock()
-	b.next = l.freeList
-	l.freeList = b
-	l.freeListMu.Unlock()
-}
-
-var timeNow = time.Now // Stubbed out for testing.
-
-/*
-header formats a log header as defined by the C++ implementation.
-It returns a buffer containing the formatted header and the user's file and line number.
-The depth specifies how many stack frames above lives the source line to be identified in the log message.
-
-Log lines have this form:
-	Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg...
-where the fields are defined as follows:
-	L                A single character, representing the log level (eg 'I' for INFO)
-	mm               The month (zero padded; ie May is '05')
-	dd               The day (zero padded)
-	hh:mm:ss.uuuuuu  Time in hours, minutes and fractional seconds
-	threadid         The space-padded thread ID as returned by GetTID()
-	file             The file name
-	line             The line number
-	msg              The user-supplied message
-*/
-func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
-	_, file, line, ok := runtime.Caller(3 + depth)
-	if !ok {
-		file = "???"
-		line = 1
-	} else {
-		slash := strings.LastIndex(file, "/")
-		if slash >= 0 {
-			file = file[slash+1:]
-		}
-	}
-	return l.formatHeader(s, file, line), file, line
-}
-
-// formatHeader formats a log header using the provided file name and line number.
-func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
-	now := timeNow()
-	if line < 0 {
-		line = 0 // not a real line number, but acceptable to someDigits
-	}
-	if s > fatalLog {
-		s = infoLog // for safety.
-	}
-	buf := l.getBuffer()
-
-	// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
-	// It's worth about 3X. Fprintf is hard.
-	_, month, day := now.Date()
-	hour, minute, second := now.Clock()
-	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
-	buf.tmp[0] = severityChar[s]
-	buf.twoDigits(1, int(month))
-	buf.twoDigits(3, day)
-	buf.tmp[5] = ' '
-	buf.twoDigits(6, hour)
-	buf.tmp[8] = ':'
-	buf.twoDigits(9, minute)
-	buf.tmp[11] = ':'
-	buf.twoDigits(12, second)
-	buf.tmp[14] = '.'
-	buf.nDigits(6, 15, now.Nanosecond()/1000, '0')
-	buf.tmp[21] = ' '
-	buf.nDigits(7, 22, pid, ' ') // TODO: should be TID
-	buf.tmp[29] = ' '
-	buf.Write(buf.tmp[:30])
-	buf.WriteString(file)
-	buf.tmp[0] = ':'
-	n := buf.someDigits(1, line)
-	buf.tmp[n+1] = ']'
-	buf.tmp[n+2] = ' '
-	buf.Write(buf.tmp[:n+3])
-	return buf
-}
-
-// Some custom tiny helper functions to print the log header efficiently.
-
-const digits = "0123456789"
-
-// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i].
-func (buf *buffer) twoDigits(i, d int) {
-	buf.tmp[i+1] = digits[d%10]
-	d /= 10
-	buf.tmp[i] = digits[d%10]
-}
-
-// nDigits formats an n-digit integer at buf.tmp[i],
-// padding with pad on the left.
-// It assumes d >= 0.
-func (buf *buffer) nDigits(n, i, d int, pad byte) {
-	j := n - 1
-	for ; j >= 0 && d > 0; j-- {
-		buf.tmp[i+j] = digits[d%10]
-		d /= 10
-	}
-	for ; j >= 0; j-- {
-		buf.tmp[i+j] = pad
-	}
-}
-
-// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i].
-func (buf *buffer) someDigits(i, d int) int {
-	// Print into the top, then copy down. We know there's space for at least
-	// a 10-digit number.
-	j := len(buf.tmp)
-	for {
-		j--
-		buf.tmp[j] = digits[d%10]
-		d /= 10
-		if d == 0 {
-			break
-		}
-	}
-	return copy(buf.tmp[i:], buf.tmp[j:])
-}
-
-func (l *loggingT) println(s severity, args ...interface{}) {
-	buf, file, line := l.header(s, 0)
-	fmt.Fprintln(buf, args...)
-	l.output(s, buf, file, line, false)
-}
-
-func (l *loggingT) print(s severity, args ...interface{}) {
-	l.printDepth(s, 1, args...)
-}
-
-func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) {
-	buf, file, line := l.header(s, depth)
-	fmt.Fprint(buf, args...)
-	if buf.Bytes()[buf.Len()-1] != '\n' {
-		buf.WriteByte('\n')
-	}
-	l.output(s, buf, file, line, false)
-}
-
-func (l *loggingT) printf(s severity, format string, args ...interface{}) {
-	buf, file, line := l.header(s, 0)
-	fmt.Fprintf(buf, format, args...)
-	if buf.Bytes()[buf.Len()-1] != '\n' {
-		buf.WriteByte('\n')
-	}
-	l.output(s, buf, file, line, false)
-}
-
-// printWithFileLine behaves like print but uses the provided file and line number.  If
-// alsoToStderr is true, the log message always appears on standard error; it
-// will also appear in the log file unless -logtostderr is set.
-func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) {
-	buf := l.formatHeader(s, file, line)
-	fmt.Fprint(buf, args...)
-	if buf.Bytes()[buf.Len()-1] != '\n' {
-		buf.WriteByte('\n')
-	}
-	l.output(s, buf, file, line, alsoToStderr)
-}
-
-// output writes the data to the log files and releases the buffer.
-func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
-	l.mu.Lock()
-	if l.traceLocation.isSet() {
-		if l.traceLocation.match(file, line) {
-			buf.Write(stacks(false))
-		}
-	}
-	data := buf.Bytes()
-	if !flag.Parsed() {
-		os.Stderr.Write([]byte("ERROR: logging before flag.Parse: "))
-		os.Stderr.Write(data)
-	} else if l.toStderr {
-		os.Stderr.Write(data)
-	} else {
-		if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() {
-			os.Stderr.Write(data)
-		}
-		if l.file[s] == nil {
-			if err := l.createFiles(s); err != nil {
-				os.Stderr.Write(data) // Make sure the message appears somewhere.
-				l.exit(err)
-			}
-		}
-		switch s {
-		case fatalLog:
-			l.file[fatalLog].Write(data)
-			fallthrough
-		case errorLog:
-			l.file[errorLog].Write(data)
-			fallthrough
-		case warningLog:
-			l.file[warningLog].Write(data)
-			fallthrough
-		case infoLog:
-			l.file[infoLog].Write(data)
-		}
-	}
-	if s == fatalLog {
-		// If we got here via Exit rather than Fatal, print no stacks.
-		if atomic.LoadUint32(&fatalNoStacks) > 0 {
-			l.mu.Unlock()
-			timeoutFlush(10 * time.Second)
-			os.Exit(1)
-		}
-		// Dump all goroutine stacks before exiting.
-		// First, make sure we see the trace for the current goroutine on standard error.
-		// If -logtostderr has been specified, the loop below will do that anyway
-		// as the first stack in the full dump.
-		if !l.toStderr {
-			os.Stderr.Write(stacks(false))
-		}
-		// Write the stack trace for all goroutines to the files.
-		trace := stacks(true)
-		logExitFunc = func(error) {} // If we get a write error, we'll still exit below.
-		for log := fatalLog; log >= infoLog; log-- {
-			if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set.
-				f.Write(trace)
-			}
-		}
-		l.mu.Unlock()
-		timeoutFlush(10 * time.Second)
-		os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway.
-	}
-	l.putBuffer(buf)
-	l.mu.Unlock()
-	if stats := severityStats[s]; stats != nil {
-		atomic.AddInt64(&stats.lines, 1)
-		atomic.AddInt64(&stats.bytes, int64(len(data)))
-	}
-}
-
-// timeoutFlush calls Flush and returns when it completes or after timeout
-// elapses, whichever happens first.  This is needed because the hooks invoked
-// by Flush may deadlock when glog.Fatal is called from a hook that holds
-// a lock.
-func timeoutFlush(timeout time.Duration) {
-	done := make(chan bool, 1)
-	go func() {
-		Flush() // calls logging.lockAndFlushAll()
-		done <- true
-	}()
-	select {
-	case <-done:
-	case <-time.After(timeout):
-		fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout)
-	}
-}
-
-// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines.
-func stacks(all bool) []byte {
-	// We don't know how big the traces are, so grow a few times if they don't fit. Start large, though.
-	n := 10000
-	if all {
-		n = 100000
-	}
-	var trace []byte
-	for i := 0; i < 5; i++ {
-		trace = make([]byte, n)
-		nbytes := runtime.Stack(trace, all)
-		if nbytes < len(trace) {
-			return trace[:nbytes]
-		}
-		n *= 2
-	}
-	return trace
-}
-
-// logExitFunc provides a simple mechanism to override the default behavior
-// of exiting on error. Used in testing and to guarantee we reach a required exit
-// for fatal logs. Instead, exit could be a function rather than a method but that
-// would make its use clumsier.
-var logExitFunc func(error)
-
-// exit is called if there is trouble creating or writing log files.
-// It flushes the logs and exits the program; there's no point in hanging around.
-// l.mu is held.
-func (l *loggingT) exit(err error) {
-	fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err)
-	// If logExitFunc is set, we do that instead of exiting.
-	if logExitFunc != nil {
-		logExitFunc(err)
-		return
-	}
-	l.flushAll()
-	os.Exit(2)
-}
-
-// syncBuffer joins a bufio.Writer to its underlying file, providing access to the
-// file's Sync method and providing a wrapper for the Write method that provides log
-// file rotation. There are conflicting methods, so the file cannot be embedded.
-// l.mu is held for all its methods.
-type syncBuffer struct {
-	logger *loggingT
-	*bufio.Writer
-	file   *os.File
-	sev    severity
-	nbytes uint64 // The number of bytes written to this file
-}
-
-func (sb *syncBuffer) Sync() error {
-	return sb.file.Sync()
-}
-
-func (sb *syncBuffer) Write(p []byte) (n int, err error) {
-	if sb.nbytes+uint64(len(p)) >= MaxSize {
-		if err := sb.rotateFile(time.Now()); err != nil {
-			sb.logger.exit(err)
-		}
-	}
-	n, err = sb.Writer.Write(p)
-	sb.nbytes += uint64(n)
-	if err != nil {
-		sb.logger.exit(err)
-	}
-	return
-}
-
-// rotateFile closes the syncBuffer's file and starts a new one.
-func (sb *syncBuffer) rotateFile(now time.Time) error {
-	if sb.file != nil {
-		sb.Flush()
-		sb.file.Close()
-	}
-	var err error
-	sb.file, _, err = create(severityName[sb.sev], now)
-	sb.nbytes = 0
-	if err != nil {
-		return err
-	}
-
-	sb.Writer = bufio.NewWriterSize(sb.file, bufferSize)
-
-	// Write header.
-	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05"))
-	fmt.Fprintf(&buf, "Running on machine: %s\n", host)
-	fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH)
-	fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n")
-	n, err := sb.file.Write(buf.Bytes())
-	sb.nbytes += uint64(n)
-	return err
-}
-
-// bufferSize sizes the buffer associated with each log file. It's large
-// so that log records can accumulate without the logging thread blocking
-// on disk I/O. The flushDaemon will block instead.
-const bufferSize = 256 * 1024
-
-// createFiles creates all the log files for severity from sev down to infoLog.
-// l.mu is held.
-func (l *loggingT) createFiles(sev severity) error {
-	now := time.Now()
-	// Files are created in decreasing severity order, so as soon as we find one
-	// has already been created, we can stop.
-	for s := sev; s >= infoLog && l.file[s] == nil; s-- {
-		sb := &syncBuffer{
-			logger: l,
-			sev:    s,
-		}
-		if err := sb.rotateFile(now); err != nil {
-			return err
-		}
-		l.file[s] = sb
-	}
-	return nil
-}
-
-const flushInterval = 30 * time.Second
-
-// flushDaemon periodically flushes the log file buffers.
-func (l *loggingT) flushDaemon() {
-	for _ = range time.NewTicker(flushInterval).C {
-		l.lockAndFlushAll()
-	}
-}
-
-// lockAndFlushAll is like flushAll but locks l.mu first.
-func (l *loggingT) lockAndFlushAll() {
-	l.mu.Lock()
-	l.flushAll()
-	l.mu.Unlock()
-}
-
-// flushAll flushes all the logs and attempts to "sync" their data to disk.
-// l.mu is held.
-func (l *loggingT) flushAll() {
-	// Flush from fatal down, in case there's trouble flushing.
-	for s := fatalLog; s >= infoLog; s-- {
-		file := l.file[s]
-		if file != nil {
-			file.Flush() // ignore error
-			file.Sync()  // ignore error
-		}
-	}
-}
-
-// CopyStandardLogTo arranges for messages written to the Go "log" package's
-// default logs to also appear in the Google logs for the named and lower
-// severities.  Subsequent changes to the standard log's default output location
-// or format may break this behavior.
-//
-// Valid names are "INFO", "WARNING", "ERROR", and "FATAL".  If the name is not
-// recognized, CopyStandardLogTo panics.
-func CopyStandardLogTo(name string) {
-	sev, ok := severityByName(name)
-	if !ok {
-		panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name))
-	}
-	// Set a log format that captures the user's file and line:
-	//   d.go:23: message
-	stdLog.SetFlags(stdLog.Lshortfile)
-	stdLog.SetOutput(logBridge(sev))
-}
-
-// logBridge provides the Write method that enables CopyStandardLogTo to connect
-// Go's standard logs to the logs provided by this package.
-type logBridge severity
-
-// Write parses the standard logging line and passes its components to the
-// logger for severity(lb).
-func (lb logBridge) Write(b []byte) (n int, err error) {
-	var (
-		file = "???"
-		line = 1
-		text string
-	)
-	// Split "d.go:23: message" into "d.go", "23", and "message".
-	if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 {
-		text = fmt.Sprintf("bad log format: %s", b)
-	} else {
-		file = string(parts[0])
-		text = string(parts[2][1:]) // skip leading space
-		line, err = strconv.Atoi(string(parts[1]))
-		if err != nil {
-			text = fmt.Sprintf("bad line number: %s", b)
-			line = 1
-		}
-	}
-	// printWithFileLine with alsoToStderr=true, so standard log messages
-	// always appear on standard error.
-	logging.printWithFileLine(severity(lb), file, line, true, text)
-	return len(b), nil
-}
-
-// setV computes and remembers the V level for a given PC
-// when vmodule is enabled.
-// File pattern matching takes the basename of the file, stripped
-// of its .go suffix, and uses filepath.Match, which is a little more
-// general than the *? matching used in C++.
-// l.mu is held.
-func (l *loggingT) setV(pc uintptr) Level {
-	fn := runtime.FuncForPC(pc)
-	file, _ := fn.FileLine(pc)
-	// The file is something like /a/b/c/d.go. We want just the d.
-	if strings.HasSuffix(file, ".go") {
-		file = file[:len(file)-3]
-	}
-	if slash := strings.LastIndex(file, "/"); slash >= 0 {
-		file = file[slash+1:]
-	}
-	for _, filter := range l.vmodule.filter {
-		if filter.match(file) {
-			l.vmap[pc] = filter.level
-			return filter.level
-		}
-	}
-	l.vmap[pc] = 0
-	return 0
-}
-
-// Verbose is a boolean type that implements Infof (like Printf) etc.
-// See the documentation of V for more information.
-type Verbose bool
-
-// V reports whether verbosity at the call site is at least the requested level.
-// The returned value is a boolean of type Verbose, which implements Info, Infoln
-// and Infof. These methods will write to the Info log if called.
-// Thus, one may write either
-//	if glog.V(2) { glog.Info("log this") }
-// or
-//	glog.V(2).Info("log this")
-// The second form is shorter but the first is cheaper if logging is off because it does
-// not evaluate its arguments.
-//
-// Whether an individual call to V generates a log record depends on the setting of
-// the -v and --vmodule flags; both are off by default. If the level in the call to
-// V is at least the value of -v, or of -vmodule for the source file containing the
-// call, the V call will log.
-func V(level Level) Verbose {
-	// This function tries hard to be cheap unless there's work to do.
-	// The fast path is two atomic loads and compares.
-
-	// Here is a cheap but safe test to see if V logging is enabled globally.
-	if logging.verbosity.get() >= level {
-		return Verbose(true)
-	}
-
-	// It's off globally but vmodule may still be set.
-	// Here is another cheap but safe test to see if vmodule is enabled.
-	if atomic.LoadInt32(&logging.filterLength) > 0 {
-		// Now we need a proper lock to use the logging structure. The pcs field
-		// is shared so we must lock before accessing it. This is fairly expensive,
-		// but if V logging is enabled we're slow anyway.
-		logging.mu.Lock()
-		defer logging.mu.Unlock()
-		if runtime.Callers(2, logging.pcs[:]) == 0 {
-			return Verbose(false)
-		}
-		v, ok := logging.vmap[logging.pcs[0]]
-		if !ok {
-			v = logging.setV(logging.pcs[0])
-		}
-		return Verbose(v >= level)
-	}
-	return Verbose(false)
-}
-
-// Info is equivalent to the global Info function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Info(args ...interface{}) {
-	if v {
-		logging.print(infoLog, args...)
-	}
-}
-
-// Infoln is equivalent to the global Infoln function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Infoln(args ...interface{}) {
-	if v {
-		logging.println(infoLog, args...)
-	}
-}
-
-// Infof is equivalent to the global Infof function, guarded by the value of v.
-// See the documentation of V for usage.
-func (v Verbose) Infof(format string, args ...interface{}) {
-	if v {
-		logging.printf(infoLog, format, args...)
-	}
-}
-
-// Info logs to the INFO log.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Info(args ...interface{}) {
-	logging.print(infoLog, args...)
-}
-
-// InfoDepth acts as Info but uses depth to determine which call frame to log.
-// InfoDepth(0, "msg") is the same as Info("msg").
-func InfoDepth(depth int, args ...interface{}) {
-	logging.printDepth(infoLog, depth, args...)
-}
-
-// Infoln logs to the INFO log.
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
-func Infoln(args ...interface{}) {
-	logging.println(infoLog, args...)
-}
-
-// Infof logs to the INFO log.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Infof(format string, args ...interface{}) {
-	logging.printf(infoLog, format, args...)
-}
-
-// Warning logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Warning(args ...interface{}) {
-	logging.print(warningLog, args...)
-}
-
-// WarningDepth acts as Warning but uses depth to determine which call frame to log.
-// WarningDepth(0, "msg") is the same as Warning("msg").
-func WarningDepth(depth int, args ...interface{}) {
-	logging.printDepth(warningLog, depth, args...)
-}
-
-// Warningln logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
-func Warningln(args ...interface{}) {
-	logging.println(warningLog, args...)
-}
-
-// Warningf logs to the WARNING and INFO logs.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Warningf(format string, args ...interface{}) {
-	logging.printf(warningLog, format, args...)
-}
-
-// Error logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Error(args ...interface{}) {
-	logging.print(errorLog, args...)
-}
-
-// ErrorDepth acts as Error but uses depth to determine which call frame to log.
-// ErrorDepth(0, "msg") is the same as Error("msg").
-func ErrorDepth(depth int, args ...interface{}) {
-	logging.printDepth(errorLog, depth, args...)
-}
-
-// Errorln logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
-func Errorln(args ...interface{}) {
-	logging.println(errorLog, args...)
-}
-
-// Errorf logs to the ERROR, WARNING, and INFO logs.
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Errorf(format string, args ...interface{}) {
-	logging.printf(errorLog, format, args...)
-}
-
-// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls os.Exit(255).
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Fatal(args ...interface{}) {
-	logging.print(fatalLog, args...)
-}
-
-// FatalDepth acts as Fatal but uses depth to determine which call frame to log.
-// FatalDepth(0, "msg") is the same as Fatal("msg").
-func FatalDepth(depth int, args ...interface{}) {
-	logging.printDepth(fatalLog, depth, args...)
-}
-
-// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls os.Exit(255).
-// Arguments are handled in the manner of fmt.Println; a newline is appended if missing.
-func Fatalln(args ...interface{}) {
-	logging.println(fatalLog, args...)
-}
-
-// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs,
-// including a stack trace of all running goroutines, then calls os.Exit(255).
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Fatalf(format string, args ...interface{}) {
-	logging.printf(fatalLog, format, args...)
-}
-
-// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks.
-// It allows Exit and relatives to use the Fatal logs.
-var fatalNoStacks uint32
-
-// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
-// Arguments are handled in the manner of fmt.Print; a newline is appended if missing.
-func Exit(args ...interface{}) {
-	atomic.StoreUint32(&fatalNoStacks, 1)
-	logging.print(fatalLog, args...)
-}
-
-// ExitDepth acts as Exit but uses depth to determine which call frame to log.
-// ExitDepth(0, "msg") is the same as Exit("msg").
-func ExitDepth(depth int, args ...interface{}) {
-	atomic.StoreUint32(&fatalNoStacks, 1)
-	logging.printDepth(fatalLog, depth, args...)
-}
-
-// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
-func Exitln(args ...interface{}) {
-	atomic.StoreUint32(&fatalNoStacks, 1)
-	logging.println(fatalLog, args...)
-}
-
-// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1).
-// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing.
-func Exitf(format string, args ...interface{}) {
-	atomic.StoreUint32(&fatalNoStacks, 1)
-	logging.printf(fatalLog, format, args...)
-}
diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go
deleted file mode 100644
index 65075d2..0000000
--- a/vendor/github.com/golang/glog/glog_file.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
-//
-// Copyright 2013 Google Inc. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// File I/O for logs.
-
-package glog
-
-import (
-	"errors"
-	"flag"
-	"fmt"
-	"os"
-	"os/user"
-	"path/filepath"
-	"strings"
-	"sync"
-	"time"
-)
-
-// MaxSize is the maximum size of a log file in bytes.
-var MaxSize uint64 = 1024 * 1024 * 1800
-
-// logDirs lists the candidate directories for new log files.
-var logDirs []string
-
-// If non-empty, overrides the choice of directory in which to write logs.
-// See createLogDirs for the full list of possible destinations.
-var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
-
-func createLogDirs() {
-	if *logDir != "" {
-		logDirs = append(logDirs, *logDir)
-	}
-	logDirs = append(logDirs, os.TempDir())
-}
-
-var (
-	pid      = os.Getpid()
-	program  = filepath.Base(os.Args[0])
-	host     = "unknownhost"
-	userName = "unknownuser"
-)
-
-func init() {
-	h, err := os.Hostname()
-	if err == nil {
-		host = shortHostname(h)
-	}
-
-	current, err := user.Current()
-	if err == nil {
-		userName = current.Username
-	}
-
-	// Sanitize userName since it may contain filepath separators on Windows.
-	userName = strings.Replace(userName, `\`, "_", -1)
-}
-
-// shortHostname returns its argument, truncating at the first period.
-// For instance, given "www.google.com" it returns "www".
-func shortHostname(hostname string) string {
-	if i := strings.Index(hostname, "."); i >= 0 {
-		return hostname[:i]
-	}
-	return hostname
-}
-
-// logName returns a new log file name containing tag, with start time t, and
-// the name for the symlink for tag.
-func logName(tag string, t time.Time) (name, link string) {
-	name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
-		program,
-		host,
-		userName,
-		tag,
-		t.Year(),
-		t.Month(),
-		t.Day(),
-		t.Hour(),
-		t.Minute(),
-		t.Second(),
-		pid)
-	return name, program + "." + tag
-}
-
-var onceLogDirs sync.Once
-
-// create creates a new log file and returns the file and its filename, which
-// contains tag ("INFO", "FATAL", etc.) and t.  If the file is created
-// successfully, create also attempts to update the symlink for that tag, ignoring
-// errors.
-func create(tag string, t time.Time) (f *os.File, filename string, err error) {
-	onceLogDirs.Do(createLogDirs)
-	if len(logDirs) == 0 {
-		return nil, "", errors.New("log: no log dirs")
-	}
-	name, link := logName(tag, t)
-	var lastErr error
-	for _, dir := range logDirs {
-		fname := filepath.Join(dir, name)
-		f, err := os.Create(fname)
-		if err == nil {
-			symlink := filepath.Join(dir, link)
-			os.Remove(symlink)        // ignore err
-			os.Symlink(name, symlink) // ignore err
-			return f, fname, nil
-		}
-		lastErr = err
-	}
-	return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
-}
diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh
old mode 100755
new mode 100644
diff --git a/vendor/github.com/imdario/mergo/testdata/license.yml b/vendor/github.com/imdario/mergo/testdata/license.yml
deleted file mode 100644
index 2f1ad00..0000000
--- a/vendor/github.com/imdario/mergo/testdata/license.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-import: ../../../../fossene/db/schema/thing.yml
-fields:
-    site: string
-    author: root
diff --git a/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh b/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh
old mode 100755
new mode 100644
diff --git a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh
old mode 100755
new mode 100644
diff --git a/vendor/github.com/json-iterator/go/test.sh b/vendor/github.com/json-iterator/go/test.sh
old mode 100755
new mode 100644
diff --git a/vendor/github.com/modern-go/concurrent/test.sh b/vendor/github.com/modern-go/concurrent/test.sh
old mode 100755
new mode 100644
diff --git a/vendor/github.com/modern-go/reflect2/test.sh b/vendor/github.com/modern-go/reflect2/test.sh
old mode 100755
new mode 100644
diff --git a/vendor/github.com/modern-go/reflect2/type_map.go b/vendor/github.com/modern-go/reflect2/type_map.go
index 6d48911..3acfb55 100644
--- a/vendor/github.com/modern-go/reflect2/type_map.go
+++ b/vendor/github.com/modern-go/reflect2/type_map.go
@@ -4,6 +4,7 @@
 	"reflect"
 	"runtime"
 	"strings"
+	"sync"
 	"unsafe"
 )
 
@@ -15,10 +16,17 @@
 //go:linkname typelinks2 reflect.typelinks
 func typelinks2() (sections []unsafe.Pointer, offset [][]int32)
 
-var types = map[string]reflect.Type{}
-var packages = map[string]map[string]reflect.Type{}
+// initOnce guards initialization of types and packages
+var initOnce sync.Once
 
-func init() {
+var types map[string]reflect.Type
+var packages map[string]map[string]reflect.Type
+
+// discoverTypes initializes types and packages
+func discoverTypes() {
+	types = make(map[string]reflect.Type)
+	packages = make(map[string]map[string]reflect.Type)
+
 	ver := runtime.Version()
 	if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
 		loadGo15Types()
@@ -90,11 +98,13 @@
 
 // TypeByName return the type by its name, just like Class.forName in java
 func TypeByName(typeName string) Type {
+	initOnce.Do(discoverTypes)
 	return Type2(types[typeName])
 }
 
 // TypeByPackageName return the type by its package and name
 func TypeByPackageName(pkgPath string, name string) Type {
+	initOnce.Do(discoverTypes)
 	pkgTypes := packages[pkgPath]
 	if pkgTypes == nil {
 		return nil