SEBA-365 - implemented dep

Change-Id: Ia6226d50e7615935a0c8876809a687427ff88c22
diff --git a/vendor/github.com/go-stack/stack/.travis.yml b/vendor/github.com/go-stack/stack/.travis.yml
new file mode 100644
index 0000000..5c5a2b5
--- /dev/null
+++ b/vendor/github.com/go-stack/stack/.travis.yml
@@ -0,0 +1,15 @@
+language: go
+sudo: false
+go:
+  - 1.7.x
+  - 1.8.x
+  - 1.9.x
+  - 1.10.x
+  - 1.11.x
+  - tip
+
+before_install:
+  - go get github.com/mattn/goveralls
+
+script:
+  - goveralls -service=travis-ci
diff --git a/vendor/github.com/go-stack/stack/LICENSE.md b/vendor/github.com/go-stack/stack/LICENSE.md
new file mode 100644
index 0000000..2abf98e
--- /dev/null
+++ b/vendor/github.com/go-stack/stack/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Chris Hines
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-stack/stack/README.md b/vendor/github.com/go-stack/stack/README.md
new file mode 100644
index 0000000..f11cccc
--- /dev/null
+++ b/vendor/github.com/go-stack/stack/README.md
@@ -0,0 +1,38 @@
+[![GoDoc](https://godoc.org/github.com/go-stack/stack?status.svg)](https://godoc.org/github.com/go-stack/stack)
+[![Go Report Card](https://goreportcard.com/badge/go-stack/stack)](https://goreportcard.com/report/go-stack/stack)
+[![TravisCI](https://travis-ci.org/go-stack/stack.svg?branch=master)](https://travis-ci.org/go-stack/stack)
+[![Coverage Status](https://coveralls.io/repos/github/go-stack/stack/badge.svg?branch=master)](https://coveralls.io/github/go-stack/stack?branch=master)
+
+# stack
+
+Package stack implements utilities to capture, manipulate, and format call
+stacks. It provides a simpler API than package runtime.
+
+The implementation takes care of the minutia and special cases of interpreting
+the program counter (pc) values returned by runtime.Callers.
+
+## Versioning
+
+Package stack publishes releases via [semver](http://semver.org/) compatible Git
+tags prefixed with a single 'v'. The master branch always contains the latest
+release. The develop branch contains unreleased commits.
+
+## Formatting
+
+Package stack's types implement fmt.Formatter, which provides a simple and
+flexible way to declaratively configure formatting when used with logging or
+error tracking packages.
+
+```go
+func DoTheThing() {
+    c := stack.Caller(0)
+    log.Print(c)          // "source.go:10"
+    log.Printf("%+v", c)  // "pkg/path/source.go:10"
+    log.Printf("%n", c)   // "DoTheThing"
+
+    s := stack.Trace().TrimRuntime()
+    log.Print(s)          // "[source.go:15 caller.go:42 main.go:14]"
+}
+```
+
+See the docs for all of the supported formatting options.
diff --git a/vendor/github.com/go-stack/stack/go.mod b/vendor/github.com/go-stack/stack/go.mod
new file mode 100644
index 0000000..96a53a1
--- /dev/null
+++ b/vendor/github.com/go-stack/stack/go.mod
@@ -0,0 +1 @@
+module github.com/go-stack/stack
diff --git a/vendor/github.com/go-stack/stack/stack.go b/vendor/github.com/go-stack/stack/stack.go
new file mode 100644
index 0000000..ac3b93b
--- /dev/null
+++ b/vendor/github.com/go-stack/stack/stack.go
@@ -0,0 +1,400 @@
+// +build go1.7
+
+// Package stack implements utilities to capture, manipulate, and format call
+// stacks. It provides a simpler API than package runtime.
+//
+// The implementation takes care of the minutia and special cases of
+// interpreting the program counter (pc) values returned by runtime.Callers.
+//
+// Package stack's types implement fmt.Formatter, which provides a simple and
+// flexible way to declaratively configure formatting when used with logging
+// or error tracking packages.
+package stack
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+// Call records a single function invocation from a goroutine stack.
+type Call struct {
+	frame runtime.Frame
+}
+
+// Caller returns a Call from the stack of the current goroutine. The argument
+// skip is the number of stack frames to ascend, with 0 identifying the
+// calling function.
+func Caller(skip int) Call {
+	// As of Go 1.9 we need room for up to three PC entries.
+	//
+	// 0. An entry for the stack frame prior to the target to check for
+	//    special handling needed if that prior entry is runtime.sigpanic.
+	// 1. A possible second entry to hold metadata about skipped inlined
+	//    functions. If inline functions were not skipped the target frame
+	//    PC will be here.
+	// 2. A third entry for the target frame PC when the second entry
+	//    is used for skipped inline functions.
+	var pcs [3]uintptr
+	n := runtime.Callers(skip+1, pcs[:])
+	frames := runtime.CallersFrames(pcs[:n])
+	frame, _ := frames.Next()
+	frame, _ = frames.Next()
+
+	return Call{
+		frame: frame,
+	}
+}
+
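+// Illustrative sketch (not part of the upstream docs): within a function f,
+// Caller(0) identifies f's own frame, while Caller(1) identifies the frame
+// of f's caller:
+//
+//	func f() {
+//		self := stack.Caller(0)   // f's own file and line
+//		parent := stack.Caller(1) // the call site of f
+//	}
+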
+// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", c).
+func (c Call) String() string {
+	return fmt.Sprint(c)
+}
+
+// MarshalText implements encoding.TextMarshaler. It formats the Call the same
+// as fmt.Sprintf("%v", c).
+func (c Call) MarshalText() ([]byte, error) {
+	if c.frame == (runtime.Frame{}) {
+		return nil, ErrNoFunc
+	}
+
+	buf := bytes.Buffer{}
+	fmt.Fprint(&buf, c)
+	return buf.Bytes(), nil
+}
+
+// ErrNoFunc means that the Call has no call frame information. The most
+// likely cause is a Call with the zero value.
+var ErrNoFunc = errors.New("no call stack information")
+
+// Format implements fmt.Formatter with support for the following verbs.
+//
+//    %s    source file
+//    %d    line number
+//    %n    function name
+//    %k    last segment of the package path
+//    %v    equivalent to %s:%d
+//
+// It accepts the '+' and '#' flags for most of the verbs as follows.
+//
+//    %+s   path of source file relative to the compile time GOPATH,
+//          or the module path joined to the path of source file relative
+//          to module root
+//    %#s   full path of source file
+//    %+n   import path qualified function name
+//    %+k   full package path
+//    %+v   equivalent to %+s:%d
+//    %#v   equivalent to %#s:%d
+func (c Call) Format(s fmt.State, verb rune) {
+	if c.frame == (runtime.Frame{}) {
+		fmt.Fprintf(s, "%%!%c(NOFUNC)", verb)
+		return
+	}
+
+	switch verb {
+	case 's', 'v':
+		file := c.frame.File
+		switch {
+		case s.Flag('#'):
+			// done
+		case s.Flag('+'):
+			file = pkgFilePath(&c.frame)
+		default:
+			const sep = "/"
+			if i := strings.LastIndex(file, sep); i != -1 {
+				file = file[i+len(sep):]
+			}
+		}
+		io.WriteString(s, file)
+		if verb == 'v' {
+			buf := [7]byte{':'}
+			s.Write(strconv.AppendInt(buf[:1], int64(c.frame.Line), 10))
+		}
+
+	case 'd':
+		buf := [6]byte{}
+		s.Write(strconv.AppendInt(buf[:0], int64(c.frame.Line), 10))
+
+	case 'k':
+		name := c.frame.Function
+		const pathSep = "/"
+		start, end := 0, len(name)
+		if i := strings.LastIndex(name, pathSep); i != -1 {
+			start = i + len(pathSep)
+		}
+		const pkgSep = "."
+		if i := strings.Index(name[start:], pkgSep); i != -1 {
+			end = start + i
+		}
+		if s.Flag('+') {
+			start = 0
+		}
+		io.WriteString(s, name[start:end])
+
+	case 'n':
+		name := c.frame.Function
+		if !s.Flag('+') {
+			const pathSep = "/"
+			if i := strings.LastIndex(name, pathSep); i != -1 {
+				name = name[i+len(pathSep):]
+			}
+			const pkgSep = "."
+			if i := strings.Index(name, pkgSep); i != -1 {
+				name = name[i+len(pkgSep):]
+			}
+		}
+		io.WriteString(s, name)
+	}
+}
+
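+// Illustrative sketch of the verbs above (the paths and function name are
+// hypothetical). For a Call c captured inside function pkg/sub.DoTheThing,
+// defined in $GOPATH/src/pkg/sub/file.go at line 10:
+//
+//	fmt.Sprintf("%v", c)  // "file.go:10"
+//	fmt.Sprintf("%+v", c) // "pkg/sub/file.go:10"
+//	fmt.Sprintf("%n", c)  // "DoTheThing"
+//	fmt.Sprintf("%+n", c) // "pkg/sub.DoTheThing"
+//	fmt.Sprintf("%k", c)  // "sub"
+//	fmt.Sprintf("%+k", c) // "pkg/sub"
+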
+// Frame returns the call frame information for the Call.
+func (c Call) Frame() runtime.Frame {
+	return c.frame
+}
+
+// PC returns the program counter for this call frame; multiple frames may
+// have the same PC value.
+//
+// Deprecated: Use Call.Frame instead.
+func (c Call) PC() uintptr {
+	return c.frame.PC
+}
+
+// CallStack records a sequence of function invocations from a goroutine
+// stack.
+type CallStack []Call
+
+// String implements fmt.Stringer. It is equivalent to fmt.Sprintf("%v", cs).
+func (cs CallStack) String() string {
+	return fmt.Sprint(cs)
+}
+
+var (
+	openBracketBytes  = []byte("[")
+	closeBracketBytes = []byte("]")
+	spaceBytes        = []byte(" ")
+)
+
+// MarshalText implements encoding.TextMarshaler. It formats the CallStack the
+// same as fmt.Sprintf("%v", cs).
+func (cs CallStack) MarshalText() ([]byte, error) {
+	buf := bytes.Buffer{}
+	buf.Write(openBracketBytes)
+	for i, pc := range cs {
+		if i > 0 {
+			buf.Write(spaceBytes)
+		}
+		fmt.Fprint(&buf, pc)
+	}
+	buf.Write(closeBracketBytes)
+	return buf.Bytes(), nil
+}
+
+// Format implements fmt.Formatter by printing the CallStack as square brackets
+// ([, ]) surrounding a space separated list of Calls each formatted with the
+// supplied verb and options.
+func (cs CallStack) Format(s fmt.State, verb rune) {
+	s.Write(openBracketBytes)
+	for i, pc := range cs {
+		if i > 0 {
+			s.Write(spaceBytes)
+		}
+		pc.Format(s, verb)
+	}
+	s.Write(closeBracketBytes)
+}
+
+// Trace returns a CallStack for the current goroutine with element 0
+// identifying the calling function.
+func Trace() CallStack {
+	var pcs [512]uintptr
+	n := runtime.Callers(1, pcs[:])
+
+	frames := runtime.CallersFrames(pcs[:n])
+	cs := make(CallStack, 0, n)
+
+	// Skip extra frame retrieved just to make sure the runtime.sigpanic
+	// special case is handled.
+	frame, more := frames.Next()
+
+	for more {
+		frame, more = frames.Next()
+		cs = append(cs, Call{frame: frame})
+	}
+
+	return cs
+}
+
+// TrimBelow returns a slice of the CallStack with all entries below c
+// removed.
+func (cs CallStack) TrimBelow(c Call) CallStack {
+	for len(cs) > 0 && cs[0] != c {
+		cs = cs[1:]
+	}
+	return cs
+}
+
+// TrimAbove returns a slice of the CallStack with all entries above c
+// removed.
+func (cs CallStack) TrimAbove(c Call) CallStack {
+	for len(cs) > 0 && cs[len(cs)-1] != c {
+		cs = cs[:len(cs)-1]
+	}
+	return cs
+}
+
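+// Illustrative sketch: because TrimBelow and TrimAbove match on Call
+// equality, trimming relative to an element of the same CallStack is always
+// well defined (the frames shown are hypothetical):
+//
+//	s := stack.Trace()         // e.g. [a.go:10 b.go:20 c.go:30 main.go:5]
+//	below := s.TrimBelow(s[1]) // [b.go:20 c.go:30 main.go:5]
+//	above := s.TrimAbove(s[1]) // [a.go:10 b.go:20]
+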
+// pkgIndex returns the index that results in file[index:] being the path of
+// file relative to the compile time GOPATH, and file[:index] being the
+// $GOPATH/src/ portion of file. funcName must be the name of a function in
+// file as returned by runtime.Func.Name.
+func pkgIndex(file, funcName string) int {
+	// As of Go 1.6.2 there is no direct way to know the compile time GOPATH
+	// at runtime, but we can infer the number of path segments in the GOPATH.
+	// We note that runtime.Func.Name() returns the function name qualified by
+	// the import path, which does not include the GOPATH. Thus we can trim
+	// segments from the beginning of the file path until the number of path
+	// separators remaining is one more than the number of path separators in
+	// the function name. For example, given:
+	//
+	//    GOPATH     /home/user
+	//    file       /home/user/src/pkg/sub/file.go
+	//    fn.Name()  pkg/sub.Type.Method
+	//
+	// We want to produce:
+	//
+	//    file[:idx] == /home/user/src/
+	//    file[idx:] == pkg/sub/file.go
+	//
+	// From this we can easily see that fn.Name() has one less path separator
+	// than our desired result for file[idx:]. We count separators from the
+	// end of the file path until we find two more than in the function name,
+	// and then move one character forward to preserve the initial path
+	// segment without a leading separator.
+	const sep = "/"
+	i := len(file)
+	for n := strings.Count(funcName, sep) + 2; n > 0; n-- {
+		i = strings.LastIndex(file[:i], sep)
+		if i == -1 {
+			i = -len(sep)
+			break
+		}
+	}
+	// get back to 0 or trim the leading separator
+	return i + len(sep)
+}
+
+// pkgFilePath returns the frame's filepath relative to the compile-time GOPATH,
+// or its module path joined to its path relative to the module root.
+//
+// As of Go 1.11 there is no direct way to know the compile time GOPATH or
+// module paths at runtime, but we can piece together the desired information
+// from available information. We note that runtime.Frame.Function contains the
+// function name qualified by the package path, which includes the module path
+// but not the GOPATH. We can extract the package path from that and append the
+// last segments of the file path to arrive at the desired package qualified
+// file path. For example, given:
+//
+//    GOPATH          /home/user
+//    import path     pkg/sub
+//    frame.File      /home/user/src/pkg/sub/file.go
+//    frame.Function  pkg/sub.Type.Method
+//    Desired return  pkg/sub/file.go
+//
+// It appears that we simply need to trim ".Type.Method" from frame.Function and
+// append "/" + path.Base(file).
+//
+// But there are other wrinkles. Although it is idiomatic to do so, the internal
+// name of a package is not required to match the last segment of its import
+// path. In addition, the introduction of modules in Go 1.11 allows working
+// without a GOPATH. So we also must make these work right:
+//
+//    GOPATH          /home/user
+//    import path     pkg/go-sub
+//    package name    sub
+//    frame.File      /home/user/src/pkg/go-sub/file.go
+//    frame.Function  pkg/sub.Type.Method
+//    Desired return  pkg/go-sub/file.go
+//
+//    Module path     pkg/v2
+//    import path     pkg/v2/go-sub
+//    package name    sub
+//    frame.File      /home/user/cloned-pkg/go-sub/file.go
+//    frame.Function  pkg/v2/sub.Type.Method
+//    Desired return  pkg/v2/go-sub/file.go
+//
+// We can handle all of these situations by using the package path extracted
+// from frame.Function up to, but not including, the last segment as the prefix
+// and the last two segments of frame.File as the suffix of the returned path.
+// This preserves the existing behavior when working in a GOPATH without modules
+// and a semantically equivalent behavior when used in a module-aware project.
+func pkgFilePath(frame *runtime.Frame) string {
+	pre := pkgPrefix(frame.Function)
+	post := pathSuffix(frame.File)
+	if pre == "" {
+		return post
+	}
+	return pre + "/" + post
+}
+
+// pkgPrefix returns the import path of the function's package with the final
+// segment removed.
+func pkgPrefix(funcName string) string {
+	const pathSep = "/"
+	end := strings.LastIndex(funcName, pathSep)
+	if end == -1 {
+		return ""
+	}
+	return funcName[:end]
+}
+
+// pathSuffix returns the last two segments of path.
+func pathSuffix(path string) string {
+	const pathSep = "/"
+	lastSep := strings.LastIndex(path, pathSep)
+	if lastSep == -1 {
+		return path
+	}
+	return path[strings.LastIndex(path[:lastSep], pathSep)+1:]
+}
+
+var runtimePath string
+
+func init() {
+	var pcs [3]uintptr
+	runtime.Callers(0, pcs[:])
+	frames := runtime.CallersFrames(pcs[:])
+	frame, _ := frames.Next()
+	file := frame.File
+
+	idx := pkgIndex(frame.File, frame.Function)
+
+	runtimePath = file[:idx]
+	if runtime.GOOS == "windows" {
+		runtimePath = strings.ToLower(runtimePath)
+	}
+}
+
+func inGoroot(c Call) bool {
+	file := c.frame.File
+	if len(file) == 0 || file[0] == '?' {
+		return true
+	}
+	if runtime.GOOS == "windows" {
+		file = strings.ToLower(file)
+	}
+	return strings.HasPrefix(file, runtimePath) || strings.HasSuffix(file, "/_testmain.go")
+}
+
+// TrimRuntime returns a slice of the CallStack with the topmost entries from
+// the go runtime removed. It considers any calls originating from unknown
+// files, files under GOROOT, or _testmain.go as part of the runtime.
+func (cs CallStack) TrimRuntime() CallStack {
+	for len(cs) > 0 && inGoroot(cs[len(cs)-1]) {
+		cs = cs[:len(cs)-1]
+	}
+	return cs
+}
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..0f64693
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
new file mode 100644
index 0000000..ada2b78
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/jsonpb/jsonpb.go
@@ -0,0 +1,1271 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2015 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
+It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
+
+This package produces a different output than the standard "encoding/json" package,
+which does not operate correctly on protocol buffers.
+*/
+package jsonpb
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+
+	stpb "github.com/golang/protobuf/ptypes/struct"
+)
+
+const secondInNanos = int64(time.Second / time.Nanosecond)
+
+// Marshaler is a configurable object for converting between
+// protocol buffer objects and a JSON representation for them.
+type Marshaler struct {
+	// Whether to render enum values as integers, as opposed to string values.
+	EnumsAsInts bool
+
+	// Whether to render fields with zero values.
+	EmitDefaults bool
+
+	// A string to indent each level by. The presence of this field will
+	// also cause a space to appear between the field separator and
+	// value, and newlines to appear between fields and array
+	// elements.
+	Indent string
+
+	// Whether to use the original (.proto) name for fields.
+	OrigName bool
+
+	// A custom URL resolver to use when marshaling Any messages to JSON.
+	// If unset, the default resolution strategy is to extract the
+	// fully-qualified type name from the type URL and pass that to
+	// proto.MessageType(string).
+	AnyResolver AnyResolver
+}
+
+// AnyResolver takes a type URL, present in an Any message, and resolves it into
+// an instance of the associated message.
+type AnyResolver interface {
+	Resolve(typeUrl string) (proto.Message, error)
+}
+
+func defaultResolveAny(typeUrl string) (proto.Message, error) {
+	// Only the part of typeUrl after the last slash is relevant.
+	mname := typeUrl
+	if slash := strings.LastIndex(mname, "/"); slash >= 0 {
+		mname = mname[slash+1:]
+	}
+	mt := proto.MessageType(mname)
+	if mt == nil {
+		return nil, fmt.Errorf("unknown message type %q", mname)
+	}
+	return reflect.New(mt.Elem()).Interface().(proto.Message), nil
+}
+
+// JSONPBMarshaler is implemented by protobuf messages that customize the
+// way they are marshaled to JSON. Messages that implement this should
+// also implement JSONPBUnmarshaler so that the custom format can be
+// parsed.
+//
+// The JSON marshaling must follow the proto to JSON specification:
+//	https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBMarshaler interface {
+	MarshalJSONPB(*Marshaler) ([]byte, error)
+}
+
+// JSONPBUnmarshaler is implemented by protobuf messages that customize
+// the way they are unmarshaled from JSON. Messages that implement this
+// should also implement JSONPBMarshaler so that the custom format can be
+// produced.
+//
+// The JSON unmarshaling must follow the JSON to proto specification:
+//	https://developers.google.com/protocol-buffers/docs/proto3#json
+type JSONPBUnmarshaler interface {
+	UnmarshalJSONPB(*Unmarshaler, []byte) error
+}
+
+// Marshal marshals a protocol buffer into JSON.
+func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
+	v := reflect.ValueOf(pb)
+	if pb == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
+		return errors.New("Marshal called with nil")
+	}
+	// Check for unset required fields first.
+	if err := checkRequiredFields(pb); err != nil {
+		return err
+	}
+	writer := &errWriter{writer: out}
+	return m.marshalObject(writer, pb, "", "")
+}
+
+// MarshalToString converts a protocol buffer object to JSON string.
+func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
+	var buf bytes.Buffer
+	if err := m.Marshal(&buf, pb); err != nil {
+		return "", err
+	}
+	return buf.String(), nil
+}
+
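+// Minimal usage sketch (pb.MyMessage stands in for any generated message
+// type; not part of the upstream docs):
+//
+//	msg := &pb.MyMessage{Name: "x"}
+//	m := &jsonpb.Marshaler{EmitDefaults: true, Indent: "  "}
+//	js, err := m.MarshalToString(msg)
+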
+type int32Slice []int32
+
+var nonFinite = map[string]float64{
+	`"NaN"`:       math.NaN(),
+	`"Infinity"`:  math.Inf(1),
+	`"-Infinity"`: math.Inf(-1),
+}
+
+// For sorting extension IDs to ensure stable output.
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+type wkt interface {
+	XXX_WellKnownType() string
+}
+
+// marshalObject writes a struct to the Writer.
+func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
+	if jsm, ok := v.(JSONPBMarshaler); ok {
+		b, err := jsm.MarshalJSONPB(m)
+		if err != nil {
+			return err
+		}
+		if typeURL != "" {
+			// we are marshaling this object to an Any type
+			var js map[string]*json.RawMessage
+			if err = json.Unmarshal(b, &js); err != nil {
+				return fmt.Errorf("type %T produced invalid JSON: %v", v, err)
+			}
+			turl, err := json.Marshal(typeURL)
+			if err != nil {
+				return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
+			}
+			js["@type"] = (*json.RawMessage)(&turl)
+			if b, err = json.Marshal(js); err != nil {
+				return err
+			}
+		}
+
+		out.write(string(b))
+		return out.err
+	}
+
+	s := reflect.ValueOf(v).Elem()
+
+	// Handle well-known types.
+	if wkt, ok := v.(wkt); ok {
+		switch wkt.XXX_WellKnownType() {
+		case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+			"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+			// "Wrappers use the same representation in JSON
+			//  as the wrapped primitive type, ..."
+			sprop := proto.GetProperties(s.Type())
+			return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
+		case "Any":
+			// Any is a bit more involved.
+			return m.marshalAny(out, v, indent)
+		case "Duration":
+			// "Generated output always contains 0, 3, 6, or 9 fractional digits,
+			//  depending on required precision."
+			s, ns := s.Field(0).Int(), s.Field(1).Int()
+			if ns <= -secondInNanos || ns >= secondInNanos {
+				return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
+			}
+			if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
+				return errors.New("signs of seconds and nanos do not match")
+			}
+			if s < 0 {
+				ns = -ns
+			}
+			x := fmt.Sprintf("%d.%09d", s, ns)
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, ".000")
+			out.write(`"`)
+			out.write(x)
+			out.write(`s"`)
+			return out.err
+		case "Struct", "ListValue":
+			// Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice.
+			// TODO: pass the correct Properties if needed.
+			return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
+		case "Timestamp":
+			// "RFC 3339, where generated output will always be Z-normalized
+			//  and uses 0, 3, 6 or 9 fractional digits."
+			s, ns := s.Field(0).Int(), s.Field(1).Int()
+			if ns < 0 || ns >= secondInNanos {
+				return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
+			}
+			t := time.Unix(s, ns).UTC()
+			// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
+			x := t.Format("2006-01-02T15:04:05.000000000")
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, "000")
+			x = strings.TrimSuffix(x, ".000")
+			out.write(`"`)
+			out.write(x)
+			out.write(`Z"`)
+			return out.err
+		case "Value":
+			// Value has a single oneof.
+			kind := s.Field(0)
+			if kind.IsNil() {
+				// "absence of any variant indicates an error"
+				return errors.New("nil Value")
+			}
+			// oneof -> *T -> T -> T.F
+			x := kind.Elem().Elem().Field(0)
+			// TODO: pass the correct Properties if needed.
+			return m.marshalValue(out, &proto.Properties{}, x, indent)
+		}
+	}
+
+	out.write("{")
+	if m.Indent != "" {
+		out.write("\n")
+	}
+
+	firstField := true
+
+	if typeURL != "" {
+		if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	for i := 0; i < s.NumField(); i++ {
+		value := s.Field(i)
+		valueField := s.Type().Field(i)
+		if strings.HasPrefix(valueField.Name, "XXX_") {
+			continue
+		}
+
+		// IsNil will panic on most value kinds.
+		switch value.Kind() {
+		case reflect.Chan, reflect.Func, reflect.Interface:
+			if value.IsNil() {
+				continue
+			}
+		}
+
+		if !m.EmitDefaults {
+			switch value.Kind() {
+			case reflect.Bool:
+				if !value.Bool() {
+					continue
+				}
+			case reflect.Int32, reflect.Int64:
+				if value.Int() == 0 {
+					continue
+				}
+			case reflect.Uint32, reflect.Uint64:
+				if value.Uint() == 0 {
+					continue
+				}
+			case reflect.Float32, reflect.Float64:
+				if value.Float() == 0 {
+					continue
+				}
+			case reflect.String:
+				if value.Len() == 0 {
+					continue
+				}
+			case reflect.Map, reflect.Ptr, reflect.Slice:
+				if value.IsNil() {
+					continue
+				}
+			}
+		}
+
+		// Oneof fields need special handling.
+		if valueField.Tag.Get("protobuf_oneof") != "" {
+			// value is an interface containing &T{real_value}.
+			sv := value.Elem().Elem() // interface -> *T -> T
+			value = sv.Field(0)
+			valueField = sv.Type().Field(0)
+		}
+		prop := jsonProperties(valueField, m.OrigName)
+		if !firstField {
+			m.writeSep(out)
+		}
+		if err := m.marshalField(out, prop, value, indent); err != nil {
+			return err
+		}
+		firstField = false
+	}
+
+	// Handle proto2 extensions.
+	if ep, ok := v.(proto.Message); ok {
+		extensions := proto.RegisteredExtensions(v)
+		// Sort extensions for stable output.
+		ids := make([]int32, 0, len(extensions))
+		for id, desc := range extensions {
+			if !proto.HasExtension(ep, desc) {
+				continue
+			}
+			ids = append(ids, id)
+		}
+		sort.Sort(int32Slice(ids))
+		for _, id := range ids {
+			desc := extensions[id]
+			if desc == nil {
+				// unknown extension
+				continue
+			}
+			ext, extErr := proto.GetExtension(ep, desc)
+			if extErr != nil {
+				return extErr
+			}
+			value := reflect.ValueOf(ext)
+			var prop proto.Properties
+			prop.Parse(desc.Tag)
+			prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
+			if !firstField {
+				m.writeSep(out)
+			}
+			if err := m.marshalField(out, &prop, value, indent); err != nil {
+				return err
+			}
+			firstField = false
+		}
+
+	}
+
+	if m.Indent != "" {
+		out.write("\n")
+		out.write(indent)
+	}
+	out.write("}")
+	return out.err
+}
+
+func (m *Marshaler) writeSep(out *errWriter) {
+	if m.Indent != "" {
+		out.write(",\n")
+	} else {
+		out.write(",")
+	}
+}
+
+func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
+	// "If the Any contains a value that has a special JSON mapping,
+	//  it will be converted as follows: {"@type": xxx, "value": yyy}.
+	//  Otherwise, the value will be converted into a JSON object,
+	//  and the "@type" field will be inserted to indicate the actual data type."
+	v := reflect.ValueOf(any).Elem()
+	turl := v.Field(0).String()
+	val := v.Field(1).Bytes()
+
+	var msg proto.Message
+	var err error
+	if m.AnyResolver != nil {
+		msg, err = m.AnyResolver.Resolve(turl)
+	} else {
+		msg, err = defaultResolveAny(turl)
+	}
+	if err != nil {
+		return err
+	}
+
+	if err := proto.Unmarshal(val, msg); err != nil {
+		return err
+	}
+
+	if _, ok := msg.(wkt); ok {
+		out.write("{")
+		if m.Indent != "" {
+			out.write("\n")
+		}
+		if err := m.marshalTypeURL(out, indent, turl); err != nil {
+			return err
+		}
+		m.writeSep(out)
+		if m.Indent != "" {
+			out.write(indent)
+			out.write(m.Indent)
+			out.write(`"value": `)
+		} else {
+			out.write(`"value":`)
+		}
+		if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
+			return err
+		}
+		if m.Indent != "" {
+			out.write("\n")
+			out.write(indent)
+		}
+		out.write("}")
+		return out.err
+	}
+
+	return m.marshalObject(out, msg, indent, turl)
+}
+
+func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
+	if m.Indent != "" {
+		out.write(indent)
+		out.write(m.Indent)
+	}
+	out.write(`"@type":`)
+	if m.Indent != "" {
+		out.write(" ")
+	}
+	b, err := json.Marshal(typeURL)
+	if err != nil {
+		return err
+	}
+	out.write(string(b))
+	return out.err
+}
+
+// marshalField writes field description and value to the Writer.
+func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+	if m.Indent != "" {
+		out.write(indent)
+		out.write(m.Indent)
+	}
+	out.write(`"`)
+	out.write(prop.JSONName)
+	out.write(`":`)
+	if m.Indent != "" {
+		out.write(" ")
+	}
+	if err := m.marshalValue(out, prop, v, indent); err != nil {
+		return err
+	}
+	return nil
+}
+
+// marshalValue writes the value to the Writer.
+func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
+	var err error
+	v = reflect.Indirect(v)
+
+	// Handle nil pointer
+	if v.Kind() == reflect.Invalid {
+		out.write("null")
+		return out.err
+	}
+
+	// Handle repeated elements.
+	if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+		out.write("[")
+		comma := ""
+		for i := 0; i < v.Len(); i++ {
+			sliceVal := v.Index(i)
+			out.write(comma)
+			if m.Indent != "" {
+				out.write("\n")
+				out.write(indent)
+				out.write(m.Indent)
+				out.write(m.Indent)
+			}
+			if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
+				return err
+			}
+			comma = ","
+		}
+		if m.Indent != "" {
+			out.write("\n")
+			out.write(indent)
+			out.write(m.Indent)
+		}
+		out.write("]")
+		return out.err
+	}
+
+	// Handle well-known types.
+	// Most are handled up in marshalObject (because 99% are messages).
+	if wkt, ok := v.Interface().(wkt); ok {
+		switch wkt.XXX_WellKnownType() {
+		case "NullValue":
+			out.write("null")
+			return out.err
+		}
+	}
+
+	// Handle enumerations.
+	if !m.EnumsAsInts && prop.Enum != "" {
+		// Unknown enum values are stringified by the proto library as their
+		// value. Such values should _not_ be quoted or they will be interpreted
+		// as an enum string instead of their value.
+		enumStr := v.Interface().(fmt.Stringer).String()
+		var valStr string
+		if v.Kind() == reflect.Ptr {
+			valStr = strconv.Itoa(int(v.Elem().Int()))
+		} else {
+			valStr = strconv.Itoa(int(v.Int()))
+		}
+		isKnownEnum := enumStr != valStr
+		if isKnownEnum {
+			out.write(`"`)
+		}
+		out.write(enumStr)
+		if isKnownEnum {
+			out.write(`"`)
+		}
+		return out.err
+	}
+
+	// Handle nested messages.
+	if v.Kind() == reflect.Struct {
+		return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "")
+	}
+
+	// Handle maps.
+	// Since Go randomizes map iteration, we sort keys for stable output.
+	if v.Kind() == reflect.Map {
+		out.write(`{`)
+		keys := v.MapKeys()
+		sort.Sort(mapKeys(keys))
+		for i, k := range keys {
+			if i > 0 {
+				out.write(`,`)
+			}
+			if m.Indent != "" {
+				out.write("\n")
+				out.write(indent)
+				out.write(m.Indent)
+				out.write(m.Indent)
+			}
+
+			// TODO handle map key prop properly
+			b, err := json.Marshal(k.Interface())
+			if err != nil {
+				return err
+			}
+			s := string(b)
+
+			// If the JSON is not a string value, encode it again to make it one.
+			if !strings.HasPrefix(s, `"`) {
+				b, err := json.Marshal(s)
+				if err != nil {
+					return err
+				}
+				s = string(b)
+			}
+
+			out.write(s)
+			out.write(`:`)
+			if m.Indent != "" {
+				out.write(` `)
+			}
+
+			vprop := prop
+			if prop != nil && prop.MapValProp != nil {
+				vprop = prop.MapValProp
+			}
+			if err := m.marshalValue(out, vprop, v.MapIndex(k), indent+m.Indent); err != nil {
+				return err
+			}
+		}
+		if m.Indent != "" {
+			out.write("\n")
+			out.write(indent)
+			out.write(m.Indent)
+		}
+		out.write(`}`)
+		return out.err
+	}
+
+	// Handle non-finite floats, e.g. NaN, Infinity and -Infinity.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		f := v.Float()
+		var sval string
+		switch {
+		case math.IsInf(f, 1):
+			sval = `"Infinity"`
+		case math.IsInf(f, -1):
+			sval = `"-Infinity"`
+		case math.IsNaN(f):
+			sval = `"NaN"`
+		}
+		if sval != "" {
+			out.write(sval)
+			return out.err
+		}
+	}
+
+	// Default handling defers to the encoding/json library.
+	b, err := json.Marshal(v.Interface())
+	if err != nil {
+		return err
+	}
+	needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
+	if needToQuote {
+		out.write(`"`)
+	}
+	out.write(string(b))
+	if needToQuote {
+		out.write(`"`)
+	}
+	return out.err
+}
+
+// Unmarshaler is a configurable object for converting from a JSON
+// representation to a protocol buffer object.
+type Unmarshaler struct {
+	// Whether to allow messages to contain unknown fields, as opposed to
+	// failing to unmarshal.
+	AllowUnknownFields bool
+
+	// A custom URL resolver to use when unmarshaling Any messages from JSON.
+	// If unset, the default resolution strategy is to extract the
+	// fully-qualified type name from the type URL and pass that to
+	// proto.MessageType(string).
+	AnyResolver AnyResolver
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode JSON produced with any
+// permutation of the related Marshaler's options.
+func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+	inputValue := json.RawMessage{}
+	if err := dec.Decode(&inputValue); err != nil {
+		return err
+	}
+	if err := u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil); err != nil {
+		return err
+	}
+	return checkRequiredFields(pb)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode JSON produced with
+// any permutation of the related Marshaler's options.
+func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
+	dec := json.NewDecoder(r)
+	return u.UnmarshalNext(dec, pb)
+}
+
+// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
+// This function is lenient and will decode JSON produced with any
+// permutation of the related Marshaler's options.
+func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
+	return new(Unmarshaler).UnmarshalNext(dec, pb)
+}
+
+// Unmarshal unmarshals a JSON object stream into a protocol
+// buffer. This function is lenient and will decode JSON produced with
+// any permutation of the related Marshaler's options.
+func Unmarshal(r io.Reader, pb proto.Message) error {
+	return new(Unmarshaler).Unmarshal(r, pb)
+}
+
+// UnmarshalString will populate the fields of a protocol buffer based
+// on a JSON string. This function is lenient and will decode JSON produced
+// with any permutation of the related Marshaler's options.
+func UnmarshalString(str string, pb proto.Message) error {
+	return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
+}
+
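+// Minimal usage sketch (pb.MyMessage stands in for any generated message
+// type; not part of the upstream docs):
+//
+//	var msg pb.MyMessage
+//	err := jsonpb.UnmarshalString(`{"name": "x"}`, &msg)
+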
+// unmarshalValue converts/copies a value into the target.
+// prop may be nil.
+func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
+	targetType := target.Type()
+
+	// Allocate memory for pointer fields.
+	if targetType.Kind() == reflect.Ptr {
+		// If input value is "null" and target is a pointer type, then the field should be treated as not set
+		// UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue.
+		_, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler)
+		if string(inputValue) == "null" && targetType != reflect.TypeOf(&stpb.Value{}) && !isJSONPBUnmarshaler {
+			return nil
+		}
+		target.Set(reflect.New(targetType.Elem()))
+
+		return u.unmarshalValue(target.Elem(), inputValue, prop)
+	}
+
+	if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok {
+		return jsu.UnmarshalJSONPB(u, []byte(inputValue))
+	}
+
+	// Handle well-known types that are not pointers.
+	if w, ok := target.Addr().Interface().(wkt); ok {
+		switch w.XXX_WellKnownType() {
+		case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
+			"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
+			return u.unmarshalValue(target.Field(0), inputValue, prop)
+		case "Any":
+			// Use json.RawMessage pointer type instead of value to support Go versions before 1.8.
+			// 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see
+			// https://github.com/golang/go/issues/14493
+			var jsonFields map[string]*json.RawMessage
+			if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+				return err
+			}
+
+			val, ok := jsonFields["@type"]
+			if !ok || val == nil {
+				return errors.New("Any JSON doesn't have '@type'")
+			}
+
+			var turl string
+			if err := json.Unmarshal([]byte(*val), &turl); err != nil {
+				return fmt.Errorf("can't unmarshal Any's '@type': %q", *val)
+			}
+			target.Field(0).SetString(turl)
+
+			var m proto.Message
+			var err error
+			if u.AnyResolver != nil {
+				m, err = u.AnyResolver.Resolve(turl)
+			} else {
+				m, err = defaultResolveAny(turl)
+			}
+			if err != nil {
+				return err
+			}
+
+			if _, ok := m.(wkt); ok {
+				val, ok := jsonFields["value"]
+				if !ok {
+					return errors.New("Any JSON doesn't have 'value'")
+				}
+
+				if err := u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil {
+					return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+				}
+			} else {
+				delete(jsonFields, "@type")
+				nestedProto, err := json.Marshal(jsonFields)
+				if err != nil {
+					return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
+				}
+
+				if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil {
+					return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err)
+				}
+			}
+
+			b, err := proto.Marshal(m)
+			if err != nil {
+				return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err)
+			}
+			target.Field(1).SetBytes(b)
+
+			return nil
+		case "Duration":
+			unq, err := unquote(string(inputValue))
+			if err != nil {
+				return err
+			}
+
+			d, err := time.ParseDuration(unq)
+			if err != nil {
+				return fmt.Errorf("bad Duration: %v", err)
+			}
+
+			ns := d.Nanoseconds()
+			s := ns / 1e9
+			ns %= 1e9
+			target.Field(0).SetInt(s)
+			target.Field(1).SetInt(ns)
+			return nil
+		case "Timestamp":
+			unq, err := unquote(string(inputValue))
+			if err != nil {
+				return err
+			}
+
+			t, err := time.Parse(time.RFC3339Nano, unq)
+			if err != nil {
+				return fmt.Errorf("bad Timestamp: %v", err)
+			}
+
+			target.Field(0).SetInt(t.Unix())
+			target.Field(1).SetInt(int64(t.Nanosecond()))
+			return nil
+		case "Struct":
+			var m map[string]json.RawMessage
+			if err := json.Unmarshal(inputValue, &m); err != nil {
+				return fmt.Errorf("bad StructValue: %v", err)
+			}
+
+			target.Field(0).Set(reflect.ValueOf(map[string]*stpb.Value{}))
+			for k, jv := range m {
+				pv := &stpb.Value{}
+				if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil {
+					return fmt.Errorf("bad value in StructValue for key %q: %v", k, err)
+				}
+				target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv))
+			}
+			return nil
+		case "ListValue":
+			var s []json.RawMessage
+			if err := json.Unmarshal(inputValue, &s); err != nil {
+				return fmt.Errorf("bad ListValue: %v", err)
+			}
+
+			target.Field(0).Set(reflect.ValueOf(make([]*stpb.Value, len(s))))
+			for i, sv := range s {
+				if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil {
+					return err
+				}
+			}
+			return nil
+		case "Value":
+			ivStr := string(inputValue)
+			if ivStr == "null" {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_NullValue{}))
+			} else if v, err := strconv.ParseFloat(ivStr, 0); err == nil {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_NumberValue{v}))
+			} else if v, err := unquote(ivStr); err == nil {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_StringValue{v}))
+			} else if v, err := strconv.ParseBool(ivStr); err == nil {
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_BoolValue{v}))
+			} else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil {
+				lv := &stpb.ListValue{}
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_ListValue{lv}))
+				return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop)
+			} else if err := json.Unmarshal(inputValue, &map[string]json.RawMessage{}); err == nil {
+				sv := &stpb.Struct{}
+				target.Field(0).Set(reflect.ValueOf(&stpb.Value_StructValue{sv}))
+				return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop)
+			} else {
+				return fmt.Errorf("unrecognized type for Value %q", ivStr)
+			}
+			return nil
+		}
+	}
+
+	// Handle enums, which have an underlying type of int32,
+	// and may appear as strings.
+	// The case of an enum appearing as a number is handled
+	// at the bottom of this function.
+	if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
+		vmap := proto.EnumValueMap(prop.Enum)
+		// Don't need to do unquoting; valid enum names
+		// are from a limited character set.
+		s := inputValue[1 : len(inputValue)-1]
+		n, ok := vmap[string(s)]
+		if !ok {
+			return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
+		}
+		if target.Kind() == reflect.Ptr { // proto2
+			target.Set(reflect.New(targetType.Elem()))
+			target = target.Elem()
+		}
+		if targetType.Kind() != reflect.Int32 {
+			return fmt.Errorf("invalid target %q for enum %s", targetType.Kind(), prop.Enum)
+		}
+		target.SetInt(int64(n))
+		return nil
+	}
+
+	// Handle nested messages.
+	if targetType.Kind() == reflect.Struct {
+		var jsonFields map[string]json.RawMessage
+		if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
+			return err
+		}
+
+		consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
+			// Be liberal in what names we accept; both orig_name and camelName are okay.
+			fieldNames := acceptedJSONFieldNames(prop)
+
+			vOrig, okOrig := jsonFields[fieldNames.orig]
+			vCamel, okCamel := jsonFields[fieldNames.camel]
+			if !okOrig && !okCamel {
+				return nil, false
+			}
+			// If, for some reason, both are present in the data, favour the camelName.
+			var raw json.RawMessage
+			if okOrig {
+				raw = vOrig
+				delete(jsonFields, fieldNames.orig)
+			}
+			if okCamel {
+				raw = vCamel
+				delete(jsonFields, fieldNames.camel)
+			}
+			return raw, true
+		}
+
+		sprops := proto.GetProperties(targetType)
+		for i := 0; i < target.NumField(); i++ {
+			ft := target.Type().Field(i)
+			if strings.HasPrefix(ft.Name, "XXX_") {
+				continue
+			}
+
+			valueForField, ok := consumeField(sprops.Prop[i])
+			if !ok {
+				continue
+			}
+
+			if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
+				return err
+			}
+		}
+		// Check for any oneof fields.
+		if len(jsonFields) > 0 {
+			for _, oop := range sprops.OneofTypes {
+				raw, ok := consumeField(oop.Prop)
+				if !ok {
+					continue
+				}
+				nv := reflect.New(oop.Type.Elem())
+				target.Field(oop.Field).Set(nv)
+				if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
+					return err
+				}
+			}
+		}
+		// Handle proto2 extensions.
+		if len(jsonFields) > 0 {
+			if ep, ok := target.Addr().Interface().(proto.Message); ok {
+				for _, ext := range proto.RegisteredExtensions(ep) {
+					name := fmt.Sprintf("[%s]", ext.Name)
+					raw, ok := jsonFields[name]
+					if !ok {
+						continue
+					}
+					delete(jsonFields, name)
+					nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem())
+					if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil {
+						return err
+					}
+					if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil {
+						return err
+					}
+				}
+			}
+		}
+		if !u.AllowUnknownFields && len(jsonFields) > 0 {
+			// Pick any field to be the scapegoat.
+			var f string
+			for fname := range jsonFields {
+				f = fname
+				break
+			}
+			return fmt.Errorf("unknown field %q in %v", f, targetType)
+		}
+		return nil
+	}
+
+	// Handle arrays (which aren't encoded bytes)
+	if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 {
+		var slc []json.RawMessage
+		if err := json.Unmarshal(inputValue, &slc); err != nil {
+			return err
+		}
+		if slc != nil {
+			l := len(slc)
+			target.Set(reflect.MakeSlice(targetType, l, l))
+			for i := 0; i < l; i++ {
+				if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
+					return err
+				}
+			}
+		}
+		return nil
+	}
+
+	// Handle maps (whose keys are always strings)
+	if targetType.Kind() == reflect.Map {
+		var mp map[string]json.RawMessage
+		if err := json.Unmarshal(inputValue, &mp); err != nil {
+			return err
+		}
+		if mp != nil {
+			target.Set(reflect.MakeMap(targetType))
+			for ks, raw := range mp {
+				// Unmarshal map key. The core json library already decoded the key into a
+				// string, so we handle that specially. Other types were quoted post-serialization.
+				var k reflect.Value
+				if targetType.Key().Kind() == reflect.String {
+					k = reflect.ValueOf(ks)
+				} else {
+					k = reflect.New(targetType.Key()).Elem()
+					var kprop *proto.Properties
+					if prop != nil && prop.MapKeyProp != nil {
+						kprop = prop.MapKeyProp
+					}
+					if err := u.unmarshalValue(k, json.RawMessage(ks), kprop); err != nil {
+						return err
+					}
+				}
+
+				// Unmarshal map value.
+				v := reflect.New(targetType.Elem()).Elem()
+				var vprop *proto.Properties
+				if prop != nil && prop.MapValProp != nil {
+					vprop = prop.MapValProp
+				}
+				if err := u.unmarshalValue(v, raw, vprop); err != nil {
+					return err
+				}
+				target.SetMapIndex(k, v)
+			}
+		}
+		return nil
+	}
+
+	// Non-finite numbers can be encoded as strings.
+	isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
+	if isFloat {
+		if num, ok := nonFinite[string(inputValue)]; ok {
+			target.SetFloat(num)
+			return nil
+		}
+	}
+
+	// integers & floats can be encoded as strings. In this case we drop
+	// the quotes and proceed as normal.
+	isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 ||
+		targetType.Kind() == reflect.Int32 || targetType.Kind() == reflect.Uint32 ||
+		targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64
+	if isNum && strings.HasPrefix(string(inputValue), `"`) {
+		inputValue = inputValue[1 : len(inputValue)-1]
+	}
+
+	// Use the encoding/json for parsing other value types.
+	return json.Unmarshal(inputValue, target.Addr().Interface())
+}
+
+func unquote(s string) (string, error) {
+	var ret string
+	err := json.Unmarshal([]byte(s), &ret)
+	return ret, err
+}
+
+// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute.
+func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
+	var prop proto.Properties
+	prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
+	if origName || prop.JSONName == "" {
+		prop.JSONName = prop.OrigName
+	}
+	return &prop
+}
+
+type fieldNames struct {
+	orig, camel string
+}
+
+func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
+	opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
+	if prop.JSONName != "" {
+		opts.camel = prop.JSONName
+	}
+	return opts
+}
+
+// Writer wrapper inspired by https://blog.golang.org/errors-are-values
+type errWriter struct {
+	writer io.Writer
+	err    error
+}
+
+func (w *errWriter) write(str string) {
+	if w.err != nil {
+		return
+	}
+	_, w.err = w.writer.Write([]byte(str))
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+//
+// Numeric keys are sorted in numeric order per
+// https://developers.google.com/protocol-buffers/docs/proto#maps.
+type mapKeys []reflect.Value
+
+func (s mapKeys) Len() int      { return len(s) }
+func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s mapKeys) Less(i, j int) bool {
+	if k := s[i].Kind(); k == s[j].Kind() {
+		switch k {
+		case reflect.String:
+			return s[i].String() < s[j].String()
+		case reflect.Int32, reflect.Int64:
+			return s[i].Int() < s[j].Int()
+		case reflect.Uint32, reflect.Uint64:
+			return s[i].Uint() < s[j].Uint()
+		}
+	}
+	return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
+}
+
+// checkRequiredFields returns an error if any required field in the given proto message is not set.
+// This function is used by both Marshal and Unmarshal.  While required fields only exist in a
+// proto2 message, a proto3 message can contain proto2 message(s).
+func checkRequiredFields(pb proto.Message) error {
+	// Most well-known type messages do not contain required fields.  The "Any" type may contain
+	// a message that has required fields.
+	//
+	// When an Any message is being marshaled, the code will have invoked proto.Unmarshal on the
+	// Any.Value field in order to transform it into JSON, and that should have returned an error
+	// if a required field is not set in the embedded message.
+	//
+	// When an Any message is being unmarshaled, the code will have invoked proto.Marshal on the
+	// embedded message to store the serialized message in Any.Value field, and that should have
+	// returned an error if a required field is not set.
+	if _, ok := pb.(wkt); ok {
+		return nil
+	}
+
+	v := reflect.ValueOf(pb)
+	// Skip message if it is not a struct pointer.
+	if v.Kind() != reflect.Ptr {
+		return nil
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return nil
+	}
+
+	for i := 0; i < v.NumField(); i++ {
+		field := v.Field(i)
+		sfield := v.Type().Field(i)
+
+		if sfield.PkgPath != "" {
+			// A blank PkgPath means the field is exported; skip unexported fields.
+			continue
+		}
+
+		if strings.HasPrefix(sfield.Name, "XXX_") {
+			continue
+		}
+
+		// Oneof field is an interface implemented by wrapper structs containing the actual oneof
+		// field, i.e. an interface containing &T{real_value}.
+		if sfield.Tag.Get("protobuf_oneof") != "" {
+			if field.Kind() != reflect.Interface {
+				continue
+			}
+			v := field.Elem()
+			if v.Kind() != reflect.Ptr || v.IsNil() {
+				continue
+			}
+			v = v.Elem()
+			if v.Kind() != reflect.Struct || v.NumField() < 1 {
+				continue
+			}
+			field = v.Field(0)
+			sfield = v.Type().Field(0)
+		}
+
+		protoTag := sfield.Tag.Get("protobuf")
+		if protoTag == "" {
+			continue
+		}
+		var prop proto.Properties
+		prop.Init(sfield.Type, sfield.Name, protoTag, &sfield)
+
+		switch field.Kind() {
+		case reflect.Map:
+			if field.IsNil() {
+				continue
+			}
+			// Check each map value.
+			keys := field.MapKeys()
+			for _, k := range keys {
+				v := field.MapIndex(k)
+				if err := checkRequiredFieldsInValue(v); err != nil {
+					return err
+				}
+			}
+		case reflect.Slice:
+			// Handle non-repeated type, e.g. bytes.
+			if !prop.Repeated {
+				if prop.Required && field.IsNil() {
+					return fmt.Errorf("required field %q is not set", prop.Name)
+				}
+				continue
+			}
+
+			// Handle repeated type.
+			if field.IsNil() {
+				continue
+			}
+			// Check each slice item.
+			for i := 0; i < field.Len(); i++ {
+				v := field.Index(i)
+				if err := checkRequiredFieldsInValue(v); err != nil {
+					return err
+				}
+			}
+		case reflect.Ptr:
+			if field.IsNil() {
+				if prop.Required {
+					return fmt.Errorf("required field %q is not set", prop.Name)
+				}
+				continue
+			}
+			if err := checkRequiredFieldsInValue(field); err != nil {
+				return err
+			}
+		}
+	}
+
+	// Handle proto2 extensions.
+	for _, ext := range proto.RegisteredExtensions(pb) {
+		if !proto.HasExtension(pb, ext) {
+			continue
+		}
+		ep, err := proto.GetExtension(pb, ext)
+		if err != nil {
+			return err
+		}
+		err = checkRequiredFieldsInValue(reflect.ValueOf(ep))
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func checkRequiredFieldsInValue(v reflect.Value) error {
+	if pm, ok := v.Interface().(proto.Message); ok {
+		return checkRequiredFields(pm)
+	}
+	return nil
+}
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..3cd3249
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,253 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
+	if in.IsNil() {
+		return src
+	}
+	out := reflect.New(in.Type().Elem())
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method would conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
+	in := reflect.ValueOf(src)
+	out := reflect.ValueOf(dst)
+	if out.IsNil() {
+		panic("proto: nil destination")
+	}
+	if in.Type() != out.Type() {
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+	}
+	if in.IsNil() {
+		return // Merge from nil src is a noop
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
+		return
+	}
+	mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
+	for i := 0; i < in.NumField(); i++ {
+		f := in.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+	}
+
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	uf := in.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return
+	}
+	uin := uf.Bytes()
+	if len(uin) > 0 {
+		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+	}
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+	if in.Type() == protoMessageType {
+		if !in.IsNil() {
+			if out.IsNil() {
+				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+			} else {
+				Merge(out.Interface().(Message), in.Interface().(Message))
+			}
+		}
+		return
+	}
+	switch in.Kind() {
+	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+		reflect.String, reflect.Uint32, reflect.Uint64:
+		if !viaPtr && isProto3Zero(in) {
+			return
+		}
+		out.Set(in)
+	case reflect.Interface:
+		// Probably a oneof field; copy non-nil values.
+		if in.IsNil() {
+			return
+		}
+		// Allocate destination if it is not set, or set to a different type.
+		// Otherwise we will merge as normal.
+		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+		}
+		mergeAny(out.Elem(), in.Elem(), false, nil)
+	case reflect.Map:
+		if in.Len() == 0 {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(in.Type()))
+		}
+		// For maps with value types of *T or []byte we need to deep copy each value.
+		elemKind := in.Type().Elem().Kind()
+		for _, key := range in.MapKeys() {
+			var val reflect.Value
+			switch elemKind {
+			case reflect.Ptr:
+				val = reflect.New(in.Type().Elem().Elem())
+				mergeAny(val, in.MapIndex(key), false, nil)
+			case reflect.Slice:
+				val = in.MapIndex(key)
+				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+			default:
+				val = in.MapIndex(key)
+			}
+			out.SetMapIndex(key, val)
+		}
+	case reflect.Ptr:
+		if in.IsNil() {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.New(in.Elem().Type()))
+		}
+		mergeAny(out.Elem(), in.Elem(), true, nil)
+	case reflect.Slice:
+		if in.IsNil() {
+			return
+		}
+		if in.Type().Elem().Kind() == reflect.Uint8 {
+			// []byte is a scalar bytes field, not a repeated field.
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value, and should not
+			// be merged.
+			if prop != nil && prop.proto3 && in.Len() == 0 {
+				return
+			}
+
+			// Make a deep copy.
+			// Append to []byte{} instead of []byte(nil) so that we never end up
+			// with a nil result.
+			out.SetBytes(append([]byte{}, in.Bytes()...))
+			return
+		}
+		n := in.Len()
+		if out.IsNil() {
+			out.Set(reflect.MakeSlice(in.Type(), 0, n))
+		}
+		switch in.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+			reflect.String, reflect.Uint32, reflect.Uint64:
+			out.Set(reflect.AppendSlice(out, in))
+		default:
+			for i := 0; i < n; i++ {
+				x := reflect.Indirect(reflect.New(in.Type().Elem()))
+				mergeAny(x, in.Index(i), false, nil)
+				out.Set(reflect.Append(out, x))
+			}
+		}
+	case reflect.Struct:
+		mergeStruct(out, in)
+	default:
+		// unknown type, so not a protocol buffer
+		log.Printf("proto: don't know how to copy %v", in)
+	}
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+	for extNum, eIn := range in {
+		eOut := Extension{desc: eIn.desc}
+		if eIn.value != nil {
+			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+			eOut.value = v.Interface()
+		}
+		if eIn.enc != nil {
+			eOut.enc = make([]byte, len(eIn.enc))
+			copy(eOut.enc, eIn.enc)
+		}
+
+		out[extNum] = eOut
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..63b0f08
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,427 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero for both if the slice does not hold a complete varint.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+	for shift := uint(0); shift < 64; shift += 7 {
+		if n >= len(buf) {
+			return 0, 0
+		}
+		b := uint64(buf[n])
+		n++
+		x |= (b & 0x7F) << shift
+		if (b & 0x80) == 0 {
+			return x, n
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	return 0, 0
+}
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+	i := p.index
+	l := len(p.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := p.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			p.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	i := p.index
+	buf := p.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		p.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return p.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	p.index = i
+	return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 8
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-8])
+	x |= uint64(p.buf[i-7]) << 8
+	x |= uint64(p.buf[i-6]) << 16
+	x |= uint64(p.buf[i-5]) << 24
+	x |= uint64(p.buf[i-4]) << 32
+	x |= uint64(p.buf[i-3]) << 40
+	x |= uint64(p.buf[i-2]) << 48
+	x |= uint64(p.buf[i-1]) << 56
+	return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 4
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-4])
+	x |= uint64(p.buf[i-3]) << 8
+	x |= uint64(p.buf[i-2]) << 16
+	x |= uint64(p.buf[i-1]) << 24
+	return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+	return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+	return
+}
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := p.DecodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := p.index + nb
+	if end < p.index || end > len(p.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+		// TODO: check whether we can get more uses of alloc=false
+		buf = p.buf[p.index:end]
+		p.index += nb
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, p.buf[p.index:])
+	p.index += nb
+	return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+	buf, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return
+	}
+	return string(buf), nil
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves.  The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+	Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+	XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+	pb.Reset()
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+	enc, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// StartGroup tag is already consumed. This function consumes
+// EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+	b := p.buf[p.index:]
+	x, y := findEndGroup(b)
+	if x < 0 {
+		return io.ErrUnexpectedEOF
+	}
+	err := Unmarshal(b[:x], pb)
+	p.index += y
+	return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb.  If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(newUnmarshaler); ok {
+		err := u.XXX_Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent
+		// about whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		err := u.Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+
+	// Slow workaround for messages that aren't Unmarshalers.
+	// This includes some hand-coded .pb.go files and
+	// bootstrap protos.
+	// TODO: fix all of those and then add Unmarshal to
+	// the Message interface. Then:
+	// The cast above and code below can be deleted.
+	// The old unmarshaler can be deleted.
+	// Clients can call Unmarshal directly (can already do that, actually).
+	var info InternalMessageInfo
+	err := info.Unmarshal(pb, p.buf[p.index:])
+	p.index = len(p.buf)
+	return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 0000000..35b882c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 0000000..dea2617
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+type generatedDiscarder interface {
+	XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+	if m, ok := m.(generatedDiscarder); ok {
+		m.XXX_DiscardUnknown()
+		return
+	}
+	// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+	// but the master branch has no implementation for InternalMessageInfo,
+	// so it would be more work to replicate that approach.
+	discardLegacy(m)
+}
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+	di := atomicLoadDiscardInfo(&a.discard)
+	if di == nil {
+		di = getDiscardInfo(reflect.TypeOf(m).Elem())
+		atomicStoreDiscardInfo(&a.discard, di)
+	}
+	di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []discardFieldInfo
+	unrecognized field
+}
+
+type discardFieldInfo struct {
+	field   field // Offset of field, guaranteed to be valid
+	discard func(src pointer)
+}
+
+var (
+	discardInfoMap  = map[reflect.Type]*discardInfo{}
+	discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+	discardInfoLock.Lock()
+	defer discardInfoLock.Unlock()
+	di := discardInfoMap[t]
+	if di == nil {
+		di = &discardInfo{typ: t}
+		discardInfoMap[t] = di
+	}
+	return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&di.initialized) == 0 {
+		di.computeDiscardInfo()
+	}
+
+	for _, fi := range di.fields {
+		sfp := src.offset(fi.field)
+		fi.discard(sfp)
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+		// Ignore lock since DiscardUnknown is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				DiscardUnknown(m)
+			}
+		}
+	}
+
+	if di.unrecognized.IsValid() {
+		*src.offset(di.unrecognized).toBytes() = nil
+	}
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+	di.lock.Lock()
+	defer di.lock.Unlock()
+	if di.initialized != 0 {
+		return
+	}
+	t := di.typ
+	n := t.NumField()
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		dfi := discardFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+			case isSlice: // E.g., []*pb.T
+				di := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sps := src.getPointerSlice()
+					for _, sp := range sps {
+						if !sp.isNil() {
+							di.discard(sp)
+						}
+					}
+				}
+			default: // E.g., *pb.T
+				di := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						di.discard(sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+			default: // E.g., map[K]V
+				if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+					dfi.discard = func(src pointer) {
+						sm := src.asPointerTo(tf).Elem()
+						if sm.Len() == 0 {
+							return
+						}
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							DiscardUnknown(val.Interface().(Message))
+						}
+					}
+				} else {
+					dfi.discard = func(pointer) {} // Noop
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name))
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				dfi.discard = func(src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							DiscardUnknown(sv.Interface().(Message))
+						}
+					}
+				}
+			}
+		default:
+			continue
+		}
+		di.fields = append(di.fields, dfi)
+	}
+
+	di.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		di.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+	v := reflect.ValueOf(m)
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return
+	}
+	t := v.Type()
+
+	for i := 0; i < v.NumField(); i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		vf := v.Field(i)
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+			case isSlice: // E.g., []*pb.T
+				for j := 0; j < vf.Len(); j++ {
+					discardLegacy(vf.Index(j).Interface().(Message))
+				}
+			default: // E.g., *pb.T
+				discardLegacy(vf.Interface().(Message))
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+			default: // E.g., map[K]V
+				tv := vf.Type().Elem()
+				if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+					for _, key := range vf.MapKeys() {
+						val := vf.MapIndex(key)
+						discardLegacy(val.Interface().(Message))
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
+			default: // E.g., test_proto.isCommunique_Union interface
+				if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+					vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+					if !vf.IsNil() {
+						vf = vf.Elem()   // E.g., test_proto.Communique_Msg
+						vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+						if vf.Kind() == reflect.Ptr {
+							discardLegacy(vf.Interface().(Message))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+		if vf.Type() != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		vf.Set(reflect.ValueOf([]byte(nil)))
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(m); err == nil {
+		// Ignore lock since discardLegacy is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				discardLegacy(m)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..3abfed2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,203 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"errors"
+	"reflect"
+)
+
+var (
+	// errRepeatedHasNil is the error returned if Marshal is called with
+	// a struct with a repeated field containing a nil element.
+	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+	// errOneofHasNil is the error returned if Marshal is called with
+	// a struct with a oneof field containing a nil element.
+	errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+	// ErrNil is the error returned if Marshal is called with nil.
+	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// ErrTooLarge is the error returned if Marshal is called with a
+	// message that encodes to >2GB.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+	var buf [maxVarintBytes]byte
+	var n int
+	for n = 0; x > 127; n++ {
+		buf[n] = 0x80 | uint8(x&0x7F)
+		x >>= 7
+	}
+	buf[n] = uint8(x)
+	n++
+	return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+	for x >= 1<<7 {
+		p.buf = append(p.buf, uint8(x&0x7f|0x80))
+		x >>= 7
+	}
+	p.buf = append(p.buf, uint8(x))
+	return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+	switch {
+	case x < 1<<7:
+		return 1
+	case x < 1<<14:
+		return 2
+	case x < 1<<21:
+		return 3
+	case x < 1<<28:
+		return 4
+	case x < 1<<35:
+		return 5
+	case x < 1<<42:
+		return 6
+	case x < 1<<49:
+		return 7
+	case x < 1<<56:
+		return 8
+	case x < 1<<63:
+		return 9
+	}
+	return 10
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24),
+		uint8(x>>32),
+		uint8(x>>40),
+		uint8(x>>48),
+		uint8(x>>56))
+	return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24))
+	return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+	p.EncodeVarint(uint64(len(b)))
+	p.buf = append(p.buf, b...)
+	return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+	p.EncodeVarint(uint64(len(s)))
+	p.buf = append(p.buf, s...)
+	return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+	Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+	siz := Size(pb)
+	p.EncodeVarint(uint64(siz))
+	return p.Marshal(pb)
+}
+
+// isNil reports whether v is nil. Only interface, map, pointer, and slice values can be nil.
+func isNil(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return v.IsNil()
+	}
+	return false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..f9b6e41
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,301 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+	"bytes"
+	"log"
+	"reflect"
+	"strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+  - Two messages are equal iff they are the same type,
+    corresponding fields are equal, unknown field sets
+    are equal, and extensions sets are equal.
+  - Two set scalar fields are equal iff their values are equal.
+    If the fields are of a floating-point type, remember that
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
+  - Two repeated fields are equal iff their lengths are the same,
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
+  - Two unset fields are equal.
+  - Two unknown field sets are equal if their current
+    encoded state is equal.
+  - Two extension sets are equal iff they have corresponding
+    elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
+  - Every other combination of things are not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+	if a == nil || b == nil {
+		return a == b
+	}
+	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if v1.Kind() == reflect.Ptr {
+		if v1.IsNil() {
+			return v2.IsNil()
+		}
+		if v2.IsNil() {
+			return false
+		}
+		v1, v2 = v1.Elem(), v2.Elem()
+	}
+	if v1.Kind() != reflect.Struct {
+		return false
+	}
+	return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+	sprop := GetProperties(v1.Type())
+	for i := 0; i < v1.NumField(); i++ {
+		f := v1.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		f1, f2 := v1.Field(i), v2.Field(i)
+		if f.Type.Kind() == reflect.Ptr {
+			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+				// both unset
+				continue
+			} else if n1 != n2 {
+				// set/unset mismatch
+				return false
+			}
+			f1, f2 = f1.Elem(), f2.Elem()
+		}
+		if !equalAny(f1, f2, sprop.Prop[i]) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_InternalExtensions")
+		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_extensions")
+		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+			return false
+		}
+	}
+
+	uf := v1.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return true
+	}
+
+	u1 := uf.Bytes()
+	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+	return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+	if v1.Type() == protoMessageType {
+		m1, _ := v1.Interface().(Message)
+		m2, _ := v2.Interface().(Message)
+		return Equal(m1, m2)
+	}
+	switch v1.Kind() {
+	case reflect.Bool:
+		return v1.Bool() == v2.Bool()
+	case reflect.Float32, reflect.Float64:
+		return v1.Float() == v2.Float()
+	case reflect.Int32, reflect.Int64:
+		return v1.Int() == v2.Int()
+	case reflect.Interface:
+		// Probably a oneof field; compare the inner values.
+		n1, n2 := v1.IsNil(), v2.IsNil()
+		if n1 || n2 {
+			return n1 == n2
+		}
+		e1, e2 := v1.Elem(), v2.Elem()
+		if e1.Type() != e2.Type() {
+			return false
+		}
+		return equalAny(e1, e2, nil)
+	case reflect.Map:
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for _, key := range v1.MapKeys() {
+			val2 := v2.MapIndex(key)
+			if !val2.IsValid() {
+				// This key was not found in the second map.
+				return false
+			}
+			if !equalAny(v1.MapIndex(key), val2, nil) {
+				return false
+			}
+		}
+		return true
+	case reflect.Ptr:
+		// Maps may have nil values in them, so check for nil.
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		if v1.IsNil() != v2.IsNil() {
+			return false
+		}
+		return equalAny(v1.Elem(), v2.Elem(), prop)
+	case reflect.Slice:
+		if v1.Type().Elem().Kind() == reflect.Uint8 {
+			// short circuit: []byte
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value.
+			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+				return true
+			}
+			if v1.IsNil() != v2.IsNil() {
+				return false
+			}
+			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+		}
+
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !equalAny(v1.Index(i), v2.Index(i), prop) {
+				return false
+			}
+		}
+		return true
+	case reflect.String:
+		return v1.Interface().(string) == v2.Interface().(string)
+	case reflect.Struct:
+		return equalStruct(v1, v2)
+	case reflect.Uint32, reflect.Uint64:
+		return v1.Uint() == v2.Uint()
+	}
+
+	// unknown type, so not a protocol buffer
+	log.Printf("proto: don't know how to compare %v", v1)
+	return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+	em1, _ := x1.extensionsRead()
+	em2, _ := x2.extensionsRead()
+	return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+	if len(em1) != len(em2) {
+		return false
+	}
+
+	for extNum, e1 := range em1 {
+		e2, ok := em2[extNum]
+		if !ok {
+			return false
+		}
+
+		m1 := extensionAsLegacyType(e1.value)
+		m2 := extensionAsLegacyType(e2.value)
+
+		if m1 == nil && m2 == nil {
+			// Both have only encoded form.
+			if bytes.Equal(e1.enc, e2.enc) {
+				continue
+			}
+			// The bytes are different, but the extensions might still be
+			// equal. We need to decode them to compare.
+		}
+
+		if m1 != nil && m2 != nil {
+			// Both are unencoded.
+			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+				return false
+			}
+			continue
+		}
+
+		// At least one is encoded. To do a semantically correct comparison
+		// we need to unmarshal them first.
+		var desc *ExtensionDesc
+		if m := extensionMaps[base]; m != nil {
+			desc = m[extNum]
+		}
+		if desc == nil {
+			// If both have only encoded form and the bytes are the same,
+			// it is handled above. We get here when the bytes are different.
+			// We don't know how to decode it, so just compare them as byte
+			// slices.
+			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+			return false
+		}
+		var err error
+		if m1 == nil {
+			m1, err = decodeExtension(e1.enc, desc)
+		}
+		if m2 == nil && err == nil {
+			m2, err = decodeExtension(e2.enc, desc)
+		}
+		if err != nil {
+			// The encoded form is invalid.
+			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+			return false
+		}
+		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..fa88add
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,607 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+	Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	extensionsWrite() map[int32]Extension
+	extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+	extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+	return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock()   {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+	switch p := p.(type) {
+	case extendableProto:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return p, nil
+	case extendableProtoV1:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return extensionAdapter{p}, nil
+	}
+	// Don't allocate a specific error containing %T:
+	// this is the hot path for Clone and MarshalText.
+	return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+	v := reflect.ValueOf(x)
+	return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+	// The struct must be indirect so that if a user inadvertently copies a
+	// generated message and its embedded XXX_InternalExtensions, they
+	// avoid the mayhem of a copied mutex.
+	//
+	// The mutex serializes all logically read-only operations to p.extensionMap.
+	// It is up to the client to ensure that write operations to p.extensionMap are
+	// mutually exclusive with other accesses.
+	p *struct {
+		mu           sync.Mutex
+		extensionMap map[int32]Extension
+	}
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+	if e.p == nil {
+		e.p = new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		})
+		e.p.extensionMap = make(map[int32]Extension)
+	}
+	return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use.  It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+	if e.p == nil {
+		return nil, nil
+	}
+	return e.p.extensionMap, &e.p.mu
+}
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+	ExtendedType  Message     // nil pointer to the type that is being extended
+	ExtensionType interface{} // nil pointer to the extension type
+	Field         int32       // field number
+	Name          string      // fully-qualified name of extension, for text formatting
+	Tag           string      // protobuf tag style
+	Filename      string      // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+	t := reflect.TypeOf(ed.ExtensionType)
+	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+	// When an extension is stored in a message using SetExtension
+	// only desc and value are set. When the message is marshaled
+	// enc will be set to the encoded form of the message.
+	//
+	// When a message is unmarshaled and contains extensions, each
+	// extension will have only enc set. When such an extension is
+	// accessed using GetExtension (or GetExtensions) desc and value
+	// will be set.
+	desc *ExtensionDesc
+
+	// value is a concrete value for the extension field. Let the type of
+	// desc.ExtensionType be the "API type" and the type of Extension.value
+	// be the "storage type". The API type and storage type are the same except:
+	//	* For scalars (except []byte), the API type uses *T,
+	//	while the storage type uses T.
+	//	* For repeated fields, the API type uses []T, while the storage type
+	//	uses *[]T.
+	//
+	// The reason for the divergence is so that the storage type more naturally
+	// matches what is expected when retrieving the values through the
+	// protobuf reflection APIs.
+	//
+	// The value may only be populated if desc is also populated.
+	value interface{}
+
+	// enc is the raw bytes for the extension field.
+	enc []byte
+}
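+
+// As a concrete illustration of the API/storage split described above:
+//
+//	proto declaration        API type   storage type
+//	optional int32 f = 1;    *int32     int32
+//	optional bytes f = 2;    []byte     []byte
+//	repeated int64 f = 3;    []int64    *[]int64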
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+	epb, err := extendable(base)
+	if err != nil {
+		return
+	}
+	extmap := epb.extensionsWrite()
+	extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+	for _, er := range pb.ExtensionRangeArray() {
+		if er.Start <= field && field <= er.End {
+			return true
+		}
+	}
+	return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+	var pbi interface{} = pb
+	// Check the extended type.
+	if ea, ok := pbi.(extensionAdapter); ok {
+		pbi = ea.extendableProtoV1
+	}
+	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+		return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+	}
+	// Check the range.
+	if !isExtensionField(pb, extension.Field) {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+	base  reflect.Type
+	field int32
+}
+
+var extProp = struct {
+	sync.RWMutex
+	m map[extPropKey]*Properties
+}{
+	m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+	extProp.RLock()
+	if prop, ok := extProp.m[key]; ok {
+		extProp.RUnlock()
+		return prop
+	}
+	extProp.RUnlock()
+
+	extProp.Lock()
+	defer extProp.Unlock()
+	// Check again.
+	if prop, ok := extProp.m[key]; ok {
+		return prop
+	}
+
+	prop := new(Properties)
+	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+	extProp.m[key] = prop
+	return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+	// TODO: Check types, field numbers, etc.?
+	epb, err := extendable(pb)
+	if err != nil {
+		return false
+	}
+	extmap, mu := epb.extensionsRead()
+	if extmap == nil {
+		return false
+	}
+	mu.Lock()
+	_, ok := extmap[extension.Field]
+	mu.Unlock()
+	return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	// TODO: Check types, field numbers, etc.?
+	extmap := epb.extensionsWrite()
+	delete(extmap, extension.Field)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+
+	if extension.ExtendedType != nil {
+		// can only check type if this is a complete descriptor
+		if err := checkExtensionTypes(epb, extension); err != nil {
+			return nil, err
+		}
+	}
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return defaultExtensionValue(extension)
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	e, ok := emap[extension.Field]
+	if !ok {
+		// defaultExtensionValue returns the default value or
+		// ErrMissingExtension if there is no default.
+		return defaultExtensionValue(extension)
+	}
+
+	if e.value != nil {
+		// Already decoded. Check the descriptor, though.
+		if e.desc != extension {
+			// This shouldn't happen. If it does, it means that
+			// GetExtension was called twice with two different
+			// descriptors with the same field number.
+			return nil, errors.New("proto: descriptor conflict")
+		}
+		return extensionAsLegacyType(e.value), nil
+	}
+
+	if extension.ExtensionType == nil {
+		// incomplete descriptor
+		return e.enc, nil
+	}
+
+	v, err := decodeExtension(e.enc, extension)
+	if err != nil {
+		return nil, err
+	}
+
+	// Remember the decoded version and drop the encoded version.
+	// That way it is safe to mutate what we return.
+	e.value = extensionAsStorageType(v)
+	e.desc = extension
+	e.enc = nil
+	emap[extension.Field] = e
+	return extensionAsLegacyType(e.value), nil
+}
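+
+// A sketch of a full round trip through the extension accessors in this file
+// (pb.E_Ext and pb.MyMessage are hypothetical generated names):
+//
+//	msg := &pb.MyMessage{}
+//	if err := proto.SetExtension(msg, pb.E_Ext, proto.Int32(7)); err != nil {
+//		log.Fatal(err)
+//	}
+//	if proto.HasExtension(msg, pb.E_Ext) {
+//		v, err := proto.GetExtension(msg, pb.E_Ext)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		n := *v.(*int32) // the API type for an optional int32 extension is *int32
+//		_ = n
+//	}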
+
+// defaultExtensionValue returns the default value for extension.
+// If no default is defined for the extension, ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+	if extension.ExtensionType == nil {
+		// incomplete descriptor, so no default
+		return nil, ErrMissingExtension
+	}
+
+	t := reflect.TypeOf(extension.ExtensionType)
+	props := extensionProperties(extension)
+
+	sf, _, err := fieldDefault(t, props)
+	if err != nil {
+		return nil, err
+	}
+
+	if sf == nil || sf.value == nil {
+		// There is no default value.
+		return nil, ErrMissingExtension
+	}
+
+	if t.Kind() != reflect.Ptr {
+		// We do not need to return a Ptr, we can directly return sf.value.
+		return sf.value, nil
+	}
+
+	// We need to return an interface{} that is a pointer to sf.value.
+	value := reflect.New(t).Elem()
+	value.Set(reflect.New(value.Type().Elem()))
+	if sf.kind == reflect.Int32 {
+		// We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non int32 reflect.value directly
+		// set it as a int32.
+		value.Elem().SetInt(int64(sf.value.(int32)))
+	} else {
+		value.Elem().Set(reflect.ValueOf(sf.value))
+	}
+	return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+	t := reflect.TypeOf(extension.ExtensionType)
+	unmarshal := typeUnmarshaler(t, extension.Tag)
+
+	// t is a pointer to a struct, pointer to basic type or a slice.
+	// Allocate space to store the pointer/slice.
+	value := reflect.New(t).Elem()
+
+	var err error
+	for {
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		wire := int(x) & 7
+
+		b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(b) == 0 {
+			break
+		}
+	}
+	return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	extensions = make([]interface{}, len(es))
+	for i, e := range es {
+		extensions[i], err = GetExtension(epb, e)
+		if err == ErrMissingExtension {
+			err = nil
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+	epb, err := extendable(pb)
+	if err != nil {
+		return err
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
+		return err
+	}
+	typ := reflect.TypeOf(extension.ExtensionType)
+	if typ != reflect.TypeOf(value) {
+		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
+	}
+	// nil extension values need to be caught early, because the
+	// encoder can't distinguish an ErrNil due to a nil extension
+	// from an ErrNil due to a missing field. Extensions are
+	// always optional, so the encoder would just swallow the error
+	// and drop all the extensions from the encoded message.
+	if reflect.ValueOf(value).IsNil() {
+		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+	}
+
+	extmap := epb.extensionsWrite()
+	extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)}
+	return nil
+}
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	m := epb.extensionsWrite()
+	for k := range m {
+		delete(m, k)
+	}
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+	st := reflect.TypeOf(desc.ExtendedType).Elem()
+	m := extensionMaps[st]
+	if m == nil {
+		m = make(map[int32]*ExtensionDesc)
+		extensionMaps[st] = m
+	}
+	if _, ok := m[desc.Field]; ok {
+		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+	}
+	m[desc.Field] = desc
+}
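+
+// Schematically, generated code for an extension produces a descriptor and
+// registers it from an init function (all names here hypothetical):
+//
+//	var E_Ext = &proto.ExtensionDesc{
+//		ExtendedType:  (*MyMessage)(nil),
+//		ExtensionType: (*int32)(nil),
+//		Field:         100,
+//		Name:          "example.ext",
+//		Tag:           "varint,100,opt,name=ext",
+//		Filename:      "example.proto",
+//	}
+//
+//	func init() { proto.RegisterExtension(E_Ext) }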
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+	return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
+
+// extensionAsLegacyType converts a value in the storage type to the API type.
+// See Extension.value.
+func extensionAsLegacyType(v interface{}) interface{} {
+	switch rv := reflect.ValueOf(v); rv.Kind() {
+	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+		// Represent primitive types as a pointer to the value.
+		rv2 := reflect.New(rv.Type())
+		rv2.Elem().Set(rv)
+		v = rv2.Interface()
+	case reflect.Ptr:
+		// Represent slice types as the value itself.
+		switch rv.Type().Elem().Kind() {
+		case reflect.Slice:
+			if rv.IsNil() {
+				v = reflect.Zero(rv.Type().Elem()).Interface()
+			} else {
+				v = rv.Elem().Interface()
+			}
+		}
+	}
+	return v
+}
+
+// extensionAsStorageType converts a value in the API type to the storage type.
+// See Extension.value.
+func extensionAsStorageType(v interface{}) interface{} {
+	switch rv := reflect.ValueOf(v); rv.Kind() {
+	case reflect.Ptr:
+		// Represent pointer-to-scalar types as the value itself.
+		switch rv.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+			if rv.IsNil() {
+				v = reflect.Zero(rv.Type().Elem()).Interface()
+			} else {
+				v = rv.Elem().Interface()
+			}
+		}
+	case reflect.Slice:
+		// Represent slice types as a pointer to the value.
+		if rv.Type().Elem().Kind() != reflect.Uint8 {
+			rv2 := reflect.New(rv.Type())
+			rv2.Elem().Set(rv)
+			v = rv2.Interface()
+		}
+	}
+	return v
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..fdd328b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,965 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers.  It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from snake_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+	them as structure fields.
+  - There are getters that return a field's value if set,
+	and return the field's default value if unset.
+	The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+	All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+	That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+	msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+	have them.  They have the form Default_StructName_FieldName.
+	Because the getter methods handle defaulted values,
+	direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+	Enum values are prefixed by the enclosing message's name, or by the
+	enum's type name if it is a top-level enum. Enum types have a String
+	method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+	the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+	followed by an underscore-delimited list of the nested messages
+	that contain it (if any) followed by the CamelCased name of the
+	extension field itself.  HasExtension, ClearExtension, GetExtension
+	and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+	with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+	package example;
+
+	enum FOO { X = 17; }
+
+	message Test {
+	  required string label = 1;
+	  optional int32 type = 2 [default=77];
+	  repeated int64 reps = 3;
+	  optional group OptionalGroup = 4 {
+	    required string RequiredField = 5;
+	  }
+	  oneof union {
+	    int32 number = 6;
+	    string name = 7;
+	  }
+	}
+
+The resulting file, test.pb.go, is:
+
+	package example
+
+	import proto "github.com/golang/protobuf/proto"
+	import math "math"
+
+	type FOO int32
+	const (
+		FOO_X FOO = 17
+	)
+	var FOO_name = map[int32]string{
+		17: "X",
+	}
+	var FOO_value = map[string]int32{
+		"X": 17,
+	}
+
+	func (x FOO) Enum() *FOO {
+		p := new(FOO)
+		*p = x
+		return p
+	}
+	func (x FOO) String() string {
+		return proto.EnumName(FOO_name, int32(x))
+	}
+	func (x *FOO) UnmarshalJSON(data []byte) error {
+		value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+		if err != nil {
+			return err
+		}
+		*x = FOO(value)
+		return nil
+	}
+
+	type Test struct {
+		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+		// Types that are valid to be assigned to Union:
+		//	*Test_Number
+		//	*Test_Name
+		Union            isTest_Union `protobuf_oneof:"union"`
+		XXX_unrecognized []byte       `json:"-"`
+	}
+	func (m *Test) Reset()         { *m = Test{} }
+	func (m *Test) String() string { return proto.CompactTextString(m) }
+	func (*Test) ProtoMessage() {}
+
+	type isTest_Union interface {
+		isTest_Union()
+	}
+
+	type Test_Number struct {
+		Number int32 `protobuf:"varint,6,opt,name=number"`
+	}
+	type Test_Name struct {
+		Name string `protobuf:"bytes,7,opt,name=name"`
+	}
+
+	func (*Test_Number) isTest_Union() {}
+	func (*Test_Name) isTest_Union()   {}
+
+	func (m *Test) GetUnion() isTest_Union {
+		if m != nil {
+			return m.Union
+		}
+		return nil
+	}
+	const Default_Test_Type int32 = 77
+
+	func (m *Test) GetLabel() string {
+		if m != nil && m.Label != nil {
+			return *m.Label
+		}
+		return ""
+	}
+
+	func (m *Test) GetType() int32 {
+		if m != nil && m.Type != nil {
+			return *m.Type
+		}
+		return Default_Test_Type
+	}
+
+	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+		if m != nil {
+			return m.Optionalgroup
+		}
+		return nil
+	}
+
+	type Test_OptionalGroup struct {
+		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+	}
+	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
+	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+	func (m *Test_OptionalGroup) GetRequiredField() string {
+		if m != nil && m.RequiredField != nil {
+			return *m.RequiredField
+		}
+		return ""
+	}
+
+	func (m *Test) GetNumber() int32 {
+		if x, ok := m.GetUnion().(*Test_Number); ok {
+			return x.Number
+		}
+		return 0
+	}
+
+	func (m *Test) GetName() string {
+		if x, ok := m.GetUnion().(*Test_Name); ok {
+			return x.Name
+		}
+		return ""
+	}
+
+	func init() {
+		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+	}
+
+To create and play with a Test object:
+
+	package main
+
+	import (
+		"log"
+
+		"github.com/golang/protobuf/proto"
+		pb "./example.pb"
+	)
+
+	func main() {
+		test := &pb.Test{
+			Label: proto.String("hello"),
+			Type:  proto.Int32(17),
+			Reps:  []int64{1, 2, 3},
+			Optionalgroup: &pb.Test_OptionalGroup{
+				RequiredField: proto.String("good bye"),
+			},
+			Union: &pb.Test_Name{"fred"},
+		}
+		data, err := proto.Marshal(test)
+		if err != nil {
+			log.Fatal("marshaling error: ", err)
+		}
+		newTest := &pb.Test{}
+		err = proto.Unmarshal(data, newTest)
+		if err != nil {
+			log.Fatal("unmarshaling error: ", err)
+		}
+		// Now test and newTest contain the same data.
+		if test.GetLabel() != newTest.GetLabel() {
+			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+		}
+		// Use a type switch to determine which oneof was set.
+		switch u := test.Union.(type) {
+		case *pb.Test_Number: // u.Number contains the number.
+		case *pb.Test_Name: // u.Name contains the string.
+		}
+		// etc.
+	}
+*/
+package proto
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+	if e.field == "" {
+		return "proto: required field not set"
+	}
+	return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+	return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+	if e.field == "" {
+		return "proto: invalid UTF-8 detected"
+	}
+	return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+	return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+		return true
+	}
+	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+		return true
+	}
+	return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether the error was absorbed:
+// it returns true if err is nil or non-fatal, and false for fatal errors.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+	if err == nil {
+		return true // not an error
+	}
+	if !isNonFatal(err) {
+		return false // fatal error
+	}
+	if nf.E == nil {
+		nf.E = err // store first instance of non-fatal error
+	}
+	return true
+}
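+
+// A sketch of the intended call pattern inside a marshaler, where
+// marshalField stands in for the per-field work:
+//
+//	var nf nonFatal
+//	for _, f := range fields {
+//		if err := marshalField(f); !nf.Merge(err) {
+//			return err // fatal error: abort immediately
+//		}
+//	}
+//	return nf.E // nil, or the first non-fatal error encountered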
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+	Reset()
+	String() string
+	ProtoMessage()
+}
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers.  It may be reused between invocations to
+// reduce memory usage.  It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf   []byte // encode/decode byte stream
+	index int    // read point
+
+	deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographic order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
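+
+// For example, to request deterministic output (a sketch; Marshal on *Buffer
+// is defined in this package's encoder):
+//
+//	var b proto.Buffer
+//	b.SetDeterministic(true)
+//	if err := b.Marshal(msg); err != nil {
+//		log.Fatal(err)
+//	}
+//	data := b.Bytes()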
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+	return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name.  Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
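+
+// Both JSON forms decode through the same map; using the FOO enum from the
+// package example (X = 17):
+//
+//	v, _ := proto.UnmarshalJSONEnum(FOO_value, []byte(`"X"`), "FOO") // v == 17
+//	v, _ = proto.UnmarshalJSONEnum(FOO_value, []byte("17"), "FOO")   // v == 17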
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+	var u uint64
+
+	obuf := p.buf
+	index := p.index
+	p.buf = b
+	p.index = 0
+	depth := 0
+
+	fmt.Printf("\n--- %s ---\n", s)
+
+out:
+	for {
+		for i := 0; i < depth; i++ {
+			fmt.Print("  ")
+		}
+
+		index := p.index
+		if index == len(p.buf) {
+			break
+		}
+
+		op, err := p.DecodeVarint()
+		if err != nil {
+			fmt.Printf("%3d: fetching op err %v\n", index, err)
+			break out
+		}
+		tag := op >> 3
+		wire := op & 7
+
+		switch wire {
+		default:
+			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+				index, tag, wire)
+			break out
+
+		case WireBytes:
+			var r []byte
+
+			r, err = p.DecodeRawBytes(false)
+			if err != nil {
+				break out
+			}
+			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+			if len(r) <= 6 {
+				for i := 0; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			} else {
+				for i := 0; i < 3; i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+				fmt.Printf(" ..")
+				for i := len(r) - 3; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			}
+			fmt.Printf("\n")
+
+		case WireFixed32:
+			u, err = p.DecodeFixed32()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+		case WireFixed64:
+			u, err = p.DecodeFixed64()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+		case WireVarint:
+			u, err = p.DecodeVarint()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+		case WireStartGroup:
+			fmt.Printf("%3d: t=%3d start\n", index, tag)
+			depth++
+
+		case WireEndGroup:
+			depth--
+			fmt.Printf("%3d: t=%3d end\n", index, tag)
+		}
+	}
+
+	if depth != 0 {
+		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+	}
+	fmt.Printf("\n")
+
+	p.buf = obuf
+	p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+	setDefaults(reflect.ValueOf(pb), true, false)
+}
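+
+// For example, using the Test message from the package comment, whose type
+// field declares default=77:
+//
+//	t := &pb.Test{Label: proto.String("hello")}
+//	proto.SetDefaults(t)
+//	// *t.Type == 77; t.Label is left untouched.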
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+	v = v.Elem()
+
+	defaultMu.RLock()
+	dm, ok := defaults[v.Type()]
+	defaultMu.RUnlock()
+	if !ok {
+		dm = buildDefaultMessage(v.Type())
+		defaultMu.Lock()
+		defaults[v.Type()] = dm
+		defaultMu.Unlock()
+	}
+
+	for _, sf := range dm.scalars {
+		f := v.Field(sf.index)
+		if !f.IsNil() {
+			// field already set
+			continue
+		}
+		dv := sf.value
+		if dv == nil && !zeros {
+			// no explicit default, and don't want to set zeros
+			continue
+		}
+		fptr := f.Addr().Interface() // **T
+		// TODO: Consider batching the allocations we do here.
+		switch sf.kind {
+		case reflect.Bool:
+			b := new(bool)
+			if dv != nil {
+				*b = dv.(bool)
+			}
+			*(fptr.(**bool)) = b
+		case reflect.Float32:
+			f := new(float32)
+			if dv != nil {
+				*f = dv.(float32)
+			}
+			*(fptr.(**float32)) = f
+		case reflect.Float64:
+			f := new(float64)
+			if dv != nil {
+				*f = dv.(float64)
+			}
+			*(fptr.(**float64)) = f
+		case reflect.Int32:
+			// might be an enum
+			if ft := f.Type(); ft != int32PtrType {
+				// enum
+				f.Set(reflect.New(ft.Elem()))
+				if dv != nil {
+					f.Elem().SetInt(int64(dv.(int32)))
+				}
+			} else {
+				// int32 field
+				i := new(int32)
+				if dv != nil {
+					*i = dv.(int32)
+				}
+				*(fptr.(**int32)) = i
+			}
+		case reflect.Int64:
+			i := new(int64)
+			if dv != nil {
+				*i = dv.(int64)
+			}
+			*(fptr.(**int64)) = i
+		case reflect.String:
+			s := new(string)
+			if dv != nil {
+				*s = dv.(string)
+			}
+			*(fptr.(**string)) = s
+		case reflect.Uint8:
+			// exceptional case: []byte
+			var b []byte
+			if dv != nil {
+				db := dv.([]byte)
+				b = make([]byte, len(db))
+				copy(b, db)
+			} else {
+				b = []byte{}
+			}
+			*(fptr.(*[]byte)) = b
+		case reflect.Uint32:
+			u := new(uint32)
+			if dv != nil {
+				*u = dv.(uint32)
+			}
+			*(fptr.(**uint32)) = u
+		case reflect.Uint64:
+			u := new(uint64)
+			if dv != nil {
+				*u = dv.(uint64)
+			}
+			*(fptr.(**uint64)) = u
+		default:
+			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+		}
+	}
+
+	for _, ni := range dm.nested {
+		f := v.Field(ni)
+		// f is *T or []*T or map[T]*T
+		switch f.Kind() {
+		case reflect.Ptr:
+			if f.IsNil() {
+				continue
+			}
+			setDefaults(f, recur, zeros)
+
+		case reflect.Slice:
+			for i := 0; i < f.Len(); i++ {
+				e := f.Index(i)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+
+		case reflect.Map:
+			for _, k := range f.MapKeys() {
+				e := f.MapIndex(k)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+		}
+	}
+}
+
+var (
+	// defaults maps a protocol buffer struct type to its defaultMessage:
+	// the scalar fields with proto-declared defaults and the indices of
+	// fields holding nested messages.
+	defaultMu sync.RWMutex
+	defaults  = make(map[reflect.Type]defaultMessage)
+
+	int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+	scalars []scalarField
+	nested  []int // struct field index of nested messages
+}
+
+type scalarField struct {
+	index int          // struct field index
+	kind  reflect.Kind // element type (the T in *T or []T)
+	value interface{}  // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+	sprop := GetProperties(t)
+	for _, prop := range sprop.Prop {
+		fi, ok := sprop.decoderTags.get(prop.Tag)
+		if !ok {
+			// XXX_unrecognized
+			continue
+		}
+		ft := t.Field(fi).Type
+
+		sf, nested, err := fieldDefault(ft, prop)
+		switch {
+		case err != nil:
+			log.Print(err)
+		case nested:
+			dm.nested = append(dm.nested, fi)
+		case sf != nil:
+			sf.index = fi
+			dm.scalars = append(dm.scalars, *sf)
+		}
+	}
+
+	return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+	var canHaveDefault bool
+	switch ft.Kind() {
+	case reflect.Ptr:
+		if ft.Elem().Kind() == reflect.Struct {
+			nestedMessage = true
+		} else {
+			canHaveDefault = true // proto2 scalar field
+		}
+
+	case reflect.Slice:
+		switch ft.Elem().Kind() {
+		case reflect.Ptr:
+			nestedMessage = true // repeated message
+		case reflect.Uint8:
+			canHaveDefault = true // bytes field
+		}
+
+	case reflect.Map:
+		if ft.Elem().Kind() == reflect.Ptr {
+			nestedMessage = true // map with message values
+		}
+	}
+
+	if !canHaveDefault {
+		if nestedMessage {
+			return nil, true, nil
+		}
+		return nil, false, nil
+	}
+
+	// We now know that ft is a pointer or slice.
+	sf = &scalarField{kind: ft.Elem().Kind()}
+
+	// scalar fields without defaults
+	if !prop.HasDefault {
+		return sf, false, nil
+	}
+
+	// a scalar field: either *T or []byte
+	switch ft.Elem().Kind() {
+	case reflect.Bool:
+		x, err := strconv.ParseBool(prop.Default)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Float32:
+		x, err := strconv.ParseFloat(prop.Default, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+		}
+		sf.value = float32(x)
+	case reflect.Float64:
+		x, err := strconv.ParseFloat(prop.Default, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Int32:
+		x, err := strconv.ParseInt(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+		}
+		sf.value = int32(x)
+	case reflect.Int64:
+		x, err := strconv.ParseInt(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.String:
+		sf.value = prop.Default
+	case reflect.Uint8:
+		// []byte (not *uint8)
+		sf.value = []byte(prop.Default)
+	case reflect.Uint32:
+		x, err := strconv.ParseUint(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+		}
+		sf.value = uint32(x)
+	case reflect.Uint64:
+		x, err := strconv.ParseUint(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	default:
+		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+	}
+
+	return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+	s := mapKeySorter{vs: vs}
+
+	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+	if len(vs) == 0 {
+		return s
+	}
+	switch vs[0].Kind() {
+	case reflect.Int32, reflect.Int64:
+		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+	case reflect.Uint32, reflect.Uint64:
+		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	case reflect.Bool:
+		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+	case reflect.String:
+		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+	default:
+		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+	}
+
+	return s
+}
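+
+// A sketch of how the deterministic encoder uses this (v is a reflect.Value
+// of map kind):
+//
+//	keys := v.MapKeys()
+//	sort.Sort(mapKeys(keys))
+//	for _, k := range keys {
+//		// encode v.MapIndex(k) ...
+//	}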
+
+type mapKeySorter struct {
+	vs   []reflect.Value
+	less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int      { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+	return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
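+
+// For example, isProto3Zero(reflect.ValueOf(int32(0))) and
+// isProto3Zero(reflect.ValueOf("")) report true, while
+// isProto3Zero(reflect.ValueOf(int32(1))) reports false.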
+
+const (
+	// ProtoPackageIsVersion3 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the
+	// proto package.
+	ProtoPackageIsVersion3 = true
+
+	// ProtoPackageIsVersion2 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the
+	// proto package.
+	ProtoPackageIsVersion2 = true
+
+	// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the
+	// proto package.
+	ProtoPackageIsVersion1 = true
+)
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..f48a756
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,181 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+	"errors"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+	MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return nil
+	}
+	id := mti.MessageTypeId()
+	for _, item := range ms.Item {
+		if *item.TypeId == id {
+			return item
+		}
+	}
+	return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+	return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+	if item := ms.find(pb); item != nil {
+		return Unmarshal(item.Message, pb)
+	}
+	if _, ok := pb.(messageTypeIder); !ok {
+		return errNoMessageTypeID
+	}
+	return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+	msg, err := Marshal(pb)
+	if err != nil {
+		return err
+	}
+	if item := ms.find(pb); item != nil {
+		// reuse existing item
+		item.Message = msg
+		return nil
+	}
+
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return errNoMessageTypeID
+	}
+
+	mtid := mti.MessageTypeId()
+	ms.Item = append(ms.Item, &_MessageSet_Item{
+		TypeId:  &mtid,
+		Message: msg,
+	})
+	return nil
+}
+
+func (ms *messageSet) Reset()         { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage()     {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+	i := 0
+	for ; buf[i]&0x80 != 0; i++ {
+	}
+	return buf[i+1:]
+}
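+
+// For example, skipVarint([]byte{0x96, 0x01, 0x08}) returns []byte{0x08}:
+// 0x96 has the continuation bit set and 0x01 does not, so two bytes are skipped.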
+
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
+	ms := new(messageSet)
+	if err := Unmarshal(buf, ms); err != nil {
+		return err
+	}
+	for _, item := range ms.Item {
+		id := *item.TypeId
+		msg := item.Message
+
+		// Restore wire type and field number varint, plus length varint.
+		// Be careful to preserve duplicate items.
+		b := EncodeVarint(uint64(id)<<3 | WireBytes)
+		if ext, ok := m[id]; ok {
+			// Existing data; rip off the tag and length varint
+			// so we join the new data correctly.
+			// We can assume that ext.enc is set because we are unmarshaling.
+			o := ext.enc[len(b):]   // skip wire type and field number
+			_, n := DecodeVarint(o) // calculate length of length varint
+			o = o[n:]               // skip length varint
+			msg = append(o, msg...) // join old data and new data
+		}
+		b = append(b, EncodeVarint(uint64(len(msg)))...)
+		b = append(b, msg...)
+
+		m[id] = Extension{enc: b}
+	}
+	return nil
+}
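+
+// For instance, an item with TypeId 3 and a 5-byte Message is re-encoded as
+// the tag byte 0x1a (3<<3 | WireBytes), a length byte 0x05, and then the five
+// message bytes: exactly the standard encoding of field 3 with wire type 2.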
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..94fa919
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,360 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+	"reflect"
+	"sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+	v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) pointer {
+	v := reflect.ValueOf(*i)
+	u := reflect.New(v.Type())
+	u.Elem().Set(v)
+	if deref {
+		u = u.Elem()
+	}
+	return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer.  v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+	return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+	n, m := s.Len(), s.Cap()
+	if n < m {
+		s.SetLen(n + 1)
+	} else {
+		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+	}
+	return s.Index(n)
+}
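+
+// For example, appending to a []int64 via reflection (a sketch):
+//
+//	s := []int64{1, 2}
+//	v := reflect.ValueOf(&s).Elem() // an addressable slice value
+//	grow(v).SetInt(3)               // s is now []int64{1, 2, 3}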
+
+func (p pointer) toInt64() *int64 {
+	return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+	return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return p.v.Interface().(**int32)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return p.v.Interface().(*[]int32)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().(*int32)
+	}
+	// an enum
+	return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	// Allocate value in a *int32. Possibly convert that to a *enum.
+	// Then assign it to a **int32 or **enum.
+	// Note: we can convert *int32 to *enum, but we can't convert
+	// **int32 to **enum!
+	p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().([]int32)
+	}
+	// an enum
+	// Allocate a []int32, then assign []enum's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := p.v.Elem()
+	s := make([]int32, slice.Len())
+	for i := 0; i < slice.Len(); i++ {
+		s[i] = int32(slice.Index(i).Int())
+	}
+	return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		p.v.Elem().Set(reflect.ValueOf(v))
+		return
+	}
+	// an enum
+	// Allocate a []enum, then assign []int32's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+	for i, x := range v {
+		slice.Index(i).SetInt(int64(x))
+	}
+	p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+	grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64 {
+	return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+	return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+	return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+	return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+	return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+	return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+	return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+	return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+	return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+	return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+	return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+	p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+	grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+	if p.v.IsNil() {
+		return nil
+	}
+	n := p.v.Elem().Len()
+	s := make([]pointer, n)
+	for i := 0; i < n; i++ {
+		s[i] = pointer{v: p.v.Elem().Index(i)}
+	}
+	return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	if v == nil {
+		p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+		return
+	}
+	s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+	for _, p := range v {
+		s = reflect.Append(s, p.v)
+	}
+	p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed to by p.
+func (p pointer) getInterfacePointer() pointer {
+	if p.v.Elem().IsNil() {
+		return pointer{v: p.v.Elem()}
+	}
+	return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	// TODO: check that p.v.Type().Elem() == t?
+	return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+
+var atomicLock sync.Mutex
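+
+// For illustration: the unsafe.Pointer-based sync/atomic calls used in
+// pointer_unsafe.go are unavailable in this reflect-only build, so the
+// helpers above serialize every load and store through one global mutex.
+// A caller observes the same semantics either way:
+//
+//	var cached *marshalInfo
+//	atomicStoreMarshalInfo(&cached, u)   // publish u
+//	u2 := atomicLoadMarshalInfo(&cached) // always sees a complete pointer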
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..dbfffe0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,313 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+	"reflect"
+	"sync/atomic"
+	"unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+	return f != invalidField
+}
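+
+// For illustration: given a (hypothetical) struct
+//
+//	type example struct {
+//		A int64
+//		B int32
+//	}
+//
+// toField on B's reflect.StructField yields field(8) on 64-bit platforms,
+// i.e. B's byte offset from the start of the struct.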
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+	p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	// Saves ~25ns over the equivalent:
+	// return valToPointer(reflect.ValueOf(*i))
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) {
+	// Super-tricky - read or get the address of data word of interface value.
+	if isptr {
+		// The interface is of pointer type, thus it is a direct interface.
+		// The data word is the pointer data itself. We take its address.
+		p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+	} else {
+		// The interface is not of pointer type. The data word is the pointer
+		// to the data.
+		p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+	}
+	if deref {
+		p.p = *(*unsafe.Pointer)(p.p)
+	}
+	return p
+}
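+
+// For illustration: on the gc toolchain an interface value occupies two
+// words, a type pointer and a data pointer, which is why the code above
+// views it as a [2]unsafe.Pointer and reads (or takes the address of)
+// element [1], the data word.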
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	// For safety, we should panic if !f.IsValid; however, calling panic
+	// prevents this method from being inlined, which is a serious
+	// performance cost.
+	/*
+		if !f.IsValid() {
+			panic("invalid field")
+		}
+	*/
+	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+	return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+	return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+	return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return (**int32)(p.p)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return (*[]int32)(p.p)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	*(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+	return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+	*(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+	s := (*[]int32)(p.p)
+	*s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+	return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+	return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+	return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+	return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+	return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+	return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+	return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+	return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+	return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+	return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We load it as []pointer.
+	return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We store it as []pointer.
+	*(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+	return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+	*(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+	s := (*[]unsafe.Pointer)(p.p)
+	*s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed to by p.
+func (p pointer) getInterfacePointer() pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..79668ff
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,545 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"fmt"
+	"log"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+	WireVarint     = 0
+	WireFixed64    = 1
+	WireBytes      = 2
+	WireStartGroup = 3
+	WireEndGroup   = 4
+	WireFixed32    = 5
+)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+	fastTags []int
+	slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+	if t > 0 && t < tagMapFastLimit {
+		if t >= len(p.fastTags) {
+			return 0, false
+		}
+		fi := p.fastTags[t]
+		return fi, fi >= 0
+	}
+	fi, ok := p.slowTags[t]
+	return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+	if t > 0 && t < tagMapFastLimit {
+		for len(p.fastTags) < t+1 {
+			p.fastTags = append(p.fastTags, -1)
+		}
+		p.fastTags[t] = fi
+		return
+	}
+	if p.slowTags == nil {
+		p.slowTags = make(map[int]int)
+	}
+	p.slowTags[t] = fi
+}
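+
+// For illustration:
+//
+//	var m tagMap
+//	m.put(3, 0)        // tag 3 < tagMapFastLimit: stored in m.fastTags[3]
+//	m.put(100000, 1)   // large tag: stored in m.slowTags
+//	fi, ok := m.get(3) // fi == 0, ok == true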
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+	Prop             []*Properties  // properties for each field
+	reqCount         int            // required count
+	decoderTags      tagMap         // map from proto tag to struct field number
+	decoderOrigNames map[string]int // map from original name to struct field number
+	order            []int          // list of struct field numbers in tag order
+
+	// OneofTypes contains information about the oneof fields in this message.
+	// It is keyed by the original name of a field.
+	OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+	Type  reflect.Type // pointer to generated struct type for this oneof field
+	Field int          // struct field number of the containing oneof in the message
+	Prop  *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	JSONName string // name to use for JSON; determined by protoc
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field
+	oneof    bool   // whether this is a oneof field
+
+	Default    string // default value
+	HasDefault bool   // whether an explicit default was provided
+
+	stype reflect.Type      // set for struct types only
+	sprop *StructProperties // set for struct types only
+
+	mtype      reflect.Type // set for map types only
+	MapKeyProp *Properties  // set for map types only
+	MapValProp *Properties  // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
+	if len(fields) < 2 {
+		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+		return
+	}
+
+	p.Wire = fields[0]
+	switch p.Wire {
+	case "varint":
+		p.WireType = WireVarint
+	case "fixed32":
+		p.WireType = WireFixed32
+	case "fixed64":
+		p.WireType = WireFixed64
+	case "zigzag32":
+		p.WireType = WireVarint
+	case "zigzag64":
+		p.WireType = WireVarint
+	case "bytes", "group":
+		p.WireType = WireBytes
+		// no numeric converter for non-numeric types
+	default:
+		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+		return
+	}
+
+	var err error
+	p.Tag, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return
+	}
+
+outer:
+	for i := 2; i < len(fields); i++ {
+		f := fields[i]
+		switch {
+		case f == "req":
+			p.Required = true
+		case f == "opt":
+			p.Optional = true
+		case f == "rep":
+			p.Repeated = true
+		case f == "packed":
+			p.Packed = true
+		case strings.HasPrefix(f, "name="):
+			p.OrigName = f[5:]
+		case strings.HasPrefix(f, "json="):
+			p.JSONName = f[5:]
+		case strings.HasPrefix(f, "enum="):
+			p.Enum = f[5:]
+		case f == "proto3":
+			p.proto3 = true
+		case f == "oneof":
+			p.oneof = true
+		case strings.HasPrefix(f, "def="):
+			p.HasDefault = true
+			p.Default = f[4:] // rest of string
+			if i+1 < len(fields) {
+				// Commas aren't escaped, and def is always last.
+				p.Default += "," + strings.Join(fields[i+1:], ",")
+				break outer
+			}
+		}
+	}
+}
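+
+// For illustration, parsing the (hypothetical) tag
+//
+//	"bytes,12,opt,name=field_name,json=fieldName,proto3"
+//
+// sets Wire="bytes", WireType=WireBytes, Tag=12, Optional=true,
+// OrigName="field_name", JSONName="fieldName", and proto3=true.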
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+	switch t1 := typ; t1.Kind() {
+	case reflect.Ptr:
+		if t1.Elem().Kind() == reflect.Struct {
+			p.stype = t1.Elem()
+		}
+
+	case reflect.Slice:
+		if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct {
+			p.stype = t2.Elem()
+		}
+
+	case reflect.Map:
+		p.mtype = t1
+		p.MapKeyProp = &Properties{}
+		p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+		p.MapValProp = &Properties{}
+		vtype := p.mtype.Elem()
+		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+			// The value type is not a message (*T) or bytes ([]byte),
+			// so we need encoders for the pointer to this type.
+			vtype = reflect.PtrTo(vtype)
+		}
+		p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+	}
+
+	if p.stype != nil {
+		if lockGetProp {
+			p.sprop = GetProperties(p.stype)
+		} else {
+			p.sprop = getPropertiesLocked(p.stype)
+		}
+	}
+}
+
+var (
+	marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+	p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+	// "bytes,49,opt,def=hello!"
+	p.Name = name
+	p.OrigName = name
+	if tag == "" {
+		return
+	}
+	p.Parse(tag)
+	p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+	propertiesMu  sync.RWMutex
+	propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+	if t.Kind() != reflect.Struct {
+		panic("proto: type must have kind struct")
+	}
+
+	// Most calls to GetProperties in a long-running program will be
+	// retrieving details for types we have seen before.
+	propertiesMu.RLock()
+	sprop, ok := propertiesMap[t]
+	propertiesMu.RUnlock()
+	if ok {
+		return sprop
+	}
+
+	propertiesMu.Lock()
+	sprop = getPropertiesLocked(t)
+	propertiesMu.Unlock()
+	return sprop
+}
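+
+// For illustration: GetProperties is a read-mostly cache. The fast path
+// takes only the RLock; the slow path takes the Lock, and
+// getPropertiesLocked re-checks the map, so two racing callers cannot
+// build properties for the same type twice.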
+
+type (
+	oneofFuncsIface interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	oneofWrappersIface interface {
+		XXX_OneofWrappers() []interface{}
+	}
+)
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+	if prop, ok := propertiesMap[t]; ok {
+		return prop
+	}
+
+	prop := new(StructProperties)
+	// in case of recursive protos, fill this in now.
+	propertiesMap[t] = prop
+
+	// build properties
+	prop.Prop = make([]*Properties, t.NumField())
+	prop.order = make([]int, t.NumField())
+
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		p := new(Properties)
+		name := f.Name
+		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+		oneof := f.Tag.Get("protobuf_oneof") // special case
+		if oneof != "" {
+			// Oneof fields don't use the traditional protobuf tag.
+			p.OrigName = oneof
+		}
+		prop.Prop[i] = p
+		prop.order[i] = i
+		if debug {
+			print(i, " ", f.Name, " ", t.String(), " ")
+			if p.Tag > 0 {
+				print(p.String())
+			}
+			print("\n")
+		}
+	}
+
+	// Re-order prop.order.
+	sort.Sort(prop)
+
+	var oots []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oots = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oots = m.XXX_OneofWrappers()
+	}
+	if len(oots) > 0 {
+		// Interpret oneof metadata.
+		prop.OneofTypes = make(map[string]*OneofProperties)
+		for _, oot := range oots {
+			oop := &OneofProperties{
+				Type: reflect.ValueOf(oot).Type(), // *T
+				Prop: new(Properties),
+			}
+			sft := oop.Type.Elem().Field(0)
+			oop.Prop.Name = sft.Name
+			oop.Prop.Parse(sft.Tag.Get("protobuf"))
+			// There will be exactly one interface field that
+			// this new value is assignable to.
+			for i := 0; i < t.NumField(); i++ {
+				f := t.Field(i)
+				if f.Type.Kind() != reflect.Interface {
+					continue
+				}
+				if !oop.Type.AssignableTo(f.Type) {
+					continue
+				}
+				oop.Field = i
+				break
+			}
+			prop.OneofTypes[oop.Prop.OrigName] = oop
+		}
+	}
+
+	// build required counts
+	// build tags
+	reqCount := 0
+	prop.decoderOrigNames = make(map[string]int)
+	for i, p := range prop.Prop {
+		if strings.HasPrefix(p.Name, "XXX_") {
+			// Internal fields should not appear in tags/origNames maps.
+			// They are handled specially when encoding and decoding.
+			continue
+		}
+		if p.Required {
+			reqCount++
+		}
+		prop.decoderTags.put(p.Tag, i)
+		prop.decoderOrigNames[p.OrigName] = i
+	}
+	prop.reqCount = reqCount
+
+	return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+	if _, ok := enumValueMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumValueMaps[typeName] = valueMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+	return enumValueMaps[enumType]
+}
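+
+// For illustration, generated code for a (hypothetical) enum calls:
+//
+//	RegisterEnum("example.Color", nil, map[string]int32{"RED": 0, "GREEN": 1})
+//
+// after which EnumValueMap("example.Color")["GREEN"] == 1, and looking up
+// an unregistered name returns nil.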
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+	protoTypedNils = make(map[string]Message)      // a map from proto names to typed nil pointers
+	protoMapTypes  = make(map[string]reflect.Type) // a map from proto names to map types
+	revProtoTypes  = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+	if _, ok := protoTypedNils[name]; ok {
+		// TODO: Some day, make this a panic.
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+		// Generated code always calls RegisterType with nil x.
+		// This check is just for extra safety.
+		protoTypedNils[name] = x
+	} else {
+		protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+	}
+	revProtoTypes[t] = name
+}
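+
+// For illustration, generated code registers a (hypothetical) message as:
+//
+//	RegisterType((*ExampleMsg)(nil), "example.ExampleMsg")
+//
+// after which MessageName and MessageType recover the name<->type mapping
+// in both directions.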
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+	if reflect.TypeOf(x).Kind() != reflect.Map {
+		panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+	}
+	if _, ok := protoMapTypes[name]; ok {
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	protoMapTypes[name] = t
+	revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+	if t, ok := protoTypedNils[name]; ok {
+		return reflect.TypeOf(t)
+	}
+	return protoMapTypes[name]
+}
+
+// A registry of all linked proto files.
+var (
+	protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+	protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000..5cb11fa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go
@@ -0,0 +1,2776 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes
+// the size of the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire
+// format), marshals the field to the end of the slice, and returns the slice
+// and an error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+	deref     bool // dereference the pointer before operating on it; implies isptr
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns may not necessarily be initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It computes the size of encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from type of message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
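+
+// For illustration: the atomic load/store pair above turns a.marshal into a
+// lock-free once-cache. Two goroutines may race on the first call, but
+// getMarshalInfo returns the same *marshalInfo for a given type, so the
+// duplicate store is harmless.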
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the
+// message is not generated), fall back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, and returns the slice and an error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, map is marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b1, err := m.Marshal()
+		b = append(b, b1...)
+		return b, err
+	}
+
+	var err, errLater error
+	// The old marshaler encodes extensions at beginning.
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			b, err = u.appendMessageSet(b, e, deterministic)
+		} else {
+			b, err = u.appendExtensions(b, e, deterministic)
+		}
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		b, err = u.appendV1Extensions(b, m, deterministic)
+		if err != nil {
+			return b, err
+		}
+	}
+	for _, f := range u.fields {
+		if f.required {
+			if ptr.offset(f.field).getPointer().isNil() {
+				// Required field is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name}
+				}
+				continue
+			}
+		}
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+		if err != nil {
+			if err1, ok := err.(*RequiredNotSetError); ok {
+				// Required field in submessage is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name + "." + err1.field}
+				}
+				continue
+			}
+			if err == errRepeatedHasNil {
+				err = errors.New("proto: repeated field " + f.name + " has nil element")
+			}
+			if err == errInvalidUTF8 {
+				if errLater == nil {
+					fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+					errLater = &invalidUTF8Error{fullName}
+				}
+				continue
+			}
+			return b, err
+		}
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		b = append(b, s...)
+	}
+	return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+	u.Lock()
+	defer u.Unlock()
+	if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+		return
+	}
+
+	t := u.typ
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.v1extensions = invalidField
+	u.sizecache = invalidField
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if reflect.PtrTo(t).Implements(marshalerType) {
+		u.hasmarshaler = true
+		atomic.StoreInt32(&u.initialized, 1)
+		return
+	}
+
+	// get oneof implementers
+	var oneofImplementers []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oneofImplementers = m.XXX_OneofWrappers()
+	}
+
+	n := t.NumField()
+
+	// deal with XXX fields first
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		if !strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		switch f.Name {
+		case "XXX_sizecache":
+			u.sizecache = toField(&f)
+		case "XXX_unrecognized":
+			u.unrecognized = toField(&f)
+		case "XXX_InternalExtensions":
+			u.extensions = toField(&f)
+			u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+		case "XXX_extensions":
+			u.v1extensions = toField(&f)
+		case "XXX_NoUnkeyedLiteral":
+			// nothing to do
+		default:
+			panic("unknown XXX field: " + f.Name)
+		}
+		n--
+	}
+
+	// normal fields
+	fields := make([]marshalFieldInfo, n) // batch allocation
+	u.fields = make([]*marshalFieldInfo, 0, n)
+	for i, j := 0, 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		field := &fields[j]
+		j++
+		field.name = f.Name
+		u.fields = append(u.fields, field)
+		if f.Tag.Get("protobuf_oneof") != "" {
+			field.computeOneofFieldInfo(&f, oneofImplementers)
+			continue
+		}
+		if f.Tag.Get("protobuf") == "" {
+			// field has no tag (not in generated message), ignore it
+			u.fields = u.fields[:len(u.fields)-1]
+			j--
+			continue
+		}
+		field.computeMarshalFieldInfo(&f)
+	}
+
+	// fields are marshaled in tag order on the wire.
+	sort.Sort(byTag(u.fields))
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int           { return len(a) }
+func (a byTag) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+	// get from cache first
+	u.RLock()
+	e, ok := u.extElems[desc.Field]
+	u.RUnlock()
+	if ok {
+		return e
+	}
+
+	t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+	tags := strings.Split(desc.Tag, ",")
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct {
+		t = t.Elem()
+	}
+	sizer, marshaler := typeMarshaler(t, tags, false, false)
+	var deref bool
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		t = reflect.PtrTo(t)
+		deref = true
+	}
+	e = &marshalElemInfo{
+		wiretag:   uint64(tag)<<3 | wt,
+		tagsize:   SizeVarint(uint64(tag) << 3),
+		sizer:     sizer,
+		marshaler: marshaler,
+		isptr:     t.Kind() == reflect.Ptr,
+		deref:     deref,
+	}
+
+	// update cache
+	u.Lock()
+	if u.extElems == nil {
+		u.extElems = make(map[int32]*marshalElemInfo)
+	}
+	u.extElems[desc.Field] = e
+	u.Unlock()
+	return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+	// parse protobuf tag of the field.
+	// tag has format of "bytes,49,opt,name=foo,def=hello!"
+	tags := strings.Split(f.Tag.Get("protobuf"), ",")
+	if tags[0] == "" {
+		return
+	}
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	if tags[2] == "req" {
+		fi.required = true
+	}
+	fi.setTag(f, tag, wt)
+	fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+	fi.field = toField(f)
+	fi.wiretag = math.MaxInt32 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+	fi.isPointer = true
+	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+	ityp := f.Type // interface type
+	for _, o := range oneofImplementers {
+		t := reflect.TypeOf(o)
+		if !t.Implements(ityp) {
+			continue
+		}
+		sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+		tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+		tag, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("tag is not an integer")
+		}
+		wt := wiretype(tags[0])
+		sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+		fi.oneofElems[t.Elem()] = &marshalElemInfo{
+			wiretag:   uint64(tag)<<3 | wt,
+			tagsize:   SizeVarint(uint64(tag) << 3),
+			sizer:     sizer,
+			marshaler: marshaler,
+		}
+	}
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+	switch encoding {
+	case "fixed32":
+		return WireFixed32
+	case "fixed64":
+		return WireFixed64
+	case "varint", "zigzag32", "zigzag64":
+		return WireVarint
+	case "bytes":
+		return WireBytes
+	case "group":
+		return WireStartGroup
+	}
+	panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+	fi.field = toField(f)
+	fi.wiretag = uint64(tag)<<3 | wt
+	fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
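+
+// For illustration: a varint field with tag 3 gets
+// wiretag = 3<<3 | WireVarint = 0x18, which encodes in one varint byte,
+// so tagsize = 1; tag 16 gives 16<<3 = 128, which needs two varint bytes,
+// so tagsize = 2.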
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+	switch f.Type.Kind() {
+	case reflect.Map:
+		// map field
+		fi.isPointer = true
+		fi.sizer, fi.marshaler = makeMapMarshaler(f)
+		return
+	case reflect.Ptr, reflect.Slice:
+		fi.isPointer = true
+	}
+	fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, the zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+	encoding := tags[0]
+
+	pointer := false
+	slice := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	packed := false
+	proto3 := false
+	validateUTF8 := true
+	for i := 2; i < len(tags); i++ {
+		if tags[i] == "packed" {
+			packed = true
+		}
+		if tags[i] == "proto3" {
+			proto3 = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return sizeBoolPtr, appendBoolPtr
+		}
+		if slice {
+			if packed {
+				return sizeBoolPackedSlice, appendBoolPackedSlice
+			}
+			return sizeBoolSlice, appendBoolSlice
+		}
+		if nozero {
+			return sizeBoolValueNoZero, appendBoolValueNoZero
+		}
+		return sizeBoolValue, appendBoolValue
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixed32Ptr, appendFixed32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed32PackedSlice, appendFixed32PackedSlice
+				}
+				return sizeFixed32Slice, appendFixed32Slice
+			}
+			if nozero {
+				return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+			}
+			return sizeFixed32Value, appendFixed32Value
+		case "varint":
+			if pointer {
+				return sizeVarint32Ptr, appendVarint32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint32PackedSlice, appendVarint32PackedSlice
+				}
+				return sizeVarint32Slice, appendVarint32Slice
+			}
+			if nozero {
+				return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+			}
+			return sizeVarint32Value, appendVarint32Value
+		}
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixedS32Ptr, appendFixedS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+				}
+				return sizeFixedS32Slice, appendFixedS32Slice
+			}
+			if nozero {
+				return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+			}
+			return sizeFixedS32Value, appendFixedS32Value
+		case "varint":
+			if pointer {
+				return sizeVarintS32Ptr, appendVarintS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+				}
+				return sizeVarintS32Slice, appendVarintS32Slice
+			}
+			if nozero {
+				return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+			}
+			return sizeVarintS32Value, appendVarintS32Value
+		case "zigzag32":
+			if pointer {
+				return sizeZigzag32Ptr, appendZigzag32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+				}
+				return sizeZigzag32Slice, appendZigzag32Slice
+			}
+			if nozero {
+				return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+			}
+			return sizeZigzag32Value, appendZigzag32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixed64Ptr, appendFixed64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed64PackedSlice, appendFixed64PackedSlice
+				}
+				return sizeFixed64Slice, appendFixed64Slice
+			}
+			if nozero {
+				return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+			}
+			return sizeFixed64Value, appendFixed64Value
+		case "varint":
+			if pointer {
+				return sizeVarint64Ptr, appendVarint64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint64PackedSlice, appendVarint64PackedSlice
+				}
+				return sizeVarint64Slice, appendVarint64Slice
+			}
+			if nozero {
+				return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+			}
+			return sizeVarint64Value, appendVarint64Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixedS64Ptr, appendFixedS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+				}
+				return sizeFixedS64Slice, appendFixedS64Slice
+			}
+			if nozero {
+				return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+			}
+			return sizeFixedS64Value, appendFixedS64Value
+		case "varint":
+			if pointer {
+				return sizeVarintS64Ptr, appendVarintS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+				}
+				return sizeVarintS64Slice, appendVarintS64Slice
+			}
+			if nozero {
+				return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+			}
+			return sizeVarintS64Value, appendVarintS64Value
+		case "zigzag64":
+			if pointer {
+				return sizeZigzag64Ptr, appendZigzag64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+				}
+				return sizeZigzag64Slice, appendZigzag64Slice
+			}
+			if nozero {
+				return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+			}
+			return sizeZigzag64Value, appendZigzag64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return sizeFloat32Ptr, appendFloat32Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat32PackedSlice, appendFloat32PackedSlice
+			}
+			return sizeFloat32Slice, appendFloat32Slice
+		}
+		if nozero {
+			return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+		}
+		return sizeFloat32Value, appendFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return sizeFloat64Ptr, appendFloat64Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat64PackedSlice, appendFloat64PackedSlice
+			}
+			return sizeFloat64Slice, appendFloat64Slice
+		}
+		if nozero {
+			return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+		}
+		return sizeFloat64Value, appendFloat64Value
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return sizeStringPtr, appendUTF8StringPtr
+			}
+			if slice {
+				return sizeStringSlice, appendUTF8StringSlice
+			}
+			if nozero {
+				return sizeStringValueNoZero, appendUTF8StringValueNoZero
+			}
+			return sizeStringValue, appendUTF8StringValue
+		}
+		if pointer {
+			return sizeStringPtr, appendStringPtr
+		}
+		if slice {
+			return sizeStringSlice, appendStringSlice
+		}
+		if nozero {
+			return sizeStringValueNoZero, appendStringValueNoZero
+		}
+		return sizeStringValue, appendStringValue
+	case reflect.Slice:
+		if slice {
+			return sizeBytesSlice, appendBytesSlice
+		}
+		if oneof {
+			// A oneof bytes field may also carry the "proto3" tag.
+			// We want to marshal it as a oneof field, so do this
+			// check before the proto3 check.
+			return sizeBytesOneof, appendBytesOneof
+		}
+		if proto3 {
+			return sizeBytes3, appendBytes3
+		}
+		return sizeBytes, appendBytes
+	case reflect.Struct:
+		switch encoding {
+		case "group":
+			if slice {
+				return makeGroupSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeGroupMarshaler(getMarshalInfo(t))
+		case "bytes":
+			if slice {
+				return makeMessageSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeMessageMarshaler(getMarshalInfo(t))
+		}
+	}
+	panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
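+
+// For illustration: an optional proto2 field of Go type *int32 with
+// encoding "varint" enters typeMarshaler, has the indirection stripped
+// (pointer=true), lands in the reflect.Int32/"varint" case, and comes
+// back as (sizeVarintS32Ptr, appendVarintS32Ptr).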
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
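+
+// For illustration: a packed repeated fixed32 field with three elements and
+// a one-byte tag sizes to 4*3 bytes of payload, one length byte
+// (SizeVarint(12) == 1), and one tag byte: 14 bytes in total. The same
+// arithmetic pattern applies to the other packed sizers below.
+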
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v) + tagsize
+	}
+	return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
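+
+// The zigzag sizers below apply the standard protobuf zigzag mapping,
+// (v << 1) ^ (v >> 31) for 32-bit values (v >> 63 for 64-bit), which
+// folds signed values into small unsigned ones:
+// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, and so on.
+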
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+	}
+	return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+	}
+	return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+	return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toBool()
+	if !v {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return 0
+	}
+	return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	if v == "" {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toStringSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if v == nil {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
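+
+// The "3" variant above differs from sizeBytes in treating any empty
+// slice as absent (len(v) == 0 rather than v == nil), matching proto3
+// semantics, where empty bytes fields are not emitted.
+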
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBytesSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
+func appendFixed32(b []byte, v uint32) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24))
+	return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24),
+		byte(v>>32),
+		byte(v>>40),
+		byte(v>>48),
+		byte(v>>56))
+	return b
+}
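+
+// Both fixed-width encoders above emit little-endian bytes; for example,
+// appendFixed32(nil, 0x01020304) yields [0x04, 0x03, 0x02, 0x01].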
+
+// appendVarint appends an encoded varint to b.
+func appendVarint(b []byte, v uint64) []byte {
+	// TODO: make 1-byte (maybe 2-byte) case inline-able, once we
+	// have non-leaf inliner.
+	switch {
+	case v < 1<<7:
+		b = append(b, byte(v))
+	case v < 1<<14:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte(v>>7))
+	case v < 1<<21:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte(v>>14))
+	case v < 1<<28:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte(v>>21))
+	case v < 1<<35:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte(v>>28))
+	case v < 1<<42:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte(v>>35))
+	case v < 1<<49:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte(v>>42))
+	case v < 1<<56:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte(v>>49))
+	case v < 1<<63:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte(v>>56))
+	default:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte((v>>56)&0x7f|0x80),
+			1)
+	}
+	return b
+}
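+
+// Varints carry seven payload bits per byte, with the high bit set on
+// every byte except the last. For example, appendVarint(nil, 1) yields
+// [0x01], while appendVarint(nil, 300) yields [0xAC, 0x02]: the low
+// seven bits of 300 (0b0101100) plus the continuation bit give 0xAC,
+// and the remaining bits (300 >> 7 == 2) give 0x02.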
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, *p)
+	return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(*p))
+	return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, math.Float32bits(*p))
+	return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, *p)
+	return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(*p))
+	return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, math.Float64bits(*p))
+	return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, *p)
+	return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	b = appendVarint(b, wiretag)
+	if v {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	if !v {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = append(b, 1)
+	return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	if *p {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(len(s)))
+	for _, v := range s {
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		if !utf8.ValidString(v) {
+			invalidUTF8 = true
+		}
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
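+
+// Note that the UTF-8 variants above still append the offending string
+// before returning errInvalidUTF8, so callers that treat that error as
+// non-fatal still receive a complete encoding.
+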
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if v == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBytesSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			return u.size(p) + 2*tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			var err error
+			b = appendVarint(b, wiretag) // start group
+			b, err = u.marshal(b, p, deterministic)
+			b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+			return b, err
+		}
+}
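+
+// For example, a group on field number 1 is delimited by the start-group
+// tag byte 0x0B (1<<3 | WireStartGroup) and the end-group tag byte
+// 0x0C (1<<3 | WireEndGroup), with the group's fields encoded in between.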
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				n += u.size(v) + 2*tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag) // start group
+				b, err = u.marshal(b, v, deterministic)
+				b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.size(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(p)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, p, deterministic)
+		}
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				siz := u.size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag)
+				siz := u.cachedsize(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+	// figure out key and value type
+	t := f.Type
+	keyType := t.Key()
+	valType := t.Elem()
+	keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+	valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+	keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+	valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
+	keyWireTag := 1<<3 | wiretype(keyTags[0])
+	valWireTag := 2<<3 | wiretype(valTags[0])
+
+	// We create an interface to get the addresses of the map key and value.
+	// If the value is pointer-typed, the interface is a direct interface
+	// and the idata itself is the value. Otherwise, the idata is a pointer
+	// to the value.
+	// Key cannot be pointer-typed.
+	valIsPtr := valType.Kind() == reflect.Ptr
+
+	// If value is a message with nested maps, calling
+	// valSizer in marshal may be quadratic. We should use
+	// cached version in marshal (but not in size).
+	// If value is not message type, we don't have size cache,
+	// but it cannot be nested either. Just use valSizer.
+	valCachedSizer := valSizer
+	if valIsPtr && valType.Elem().Kind() == reflect.Struct {
+		u := getMarshalInfo(valType.Elem())
+		valCachedSizer = func(ptr pointer, tagsize int) int {
+			// Same as message sizer, but use cache.
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.cachedsize(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		}
+	}
+	return func(ptr pointer, tagsize int) int {
+			m := ptr.asPointerTo(t).Elem() // the map
+			n := 0
+			for _, k := range m.MapKeys() {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false, false)      // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr, false)   // pointer to value
+				siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+			m := ptr.asPointerTo(t).Elem() // the map
+			var err error
+			keys := m.MapKeys()
+			if len(keys) > 1 && deterministic {
+				sort.Sort(mapKeys(keys))
+			}
+
+			var nerr nonFatal
+			for _, k := range keys {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false, false)    // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value
+				b = appendVarint(b, tag)
+				siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				b = appendVarint(b, uint64(siz))
+				b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+				if !nerr.Merge(err) {
+					return b, err
+				}
+				b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+				if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
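+
+// Each map entry is encoded like an embedded message with the key on
+// field 1 and the value on field 2. As a sketch, a map[int32]string
+// entry 7:"hi" has the payload 0x08 0x07 (key) 0x12 0x02 'h' 'i'
+// (value), six bytes, preceded by the map field's tag and the length
+// varint 0x06.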
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+	// A oneof field is an interface, so we need to get the actual data type on the fly.
+	t := f.Type
+	return func(ptr pointer, _ int) int {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return 0
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			e := fi.oneofElems[telem]
+			return e.sizer(p, e.tagsize)
+		},
+		func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return b, nil
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+				return b, errOneofHasNil
+			}
+			e := fi.oneofElems[telem]
+			return e.marshaler(b, p, e.wiretag, deterministic)
+		}
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for _, e := range m {
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				b = append(b, e.enc...)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr, ei.deref)
+			b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	// Not sure this is required, but the old code does it.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// message set format is:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
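+//
+// Concretely, each Item is framed by the tag bytes 0x0B
+// (1<<3|WireStartGroup) and 0x0C (1<<3|WireEndGroup), with type_id
+// written under tag byte 0x10 (2<<3|WireVarint) and the message under
+// tag byte 0x1A (3<<3|WireBytes), as the functions below show.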
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for id, e := range m {
+		n += 2                          // start group, end group. tag = 1 (size=1)
+		n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			siz := len(msgWithLen)
+			n += siz + 1 // message, tag = 3 (size=1)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for id, e := range m {
+			b = append(b, 1<<3|WireStartGroup)
+			b = append(b, 2<<3|WireVarint)
+			b = appendVarint(b, uint64(id))
+
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+				b = append(b, 3<<3|WireBytes)
+				b = append(b, msgWithLen...)
+				b = append(b, 1<<3|WireEndGroup)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr, ei.deref)
+			b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+			b = append(b, 1<<3|WireEndGroup)
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, id := range keys {
+		e := m[int32(id)]
+		b = append(b, 1<<3|WireStartGroup)
+		b = append(b, 2<<3|WireVarint)
+		b = appendVarint(b, uint64(id))
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			b = append(b, 3<<3|WireBytes)
+			b = append(b, msgWithLen...)
+			b = append(b, 1<<3|WireEndGroup)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+		b = append(b, 1<<3|WireEndGroup)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+	if m == nil {
+		return 0
+	}
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+	if m == nil {
+		return b, nil
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	var err error
+	var nerr nonFatal
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr, ei.deref)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+	XXX_Size() int
+	XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+	if m, ok := pb.(newMarshaler); ok {
+		return m.XXX_Size()
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		b, _ := m.Marshal()
+		return len(b)
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return 0
+	}
+	var info InternalMessageInfo
+	return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		b := make([]byte, 0, siz)
+		return m.XXX_Marshal(b, false)
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		return m.Marshal()
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return nil, ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	b := make([]byte, 0, siz)
+	return info.Marshal(b, pb, false)
+}
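+
+// A typical call site, as a sketch (pb.MyMessage stands in for any
+// generated message type):
+//
+//	msg := &pb.MyMessage{Name: "x"}
+//	data, err := proto.Marshal(msg)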
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+	var err error
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		p.grow(siz) // make sure buf has enough capacity
+		p.buf, err = m.XXX_Marshal(p.buf, p.deterministic)
+		return err
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		b, err := m.Marshal()
+		p.buf = append(p.buf, b...)
+		return err
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	p.grow(siz) // make sure buf has enough capacity
+	p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+	return err
+}
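+
+// A sketch of Buffer-based marshaling, useful when reusing one buffer
+// across many messages or when deterministic output is required:
+//
+//	var buf proto.Buffer
+//	buf.SetDeterministic(true)
+//	if err := buf.Marshal(msg); err != nil {
+//		// handle error
+//	}
+//	data := buf.Bytes()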
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
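+//
+// For example, with len(p.buf) == 10 and cap(p.buf) == 16, grow(20)
+// needs room for 30 bytes: doubling the length gives only 20, so the
+// buffer is reallocated with capacity 30.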
+func (p *Buffer) grow(n int) {
+	need := len(p.buf) + n
+	if need <= cap(p.buf) {
+		return
+	}
+	newCap := len(p.buf) * 2
+	if newCap < need {
+		newCap = need
+	}
+	p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..5525def
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_merge.go
@@ -0,0 +1,654 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+	mi := atomicLoadMergeInfo(&a.merge)
+	if mi == nil {
+		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+		atomicStoreMergeInfo(&a.merge, mi)
+	}
+	mi.merge(toPointer(&dst), toPointer(&src))
+}
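+
+// As the per-kind merge functions below implement, scalar fields are
+// overwritten only when the source value is non-zero, slice fields are
+// appended, extensions are merged, and any XXX_unrecognized bytes are
+// copied from src to dst.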
+
+type mergeInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []mergeFieldInfo
+	unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+	field field // Offset of field, guaranteed to be valid
+
+	// isPointer reports whether the value in the field is a pointer.
+	// This is true for the following situations:
+	//	* Pointer to struct
+	//	* Pointer to basic type (proto2 only)
+	//	* Slice (first value in slice header is a pointer)
+	//	* String (first value in string header is a pointer)
+	isPointer bool
+
+	// basicWidth reports the width of the field assuming that it is directly
+	// embedded in the struct (as is the case for basic types in proto3).
+	// The possible values are:
+	// 	0: invalid
+	//	1: bool
+	//	4: int32, uint32, float32
+	//	8: int64, uint64, float64
+	basicWidth int
+
+	// Where dst and src are pointers to the types being merged.
+	merge func(dst, src pointer)
+}
+
+var (
+	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
+	mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+	mergeInfoLock.Lock()
+	defer mergeInfoLock.Unlock()
+	mi := mergeInfoMap[t]
+	if mi == nil {
+		mi = &mergeInfo{typ: t}
+		mergeInfoMap[t] = mi
+	}
+	return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+	if dst.isNil() {
+		panic("proto: nil destination")
+	}
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&mi.initialized) == 0 {
+		mi.computeMergeInfo()
+	}
+
+	for _, fi := range mi.fields {
+		sfp := src.offset(fi.field)
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+				continue
+			}
+			if fi.basicWidth > 0 {
+				switch {
+				case fi.basicWidth == 1 && !*sfp.toBool():
+					continue
+				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+					continue
+				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+					continue
+				}
+			}
+		}
+
+		dfp := dst.offset(fi.field)
+		fi.merge(dfp, sfp)
+	}
+
+	// TODO: Make this faster?
+	out := dst.asPointerTo(mi.typ).Elem()
+	in := src.asPointerTo(mi.typ).Elem()
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	if mi.unrecognized.IsValid() {
+		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+		}
+	}
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+	mi.lock.Lock()
+	defer mi.lock.Unlock()
+	if mi.initialized != 0 {
+		return
+	}
+	t := mi.typ
+	n := t.NumField()
+
+	props := GetProperties(t)
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		mfi := mergeFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			switch tf.Kind() {
+			case reflect.Ptr, reflect.Slice, reflect.String:
+				// As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+				// StringHeader is a data pointer.
+				mfi.isPointer = true
+			case reflect.Bool:
+				mfi.basicWidth = 1
+			case reflect.Int32, reflect.Uint32, reflect.Float32:
+				mfi.basicWidth = 4
+			case reflect.Int64, reflect.Uint64, reflect.Float64:
+				mfi.basicWidth = 8
+			}
+		}
+
+		// Unwrap tf to get at its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic("both pointer and slice for basic type in " + tf.Name())
+		}
+
+		switch tf.Kind() {
+		case reflect.Int32:
+			switch {
+			case isSlice: // E.g., []int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+					/*
+						sfsp := src.toInt32Slice()
+						if *sfsp != nil {
+							dfsp := dst.toInt32Slice()
+							*dfsp = append(*dfsp, *sfsp...)
+							if *dfsp == nil {
+								*dfsp = []int32{}
+							}
+						}
+					*/
+					sfs := src.getInt32Slice()
+					if sfs != nil {
+						dfs := dst.getInt32Slice()
+						dfs = append(dfs, sfs...)
+						if dfs == nil {
+							dfs = []int32{}
+						}
+						dst.setInt32Slice(dfs)
+					}
+				}
+			case isPointer: // E.g., *int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+					/*
+						sfpp := src.toInt32Ptr()
+						if *sfpp != nil {
+							dfpp := dst.toInt32Ptr()
+							if *dfpp == nil {
+								*dfpp = Int32(**sfpp)
+							} else {
+								**dfpp = **sfpp
+							}
+						}
+					*/
+					sfp := src.getInt32Ptr()
+					if sfp != nil {
+						dfp := dst.getInt32Ptr()
+						if dfp == nil {
+							dst.setInt32Ptr(*sfp)
+						} else {
+							*dfp = *sfp
+						}
+					}
+				}
+			default: // E.g., int32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt32(); v != 0 {
+						*dst.toInt32() = v
+					}
+				}
+			}
+		case reflect.Int64:
+			switch {
+			case isSlice: // E.g., []int64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toInt64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toInt64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []int64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *int64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toInt64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toInt64Ptr()
+						if *dfpp == nil {
+							*dfpp = Int64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., int64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt64(); v != 0 {
+						*dst.toInt64() = v
+					}
+				}
+			}
+		case reflect.Uint32:
+			switch {
+			case isSlice: // E.g., []uint32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint32Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint32(); v != 0 {
+						*dst.toUint32() = v
+					}
+				}
+			}
+		case reflect.Uint64:
+			switch {
+			case isSlice: // E.g., []uint64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint64Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint64(); v != 0 {
+						*dst.toUint64() = v
+					}
+				}
+			}
+		case reflect.Float32:
+			switch {
+			case isSlice: // E.g., []float32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat32Ptr()
+						if *dfpp == nil {
+							*dfpp = Float32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat32(); v != 0 {
+						*dst.toFloat32() = v
+					}
+				}
+			}
+		case reflect.Float64:
+			switch {
+			case isSlice: // E.g., []float64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat64Ptr()
+						if *dfpp == nil {
+							*dfpp = Float64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat64(); v != 0 {
+						*dst.toFloat64() = v
+					}
+				}
+			}
+		case reflect.Bool:
+			switch {
+			case isSlice: // E.g., []bool
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toBoolSlice()
+					if *sfsp != nil {
+						dfsp := dst.toBoolSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []bool{}
+						}
+					}
+				}
+			case isPointer: // E.g., *bool
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toBoolPtr()
+					if *sfpp != nil {
+						dfpp := dst.toBoolPtr()
+						if *dfpp == nil {
+							*dfpp = Bool(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., bool
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toBool(); v {
+						*dst.toBool() = v
+					}
+				}
+			}
+		case reflect.String:
+			switch {
+			case isSlice: // E.g., []string
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toStringSlice()
+					if *sfsp != nil {
+						dfsp := dst.toStringSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []string{}
+						}
+					}
+				}
+			case isPointer: // E.g., *string
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toStringPtr()
+					if *sfpp != nil {
+						dfpp := dst.toStringPtr()
+						if *dfpp == nil {
+							*dfpp = String(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., string
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toString(); v != "" {
+						*dst.toString() = v
+					}
+				}
+			}
+		case reflect.Slice:
+			isProto3 := props.Prop[i].proto3
+			switch {
+			case isPointer:
+				panic("bad pointer in byte slice case in " + tf.Name())
+			case tf.Elem().Kind() != reflect.Uint8:
+				panic("bad element kind in byte slice case in " + tf.Name())
+			case isSlice: // E.g., [][]byte
+				mfi.merge = func(dst, src pointer) {
+					sbsp := src.toBytesSlice()
+					if *sbsp != nil {
+						dbsp := dst.toBytesSlice()
+						for _, sb := range *sbsp {
+							if sb == nil {
+								*dbsp = append(*dbsp, nil)
+							} else {
+								*dbsp = append(*dbsp, append([]byte{}, sb...))
+							}
+						}
+						if *dbsp == nil {
+							*dbsp = [][]byte{}
+						}
+					}
+				}
+			default: // E.g., []byte
+				mfi.merge = func(dst, src pointer) {
+					sbp := src.toBytes()
+					if *sbp != nil {
+						dbp := dst.toBytes()
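+						// In proto3, an empty (but non-nil) []byte is
+						// indistinguishable from unset and must not
+						// overwrite dst; in proto2, presence is tracked,
+						// so even an empty slice is copied.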
+						if !isProto3 || len(*sbp) > 0 {
+							*dbp = append([]byte{}, *sbp...)
+						}
+					}
+				}
+			}
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("message field %s without pointer", tf))
+			case isSlice: // E.g., []*pb.T
+				mi := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sps := src.getPointerSlice()
+					if sps != nil {
+						dps := dst.getPointerSlice()
+						for _, sp := range sps {
+							var dp pointer
+							if !sp.isNil() {
+								dp = valToPointer(reflect.New(tf))
+								mi.merge(dp, sp)
+							}
+							dps = append(dps, dp)
+						}
+						if dps == nil {
+							dps = []pointer{}
+						}
+						dst.setPointerSlice(dps)
+					}
+				}
+			default: // E.g., *pb.T
+				mi := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						dp := dst.getPointer()
+						if dp.isNil() {
+							dp = valToPointer(reflect.New(tf))
+							dst.setPointer(dp)
+						}
+						mi.merge(dp, sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in map case in " + tf.Name())
+			default: // E.g., map[K]V
+				mfi.merge = func(dst, src pointer) {
+					sm := src.asPointerTo(tf).Elem()
+					if sm.Len() == 0 {
+						return
+					}
+					dm := dst.asPointerTo(tf).Elem()
+					if dm.IsNil() {
+						dm.Set(reflect.MakeMap(tf))
+					}
+
+					switch tf.Elem().Kind() {
+					case reflect.Ptr: // Proto struct (e.g., *T)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(Clone(val.Interface().(Message)))
+							dm.SetMapIndex(key, val)
+						}
+					case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+							dm.SetMapIndex(key, val)
+						}
+					default: // Basic type (e.g., string)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							dm.SetMapIndex(key, val)
+						}
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in interface case in " + tf.Name())
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				mfi.merge = func(dst, src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						du := dst.asPointerTo(tf).Elem()
+						typ := su.Elem().Type()
+						if du.IsNil() || du.Elem().Type() != typ {
+							du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+						}
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						dv := du.Elem().Elem().Field(0)
+						if dv.Kind() == reflect.Ptr && dv.IsNil() {
+							dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							Merge(dv.Interface().(Message), sv.Interface().(Message))
+						case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+							dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+						default: // Basic type (e.g., string)
+							dv.Set(sv)
+						}
+					}
+				}
+			}
+		default:
+			panic(fmt.Sprintf("merger not found for type:%s", tf))
+		}
+		mi.fields = append(mi.fields, mfi)
+	}
+
+	mi.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		mi.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&mi.initialized, 1)
+}
diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
new file mode 100644
index 0000000..acee2fc
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2053 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
+	extensionRanges []ExtensionRange              // if non-nil, implies extensions field is valid
+	isMessageSet    bool                          // if true, implies extensions field is valid
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top-level routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
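+		// E.g., a one-byte key 0x0a is tag 1 with wire type 2 (length-delimited).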
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around,
+		// either in an extension or in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
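+	// E.g., with three required fields reqMask must equal 0b111;
+	// a clear bit below identifies the field that was never seen.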
+	if reqMask != u.reqMask && errLater == nil {
+		// A required field of this message is missing.
+		for _, n := range u.reqFields {
+			if reqMask&1 == 0 {
+				errLater = &RequiredNotSetError{n}
+			}
+			reqMask >>= 1
+		}
+	}
+	return errLater
+}
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	if u.initialized != 0 {
+		return
+	}
+	t := u.typ
+	n := t.NumField()
+
+	// Set up the "not found" value for the unrecognized byte buffer.
+	// This is the default for proto3.
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.oldExtensions = invalidField
+
+	// List of the generated type and offset for each oneof field.
+	type oneofField struct {
+		ityp  reflect.Type // interface type of oneof field
+		field field        // offset in containing message
+	}
+	var oneofFields []oneofField
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if f.Name == "XXX_unrecognized" {
+			// The byte slice used to hold unrecognized input is special.
+			if f.Type != reflect.TypeOf(([]byte)(nil)) {
+				panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+			}
+			u.unrecognized = toField(&f)
+			continue
+		}
+		if f.Name == "XXX_InternalExtensions" {
+			// Ditto here.
+			if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+				panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+			}
+			u.extensions = toField(&f)
+			if f.Tag.Get("protobuf_messageset") == "1" {
+				u.isMessageSet = true
+			}
+			continue
+		}
+		if f.Name == "XXX_extensions" {
+			// An older form of the extensions field.
+			if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) {
+				panic("bad type for XXX_extensions field: " + f.Type.Name())
+			}
+			u.oldExtensions = toField(&f)
+			continue
+		}
+		if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+			continue
+		}
+
+		oneof := f.Tag.Get("protobuf_oneof")
+		if oneof != "" {
+			oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+			// The rest of oneof processing happens below.
+			continue
+		}
+
+		tags := f.Tag.Get("protobuf")
+		tagArray := strings.Split(tags, ",")
+		if len(tagArray) < 2 {
+			panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+		}
+		tag, err := strconv.Atoi(tagArray[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tagArray[1])
+		}
+
+		name := ""
+		for _, tag := range tagArray[3:] {
+			if strings.HasPrefix(tag, "name=") {
+				name = tag[5:]
+			}
+		}
+
+		// Extract unmarshaling function from the field (its type and tags).
+		unmarshal := fieldUnmarshaler(&f)
+
+		// Required field?
+		var reqMask uint64
+		if tagArray[2] == "req" {
+			bit := len(u.reqFields)
+			u.reqFields = append(u.reqFields, name)
+			reqMask = uint64(1) << uint(bit)
+			// TODO: if we have more than 64 required fields, we end up
+			// not verifying that all required fields are present.
+			// Fix this, perhaps using a count of required fields?
+		}
+
+		// Store the info in the correct slot in the message.
+		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+	}
+
+	// Find any types associated with oneof fields.
+	var oneofImplementers []interface{}
+	switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+	case oneofFuncsIface:
+		_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+	case oneofWrappersIface:
+		oneofImplementers = m.XXX_OneofWrappers()
+	}
+	for _, v := range oneofImplementers {
+		tptr := reflect.TypeOf(v) // *Msg_X
+		typ := tptr.Elem()        // Msg_X
+
+		f := typ.Field(0) // oneof implementers have one field
+		baseUnmarshal := fieldUnmarshaler(&f)
+		tags := strings.Split(f.Tag.Get("protobuf"), ",")
+		fieldNum, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tags[1])
+		}
+		var name string
+		for _, tag := range tags {
+			if strings.HasPrefix(tag, "name=") {
+				name = strings.TrimPrefix(tag, "name=")
+				break
+			}
+		}
+
+		// Find the oneof field that this struct implements.
+		// Might take O(n^2) to process all of the oneofs, but who cares.
+		for _, of := range oneofFields {
+			if tptr.Implements(of.ityp) {
+				// We have found the corresponding interface for this struct.
+				// That lets us know where this struct should be stored
+				// when we encounter it during unmarshaling.
+				unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+				u.setTag(fieldNum, of.field, unmarshal, 0, name)
+			}
+		}
+
+	}
+
+	// Get extension ranges, if any.
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
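+// Small tags are stored in the dense slice for O(1) dispatch; larger tags
+// (e.g., extension field numbers) fall back to the sparse map.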
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	proto3 := false
+	validateUTF8 := true
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+		if tag == "proto3" {
+			proto3 = true
+		}
+	}
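+	// E.g., a generated struct tag `protobuf:"bytes,1,opt,name=f,proto3"`
+	// yields encoding "bytes", name "f", and proto3 == true here
+	// (field name and number are illustrative).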
+	validateUTF8 = validateUTF8 && proto3
+
+	// Figure out packaging (pointer, slice, or both)
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	// We'll never have both pointer and slice for basic types.
+	if pointer && slice && t.Kind() != reflect.Struct {
+		panic("both pointer and slice for basic type in " + t.Name())
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return unmarshalBoolPtr
+		}
+		if slice {
+			return unmarshalBoolSlice
+		}
+		return unmarshalBoolValue
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixedS32Ptr
+			}
+			if slice {
+				return unmarshalFixedS32Slice
+			}
+			return unmarshalFixedS32Value
+		case "varint":
+			// this could be int32 or enum
+			if pointer {
+				return unmarshalInt32Ptr
+			}
+			if slice {
+				return unmarshalInt32Slice
+			}
+			return unmarshalInt32Value
+		case "zigzag32":
+			if pointer {
+				return unmarshalSint32Ptr
+			}
+			if slice {
+				return unmarshalSint32Slice
+			}
+			return unmarshalSint32Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixedS64Ptr
+			}
+			if slice {
+				return unmarshalFixedS64Slice
+			}
+			return unmarshalFixedS64Value
+		case "varint":
+			if pointer {
+				return unmarshalInt64Ptr
+			}
+			if slice {
+				return unmarshalInt64Slice
+			}
+			return unmarshalInt64Value
+		case "zigzag64":
+			if pointer {
+				return unmarshalSint64Ptr
+			}
+			if slice {
+				return unmarshalSint64Slice
+			}
+			return unmarshalSint64Value
+		}
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixed32Ptr
+			}
+			if slice {
+				return unmarshalFixed32Slice
+			}
+			return unmarshalFixed32Value
+		case "varint":
+			if pointer {
+				return unmarshalUint32Ptr
+			}
+			if slice {
+				return unmarshalUint32Slice
+			}
+			return unmarshalUint32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixed64Ptr
+			}
+			if slice {
+				return unmarshalFixed64Slice
+			}
+			return unmarshalFixed64Value
+		case "varint":
+			if pointer {
+				return unmarshalUint64Ptr
+			}
+			if slice {
+				return unmarshalUint64Slice
+			}
+			return unmarshalUint64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return unmarshalFloat32Ptr
+		}
+		if slice {
+			return unmarshalFloat32Slice
+		}
+		return unmarshalFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return unmarshalFloat64Ptr
+		}
+		if slice {
+			return unmarshalFloat64Slice
+		}
+		return unmarshalFloat64Value
+	case reflect.Map:
+		panic("map type in typeUnmarshaler in " + t.Name())
+	case reflect.Slice:
+		if pointer {
+			panic("bad pointer in slice case in " + t.Name())
+		}
+		if slice {
+			return unmarshalBytesSlice
+		}
+		return unmarshalBytesValue
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return unmarshalUTF8StringPtr
+			}
+			if slice {
+				return unmarshalUTF8StringSlice
+			}
+			return unmarshalUTF8StringValue
+		}
+		if pointer {
+			return unmarshalStringPtr
+		}
+		if slice {
+			return unmarshalStringSlice
+		}
+		return unmarshalStringValue
+	case reflect.Struct:
+		// message or group field
+		if !pointer {
+			panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding))
+		}
+		switch encoding {
+		case "bytes":
+			if slice {
+				return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+		case "group":
+			if slice {
+				return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+		}
+	}
+	panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64() = v
+	return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x)
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
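+	// Zigzag decoding: 0, 1, 2, 3, 4 decode to 0, -1, 1, -2, 2.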
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64() = v
+	return b, nil
+}
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x>>1) ^ int64(x)<<63>>63
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64() = v
+	return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint64(x)
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x)
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x>>1) ^ int32(x)<<31>>31
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32() = v
+	return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint32(x)
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
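+	// Little-endian load: b[0] is the least significant byte.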
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64() = v
+	return b[8:], nil
+}
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64() = v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	*f.toInt32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.setInt32Ptr(v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+			f.appendInt32Slice(v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.appendInt32Slice(v)
+	return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	// Note: any length varint is allowed, even though any sane
+	// encoder will use one byte.
+	// See https://github.com/golang/protobuf/issues/76
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// TODO: check if x>1? Tests seem to indicate no.
+	v := x != 0
+	*f.toBool() = v
+	return b[n:], nil
+}
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	*f.toBoolPtr() = &v
+	return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := x != 0
+			s := f.toBoolSlice()
+			*s = append(*s, v)
+			b = b[n:]
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	s := f.toBoolSlice()
+	*s = append(*s, v)
+	return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64() = v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+			s := f.toFloat64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	s := f.toFloat64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32() = v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+			s := f.toFloat32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	s := f.toFloat32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// The use of append here is a trick which avoids the zeroing
+	// that would be required if we used a make/copy pair.
+	// We append to emptyBuf instead of nil because we want
+	// a non-nil result even when the length is 0.
+	v := append(emptyBuf[:], b[:x]...)
+	*f.toBytes() = v
+	return b[x:], nil
+}
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := append(emptyBuf[:], b[:x]...)
+	s := f.toBytesSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		// First read the message field to see if something is there.
+		// The semantics of multiple submessages are weird.  Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+	t := f.Type
+	kt := t.Key()
+	vt := t.Elem()
+	unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+	unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val"))
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// The map entry is a submessage. Figure out how big it is.
+		if w != WireBytes {
+			return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		r := b[x:] // unused data to return
+		b = b[:x]  // data for map entry
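+		// E.g., a map<string, int32> entry {"a": 1} arrives as the
+		// submessage bytes 0x0a 0x01 0x61 0x10 0x01
+		// (key is field 1, value is field 2).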
+
+		// Note: we could use #keys * #values ~= 200 functions
+		// to do map decoding without reflection. Probably not worth it.
+		// Maps will be somewhat slow. Oh well.
+
+		// Read key and value from data.
+		var nerr nonFatal
+		k := reflect.New(kt)
+		v := reflect.New(vt)
+		for len(b) > 0 {
+			x, n := decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			wire := int(x) & 7
+			b = b[n:]
+
+			var err error
+			switch x >> 3 {
+			case 1:
+				b, err = unmarshalKey(b, valToPointer(k), wire)
+			case 2:
+				b, err = unmarshalVal(b, valToPointer(v), wire)
+			default:
+				err = errInternalBadWireType // skip unknown tag
+			}
+
+			if nerr.Merge(err) {
+				continue
+			}
+			if err != errInternalBadWireType {
+				return nil, err
+			}
+
+			// Skip past unknown fields.
+			b, err = skipField(b, wire)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// Get map, allocate if needed.
+		m := f.asPointerTo(t).Elem() // an addressable map[K]T
+		if m.IsNil() {
+			m.Set(reflect.MakeMap(t))
+		}
+
+		// Insert into map.
+		m.SetMapIndex(k.Elem(), v.Elem())
+
+		return r, nerr.E
+	}
+}
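+
+// For example, a map<string, int32> entry mapping "a" to 1 arrives as an
+// embedded message whose body is
+//	0x0a 0x01 0x61   // field 1 (key): wire type 2, length 1, "a"
+//	0x10 0x01        // field 2 (value): wire type 0, varint 1
+// The loop above dispatches on the field number (x >> 3) to the key and
+// value unmarshalers in turn.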
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// for:
+// message Msg {
+//   oneof F {
+//     int64 X = 1;
+//     double Y = 2;
+//   }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+	sf := typ.Field(0)
+	field0 := toField(&sf)
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// Allocate holder for value.
+		v := reflect.New(typ)
+
+		// Unmarshal data into holder.
+		// We unmarshal into the first field of the holder object.
+		var err error
+		var nerr nonFatal
+		b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+		if !nerr.Merge(err) {
+			return nil, err
+		}
+
+		// Write pointer to holder into target field.
+		f.asPointerTo(ityp).Elem().Set(v)
+
+		return b, nerr.E
+	}
+}
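+
+// For the example message above, the generated code contains (roughly) one
+// wrapper struct per case, e.g.
+//	type Msg_X struct {
+//		X int64 `protobuf:"varint,1,opt,name=X,oneof"`
+//	}
+// field0 is then the offset of X within Msg_X, and the holder stored into
+// the interface field is a *Msg_X.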
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field of type wire and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+	switch wire {
+	case WireVarint:
+		_, k := decodeVarint(b)
+		if k == 0 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[k:]
+	case WireFixed32:
+		if len(b) < 4 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[4:]
+	case WireFixed64:
+		if len(b) < 8 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[8:]
+	case WireBytes:
+		m, k := decodeVarint(b)
+		if k == 0 || uint64(len(b)-k) < m {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[uint64(k)+m:]
+	case WireStartGroup:
+		_, i := findEndGroup(b)
+		if i == -1 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[i:]
+	default:
+		return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+	}
+	return b, nil
+}
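+
+// For example, with wire == WireBytes and b = {0x02, 'h', 'i', 0x08},
+// skipField consumes the length varint (2) plus two payload bytes and
+// returns {0x08}.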
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
+func findEndGroup(b []byte) (int, int) {
+	depth := 1
+	i := 0
+	for {
+		x, n := decodeVarint(b[i:])
+		if n == 0 {
+			return -1, -1
+		}
+		j := i
+		i += n
+		switch x & 7 {
+		case WireVarint:
+			_, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+		case WireFixed32:
+			if len(b)-4 < i {
+				return -1, -1
+			}
+			i += 4
+		case WireFixed64:
+			if len(b)-8 < i {
+				return -1, -1
+			}
+			i += 8
+		case WireBytes:
+			m, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+			if uint64(len(b)-i) < m {
+				return -1, -1
+			}
+			i += int(m)
+		case WireStartGroup:
+			depth++
+		case WireEndGroup:
+			depth--
+			if depth == 0 {
+				return j, i
+			}
+		default:
+			return -1, -1
+		}
+	}
+}
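+
+// For example, findEndGroup({0x0b, 0x0c, 0x0c}) returns (2, 3): the first
+// two bytes are a nested StartGroup/EndGroup pair for field 1, so the
+// unpaired EndGroup tag is the byte at index 2.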
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
+func encodeVarint(b []byte, x uint64) []byte {
+	for x >= 1<<7 {
+		b = append(b, byte(x&0x7f|0x80))
+		x >>= 7
+	}
+	return append(b, byte(x))
+}
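+
+// For example, encodeVarint(nil, 300) appends {0xac, 0x02}: 300 is emitted
+// seven bits at a time, least-significant group first, with the high bit
+// set on every byte but the last.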
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
+func decodeVarint(b []byte) (uint64, int) {
+	var x, y uint64
+	if len(b) == 0 {
+		goto bad
+	}
+	x = uint64(b[0])
+	if x < 0x80 {
+		return x, 1
+	}
+	x -= 0x80
+
+	if len(b) <= 1 {
+		goto bad
+	}
+	y = uint64(b[1])
+	x += y << 7
+	if y < 0x80 {
+		return x, 2
+	}
+	x -= 0x80 << 7
+
+	if len(b) <= 2 {
+		goto bad
+	}
+	y = uint64(b[2])
+	x += y << 14
+	if y < 0x80 {
+		return x, 3
+	}
+	x -= 0x80 << 14
+
+	if len(b) <= 3 {
+		goto bad
+	}
+	y = uint64(b[3])
+	x += y << 21
+	if y < 0x80 {
+		return x, 4
+	}
+	x -= 0x80 << 21
+
+	if len(b) <= 4 {
+		goto bad
+	}
+	y = uint64(b[4])
+	x += y << 28
+	if y < 0x80 {
+		return x, 5
+	}
+	x -= 0x80 << 28
+
+	if len(b) <= 5 {
+		goto bad
+	}
+	y = uint64(b[5])
+	x += y << 35
+	if y < 0x80 {
+		return x, 6
+	}
+	x -= 0x80 << 35
+
+	if len(b) <= 6 {
+		goto bad
+	}
+	y = uint64(b[6])
+	x += y << 42
+	if y < 0x80 {
+		return x, 7
+	}
+	x -= 0x80 << 42
+
+	if len(b) <= 7 {
+		goto bad
+	}
+	y = uint64(b[7])
+	x += y << 49
+	if y < 0x80 {
+		return x, 8
+	}
+	x -= 0x80 << 49
+
+	if len(b) <= 8 {
+		goto bad
+	}
+	y = uint64(b[8])
+	x += y << 56
+	if y < 0x80 {
+		return x, 9
+	}
+	x -= 0x80 << 56
+
+	if len(b) <= 9 {
+		goto bad
+	}
+	y = uint64(b[9])
+	x += y << 63
+	if y < 2 {
+		return x, 10
+	}
+
+bad:
+	return 0, 0
+}
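+
+// For example, decodeVarint([]byte{0xac, 0x02}) returns (300, 2), inverting
+// the encodeVarint example above, and decodeVarint(nil) returns (0, 0).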
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..1aaee72
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,843 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"math"
+	"reflect"
+	"sort"
+	"strings"
+)
+
+var (
+	newline         = []byte("\n")
+	spaces          = []byte("                                        ")
+	endBraceNewline = []byte("}\n")
+	backslashN      = []byte{'\\', 'n'}
+	backslashR      = []byte{'\\', 'r'}
+	backslashT      = []byte{'\\', 't'}
+	backslashDQ     = []byte{'\\', '"'}
+	backslashBS     = []byte{'\\', '\\'}
+	posInf          = []byte("inf")
+	negInf          = []byte("-inf")
+	nan             = []byte("nan")
+)
+
+type writer interface {
+	io.Writer
+	WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+	ind      int
+	complete bool // if the current position is a complete line
+	compact  bool // whether to write out as a one-liner
+	w        writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+	if !strings.Contains(s, "\n") {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		w.complete = false
+		return io.WriteString(w.w, s)
+	}
+	// WriteString is typically called without newlines, so this
+	// codepath and its copy are rare.  We copy to avoid
+	// duplicating all of Write's logic here.
+	return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+	newlines := bytes.Count(p, newline)
+	if newlines == 0 {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		n, err = w.w.Write(p)
+		w.complete = false
+		return n, err
+	}
+
+	frags := bytes.SplitN(p, newline, newlines+1)
+	if w.compact {
+		for i, frag := range frags {
+			if i > 0 {
+				if err := w.w.WriteByte(' '); err != nil {
+					return n, err
+				}
+				n++
+			}
+			nn, err := w.w.Write(frag)
+			n += nn
+			if err != nil {
+				return n, err
+			}
+		}
+		return n, nil
+	}
+
+	for i, frag := range frags {
+		if w.complete {
+			w.writeIndent()
+		}
+		nn, err := w.w.Write(frag)
+		n += nn
+		if err != nil {
+			return n, err
+		}
+		if i+1 < len(frags) {
+			if err := w.w.WriteByte('\n'); err != nil {
+				return n, err
+			}
+			n++
+		}
+	}
+	w.complete = len(frags[len(frags)-1]) == 0
+	return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+	if w.compact && c == '\n' {
+		c = ' '
+	}
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	err := w.w.WriteByte(c)
+	w.complete = c == '\n'
+	return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+	if w.ind == 0 {
+		log.Print("proto: textWriter unindented too far")
+		return
+	}
+	w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+	if _, err := w.WriteString(props.OrigName); err != nil {
+		return err
+	}
+	if props.Wire != "group" {
+		return w.WriteByte(':')
+	}
+	return nil
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any characters except [0-9A-Za-z._/], it must be quoted.
+	for _, ch := range u {
+		switch {
+		case ch == '.' || ch == '/' || ch == '_':
+			continue
+		case '0' <= ch && ch <= '9':
+			continue
+		case 'A' <= ch && ch <= 'Z':
+			continue
+		case 'a' <= ch && ch <= 'z':
+			continue
+		default:
+			return true
+		}
+	}
+	return false
+}
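+
+// For example, "type.googleapis.com/google.protobuf.Duration" needs no
+// quotes, while a URL containing a space, '%', or any other character
+// outside [0-9A-Za-z._/] is written in quoted form.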
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	t, ok := sv.Addr().Interface().(wkt)
+	return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv's value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+	turl := sv.FieldByName("TypeUrl")
+	val := sv.FieldByName("Value")
+	if !turl.IsValid() || !val.IsValid() {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	b, ok := val.Interface().([]byte)
+	if !ok {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	parts := strings.Split(turl.String(), "/")
+	mt := MessageType(parts[len(parts)-1])
+	if mt == nil {
+		return false, nil
+	}
+	m := reflect.New(mt.Elem())
+	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	u := turl.String()
+	if requiresQuotes(u) {
+		writeString(w, u)
+	} else {
+		w.Write([]byte(u))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.ind++
+	}
+	if err := tm.writeStruct(w, m.Elem()); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.ind--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+	if tm.ExpandAny && isAny(sv) {
+		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+			return err
+		}
+	}
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < sv.NumField(); i++ {
+		fv := sv.Field(i)
+		props := sprops.Prop[i]
+		name := st.Field(i).Name
+
+		if name == "XXX_NoUnkeyedLiteral" {
+			continue
+		}
+
+		if strings.HasPrefix(name, "XXX_") {
+			// There are two XXX_ fields:
+			//   XXX_unrecognized []byte
+			//   XXX_extensions   map[int32]proto.Extension
+			// The first is handled here;
+			// the second is handled at the bottom of this function.
+			if name == "XXX_unrecognized" && !fv.IsNil() {
+				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Ptr && fv.IsNil() {
+			// Field not filled in. This could be an optional field or
+			// a required field that wasn't filled in. Either way, there
+			// isn't anything we can show for it.
+			continue
+		}
+		if fv.Kind() == reflect.Slice && fv.IsNil() {
+			// Repeated field that is empty, or a bytes field that is unused.
+			continue
+		}
+
+		if props.Repeated && fv.Kind() == reflect.Slice {
+			// Repeated field.
+			for j := 0; j < fv.Len(); j++ {
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				v := fv.Index(j)
+				if v.Kind() == reflect.Ptr && v.IsNil() {
+					// A nil message in a repeated field is not valid,
+					// but we can handle that more gracefully than panicking.
+					if _, err := w.Write([]byte("<nil>\n")); err != nil {
+						return err
+					}
+					continue
+				}
+				if err := tm.writeAny(w, v, props); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Map {
+			// Map fields are rendered as a repeated struct with key/value fields.
+			keys := fv.MapKeys()
+			sort.Sort(mapKeys(keys))
+			for _, key := range keys {
+				val := fv.MapIndex(key)
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				// open struct
+				if err := w.WriteByte('<'); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				w.indent()
+				// key
+				if _, err := w.WriteString("key:"); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+				// nil values aren't legal, but we can avoid panicking because of them.
+				if val.Kind() != reflect.Ptr || !val.IsNil() {
+					// value
+					if _, err := w.WriteString("value:"); err != nil {
+						return err
+					}
+					if !w.compact {
+						if err := w.WriteByte(' '); err != nil {
+							return err
+						}
+					}
+					if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				// close struct
+				w.unindent()
+				if err := w.WriteByte('>'); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+			// empty bytes field
+			continue
+		}
+		if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+			// proto3 non-repeated scalar field; skip if zero value
+			if isProto3Zero(fv) {
+				continue
+			}
+		}
+
+		if fv.Kind() == reflect.Interface {
+			// Check if it is a oneof.
+			if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+				// fv is nil, or holds a pointer to generated struct.
+				// That generated struct has exactly one field,
+				// which has a protobuf struct tag.
+				if fv.IsNil() {
+					continue
+				}
+				inner := fv.Elem().Elem() // interface -> *T -> T
+				tag := inner.Type().Field(0).Tag.Get("protobuf")
+				props = new(Properties) // Overwrite the outer props var, but not its pointee.
+				props.Parse(tag)
+				// Write the value in the oneof, not the oneof itself.
+				fv = inner.Field(0)
+
+				// Special case to cope with malformed messages gracefully:
+				// If the value in the oneof is a nil pointer, don't panic
+				// in writeAny.
+				if fv.Kind() == reflect.Ptr && fv.IsNil() {
+					// Use errors.New so writeAny won't render quotes.
+					msg := errors.New("/* nil */")
+					fv = reflect.ValueOf(&msg).Elem()
+				}
+			}
+		}
+
+		if err := writeName(w, props); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+
+		// Enums have a String method, so writeAny will work fine.
+		if err := tm.writeAny(w, fv, props); err != nil {
+			return err
+		}
+
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+
+	// Extensions (the XXX_extensions field).
+	pv := sv.Addr()
+	if _, err := extendable(pv.Interface()); err == nil {
+		if err := tm.writeExtensions(w, pv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+	v = reflect.Indirect(v)
+
+	// Floats have special cases.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		x := v.Float()
+		var b []byte
+		switch {
+		case math.IsInf(x, 1):
+			b = posInf
+		case math.IsInf(x, -1):
+			b = negInf
+		case math.IsNaN(x):
+			b = nan
+		}
+		if b != nil {
+			_, err := w.Write(b)
+			return err
+		}
+		// Other values are handled below.
+	}
+
+	// We don't attempt to serialise every possible value type; only those
+	// that can occur in protocol buffers.
+	switch v.Kind() {
+	case reflect.Slice:
+		// Should only be a []byte; repeated fields are handled in writeStruct.
+		if err := writeString(w, string(v.Bytes())); err != nil {
+			return err
+		}
+	case reflect.String:
+		if err := writeString(w, v.String()); err != nil {
+			return err
+		}
+	case reflect.Struct:
+		// Required/optional group/message.
+		var bra, ket byte = '<', '>'
+		if props != nil && props.Wire == "group" {
+			bra, ket = '{', '}'
+		}
+		if err := w.WriteByte(bra); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte('\n'); err != nil {
+				return err
+			}
+		}
+		w.indent()
+		if v.CanAddr() {
+			// Calling v.Interface on a struct causes the reflect package to
+			// copy the entire struct. This is racy with the new Marshaler
+			// since we atomically update the XXX_sizecache.
+			//
+			// Thus, we retrieve a pointer to the struct if possible to avoid
+			// a race since v.Interface on the pointer doesn't copy the struct.
+			//
+			// If v is not addressable, then we are not worried about a race
+			// since it implies that the binary Marshaler cannot possibly be
+			// mutating this value.
+			v = v.Addr()
+		}
+		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+			text, err := etm.MarshalText()
+			if err != nil {
+				return err
+			}
+			if _, err = w.Write(text); err != nil {
+				return err
+			}
+		} else {
+			if v.Kind() == reflect.Ptr {
+				v = v.Elem()
+			}
+			if err := tm.writeStruct(w, v); err != nil {
+				return err
+			}
+		}
+		w.unindent()
+		if err := w.WriteByte(ket); err != nil {
+			return err
+		}
+	default:
+		_, err := fmt.Fprint(w, v.Interface())
+		return err
+	}
+	return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+	return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := w.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = w.w.Write(backslashN)
+		case '\r':
+			_, err = w.w.Write(backslashR)
+		case '\t':
+			_, err = w.w.Write(backslashT)
+		case '"':
+			_, err = w.w.Write(backslashDQ)
+		case '\\':
+			_, err = w.w.Write(backslashBS)
+		default:
+			if isprint(c) {
+				err = w.w.WriteByte(c)
+			} else {
+				_, err = fmt.Fprintf(w.w, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return w.WriteByte('"')
+}
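+
+// For example, writeString(w, "a\tb\x01") emits "a\tb\001": the tab uses a
+// two-character escape and the unprintable 0x01 byte falls through to the
+// three-digit octal form.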
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+	if !w.compact {
+		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+			return err
+		}
+	}
+	b := NewBuffer(data)
+	for b.index < len(b.buf) {
+		x, err := b.DecodeVarint()
+		if err != nil {
+			_, err := fmt.Fprintf(w, "/* %v */\n", err)
+			return err
+		}
+		wire, tag := x&7, x>>3
+		if wire == WireEndGroup {
+			w.unindent()
+			if _, err := w.Write(endBraceNewline); err != nil {
+				return err
+			}
+			continue
+		}
+		if _, err := fmt.Fprint(w, tag); err != nil {
+			return err
+		}
+		if wire != WireStartGroup {
+			if err := w.WriteByte(':'); err != nil {
+				return err
+			}
+		}
+		if !w.compact || wire == WireStartGroup {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+		switch wire {
+		case WireBytes:
+			buf, e := b.DecodeRawBytes(false)
+			if e == nil {
+				_, err = fmt.Fprintf(w, "%q", buf)
+			} else {
+				_, err = fmt.Fprintf(w, "/* %v */", e)
+			}
+		case WireFixed32:
+			x, err = b.DecodeFixed32()
+			err = writeUnknownInt(w, x, err)
+		case WireFixed64:
+			x, err = b.DecodeFixed64()
+			err = writeUnknownInt(w, x, err)
+		case WireStartGroup:
+			err = w.WriteByte('{')
+			w.indent()
+		case WireVarint:
+			x, err = b.DecodeVarint()
+			err = writeUnknownInt(w, x, err)
+		default:
+			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+		}
+		if err != nil {
+			return err
+		}
+		if err = w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+	if err == nil {
+		_, err = fmt.Fprint(w, x)
+	} else {
+		_, err = fmt.Fprintf(w, "/* %v */", err)
+	}
+	return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+	emap := extensionMaps[pv.Type().Elem()]
+	ep, _ := extendable(pv.Interface())
+
+	// Order the extensions by ID.
+	// This isn't strictly necessary, but it will give us
+	// canonical output, which will also make testing easier.
+	m, mu := ep.extensionsRead()
+	if m == nil {
+		return nil
+	}
+	mu.Lock()
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids))
+	mu.Unlock()
+
+	for _, extNum := range ids {
+		ext := m[extNum]
+		var desc *ExtensionDesc
+		if emap != nil {
+			desc = emap[extNum]
+		}
+		if desc == nil {
+			// Unknown extension.
+			if err := writeUnknownStruct(w, ext.enc); err != nil {
+				return err
+			}
+			continue
+		}
+
+		pb, err := GetExtension(ep, desc)
+		if err != nil {
+			return fmt.Errorf("failed getting extension: %v", err)
+		}
+
+		// Repeated extensions will appear as a slice.
+		if !desc.repeated() {
+			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+				return err
+			}
+		} else {
+			v := reflect.ValueOf(pb)
+			for i := 0; i < v.Len(); i++ {
+				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte(' '); err != nil {
+			return err
+		}
+	}
+	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+		return err
+	}
+	if err := w.WriteByte('\n'); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (w *textWriter) writeIndent() {
+	if !w.complete {
+		return
+	}
+	remain := w.ind * 2
+	for remain > 0 {
+		n := remain
+		if n > len(spaces) {
+			n = len(spaces)
+		}
+		w.w.Write(spaces[:n])
+		remain -= n
+	}
+	w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact   bool // use compact text format (one line).
+	ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+	val := reflect.ValueOf(pb)
+	if pb == nil || val.IsNil() {
+		w.Write([]byte("<nil>"))
+		return nil
+	}
+	var bw *bufio.Writer
+	ww, ok := w.(writer)
+	if !ok {
+		bw = bufio.NewWriter(w)
+		ww = bw
+	}
+	aw := &textWriter{
+		w:        ww,
+		complete: true,
+		compact:  tm.Compact,
+	}
+
+	if etm, ok := pb.(encoding.TextMarshaler); ok {
+		text, err := etm.MarshalText()
+		if err != nil {
+			return err
+		}
+		if _, err = aw.Write(text); err != nil {
+			return err
+		}
+		if bw != nil {
+			return bw.Flush()
+		}
+		return nil
+	}
+	// Dereference the received pointer so we don't have outer < and >.
+	v := reflect.Indirect(val)
+	if err := tm.writeStruct(aw, v); err != nil {
+		return err
+	}
+	if bw != nil {
+		return bw.Flush()
+	}
+	return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+	var buf bytes.Buffer
+	tm.Marshal(&buf, pb)
+	return buf.String()
+}
+
+var (
+	defaultTextMarshaler = TextMarshaler{}
+	compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
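+
+// Usage sketch (assuming a generated Message value msg):
+//	var buf bytes.Buffer
+//	_ = proto.MarshalText(&buf, msg) // multi-line form
+//	oneLine := proto.CompactTextString(msg)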
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..bb55a3a
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,880 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the text protocol buffer format.
+// TODO: message sets.
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+// Error string emitted when an expanded Any message is unpacked repeatedly or its fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+	Message string
+	Line    int // 1-based line number
+	Offset  int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+	if p.Line == 1 {
+		// show offset only for first line
+		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+	}
+	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+	value    string
+	err      *ParseError
+	line     int    // line number
+	offset   int    // byte number from start of input, not start of line
+	unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+	if t.err == nil {
+		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+	}
+	return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+	s            string // remaining input
+	done         bool   // whether the parsing is finished (success or error)
+	backed       bool   // whether back() was called
+	offset, line int
+	cur          token
+}
+
+func newTextParser(s string) *textParser {
+	p := new(textParser)
+	p.s = s
+	p.line = 1
+	p.cur.line = 1
+	return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+	p.cur.err = pe
+	p.done = true
+	return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+	switch {
+	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+		return true
+	case '0' <= c && c <= '9':
+		return true
+	}
+	switch c {
+	case '-', '+', '.', '_':
+		return true
+	}
+	return false
+}
+
+func isWhitespace(c byte) bool {
+	switch c {
+	case ' ', '\t', '\n', '\r':
+		return true
+	}
+	return false
+}
+
+func isQuote(c byte) bool {
+	switch c {
+	case '"', '\'':
+		return true
+	}
+	return false
+}
+
+func (p *textParser) skipWhitespace() {
+	i := 0
+	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+		if p.s[i] == '#' {
+			// comment; skip to end of line or input
+			for i < len(p.s) && p.s[i] != '\n' {
+				i++
+			}
+			if i == len(p.s) {
+				break
+			}
+		}
+		if p.s[i] == '\n' {
+			p.line++
+		}
+		i++
+	}
+	p.offset += i
+	p.s = p.s[i:len(p.s)]
+	if len(p.s) == 0 {
+		p.done = true
+	}
+}
+
+func (p *textParser) advance() {
+	// Skip whitespace
+	p.skipWhitespace()
+	if p.done {
+		return
+	}
+
+	// Start of non-whitespace
+	p.cur.err = nil
+	p.cur.offset, p.cur.line = p.offset, p.line
+	p.cur.unquoted = ""
+	switch p.s[0] {
+	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+		// Single symbol
+		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+	case '"', '\'':
+		// Quoted string
+		i := 1
+		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+			if p.s[i] == '\\' && i+1 < len(p.s) {
+				// skip escaped char
+				i++
+			}
+			i++
+		}
+		if i >= len(p.s) || p.s[i] != p.s[0] {
+			p.errorf("unmatched quote")
+			return
+		}
+		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+		if err != nil {
+			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+			return
+		}
+		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+		p.cur.unquoted = unq
+	default:
+		i := 0
+		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+			i++
+		}
+		if i == 0 {
+			p.errorf("unexpected byte %#x", p.s[0])
+			return
+		}
+		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+	}
+	p.offset += len(p.cur.value)
+}
+
+var (
+	errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+	// This is based on C++'s tokenizer.cc.
+	// Despite its name, this is *not* parsing C syntax.
+	// For instance, "\0" is an invalid quoted string.
+
+	// Avoid allocation in trivial cases.
+	simple := true
+	for _, r := range s {
+		if r == '\\' || r == quote {
+			simple = false
+			break
+		}
+	}
+	if simple {
+		return s, nil
+	}
+
+	buf := make([]byte, 0, 3*len(s)/2)
+	for len(s) > 0 {
+		r, n := utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && n == 1 {
+			return "", errBadUTF8
+		}
+		s = s[n:]
+		if r != '\\' {
+			if r < utf8.RuneSelf {
+				buf = append(buf, byte(r))
+			} else {
+				buf = append(buf, string(r)...)
+			}
+			continue
+		}
+
+		ch, tail, err := unescape(s)
+		if err != nil {
+			return "", err
+		}
+		buf = append(buf, ch...)
+		s = tail
+	}
+	return string(buf), nil
+}
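+
+// For example, unquoteC(`a\nb`, '"') returns "a\nb" with a literal newline,
+// while a string containing no backslashes or quote characters is returned
+// as-is without allocating.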
+
+func unescape(s string) (ch string, tail string, err error) {
+	r, n := utf8.DecodeRuneInString(s)
+	if r == utf8.RuneError && n == 1 {
+		return "", "", errBadUTF8
+	}
+	s = s[n:]
+	switch r {
+	case 'a':
+		return "\a", s, nil
+	case 'b':
+		return "\b", s, nil
+	case 'f':
+		return "\f", s, nil
+	case 'n':
+		return "\n", s, nil
+	case 'r':
+		return "\r", s, nil
+	case 't':
+		return "\t", s, nil
+	case 'v':
+		return "\v", s, nil
+	case '?':
+		return "?", s, nil // trigraph workaround
+	case '\'', '"', '\\':
+		return string(r), s, nil
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		if len(s) < 2 {
+			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+		}
+		ss := string(r) + s[:2]
+		s = s[2:]
+		i, err := strconv.ParseUint(ss, 8, 8)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+		}
+		return string([]byte{byte(i)}), s, nil
+	case 'x', 'X', 'u', 'U':
+		var n int
+		switch r {
+		case 'x', 'X':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		if len(s) < n {
+			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+		}
+		ss := s[:n]
+		s = s[n:]
+		i, err := strconv.ParseUint(ss, 16, 64)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+		}
+		if r == 'x' || r == 'X' {
+			return string([]byte{byte(i)}), s, nil
+		}
+		if i > utf8.MaxRune {
+			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+		}
+		return string(i), s, nil
+	}
+	return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
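+
+// For example, unescape("x41rest") takes the hex branch and returns
+// ("A", "rest", nil); unescape("101rest") reaches the same result through
+// the octal branch.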
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+	if p.backed || p.done {
+		p.backed = false
+		return &p.cur
+	}
+	p.advance()
+	if p.done {
+		p.cur.value = ""
+	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+		// Look for multiple quoted strings separated by whitespace,
+		// and concatenate them.
+		cat := p.cur
+		for {
+			p.skipWhitespace()
+			if p.done || !isQuote(p.s[0]) {
+				break
+			}
+			p.advance()
+			if p.cur.err != nil {
+				return &p.cur
+			}
+			cat.value += " " + p.cur.value
+			cat.unquoted += p.cur.unquoted
+		}
+		p.done = false // parser may have seen EOF, but we want to return cat
+		p.cur = cat
+	}
+	return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != s {
+		p.back()
+		return p.errorf("expected %q, found %q", s, tok.value)
+	}
+	return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < st.NumField(); i++ {
+		if !isNil(sv.Field(i)) {
+			continue
+		}
+
+		props := sprops.Prop[i]
+		if props.Required {
+			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+		}
+	}
+	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+	i, ok := sprops.decoderOrigNames[name]
+	if ok {
+		return i, sprops.Prop[i], true
+	}
+	return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ":" {
+		// Colon is optional when the field is a group or message.
+		needColon := true
+		switch props.Wire {
+		case "group":
+			needColon = false
+		case "bytes":
+			// A "bytes" field is either a message, a string, or a repeated field;
+			// those three become *T, *string and []T respectively, so we can check for
+			// this field being a pointer to a non-string.
+			if typ.Kind() == reflect.Ptr {
+				// *T or *string
+				if typ.Elem().Kind() == reflect.String {
+					break
+				}
+			} else if typ.Kind() == reflect.Slice {
+				// []T or []*T
+				if typ.Elem().Kind() != reflect.Ptr {
+					break
+				}
+			} else if typ.Kind() == reflect.String {
+				// The proto3 exception is for a string field,
+				// which requires a colon.
+				break
+			}
+			needColon = false
+		}
+		if needColon {
+			return p.errorf("expected ':', found %q", tok.value)
+		}
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	reqCount := sprops.reqCount
+	var reqFieldErr error
+	fieldSet := make(map[string]bool)
+	// A struct is a sequence of "name: value", terminated by one of
+	// '>' or '}', or the end of the input.  A name may also be
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
+	for {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		if tok.value == terminator {
+			break
+		}
+		if tok.value == "[" {
+			// Looks like an extension or an Any.
+			//
+			// TODO: Check whether we need to handle
+			// namespace rooted names (e.g. ".something.Foo").
+			extName, err := p.consumeExtName()
+			if err != nil {
+				return err
+			}
+
+			if s := strings.LastIndex(extName, "/"); s >= 0 {
+				// If it contains a slash, it's an Any type URL.
+				messageName := extName[s+1:]
+				mt := MessageType(messageName)
+				if mt == nil {
+					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+				}
+				tok = p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				// consume an optional colon
+				if tok.value == ":" {
+					tok = p.next()
+					if tok.err != nil {
+						return tok.err
+					}
+				}
+				var terminator string
+				switch tok.value {
+				case "<":
+					terminator = ">"
+				case "{":
+					terminator = "}"
+				default:
+					return p.errorf("expected '{' or '<', found %q", tok.value)
+				}
+				v := reflect.New(mt.Elem())
+				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+					return pe
+				}
+				b, err := Marshal(v.Interface().(Message))
+				if err != nil {
+					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+				}
+				if fieldSet["type_url"] {
+					return p.errorf(anyRepeatedlyUnpacked, "type_url")
+				}
+				if fieldSet["value"] {
+					return p.errorf(anyRepeatedlyUnpacked, "value")
+				}
+				sv.FieldByName("TypeUrl").SetString(extName)
+				sv.FieldByName("Value").SetBytes(b)
+				fieldSet["type_url"] = true
+				fieldSet["value"] = true
+				continue
+			}
+
+			var desc *ExtensionDesc
+			// This could be faster, but it's functional.
+			// TODO: Do something smarter than a linear scan.
+			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+				if d.Name == extName {
+					desc = d
+					break
+				}
+			}
+			if desc == nil {
+				return p.errorf("unrecognized extension %q", extName)
+			}
+
+			props := &Properties{}
+			props.Parse(desc.Tag)
+
+			typ := reflect.TypeOf(desc.ExtensionType)
+			if err := p.checkForColon(props, typ); err != nil {
+				return err
+			}
+
+			rep := desc.repeated()
+
+			// Read the extension structure, and set it in
+			// the value we're constructing.
+			var ext reflect.Value
+			if !rep {
+				ext = reflect.New(typ).Elem()
+			} else {
+				ext = reflect.New(typ.Elem()).Elem()
+			}
+			if err := p.readAny(ext, props); err != nil {
+				if _, ok := err.(*RequiredNotSetError); !ok {
+					return err
+				}
+				reqFieldErr = err
+			}
+			ep := sv.Addr().Interface().(Message)
+			if !rep {
+				SetExtension(ep, desc, ext.Interface())
+			} else {
+				old, err := GetExtension(ep, desc)
+				var sl reflect.Value
+				if err == nil {
+					sl = reflect.ValueOf(old) // existing slice
+				} else {
+					sl = reflect.MakeSlice(typ, 0, 1)
+				}
+				sl = reflect.Append(sl, ext)
+				SetExtension(ep, desc, sl.Interface())
+			}
+			if err := p.consumeOptionalSeparator(); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// This is a normal, non-extension field.
+		name := tok.value
+		var dst reflect.Value
+		fi, props, ok := structFieldByName(sprops, name)
+		if ok {
+			dst = sv.Field(fi)
+		} else if oop, ok := sprops.OneofTypes[name]; ok {
+			// It is a oneof.
+			props = oop.Prop
+			nv := reflect.New(oop.Type.Elem())
+			dst = nv.Elem().Field(0)
+			field := sv.Field(oop.Field)
+			if !field.IsNil() {
+				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+			}
+			field.Set(nv)
+		}
+		if !dst.IsValid() {
+			return p.errorf("unknown field name %q in %v", name, st)
+		}
+
+		if dst.Kind() == reflect.Map {
+			// Consume any colon.
+			if err := p.checkForColon(props, dst.Type()); err != nil {
+				return err
+			}
+
+			// Construct the map if it doesn't already exist.
+			if dst.IsNil() {
+				dst.Set(reflect.MakeMap(dst.Type()))
+			}
+			key := reflect.New(dst.Type().Key()).Elem()
+			val := reflect.New(dst.Type().Elem()).Elem()
+
+			// The map entry should be this sequence of tokens:
+			//	< key : KEY value : VALUE >
+			// However, implementations may omit key or value, and technically
+			// we should support them in any order.  See b/28924776 for a time
+			// this went wrong.
+
+			tok := p.next()
+			var terminator string
+			switch tok.value {
+			case "<":
+				terminator = ">"
+			case "{":
+				terminator = "}"
+			default:
+				return p.errorf("expected '{' or '<', found %q", tok.value)
+			}
+			for {
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == terminator {
+					break
+				}
+				switch tok.value {
+				case "key":
+					if err := p.consumeToken(":"); err != nil {
+						return err
+					}
+					if err := p.readAny(key, props.MapKeyProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				case "value":
+					if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+						return err
+					}
+					if err := p.readAny(val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				default:
+					p.back()
+					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+				}
+			}
+
+			dst.SetMapIndex(key, val)
+			continue
+		}
+
+		// Check that it's not already set if it's not a repeated field.
+		if !props.Repeated && fieldSet[name] {
+			return p.errorf("non-repeated field %q was repeated", name)
+		}
+
+		if err := p.checkForColon(props, dst.Type()); err != nil {
+			return err
+		}
+
+		// Parse into the field.
+		fieldSet[name] = true
+		if err := p.readAny(dst, props); err != nil {
+			if _, ok := err.(*RequiredNotSetError); !ok {
+				return err
+			}
+			reqFieldErr = err
+		}
+		if props.Required {
+			reqCount--
+		}
+
+		if err := p.consumeOptionalSeparator(); err != nil {
+			return err
+		}
+
+	}
+
+	if reqCount > 0 {
+		return p.missingRequiredFieldError(sv)
+	}
+	return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return "", tok.err
+	}
+
+	// If the extension name or type URL is quoted, it's a single token.
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+		if p.done && tok.value != "]" {
+			return "", p.errorf("unclosed type_url or extension name")
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value == "" {
+		return p.errorf("unexpected EOF")
+	}
+
+	switch fv := v; fv.Kind() {
+	case reflect.Slice:
+		at := v.Type()
+		if at.Elem().Kind() == reflect.Uint8 {
+			// Special case for []byte
+			if tok.value[0] != '"' && tok.value[0] != '\'' {
+				// Deliberately written out here, as the error after
+				// this switch statement would write "invalid []byte: ...",
+				// which is not as user-friendly.
+				return p.errorf("invalid string: %v", tok.value)
+			}
+			bytes := []byte(tok.unquoted)
+			fv.Set(reflect.ValueOf(bytes))
+			return nil
+		}
+		// Repeated field.
+		if tok.value == "[" {
+			// Repeated field with list notation, like [1,2,3].
+			for {
+				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+				err := p.readAny(fv.Index(fv.Len()-1), props)
+				if err != nil {
+					return err
+				}
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == "]" {
+					break
+				}
+				if tok.value != "," {
+					return p.errorf("Expected ']' or ',' found %q", tok.value)
+				}
+			}
+			return nil
+		}
+		// One value of the repeated field.
+		p.back()
+		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+		return p.readAny(fv.Index(fv.Len()-1), props)
+	case reflect.Bool:
+		// true/1/t/True or false/f/0/False.
+		switch tok.value {
+		case "true", "1", "t", "True":
+			fv.SetBool(true)
+			return nil
+		case "false", "0", "f", "False":
+			fv.SetBool(false)
+			return nil
+		}
+	case reflect.Float32, reflect.Float64:
+		v := tok.value
+		// Ignore 'f' for compatibility with output generated by C++, but don't
+		// remove 'f' when the value is "-inf" or "inf".
+		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+			v = v[:len(v)-1]
+		}
+		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+			fv.SetFloat(f)
+			return nil
+		}
+	case reflect.Int32:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+		if len(props.Enum) == 0 {
+			break
+		}
+		m, ok := enumValueMaps[props.Enum]
+		if !ok {
+			break
+		}
+		x, ok := m[tok.value]
+		if !ok {
+			break
+		}
+		fv.SetInt(int64(x))
+		return nil
+	case reflect.Int64:
+		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+	case reflect.Ptr:
+		// A basic field (indirected through pointer), or a repeated message/group
+		p.back()
+		fv.Set(reflect.New(fv.Type().Elem()))
+		return p.readAny(fv.Elem(), props)
+	case reflect.String:
+		if tok.value[0] == '"' || tok.value[0] == '\'' {
+			fv.SetString(tok.unquoted)
+			return nil
+		}
+	case reflect.Struct:
+		var terminator string
+		switch tok.value {
+		case "{":
+			terminator = "}"
+		case "<":
+			terminator = ">"
+		default:
+			return p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+		return p.readStruct(fv, terminator)
+	case reflect.Uint32:
+		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+			fv.SetUint(uint64(x))
+			return nil
+		}
+	case reflect.Uint64:
+		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	}
+	return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+	if um, ok := pb.(encoding.TextUnmarshaler); ok {
+		return um.UnmarshalText([]byte(s))
+	}
+	pb.Reset()
+	v := reflect.ValueOf(pb)
+	return newTextParser(s).readStruct(v.Elem(), "")
+}
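+
+// Usage sketch (assuming a generated Message value msg and a fresh value
+// msg2 of the same type): UnmarshalText is the inverse of MarshalText, so
+//	err := proto.UnmarshalText(proto.MarshalTextString(msg), msg2)
+// should reproduce msg's known fields in msg2.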
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
new file mode 100644
index 0000000..1ded05b
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -0,0 +1,2887 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/descriptor.proto
+
+package descriptor
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type FieldDescriptorProto_Type int32
+
+const (
+	// 0 is reserved for errors.
+	// Order is weird for historical reasons.
+	FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
+	FieldDescriptorProto_TYPE_FLOAT  FieldDescriptorProto_Type = 2
+	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
+	// negative values are likely.
+	FieldDescriptorProto_TYPE_INT64  FieldDescriptorProto_Type = 3
+	FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
+	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
+	// negative values are likely.
+	FieldDescriptorProto_TYPE_INT32   FieldDescriptorProto_Type = 5
+	FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
+	FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
+	FieldDescriptorProto_TYPE_BOOL    FieldDescriptorProto_Type = 8
+	FieldDescriptorProto_TYPE_STRING  FieldDescriptorProto_Type = 9
+	// Tag-delimited aggregate.
+	// Group type is deprecated and not supported in proto3. However, Proto3
+	// implementations should still be able to parse the group wire format and
+	// treat group fields as unknown fields.
+	FieldDescriptorProto_TYPE_GROUP   FieldDescriptorProto_Type = 10
+	FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
+	// New in version 2.
+	FieldDescriptorProto_TYPE_BYTES    FieldDescriptorProto_Type = 12
+	FieldDescriptorProto_TYPE_UINT32   FieldDescriptorProto_Type = 13
+	FieldDescriptorProto_TYPE_ENUM     FieldDescriptorProto_Type = 14
+	FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
+	FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
+	FieldDescriptorProto_TYPE_SINT32   FieldDescriptorProto_Type = 17
+	FieldDescriptorProto_TYPE_SINT64   FieldDescriptorProto_Type = 18
+)
+
+var FieldDescriptorProto_Type_name = map[int32]string{
+	1:  "TYPE_DOUBLE",
+	2:  "TYPE_FLOAT",
+	3:  "TYPE_INT64",
+	4:  "TYPE_UINT64",
+	5:  "TYPE_INT32",
+	6:  "TYPE_FIXED64",
+	7:  "TYPE_FIXED32",
+	8:  "TYPE_BOOL",
+	9:  "TYPE_STRING",
+	10: "TYPE_GROUP",
+	11: "TYPE_MESSAGE",
+	12: "TYPE_BYTES",
+	13: "TYPE_UINT32",
+	14: "TYPE_ENUM",
+	15: "TYPE_SFIXED32",
+	16: "TYPE_SFIXED64",
+	17: "TYPE_SINT32",
+	18: "TYPE_SINT64",
+}
+
+var FieldDescriptorProto_Type_value = map[string]int32{
+	"TYPE_DOUBLE":   1,
+	"TYPE_FLOAT":    2,
+	"TYPE_INT64":    3,
+	"TYPE_UINT64":   4,
+	"TYPE_INT32":    5,
+	"TYPE_FIXED64":  6,
+	"TYPE_FIXED32":  7,
+	"TYPE_BOOL":     8,
+	"TYPE_STRING":   9,
+	"TYPE_GROUP":    10,
+	"TYPE_MESSAGE":  11,
+	"TYPE_BYTES":    12,
+	"TYPE_UINT32":   13,
+	"TYPE_ENUM":     14,
+	"TYPE_SFIXED32": 15,
+	"TYPE_SFIXED64": 16,
+	"TYPE_SINT32":   17,
+	"TYPE_SINT64":   18,
+}
+
+func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
+	p := new(FieldDescriptorProto_Type)
+	*p = x
+	return p
+}
+
+func (x FieldDescriptorProto_Type) String() string {
+	return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
+}
+
+func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
+	if err != nil {
+		return err
+	}
+	*x = FieldDescriptorProto_Type(value)
+	return nil
+}
+
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{4, 0}
+}
+
+type FieldDescriptorProto_Label int32
+
+const (
+	// 0 is reserved for errors
+	FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
+	FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
+	FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+)
+
+var FieldDescriptorProto_Label_name = map[int32]string{
+	1: "LABEL_OPTIONAL",
+	2: "LABEL_REQUIRED",
+	3: "LABEL_REPEATED",
+}
+
+var FieldDescriptorProto_Label_value = map[string]int32{
+	"LABEL_OPTIONAL": 1,
+	"LABEL_REQUIRED": 2,
+	"LABEL_REPEATED": 3,
+}
+
+func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
+	p := new(FieldDescriptorProto_Label)
+	*p = x
+	return p
+}
+
+func (x FieldDescriptorProto_Label) String() string {
+	return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
+}
+
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
+	if err != nil {
+		return err
+	}
+	*x = FieldDescriptorProto_Label(value)
+	return nil
+}
+
+func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{4, 1}
+}
+
+// Generated classes can be optimized for speed or code size.
+type FileOptions_OptimizeMode int32
+
+const (
+	// Generate complete code for parsing, serialization,
+	// etc.
+	FileOptions_SPEED FileOptions_OptimizeMode = 1
+	// Use ReflectionOps to implement these methods.
+	FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2
+	// Generate code using MessageLite and the lite runtime.
+	FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
+)
+
+var FileOptions_OptimizeMode_name = map[int32]string{
+	1: "SPEED",
+	2: "CODE_SIZE",
+	3: "LITE_RUNTIME",
+}
+
+var FileOptions_OptimizeMode_value = map[string]int32{
+	"SPEED":        1,
+	"CODE_SIZE":    2,
+	"LITE_RUNTIME": 3,
+}
+
+func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
+	p := new(FileOptions_OptimizeMode)
+	*p = x
+	return p
+}
+
+func (x FileOptions_OptimizeMode) String() string {
+	return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
+}
+
+func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
+	if err != nil {
+		return err
+	}
+	*x = FileOptions_OptimizeMode(value)
+	return nil
+}
+
+func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{10, 0}
+}
+
+type FieldOptions_CType int32
+
+const (
+	// Default mode.
+	FieldOptions_STRING       FieldOptions_CType = 0
+	FieldOptions_CORD         FieldOptions_CType = 1
+	FieldOptions_STRING_PIECE FieldOptions_CType = 2
+)
+
+var FieldOptions_CType_name = map[int32]string{
+	0: "STRING",
+	1: "CORD",
+	2: "STRING_PIECE",
+}
+
+var FieldOptions_CType_value = map[string]int32{
+	"STRING":       0,
+	"CORD":         1,
+	"STRING_PIECE": 2,
+}
+
+func (x FieldOptions_CType) Enum() *FieldOptions_CType {
+	p := new(FieldOptions_CType)
+	*p = x
+	return p
+}
+
+func (x FieldOptions_CType) String() string {
+	return proto.EnumName(FieldOptions_CType_name, int32(x))
+}
+
+func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
+	if err != nil {
+		return err
+	}
+	*x = FieldOptions_CType(value)
+	return nil
+}
+
+func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{12, 0}
+}
+
+type FieldOptions_JSType int32
+
+const (
+	// Use the default type.
+	FieldOptions_JS_NORMAL FieldOptions_JSType = 0
+	// Use JavaScript strings.
+	FieldOptions_JS_STRING FieldOptions_JSType = 1
+	// Use JavaScript numbers.
+	FieldOptions_JS_NUMBER FieldOptions_JSType = 2
+)
+
+var FieldOptions_JSType_name = map[int32]string{
+	0: "JS_NORMAL",
+	1: "JS_STRING",
+	2: "JS_NUMBER",
+}
+
+var FieldOptions_JSType_value = map[string]int32{
+	"JS_NORMAL": 0,
+	"JS_STRING": 1,
+	"JS_NUMBER": 2,
+}
+
+func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
+	p := new(FieldOptions_JSType)
+	*p = x
+	return p
+}
+
+func (x FieldOptions_JSType) String() string {
+	return proto.EnumName(FieldOptions_JSType_name, int32(x))
+}
+
+func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
+	if err != nil {
+		return err
+	}
+	*x = FieldOptions_JSType(value)
+	return nil
+}
+
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{12, 1}
+}
+
+// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+// or neither? HTTP-based RPC implementations may choose the GET verb for safe
+// methods, and the PUT verb for idempotent methods, instead of the default POST.
+type MethodOptions_IdempotencyLevel int32
+
+const (
+	MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0
+	MethodOptions_NO_SIDE_EFFECTS     MethodOptions_IdempotencyLevel = 1
+	MethodOptions_IDEMPOTENT          MethodOptions_IdempotencyLevel = 2
+)
+
+var MethodOptions_IdempotencyLevel_name = map[int32]string{
+	0: "IDEMPOTENCY_UNKNOWN",
+	1: "NO_SIDE_EFFECTS",
+	2: "IDEMPOTENT",
+}
+
+var MethodOptions_IdempotencyLevel_value = map[string]int32{
+	"IDEMPOTENCY_UNKNOWN": 0,
+	"NO_SIDE_EFFECTS":     1,
+	"IDEMPOTENT":          2,
+}
+
+func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel {
+	p := new(MethodOptions_IdempotencyLevel)
+	*p = x
+	return p
+}
+
+func (x MethodOptions_IdempotencyLevel) String() string {
+	return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x))
+}
+
+func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel")
+	if err != nil {
+		return err
+	}
+	*x = MethodOptions_IdempotencyLevel(value)
+	return nil
+}
+
+func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{17, 0}
+}
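+
+// Illustrative sketch (added commentary, not part of the generated API): a
+// dispatcher could map the idempotency level to an HTTP verb as the comment
+// above suggests; verbFor is a hypothetical helper:
+//
+//	func verbFor(l descriptor.MethodOptions_IdempotencyLevel) string {
+//		switch l {
+//		case descriptor.MethodOptions_NO_SIDE_EFFECTS:
+//			return "GET" // safe
+//		case descriptor.MethodOptions_IDEMPOTENT:
+//			return "PUT" // idempotent
+//		default:
+//			return "POST"
+//		}
+//	}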
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+type FileDescriptorSet struct {
+	File                 []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *FileDescriptorSet) Reset()         { *m = FileDescriptorSet{} }
+func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorSet) ProtoMessage()    {}
+func (*FileDescriptorSet) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{0}
+}
+
+func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b)
+}
+func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic)
+}
+func (m *FileDescriptorSet) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileDescriptorSet.Merge(m, src)
+}
+func (m *FileDescriptorSet) XXX_Size() int {
+	return xxx_messageInfo_FileDescriptorSet.Size(m)
+}
+func (m *FileDescriptorSet) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo
+
+func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
+	if m != nil {
+		return m.File
+	}
+	return nil
+}
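+
+// Illustrative sketch (added commentary, not part of the generated API): a
+// FileDescriptorSet is what "protoc --descriptor_set_out" writes; consumers
+// typically unmarshal the bytes (data, assumed below) and walk the files:
+//
+//	var set descriptor.FileDescriptorSet
+//	if err := proto.Unmarshal(data, &set); err == nil {
+//		for _, fd := range set.GetFile() {
+//			fmt.Println(fd.GetName())
+//		}
+//	}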
+
+// Describes a complete .proto file.
+type FileDescriptorProto struct {
+	Name    *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
+	// Names of files imported by this file.
+	Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
+	// Indexes of the public imported files in the dependency list above.
+	PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
+	// Indexes of the weak imported files in the dependency list.
+	// For Google-internal migration only. Do not use.
+	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+	// All top-level definitions in this file.
+	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
+	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+	Service     []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
+	Extension   []*FieldDescriptorProto   `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
+	Options     *FileOptions              `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+	// This field contains optional information about the original source code.
+	// You may safely remove this entire field without harming runtime
+	// functionality of the descriptors -- the information is needed only by
+	// development tools.
+	SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
+	// The syntax of the proto file.
+	// The supported values are "proto2" and "proto3".
+	Syntax               *string  `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *FileDescriptorProto) Reset()         { *m = FileDescriptorProto{} }
+func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorProto) ProtoMessage()    {}
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{1}
+}
+
+func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b)
+}
+func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *FileDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileDescriptorProto.Merge(m, src)
+}
+func (m *FileDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_FileDescriptorProto.Size(m)
+}
+func (m *FileDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo
+
+func (m *FileDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FileDescriptorProto) GetPackage() string {
+	if m != nil && m.Package != nil {
+		return *m.Package
+	}
+	return ""
+}
+
+func (m *FileDescriptorProto) GetDependency() []string {
+	if m != nil {
+		return m.Dependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetPublicDependency() []int32 {
+	if m != nil {
+		return m.PublicDependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetWeakDependency() []int32 {
+	if m != nil {
+		return m.WeakDependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
+	if m != nil {
+		return m.MessageType
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
+	if m != nil {
+		return m.EnumType
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
+	if m != nil {
+		return m.Service
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Extension
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetOptions() *FileOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
+	if m != nil {
+		return m.SourceCodeInfo
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetSyntax() string {
+	if m != nil && m.Syntax != nil {
+		return *m.Syntax
+	}
+	return ""
+}
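+
+// Illustrative sketch (added commentary, not part of the generated API):
+// because proto2 scalar fields are pointers, optional fields are set with
+// the proto wrapper helpers; a minimal file descriptor might be built as:
+//
+//	fd := &descriptor.FileDescriptorProto{
+//		Name:    proto.String("example.proto"),
+//		Package: proto.String("example"),
+//		Syntax:  proto.String("proto3"),
+//	}
+//	fmt.Println(fd.GetName(), fd.GetSyntax()) // "example.proto proto3"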
+
+// Describes a message type.
+type DescriptorProto struct {
+	Name           *string                           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Field          []*FieldDescriptorProto           `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+	Extension      []*FieldDescriptorProto           `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
+	NestedType     []*DescriptorProto                `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
+	EnumType       []*EnumDescriptorProto            `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+	ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
+	OneofDecl      []*OneofDescriptorProto           `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
+	Options        *MessageOptions                   `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
+	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+	// Reserved field names, which may not be used by fields in the same message.
+	// A given name may only be reserved once.
+	ReservedName         []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DescriptorProto) Reset()         { *m = DescriptorProto{} }
+func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto) ProtoMessage()    {}
+func (*DescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{2}
+}
+
+func (m *DescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto.Unmarshal(m, b)
+}
+func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto.Merge(m, src)
+}
+func (m *DescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto.Size(m)
+}
+func (m *DescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo
+
+func (m *DescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Field
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Extension
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
+	if m != nil {
+		return m.NestedType
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
+	if m != nil {
+		return m.EnumType
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
+	if m != nil {
+		return m.ExtensionRange
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
+	if m != nil {
+		return m.OneofDecl
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetOptions() *MessageOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
+	if m != nil {
+		return m.ReservedRange
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetReservedName() []string {
+	if m != nil {
+		return m.ReservedName
+	}
+	return nil
+}
+
+type DescriptorProto_ExtensionRange struct {
+	Start                *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	Options              *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *DescriptorProto_ExtensionRange) Reset()         { *m = DescriptorProto_ExtensionRange{} }
+func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ExtensionRange) ProtoMessage()    {}
+func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{2, 0}
+}
+
+func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo
+
+func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Range of reserved tag numbers. Reserved tag numbers may not be used by
+// fields or extension ranges in the same message. Reserved ranges may
+// not overlap.
+type DescriptorProto_ReservedRange struct {
+	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DescriptorProto_ReservedRange) Reset()         { *m = DescriptorProto_ReservedRange{} }
+func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ReservedRange) ProtoMessage()    {}
+func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{2, 1}
+}
+
+func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m)
+}
+func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo
+
+func (m *DescriptorProto_ReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+type ExtensionRangeOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *ExtensionRangeOptions) Reset()         { *m = ExtensionRangeOptions{} }
+func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
+func (*ExtensionRangeOptions) ProtoMessage()    {}
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{3}
+}
+
+var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ExtensionRangeOptions
+}
+
+func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b)
+}
+func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic)
+}
+func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExtensionRangeOptions.Merge(m, src)
+}
+func (m *ExtensionRangeOptions) XXX_Size() int {
+	return xxx_messageInfo_ExtensionRangeOptions.Size(m)
+}
+func (m *ExtensionRangeOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo
+
+func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+// Describes a field within a message.
+type FieldDescriptorProto struct {
+	Name   *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number *int32                      `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
+	Label  *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
+	// If type_name is set, this need not be set.  If both this and type_name
+	// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+	Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
+	// For message and enum types, this is the name of the type.  If the name
+	// starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
+	// rules are used to find the type (i.e. first the nested types within this
+	// message are searched, then within the parent, on up to the root
+	// namespace).
+	TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
+	// For extensions, this is the name of the type being extended.  It is
+	// resolved in the same manner as type_name.
+	Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
+	// For numeric types, contains the original text representation of the value.
+	// For booleans, "true" or "false".
+	// For strings, contains the default text contents (not escaped in any way).
+	// For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
+	// TODO(kenton):  Base-64 encode?
+	DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
+	// If set, gives the index of a oneof in the containing type's oneof_decl
+	// list.  This field is a member of that oneof.
+	OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
+	// JSON name of this field. The value is set by the protocol compiler. If the
+	// user has set a "json_name" option on this field, that option's value
+	// will be used. Otherwise, it's deduced from the field's name by converting
+	// it to camelCase.
+	JsonName             *string       `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
+	Options              *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *FieldDescriptorProto) Reset()         { *m = FieldDescriptorProto{} }
+func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FieldDescriptorProto) ProtoMessage()    {}
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{4}
+}
+
+func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b)
+}
+func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldDescriptorProto.Merge(m, src)
+}
+func (m *FieldDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_FieldDescriptorProto.Size(m)
+}
+func (m *FieldDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo
+
+func (m *FieldDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetNumber() int32 {
+	if m != nil && m.Number != nil {
+		return *m.Number
+	}
+	return 0
+}
+
+func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
+	if m != nil && m.Label != nil {
+		return *m.Label
+	}
+	return FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
+	if m != nil && m.Type != nil {
+		return *m.Type
+	}
+	return FieldDescriptorProto_TYPE_DOUBLE
+}
+
+func (m *FieldDescriptorProto) GetTypeName() string {
+	if m != nil && m.TypeName != nil {
+		return *m.TypeName
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetExtendee() string {
+	if m != nil && m.Extendee != nil {
+		return *m.Extendee
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetDefaultValue() string {
+	if m != nil && m.DefaultValue != nil {
+		return *m.DefaultValue
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetOneofIndex() int32 {
+	if m != nil && m.OneofIndex != nil {
+		return *m.OneofIndex
+	}
+	return 0
+}
+
+func (m *FieldDescriptorProto) GetJsonName() string {
+	if m != nil && m.JsonName != nil {
+		return *m.JsonName
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
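+
+// Illustrative sketch (added commentary, not part of the generated API): per
+// the type_name comment above, message-typed fields carry a (possibly
+// fully-qualified, dot-prefixed) type name alongside the type enum:
+//
+//	f := &descriptor.FieldDescriptorProto{
+//		Name:     proto.String("file"),
+//		Number:   proto.Int32(1),
+//		Label:    descriptor.FieldDescriptorProto_LABEL_REPEATED.Enum(),
+//		Type:     descriptor.FieldDescriptorProto_TYPE_MESSAGE.Enum(),
+//		TypeName: proto.String(".google.protobuf.FileDescriptorProto"),
+//	}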
+
+// Describes a oneof.
+type OneofDescriptorProto struct {
+	Name                 *string       `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Options              *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *OneofDescriptorProto) Reset()         { *m = OneofDescriptorProto{} }
+func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*OneofDescriptorProto) ProtoMessage()    {}
+func (*OneofDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{5}
+}
+
+func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b)
+}
+func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OneofDescriptorProto.Merge(m, src)
+}
+func (m *OneofDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_OneofDescriptorProto.Size(m)
+}
+func (m *OneofDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo
+
+func (m *OneofDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *OneofDescriptorProto) GetOptions() *OneofOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
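+
+// Illustrative sketch (added commentary, not part of the generated API): a
+// field's OneofIndex (see FieldDescriptorProto above) indexes into the
+// containing message's OneofDecl list; f and md below are assumed:
+//
+//	if f.OneofIndex != nil { // f is a *descriptor.FieldDescriptorProto
+//		oneof := md.GetOneofDecl()[f.GetOneofIndex()] // md is its *descriptor.DescriptorProto
+//		fmt.Println(oneof.GetName())
+//	}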
+
+// Describes an enum type.
+type EnumDescriptorProto struct {
+	Name    *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Value   []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+	Options *EnumOptions                `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	// Range of reserved numeric values. Reserved numeric values may not be used
+	// by enum values in the same enum declaration. Reserved ranges may not
+	// overlap.
+	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+	// Reserved enum value names, which may not be reused. A given name may only
+	// be reserved once.
+	ReservedName         []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EnumDescriptorProto) Reset()         { *m = EnumDescriptorProto{} }
+func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto) ProtoMessage()    {}
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{6}
+}
+
+func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b)
+}
+func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto.Merge(m, src)
+}
+func (m *EnumDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_EnumDescriptorProto.Size(m)
+}
+func (m *EnumDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo
+
+func (m *EnumDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange {
+	if m != nil {
+		return m.ReservedRange
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetReservedName() []string {
+	if m != nil {
+		return m.ReservedName
+	}
+	return nil
+}
+
+// Range of reserved numeric values. Reserved values may not be used by
+// entries in the same enum. Reserved ranges may not overlap.
+//
+// Note that this is distinct from DescriptorProto.ReservedRange in that it
+// is inclusive such that it can appropriately represent the entire int32
+// domain.
+type EnumDescriptorProto_EnumReservedRange struct {
+	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) Reset()         { *m = EnumDescriptorProto_EnumReservedRange{} }
+func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage()    {}
+func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{6, 0}
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
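+
+// Illustrative sketch (added commentary, not part of the generated API):
+// unlike DescriptorProto.ReservedRange, End is inclusive here (see the
+// comment above), so a containment check covers both endpoints:
+//
+//	func enumValueReserved(r *descriptor.EnumDescriptorProto_EnumReservedRange, n int32) bool {
+//		return n >= r.GetStart() && n <= r.GetEnd() // inclusive on both ends
+//	}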
+
+// Describes a value within an enum.
+type EnumValueDescriptorProto struct {
+	Name                 *string           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number               *int32            `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+	Options              *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *EnumValueDescriptorProto) Reset()         { *m = EnumValueDescriptorProto{} }
+func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumValueDescriptorProto) ProtoMessage()    {}
+func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{7}
+}
+
+func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b)
+}
+func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src)
+}
+func (m *EnumValueDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_EnumValueDescriptorProto.Size(m)
+}
+func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo
+
+func (m *EnumValueDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *EnumValueDescriptorProto) GetNumber() int32 {
+	if m != nil && m.Number != nil {
+		return *m.Number
+	}
+	return 0
+}
+
+func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a service.
+type ServiceDescriptorProto struct {
+	Name                 *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Method               []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+	Options              *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *ServiceDescriptorProto) Reset()         { *m = ServiceDescriptorProto{} }
+func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*ServiceDescriptorProto) ProtoMessage()    {}
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{8}
+}
+
+func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b)
+}
+func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceDescriptorProto.Merge(m, src)
+}
+func (m *ServiceDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_ServiceDescriptorProto.Size(m)
+}
+func (m *ServiceDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo
+
+func (m *ServiceDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
+	if m != nil {
+		return m.Method
+	}
+	return nil
+}
+
+func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a method of a service.
+type MethodDescriptorProto struct {
+	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Input and output type names.  These are resolved in the same way as
+	// FieldDescriptorProto.type_name, but must refer to a message type.
+	InputType  *string        `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
+	OutputType *string        `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
+	Options    *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
+	// Identifies whether the client streams multiple client messages.
+	ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
+	// Identifies whether the server streams multiple server messages.
+	ServerStreaming      *bool    `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MethodDescriptorProto) Reset()         { *m = MethodDescriptorProto{} }
+func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*MethodDescriptorProto) ProtoMessage()    {}
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{9}
+}
+
+func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b)
+}
+func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MethodDescriptorProto.Merge(m, src)
+}
+func (m *MethodDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_MethodDescriptorProto.Size(m)
+}
+func (m *MethodDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo
+
+const Default_MethodDescriptorProto_ClientStreaming bool = false
+const Default_MethodDescriptorProto_ServerStreaming bool = false
+
+func (m *MethodDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetInputType() string {
+	if m != nil && m.InputType != nil {
+		return *m.InputType
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetOutputType() string {
+	if m != nil && m.OutputType != nil {
+		return *m.OutputType
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *MethodDescriptorProto) GetClientStreaming() bool {
+	if m != nil && m.ClientStreaming != nil {
+		return *m.ClientStreaming
+	}
+	return Default_MethodDescriptorProto_ClientStreaming
+}
+
+func (m *MethodDescriptorProto) GetServerStreaming() bool {
+	if m != nil && m.ServerStreaming != nil {
+		return *m.ServerStreaming
+	}
+	return Default_MethodDescriptorProto_ServerStreaming
+}
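+
+// Illustrative sketch (added commentary, not part of the generated API): the
+// two streaming flags above combine to describe the four RPC call shapes; a
+// bidirectional-streaming method sets both:
+//
+//	m := &descriptor.MethodDescriptorProto{
+//		Name:            proto.String("Chat"),
+//		InputType:       proto.String(".example.Msg"),
+//		OutputType:      proto.String(".example.Msg"),
+//		ClientStreaming: proto.Bool(true),
+//		ServerStreaming: proto.Bool(true),
+//	}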
+
+type FileOptions struct {
+	// Sets the Java package where classes generated from this .proto will be
+	// placed.  By default, the proto package is used, but this is often
+	// inappropriate because proto packages do not normally start with backwards
+	// domain names.
+	JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
+	// If set, all the classes from the .proto file are wrapped in a single
+	// outer class with the given name.  This applies to both Proto1
+	// (equivalent to the old "--one_java_file" option) and Proto2 (where
+	// a .proto always translates to a single class, but you may want to
+	// explicitly choose the class name).
+	JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+	// If set to true, then the Java code generator will generate a separate .java
+	// file for each top-level message, enum, and service defined in the .proto
+	// file.  Thus, these types will *not* be nested inside the outer class
+	// named by java_outer_classname.  However, the outer class will still be
+	// generated to contain the file's getDescriptor() method as well as any
+	// top-level extensions defined in the file.
+	JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+	// This option does nothing.
+	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use.
+	// If set to true, then the Java2 code generator will generate code that
+	// throws an exception whenever an attempt is made to assign a non-UTF-8
+	// byte sequence to a string field.
+	// Message reflection will do the same.
+	// However, an extension field still accepts non-UTF-8 byte sequences.
+	// This option has no effect when used with the lite runtime.
+	JavaStringCheckUtf8 *bool                     `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+	OptimizeFor         *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+	// Sets the Go package where structs generated from this .proto will be
+	// placed. If omitted, the Go package will be derived from the following:
+	//   - The basename of the package import path, if provided.
+	//   - Otherwise, the package statement in the .proto file, if present.
+	//   - Otherwise, the basename of the .proto file, without extension.
+	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+	// Should generic services be generated in each language?  "Generic" services
+	// are not specific to any particular RPC system.  They are generated by the
+	// main code generators in each language (without additional plugins).
+	// Generic services were the only kind of service generation supported by
+	// early versions of google.protobuf.
+	//
+	// Generic services are now considered deprecated in favor of using plugins
+	// that generate code specific to your particular RPC system.  Therefore,
+	// these default to false.  Old code which depends on generic services should
+	// explicitly set them to true.
+	CcGenericServices   *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+	PyGenericServices   *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+	PhpGenericServices  *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+	// Is this file deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for everything in the file, or it will be completely ignored; at the very
+	// least, this is a formalization for deprecating files.
+	Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Enables the use of arenas for the proto messages in this file. This applies
+	// only to generated classes for C++.
+	CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+	// Sets the objective c class prefix which is prepended to all objective c
+	// generated classes from this .proto. There is no default.
+	ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+	// Namespace for generated classes; defaults to the package.
+	CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+	// By default, Swift generators will take the proto package and CamelCase it,
+	// replacing '.' with underscore, and use that to prefix the types/symbols
+	// defined. When this option is provided, they will use this value instead
+	// to prefix the types/symbols defined.
+	SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
+	// Sets the php class prefix which is prepended to all php generated classes
+	// from this .proto. Default is empty.
+	PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
+	// Use this option to change the namespace of php generated classes. Default
+	// is empty. When this option is empty, the package name will be used for
+	// determining the namespace.
+	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
+	// Use this option to change the namespace of php generated metadata classes.
+	// Default is empty. When this option is empty, the proto file name will be used
+	// for determining the namespace.
+	PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
+	// Use this option to change the package of ruby generated classes. Default
+	// is empty. When this option is not set, the package name will be used for
+	// determining the ruby package.
+	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
+	// The parser stores options it doesn't recognize here.
+	// See the documentation for the "Options" section above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *FileOptions) Reset()         { *m = FileOptions{} }
+func (m *FileOptions) String() string { return proto.CompactTextString(m) }
+func (*FileOptions) ProtoMessage()    {}
+func (*FileOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{10}
+}
+
+var extRange_FileOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FileOptions
+}
+
+func (m *FileOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileOptions.Unmarshal(m, b)
+}
+func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic)
+}
+func (m *FileOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileOptions.Merge(m, src)
+}
+func (m *FileOptions) XXX_Size() int {
+	return xxx_messageInfo_FileOptions.Size(m)
+}
+func (m *FileOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileOptions proto.InternalMessageInfo
+
+const Default_FileOptions_JavaMultipleFiles bool = false
+const Default_FileOptions_JavaStringCheckUtf8 bool = false
+const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
+const Default_FileOptions_CcGenericServices bool = false
+const Default_FileOptions_JavaGenericServices bool = false
+const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_PhpGenericServices bool = false
+const Default_FileOptions_Deprecated bool = false
+const Default_FileOptions_CcEnableArenas bool = false
+
+func (m *FileOptions) GetJavaPackage() string {
+	if m != nil && m.JavaPackage != nil {
+		return *m.JavaPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaOuterClassname() string {
+	if m != nil && m.JavaOuterClassname != nil {
+		return *m.JavaOuterClassname
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaMultipleFiles() bool {
+	if m != nil && m.JavaMultipleFiles != nil {
+		return *m.JavaMultipleFiles
+	}
+	return Default_FileOptions_JavaMultipleFiles
+}
+
+// Deprecated: Do not use.
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
+	if m != nil && m.JavaGenerateEqualsAndHash != nil {
+		return *m.JavaGenerateEqualsAndHash
+	}
+	return false
+}
+
+func (m *FileOptions) GetJavaStringCheckUtf8() bool {
+	if m != nil && m.JavaStringCheckUtf8 != nil {
+		return *m.JavaStringCheckUtf8
+	}
+	return Default_FileOptions_JavaStringCheckUtf8
+}
+
+func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
+	if m != nil && m.OptimizeFor != nil {
+		return *m.OptimizeFor
+	}
+	return Default_FileOptions_OptimizeFor
+}
+
+func (m *FileOptions) GetGoPackage() string {
+	if m != nil && m.GoPackage != nil {
+		return *m.GoPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetCcGenericServices() bool {
+	if m != nil && m.CcGenericServices != nil {
+		return *m.CcGenericServices
+	}
+	return Default_FileOptions_CcGenericServices
+}
+
+func (m *FileOptions) GetJavaGenericServices() bool {
+	if m != nil && m.JavaGenericServices != nil {
+		return *m.JavaGenericServices
+	}
+	return Default_FileOptions_JavaGenericServices
+}
+
+func (m *FileOptions) GetPyGenericServices() bool {
+	if m != nil && m.PyGenericServices != nil {
+		return *m.PyGenericServices
+	}
+	return Default_FileOptions_PyGenericServices
+}
+
+func (m *FileOptions) GetPhpGenericServices() bool {
+	if m != nil && m.PhpGenericServices != nil {
+		return *m.PhpGenericServices
+	}
+	return Default_FileOptions_PhpGenericServices
+}
+
+func (m *FileOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_FileOptions_Deprecated
+}
+
+func (m *FileOptions) GetCcEnableArenas() bool {
+	if m != nil && m.CcEnableArenas != nil {
+		return *m.CcEnableArenas
+	}
+	return Default_FileOptions_CcEnableArenas
+}
+
+func (m *FileOptions) GetObjcClassPrefix() string {
+	if m != nil && m.ObjcClassPrefix != nil {
+		return *m.ObjcClassPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetCsharpNamespace() string {
+	if m != nil && m.CsharpNamespace != nil {
+		return *m.CsharpNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetSwiftPrefix() string {
+	if m != nil && m.SwiftPrefix != nil {
+		return *m.SwiftPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpClassPrefix() string {
+	if m != nil && m.PhpClassPrefix != nil {
+		return *m.PhpClassPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpNamespace() string {
+	if m != nil && m.PhpNamespace != nil {
+		return *m.PhpNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpMetadataNamespace() string {
+	if m != nil && m.PhpMetadataNamespace != nil {
+		return *m.PhpMetadataNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetRubyPackage() string {
+	if m != nil && m.RubyPackage != nil {
+		return *m.RubyPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
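+
+// Illustrative sketch (added commentary, not part of the generated API):
+// file-level options are set the same way; note that OptimizeFor falls back
+// to Default_FileOptions_OptimizeFor (SPEED) when unset:
+//
+//	o := &descriptor.FileOptions{
+//		GoPackage:   proto.String("github.com/example/examplepb"),
+//		OptimizeFor: descriptor.FileOptions_CODE_SIZE.Enum(),
+//	}
+//	fmt.Println(o.GetOptimizeFor()) // CODE_SIZE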
+
+type MessageOptions struct {
+	// Set true to use the old proto1 MessageSet wire format for extensions.
+	// This is provided for backwards-compatibility with the MessageSet wire
+	// format.  You should not use this for any other reason:  It's less
+	// efficient, has fewer features, and is more complicated.
+	//
+	// The message must be defined exactly as follows:
+	//   message Foo {
+	//     option message_set_wire_format = true;
+	//     extensions 4 to max;
+	//   }
+	// Note that the message cannot have any defined fields; MessageSets only
+	// have extensions.
+	//
+	// All extensions of your type must be singular messages; e.g. they cannot
+	// be int32s, enums, or repeated messages.
+	//
+	// Because this is an option, the above two restrictions are not enforced by
+	// the protocol compiler.
+	MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+	// Disables the generation of the standard "descriptor()" accessor, which can
+	// conflict with a field of the same name.  This is meant to make migration
+	// from proto1 easier; new code should avoid fields named "descriptor".
+	NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+	// Is this message deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the message, or it will be completely ignored; at the very least,
+	// this is a formalization for deprecating messages.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Whether the message is an automatically generated map entry type for the
+	// maps field.
+	//
+	// For maps fields:
+	//     map<KeyType, ValueType> map_field = 1;
+	// The parsed descriptor looks like:
+	//     message MapFieldEntry {
+	//         option map_entry = true;
+	//         optional KeyType key = 1;
+	//         optional ValueType value = 2;
+	//     }
+	//     repeated MapFieldEntry map_field = 1;
+	//
+	// Implementations may choose not to generate the map_entry=true message, but
+	// use a native map in the target language to hold the keys and values.
+	// The reflection APIs in such implementations still need to work as
+	// if the field is a repeated message field.
+	//
+	// NOTE: Do not set the option in .proto files. Always use the maps syntax
+	// instead. The option should only be implicitly set by the proto compiler
+	// parser.
+	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *MessageOptions) Reset()         { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage()    {}
+func (*MessageOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{11}
+}
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MessageOptions
+}
+
+func (m *MessageOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MessageOptions.Unmarshal(m, b)
+}
+func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic)
+}
+func (m *MessageOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MessageOptions.Merge(m, src)
+}
+func (m *MessageOptions) XXX_Size() int {
+	return xxx_messageInfo_MessageOptions.Size(m)
+}
+func (m *MessageOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_MessageOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MessageOptions proto.InternalMessageInfo
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+	if m != nil && m.MessageSetWireFormat != nil {
+		return *m.MessageSetWireFormat
+	}
+	return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+	if m != nil && m.NoStandardDescriptorAccessor != nil {
+		return *m.NoStandardDescriptorAccessor
+	}
+	return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+	if m != nil && m.MapEntry != nil {
+		return *m.MapEntry
+	}
+	return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
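
A note on the getter pattern above: every getter is nil-safe and falls back to the Default_MessageOptions_* constants, so callers never have to dereference the optional-field pointers themselves. A short illustrative sketch, not part of the generated file:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	var opts *descriptor.MessageOptions // nil receiver is fine
	fmt.Println(opts.GetDeprecated())   // false, via Default_MessageOptions_Deprecated

	opts = &descriptor.MessageOptions{Deprecated: proto.Bool(true)}
	fmt.Println(opts.GetDeprecated()) // true
}
```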
+
+type FieldOptions struct {
+	// The ctype option instructs the C++ code generator to use a different
+	// representation of the field than it normally would.  See the specific
+	// options below.  This option is not yet implemented in the open source
+	// release -- sorry, we'll try to include it in a future version!
+	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+	// The packed option can be enabled for repeated primitive fields to enable
+	// a more efficient representation on the wire. Rather than repeatedly
+	// writing the tag and type for each element, the entire array is encoded as
+	// a single length-delimited blob. In proto3, only explicitly setting it to
+	// false will avoid using packed encoding.
+	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+	// The jstype option determines the JavaScript type used for values of the
+	// field.  The option is permitted only for 64 bit integral and fixed types
+	// (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING
+	// is represented as a JavaScript string, which avoids the loss of precision
+	// that can happen when a large value is converted to a floating point
+	// JavaScript number.
+	// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+	// use the JavaScript "number" type.  The behavior of the default option
+	// JS_NORMAL is implementation dependent.
+	//
+	// This option is an enum to permit additional types to be added, e.g.
+	// goog.math.Integer.
+	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+	// Should this field be parsed lazily?  Lazy applies only to message-type
+	// fields.  It means that when the outer message is initially parsed, the
+	// inner message's contents will not be parsed but instead stored in encoded
+	// form.  The inner message will actually be parsed when it is first accessed.
+	//
+	// This is only a hint.  Implementations are free to choose whether to use
+	// eager or lazy parsing regardless of the value of this option.  However,
+	// setting this option true suggests that the protocol author believes that
+	// using lazy parsing on this field is worth the additional bookkeeping
+	// overhead typically needed to implement it.
+	//
+	// This option does not affect the public interface of any generated code;
+	// all method signatures remain the same.  Furthermore, thread-safety of the
+	// interface is not affected by this option; const methods remain safe to
+	// call from multiple threads concurrently, while non-const methods continue
+	// to require exclusive access.
+	//
+	//
+	// Note that implementations may choose not to check required fields within
+	// a lazy sub-message.  That is, calling IsInitialized() on the outer message
+	// may return true even if the inner message has missing required fields.
+	// This is necessary because otherwise the inner message would have to be
+	// parsed in order to perform the check, defeating the purpose of lazy
+	// parsing.  An implementation which chooses not to check required fields
+	// must be consistent about it.  That is, for any particular sub-message, the
+	// implementation must either *always* check its required fields, or *never*
+	// check its required fields, regardless of whether or not the message has
+	// been parsed.
+	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+	// Is this field deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for accessors, or it will be completely ignored; at the very least, this
+	// is a formalization for deprecating fields.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// For Google-internal migration only. Do not use.
+	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *FieldOptions) Reset()         { *m = FieldOptions{} }
+func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
+func (*FieldOptions) ProtoMessage()    {}
+func (*FieldOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{12}
+}
+
+var extRange_FieldOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FieldOptions
+}
+
+func (m *FieldOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FieldOptions.Unmarshal(m, b)
+}
+func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic)
+}
+func (m *FieldOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldOptions.Merge(m, src)
+}
+func (m *FieldOptions) XXX_Size() int {
+	return xxx_messageInfo_FieldOptions.Size(m)
+}
+func (m *FieldOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_FieldOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldOptions proto.InternalMessageInfo
+
+const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
+const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
+const Default_FieldOptions_Lazy bool = false
+const Default_FieldOptions_Deprecated bool = false
+const Default_FieldOptions_Weak bool = false
+
+func (m *FieldOptions) GetCtype() FieldOptions_CType {
+	if m != nil && m.Ctype != nil {
+		return *m.Ctype
+	}
+	return Default_FieldOptions_Ctype
+}
+
+func (m *FieldOptions) GetPacked() bool {
+	if m != nil && m.Packed != nil {
+		return *m.Packed
+	}
+	return false
+}
+
+func (m *FieldOptions) GetJstype() FieldOptions_JSType {
+	if m != nil && m.Jstype != nil {
+		return *m.Jstype
+	}
+	return Default_FieldOptions_Jstype
+}
+
+func (m *FieldOptions) GetLazy() bool {
+	if m != nil && m.Lazy != nil {
+		return *m.Lazy
+	}
+	return Default_FieldOptions_Lazy
+}
+
+func (m *FieldOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_FieldOptions_Deprecated
+}
+
+func (m *FieldOptions) GetWeak() bool {
+	if m != nil && m.Weak != nil {
+		return *m.Weak
+	}
+	return Default_FieldOptions_Weak
+}
+
+func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
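
The pointer-typed fields above carry proto2 presence information: an unset field reports its declared default through the getter yet stays distinguishable from one that was set explicitly, which matters for packed in particular. An illustrative sketch, not part of the generated file:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	o := &descriptor.FieldOptions{}
	fmt.Println(o.GetCtype())  // STRING (Default_FieldOptions_Ctype)
	fmt.Println(o.GetJstype()) // JS_NORMAL (Default_FieldOptions_Jstype)
	fmt.Println(o.GetPacked()) // false, but only because the field is unset

	fmt.Println(o.Packed == nil) // true: never set
	o.Packed = proto.Bool(false) // explicitly disable packed encoding
	fmt.Println(o.Packed == nil) // false: explicitly set to false
}
```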
+
+type OneofOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *OneofOptions) Reset()         { *m = OneofOptions{} }
+func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
+func (*OneofOptions) ProtoMessage()    {}
+func (*OneofOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{13}
+}
+
+var extRange_OneofOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_OneofOptions
+}
+
+func (m *OneofOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OneofOptions.Unmarshal(m, b)
+}
+func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic)
+}
+func (m *OneofOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OneofOptions.Merge(m, src)
+}
+func (m *OneofOptions) XXX_Size() int {
+	return xxx_messageInfo_OneofOptions.Size(m)
+}
+func (m *OneofOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_OneofOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OneofOptions proto.InternalMessageInfo
+
+func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type EnumOptions struct {
+	// Set this option to true to allow mapping different tag names to the same
+	// value.
+	AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
+	// Is this enum deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the enum, or it will be completely ignored; at the very least, this
+	// is a formalization for deprecating enums.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *EnumOptions) Reset()         { *m = EnumOptions{} }
+func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumOptions) ProtoMessage()    {}
+func (*EnumOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{14}
+}
+
+var extRange_EnumOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_EnumOptions
+}
+
+func (m *EnumOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumOptions.Unmarshal(m, b)
+}
+func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic)
+}
+func (m *EnumOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumOptions.Merge(m, src)
+}
+func (m *EnumOptions) XXX_Size() int {
+	return xxx_messageInfo_EnumOptions.Size(m)
+}
+func (m *EnumOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumOptions proto.InternalMessageInfo
+
+const Default_EnumOptions_Deprecated bool = false
+
+func (m *EnumOptions) GetAllowAlias() bool {
+	if m != nil && m.AllowAlias != nil {
+		return *m.AllowAlias
+	}
+	return false
+}
+
+func (m *EnumOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_EnumOptions_Deprecated
+}
+
+func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
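
Note the asymmetry in the fields above: deprecated declares def=0 and gets a Default_EnumOptions_Deprecated constant, while allow_alias declares no default, so its getter falls back to the plain zero value. A brief illustrative sketch:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	o := &descriptor.EnumOptions{}
	fmt.Println(o.GetAllowAlias()) // false: plain zero value, no Default_ constant
	fmt.Println(o.GetDeprecated()) // false: via Default_EnumOptions_Deprecated

	o.AllowAlias = proto.Bool(true) // permit two enum names to share one number
	fmt.Println(o.GetAllowAlias())  // true
}
```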
+
+type EnumValueOptions struct {
+	// Is this enum value deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the enum value, or it will be completely ignored; at the very least,
+	// this is a formalization for deprecating enum values.
+	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *EnumValueOptions) Reset()         { *m = EnumValueOptions{} }
+func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumValueOptions) ProtoMessage()    {}
+func (*EnumValueOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{15}
+}
+
+var extRange_EnumValueOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_EnumValueOptions
+}
+
+func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b)
+}
+func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic)
+}
+func (m *EnumValueOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumValueOptions.Merge(m, src)
+}
+func (m *EnumValueOptions) XXX_Size() int {
+	return xxx_messageInfo_EnumValueOptions.Size(m)
+}
+func (m *EnumValueOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumValueOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo
+
+const Default_EnumValueOptions_Deprecated bool = false
+
+func (m *EnumValueOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_EnumValueOptions_Deprecated
+}
+
+func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type ServiceOptions struct {
+	// Is this service deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the service, or it will be completely ignored; at the very least,
+	// this is a formalization for deprecating services.
+	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *ServiceOptions) Reset()         { *m = ServiceOptions{} }
+func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
+func (*ServiceOptions) ProtoMessage()    {}
+func (*ServiceOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{16}
+}
+
+var extRange_ServiceOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ServiceOptions
+}
+
+func (m *ServiceOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServiceOptions.Unmarshal(m, b)
+}
+func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic)
+}
+func (m *ServiceOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceOptions.Merge(m, src)
+}
+func (m *ServiceOptions) XXX_Size() int {
+	return xxx_messageInfo_ServiceOptions.Size(m)
+}
+func (m *ServiceOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo
+
+const Default_ServiceOptions_Deprecated bool = false
+
+func (m *ServiceOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_ServiceOptions_Deprecated
+}
+
+func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type MethodOptions struct {
+	// Is this method deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the method, or it will be completely ignored; at the very least,
+	// this is a formalization for deprecating methods.
+	Deprecated       *bool                           `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *MethodOptions) Reset()         { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage()    {}
+func (*MethodOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{17}
+}
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MethodOptions
+}
+
+func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
+}
+func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
+}
+func (m *MethodOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MethodOptions.Merge(m, src)
+}
+func (m *MethodOptions) XXX_Size() int {
+	return xxx_messageInfo_MethodOptions.Size(m)
+}
+func (m *MethodOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_MethodOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MethodOptions proto.InternalMessageInfo
+
+const Default_MethodOptions_Deprecated bool = false
+const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
+
+func (m *MethodOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
+	if m != nil && m.IdempotencyLevel != nil {
+		return *m.IdempotencyLevel
+	}
+	return Default_MethodOptions_IdempotencyLevel
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
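
One common consumer of idempotency_level is retry logic: NO_SIDE_EFFECTS and IDEMPOTENT methods can be retried safely, while the default IDEMPOTENCY_UNKNOWN cannot. The policy function below is an illustrative sketch, not part of this package:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// safeToRetry treats NO_SIDE_EFFECTS and IDEMPOTENT methods as retryable
// and everything else, including the default IDEMPOTENCY_UNKNOWN, as not.
func safeToRetry(o *descriptor.MethodOptions) bool {
	switch o.GetIdempotencyLevel() {
	case descriptor.MethodOptions_NO_SIDE_EFFECTS, descriptor.MethodOptions_IDEMPOTENT:
		return true
	default:
		return false
	}
}

func main() {
	fmt.Println(safeToRetry(nil)) // false: the nil-safe getter yields IDEMPOTENCY_UNKNOWN
}
```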
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+	// The value of the uninterpreted option, in whatever type the tokenizer
+	// identified it as during parsing. Exactly one of these should be set.
+	IdentifierValue      *string  `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
+	PositiveIntValue     *uint64  `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
+	NegativeIntValue     *int64   `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
+	DoubleValue          *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
+	StringValue          []byte   `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+	AggregateValue       *string  `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UninterpretedOption) Reset()         { *m = UninterpretedOption{} }
+func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption) ProtoMessage()    {}
+func (*UninterpretedOption) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{18}
+}
+
+func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b)
+}
+func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic)
+}
+func (m *UninterpretedOption) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UninterpretedOption.Merge(m, src)
+}
+func (m *UninterpretedOption) XXX_Size() int {
+	return xxx_messageInfo_UninterpretedOption.Size(m)
+}
+func (m *UninterpretedOption) XXX_DiscardUnknown() {
+	xxx_messageInfo_UninterpretedOption.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo
+
+func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
+	if m != nil {
+		return m.Name
+	}
+	return nil
+}
+
+func (m *UninterpretedOption) GetIdentifierValue() string {
+	if m != nil && m.IdentifierValue != nil {
+		return *m.IdentifierValue
+	}
+	return ""
+}
+
+func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
+	if m != nil && m.PositiveIntValue != nil {
+		return *m.PositiveIntValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetNegativeIntValue() int64 {
+	if m != nil && m.NegativeIntValue != nil {
+		return *m.NegativeIntValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetDoubleValue() float64 {
+	if m != nil && m.DoubleValue != nil {
+		return *m.DoubleValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetStringValue() []byte {
+	if m != nil {
+		return m.StringValue
+	}
+	return nil
+}
+
+func (m *UninterpretedOption) GetAggregateValue() string {
+	if m != nil && m.AggregateValue != nil {
+		return *m.AggregateValue
+	}
+	return ""
+}
+
+// The name of the uninterpreted option.  Each string represents a segment in
+// a dot-separated name.  is_extension is true iff a segment represents an
+// extension (denoted with parentheses in options specs in .proto files).
+// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+// "foo.(bar.baz).qux".
+type UninterpretedOption_NamePart struct {
+	NamePart             *string  `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+	IsExtension          *bool    `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UninterpretedOption_NamePart) Reset()         { *m = UninterpretedOption_NamePart{} }
+func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption_NamePart) ProtoMessage()    {}
+func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{18, 0}
+}
+
+func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b)
+}
+func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic)
+}
+func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src)
+}
+func (m *UninterpretedOption_NamePart) XXX_Size() int {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Size(m)
+}
+func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() {
+	xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo
+
+func (m *UninterpretedOption_NamePart) GetNamePart() string {
+	if m != nil && m.NamePart != nil {
+		return *m.NamePart
+	}
+	return ""
+}
+
+func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
+	if m != nil && m.IsExtension != nil {
+		return *m.IsExtension
+	}
+	return false
+}
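
To make the NamePart encoding concrete, here is a hypothetical helper, not part of this package, that renders the documented { ["foo", false], ["bar.baz", true], ["qux", false] } example back into "foo.(bar.baz).qux":

```go
package main

import (
	"fmt"
	"strings"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// formatOptionName joins the segments of an option name, wrapping
// extension segments in parentheses.
func formatOptionName(parts []*descriptor.UninterpretedOption_NamePart) string {
	segs := make([]string, 0, len(parts))
	for _, p := range parts {
		if p.GetIsExtension() {
			segs = append(segs, "("+p.GetNamePart()+")")
		} else {
			segs = append(segs, p.GetNamePart())
		}
	}
	return strings.Join(segs, ".")
}

func main() {
	name := []*descriptor.UninterpretedOption_NamePart{
		{NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
		{NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
		{NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
	}
	fmt.Println(formatOptionName(name)) // foo.(bar.baz).qux
}
```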
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+type SourceCodeInfo struct {
+	// A Location identifies a piece of source code in a .proto file which
+	// corresponds to a particular definition.  This information is intended
+	// to be useful to IDEs, code indexers, documentation generators, and similar
+	// tools.
+	//
+	// For example, say we have a file like:
+	//   message Foo {
+	//     optional string foo = 1;
+	//   }
+	// Let's look at just the field definition:
+	//   optional string foo = 1;
+	//   ^       ^^     ^^  ^  ^^^
+	//   a       bc     de  f  ghi
+	// We have the following locations:
+	//   span   path               represents
+	//   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+	//   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+	//   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+	//   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+	//   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+	//
+	// Notes:
+	// - A location may refer to a repeated field itself (i.e. not to any
+	//   particular index within it).  This is used whenever a set of elements is
+	//   logically enclosed in a single code segment.  For example, an entire
+	//   extend block (possibly containing multiple extension definitions) will
+	//   have an outer location whose path refers to the "extensions" repeated
+	//   field without an index.
+	// - Multiple locations may have the same path.  This happens when a single
+	//   logical declaration is spread out across multiple places.  The most
+	//   obvious example is the "extend" block again -- there may be multiple
+	//   extend blocks in the same scope, each of which will have the same path.
+	// - A location's span is not always a subset of its parent's span.  For
+	//   example, the "extendee" of an extension declaration appears at the
+	//   beginning of the "extend" block and is shared by all extensions within
+	//   the block.
+	// - Just because a location's span is a subset of some other location's span
+	//   does not mean that it is a descendant.  For example, a "group" defines
+	//   both a type and a field in a single declaration.  Thus, the locations
+	//   corresponding to the type and field and their components will overlap.
+	// - Code which tries to interpret locations should probably be designed to
+	//   ignore those that it doesn't understand, as more types of locations could
+	//   be recorded in the future.
+	Location             []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
+	XXX_unrecognized     []byte                     `json:"-"`
+	XXX_sizecache        int32                      `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset()         { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage()    {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{19}
+}
+
+func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
+}
+func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
+}
+func (m *SourceCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceCodeInfo.Merge(m, src)
+}
+func (m *SourceCodeInfo) XXX_Size() int {
+	return xxx_messageInfo_SourceCodeInfo.Size(m)
+}
+func (m *SourceCodeInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+	if m != nil {
+		return m.Location
+	}
+	return nil
+}
+
+type SourceCodeInfo_Location struct {
+	// Identifies which part of the FileDescriptorProto was defined at this
+	// location.
+	//
+	// Each element is a field number or an index.  They form a path from
+	// the root FileDescriptorProto to the place where the definition occurs.  For
+	// example, this path:
+	//   [ 4, 3, 2, 7, 1 ]
+	// refers to:
+	//   file.message_type(3)  // 4, 3
+	//       .field(7)         // 2, 7
+	//       .name()           // 1
+	// This is because FileDescriptorProto.message_type has field number 4:
+	//   repeated DescriptorProto message_type = 4;
+	// and DescriptorProto.field has field number 2:
+	//   repeated FieldDescriptorProto field = 2;
+	// and FieldDescriptorProto.name has field number 1:
+	//   optional string name = 1;
+	//
+	// Thus, the above path gives the location of a field name.  If we removed
+	// the last element:
+	//   [ 4, 3, 2, 7 ]
+	// this path refers to the whole field declaration (from the beginning
+	// of the label to the terminating semicolon).
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Always has exactly three or four elements: start line, start column,
+	// end line (optional, otherwise assumed same as start line), end column.
+	// These are packed into a single field for efficiency.  Note that line
+	// and column numbers are zero-based -- typically you will want to add
+	// 1 to each before displaying to a user.
+	Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
+	// If this SourceCodeInfo represents a complete declaration, these are any
+	// comments appearing before and after the declaration which appear to be
+	// attached to the declaration.
+	//
+	// A series of line comments appearing on consecutive lines, with no other
+	// tokens appearing on those lines, will be treated as a single comment.
+	//
+	// leading_detached_comments will keep paragraphs of comments that appear
+	// before (but not connected to) the current element. Each paragraph,
+	// separated by empty lines, will be one comment element in the repeated
+	// field.
+	//
+	// Only the comment content is provided; comment markers (e.g. //) are
+	// stripped out.  For block comments, leading whitespace and an asterisk
+	// will be stripped from the beginning of each line other than the first.
+	// Newlines are included in the output.
+	//
+	// Examples:
+	//
+	//   optional int32 foo = 1;  // Comment attached to foo.
+	//   // Comment attached to bar.
+	//   optional int32 bar = 2;
+	//
+	//   optional string baz = 3;
+	//   // Comment attached to baz.
+	//   // Another line attached to baz.
+	//
+	//   // Comment attached to qux.
+	//   //
+	//   // Another line attached to qux.
+	//   optional double qux = 4;
+	//
+	//   // Detached comment for corge. This is not leading or trailing comments
+	//   // to qux or corge because there are blank lines separating it from
+	//   // both.
+	//
+	//   // Detached comment for corge paragraph 2.
+	//
+	//   optional string corge = 5;
+	//   /* Block comment attached
+	//    * to corge.  Leading asterisks
+	//    * will be removed. */
+	//   /* Block comment attached to
+	//    * grault. */
+	//   optional int32 grault = 6;
+	//
+	//   // ignored detached comments.
+	LeadingComments         *string  `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
+	TrailingComments        *string  `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
+	LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+	XXX_NoUnkeyedLiteral    struct{} `json:"-"`
+	XXX_unrecognized        []byte   `json:"-"`
+	XXX_sizecache           int32    `json:"-"`
+}
+
+func (m *SourceCodeInfo_Location) Reset()         { *m = SourceCodeInfo_Location{} }
+func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo_Location) ProtoMessage()    {}
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{19, 0}
+}
+
+func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b)
+}
+func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic)
+}
+func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src)
+}
+func (m *SourceCodeInfo_Location) XXX_Size() int {
+	return xxx_messageInfo_SourceCodeInfo_Location.Size(m)
+}
+func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo
+
+func (m *SourceCodeInfo_Location) GetPath() []int32 {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *SourceCodeInfo_Location) GetSpan() []int32 {
+	if m != nil {
+		return m.Span
+	}
+	return nil
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingComments() string {
+	if m != nil && m.LeadingComments != nil {
+		return *m.LeadingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetTrailingComments() string {
+	if m != nil && m.TrailingComments != nil {
+		return *m.TrailingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
+	if m != nil {
+		return m.LeadingDetachedComments
+	}
+	return nil
+}
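
To make the span layout concrete: a span holds three or four zero-based values, with the end line omitted when it matches the start line, and display tools typically add 1 to each. A hypothetical helper, not part of this package:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// spanToDisplay converts a zero-based span into one-based start/end
// positions; a three-element span reuses the start line as the end line.
func spanToDisplay(loc *descriptor.SourceCodeInfo_Location) (startLine, startCol, endLine, endCol int32, ok bool) {
	s := loc.GetSpan()
	switch len(s) {
	case 3:
		return s[0] + 1, s[1] + 1, s[0] + 1, s[2] + 1, true
	case 4:
		return s[0] + 1, s[1] + 1, s[2] + 1, s[3] + 1, true
	default:
		return 0, 0, 0, 0, false
	}
}

func main() {
	loc := &descriptor.SourceCodeInfo_Location{Span: []int32{9, 2, 27}}
	l0, c0, l1, c1, _ := spanToDisplay(loc)
	fmt.Printf("%d:%d-%d:%d\n", l0, c0, l1, c1) // 10:3-10:28
}
```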
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+type GeneratedCodeInfo struct {
+	// An Annotation connects some span of text in generated code to an element
+	// of its generating .proto file.
+	Annotation           []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
+	XXX_unrecognized     []byte                          `json:"-"`
+	XXX_sizecache        int32                           `json:"-"`
+}
+
+func (m *GeneratedCodeInfo) Reset()         { *m = GeneratedCodeInfo{} }
+func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo) ProtoMessage()    {}
+func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{20}
+}
+
+func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b)
+}
+func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic)
+}
+func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GeneratedCodeInfo.Merge(m, src)
+}
+func (m *GeneratedCodeInfo) XXX_Size() int {
+	return xxx_messageInfo_GeneratedCodeInfo.Size(m)
+}
+func (m *GeneratedCodeInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo
+
+func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
+	if m != nil {
+		return m.Annotation
+	}
+	return nil
+}
+
+type GeneratedCodeInfo_Annotation struct {
+	// Identifies the element in the original source .proto file. This field
+	// is formatted the same as SourceCodeInfo.Location.path.
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Identifies the filesystem path to the original source .proto.
+	SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
+	// Identifies the starting offset in bytes in the generated code
+	// that relates to the identified object.
+	Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
+	// Identifies the ending offset in bytes in the generated code that
+	// relates to the identified object. The end offset should be one past
+	// the last relevant byte (so the length of the text = end - begin).
+	End                  *int32   `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GeneratedCodeInfo_Annotation) Reset()         { *m = GeneratedCodeInfo_Annotation{} }
+func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo_Annotation) ProtoMessage()    {}
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e5baabe45344a177, []int{20, 0}
+}
+
+func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Size() int {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() {
+	xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo
+
+func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string {
+	if m != nil && m.SourceFile != nil {
+		return *m.SourceFile
+	}
+	return ""
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 {
+	if m != nil && m.Begin != nil {
+		return *m.Begin
+	}
+	return 0
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
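
The begin/end convention above (end is one past the last relevant byte, so length = end - begin) maps directly onto Go slicing. A hypothetical helper, not part of this package:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// annotatedText returns the generated-source bytes an annotation covers,
// or nil when the offsets are out of range.
func annotatedText(src []byte, a *descriptor.GeneratedCodeInfo_Annotation) []byte {
	b, e := a.GetBegin(), a.GetEnd()
	if b < 0 || e < b || int(e) > len(src) {
		return nil
	}
	return src[b:e]
}

func main() {
	src := []byte("type Foo struct{}")
	a := &descriptor.GeneratedCodeInfo_Annotation{
		Begin: proto.Int32(5),
		End:   proto.Int32(8),
	}
	fmt.Printf("%s\n", annotatedText(src, a)) // Foo
}
```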
+
+func init() {
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
+	proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
+	proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
+	proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
+	proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
+	proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
+	proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
+	proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
+	proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions")
+	proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
+	proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
+	proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
+	proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange")
+	proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
+	proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
+	proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
+	proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
+	proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
+	proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
+	proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions")
+	proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
+	proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
+	proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
+	proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
+	proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
+	proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
+	proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
+	proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
+	proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
+	proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
+}
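
These RegisterType and RegisterEnum calls populate the package-level registry, which is what lets callers recover a message type from its fully qualified name later. An illustrative sketch using the golang/protobuf v1 registry API:

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/golang/protobuf/proto"
	// The blank import runs the init() above, registering every descriptor type.
	_ "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	t := proto.MessageType("google.protobuf.FieldOptions") // reflect.Type of *descriptor.FieldOptions
	msg := reflect.New(t.Elem()).Interface().(proto.Message)
	fmt.Println(proto.MessageName(msg)) // google.protobuf.FieldOptions
}
```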
+
+func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor_e5baabe45344a177) }
+
+var fileDescriptor_e5baabe45344a177 = []byte{
+	// 2589 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6,
+	0x15, 0x0e, 0xf5, 0xb7, 0xd2, 0x91, 0x56, 0x3b, 0x3b, 0xbb, 0xb1, 0xe9, 0xcd, 0x8f, 0xd7, 0xca,
+	0x8f, 0xd7, 0x4e, 0xac, 0x0d, 0x1c, 0xdb, 0x71, 0xd6, 0x45, 0x5a, 0xad, 0x44, 0x6f, 0xe4, 0xee,
+	0x4a, 0x2a, 0xa5, 0x6d, 0x7e, 0x80, 0x82, 0x98, 0x25, 0x47, 0x12, 0x6d, 0x8a, 0x64, 0x48, 0xca,
+	0xf6, 0x06, 0xbd, 0x30, 0xd0, 0xab, 0x5e, 0x15, 0xe8, 0x55, 0x51, 0x14, 0xbd, 0xe8, 0x4d, 0x80,
+	0x3e, 0x40, 0x81, 0xde, 0xf5, 0x09, 0x0a, 0xe4, 0x0d, 0x8a, 0xb6, 0x40, 0xfb, 0x08, 0xbd, 0x2c,
+	0x66, 0x86, 0xa4, 0x48, 0x49, 0x1b, 0x6f, 0x02, 0xc4, 0xb9, 0x92, 0xe6, 0x3b, 0xdf, 0x39, 0x73,
+	0xe6, 0xcc, 0x99, 0x99, 0x33, 0x43, 0xd8, 0x1e, 0x39, 0xce, 0xc8, 0xa2, 0xbb, 0xae, 0xe7, 0x04,
+	0xce, 0xc9, 0x74, 0xb8, 0x6b, 0x50, 0x5f, 0xf7, 0x4c, 0x37, 0x70, 0xbc, 0x3a, 0xc7, 0xf0, 0x9a,
+	0x60, 0xd4, 0x23, 0x46, 0xed, 0x08, 0xd6, 0xef, 0x9b, 0x16, 0x6d, 0xc5, 0xc4, 0x3e, 0x0d, 0xf0,
+	0x5d, 0xc8, 0x0d, 0x4d, 0x8b, 0xca, 0xd2, 0x76, 0x76, 0xa7, 0x7c, 0xf3, 0xcd, 0xfa, 0x9c, 0x52,
+	0x3d, 0xad, 0xd1, 0x63, 0xb0, 0xca, 0x35, 0x6a, 0xff, 0xce, 0xc1, 0xc6, 0x12, 0x29, 0xc6, 0x90,
+	0xb3, 0xc9, 0x84, 0x59, 0x94, 0x76, 0x4a, 0x2a, 0xff, 0x8f, 0x65, 0x58, 0x71, 0x89, 0xfe, 0x88,
+	0x8c, 0xa8, 0x9c, 0xe1, 0x70, 0xd4, 0xc4, 0xaf, 0x03, 0x18, 0xd4, 0xa5, 0xb6, 0x41, 0x6d, 0xfd,
+	0x54, 0xce, 0x6e, 0x67, 0x77, 0x4a, 0x6a, 0x02, 0xc1, 0xef, 0xc0, 0xba, 0x3b, 0x3d, 0xb1, 0x4c,
+	0x5d, 0x4b, 0xd0, 0x60, 0x3b, 0xbb, 0x93, 0x57, 0x91, 0x10, 0xb4, 0x66, 0xe4, 0xab, 0xb0, 0xf6,
+	0x84, 0x92, 0x47, 0x49, 0x6a, 0x99, 0x53, 0xab, 0x0c, 0x4e, 0x10, 0x9b, 0x50, 0x99, 0x50, 0xdf,
+	0x27, 0x23, 0xaa, 0x05, 0xa7, 0x2e, 0x95, 0x73, 0x7c, 0xf4, 0xdb, 0x0b, 0xa3, 0x9f, 0x1f, 0x79,
+	0x39, 0xd4, 0x1a, 0x9c, 0xba, 0x14, 0x37, 0xa0, 0x44, 0xed, 0xe9, 0x44, 0x58, 0xc8, 0x9f, 0x11,
+	0x3f, 0xc5, 0x9e, 0x4e, 0xe6, 0xad, 0x14, 0x99, 0x5a, 0x68, 0x62, 0xc5, 0xa7, 0xde, 0x63, 0x53,
+	0xa7, 0x72, 0x81, 0x1b, 0xb8, 0xba, 0x60, 0xa0, 0x2f, 0xe4, 0xf3, 0x36, 0x22, 0x3d, 0xdc, 0x84,
+	0x12, 0x7d, 0x1a, 0x50, 0xdb, 0x37, 0x1d, 0x5b, 0x5e, 0xe1, 0x46, 0xde, 0x5a, 0x32, 0x8b, 0xd4,
+	0x32, 0xe6, 0x4d, 0xcc, 0xf4, 0xf0, 0x1d, 0x58, 0x71, 0xdc, 0xc0, 0x74, 0x6c, 0x5f, 0x2e, 0x6e,
+	0x4b, 0x3b, 0xe5, 0x9b, 0xaf, 0x2e, 0x4d, 0x84, 0xae, 0xe0, 0xa8, 0x11, 0x19, 0xb7, 0x01, 0xf9,
+	0xce, 0xd4, 0xd3, 0xa9, 0xa6, 0x3b, 0x06, 0xd5, 0x4c, 0x7b, 0xe8, 0xc8, 0x25, 0x6e, 0xe0, 0xf2,
+	0xe2, 0x40, 0x38, 0xb1, 0xe9, 0x18, 0xb4, 0x6d, 0x0f, 0x1d, 0xb5, 0xea, 0xa7, 0xda, 0xf8, 0x02,
+	0x14, 0xfc, 0x53, 0x3b, 0x20, 0x4f, 0xe5, 0x0a, 0xcf, 0x90, 0xb0, 0x55, 0xfb, 0x6b, 0x01, 0xd6,
+	0xce, 0x93, 0x62, 0xf7, 0x20, 0x3f, 0x64, 0xa3, 0x94, 0x33, 0xdf, 0x26, 0x06, 0x42, 0x27, 0x1d,
+	0xc4, 0xc2, 0x77, 0x0c, 0x62, 0x03, 0xca, 0x36, 0xf5, 0x03, 0x6a, 0x88, 0x8c, 0xc8, 0x9e, 0x33,
+	0xa7, 0x40, 0x28, 0x2d, 0xa6, 0x54, 0xee, 0x3b, 0xa5, 0xd4, 0xa7, 0xb0, 0x16, 0xbb, 0xa4, 0x79,
+	0xc4, 0x1e, 0x45, 0xb9, 0xb9, 0xfb, 0x3c, 0x4f, 0xea, 0x4a, 0xa4, 0xa7, 0x32, 0x35, 0xb5, 0x4a,
+	0x53, 0x6d, 0xdc, 0x02, 0x70, 0x6c, 0xea, 0x0c, 0x35, 0x83, 0xea, 0x96, 0x5c, 0x3c, 0x23, 0x4a,
+	0x5d, 0x46, 0x59, 0x88, 0x92, 0x23, 0x50, 0xdd, 0xc2, 0x1f, 0xce, 0x52, 0x6d, 0xe5, 0x8c, 0x4c,
+	0x39, 0x12, 0x8b, 0x6c, 0x21, 0xdb, 0x8e, 0xa1, 0xea, 0x51, 0x96, 0xf7, 0xd4, 0x08, 0x47, 0x56,
+	0xe2, 0x4e, 0xd4, 0x9f, 0x3b, 0x32, 0x35, 0x54, 0x13, 0x03, 0x5b, 0xf5, 0x92, 0x4d, 0xfc, 0x06,
+	0xc4, 0x80, 0xc6, 0xd3, 0x0a, 0xf8, 0x2e, 0x54, 0x89, 0xc0, 0x0e, 0x99, 0xd0, 0xad, 0x2f, 0xa1,
+	0x9a, 0x0e, 0x0f, 0xde, 0x84, 0xbc, 0x1f, 0x10, 0x2f, 0xe0, 0x59, 0x98, 0x57, 0x45, 0x03, 0x23,
+	0xc8, 0x52, 0xdb, 0xe0, 0xbb, 0x5c, 0x5e, 0x65, 0x7f, 0xf1, 0x4f, 0x66, 0x03, 0xce, 0xf2, 0x01,
+	0xbf, 0xbd, 0x38, 0xa3, 0x29, 0xcb, 0xf3, 0xe3, 0xde, 0xfa, 0x00, 0x56, 0x53, 0x03, 0x38, 0x6f,
+	0xd7, 0xb5, 0x5f, 0xc2, 0xcb, 0x4b, 0x4d, 0xe3, 0x4f, 0x61, 0x73, 0x6a, 0x9b, 0x76, 0x40, 0x3d,
+	0xd7, 0xa3, 0x2c, 0x63, 0x45, 0x57, 0xf2, 0x7f, 0x56, 0xce, 0xc8, 0xb9, 0xe3, 0x24, 0x5b, 0x58,
+	0x51, 0x37, 0xa6, 0x8b, 0xe0, 0xf5, 0x52, 0xf1, 0xbf, 0x2b, 0xe8, 0xd9, 0xb3, 0x67, 0xcf, 0x32,
+	0xb5, 0xdf, 0x15, 0x60, 0x73, 0xd9, 0x9a, 0x59, 0xba, 0x7c, 0x2f, 0x40, 0xc1, 0x9e, 0x4e, 0x4e,
+	0xa8, 0xc7, 0x83, 0x94, 0x57, 0xc3, 0x16, 0x6e, 0x40, 0xde, 0x22, 0x27, 0xd4, 0x92, 0x73, 0xdb,
+	0xd2, 0x4e, 0xf5, 0xe6, 0x3b, 0xe7, 0x5a, 0x95, 0xf5, 0x43, 0xa6, 0xa2, 0x0a, 0x4d, 0xfc, 0x11,
+	0xe4, 0xc2, 0x2d, 0x9a, 0x59, 0xb8, 0x7e, 0x3e, 0x0b, 0x6c, 0x2d, 0xa9, 0x5c, 0x0f, 0xbf, 0x02,
+	0x25, 0xf6, 0x2b, 0x72, 0xa3, 0xc0, 0x7d, 0x2e, 0x32, 0x80, 0xe5, 0x05, 0xde, 0x82, 0x22, 0x5f,
+	0x26, 0x06, 0x8d, 0x8e, 0xb6, 0xb8, 0xcd, 0x12, 0xcb, 0xa0, 0x43, 0x32, 0xb5, 0x02, 0xed, 0x31,
+	0xb1, 0xa6, 0x94, 0x27, 0x7c, 0x49, 0xad, 0x84, 0xe0, 0xcf, 0x19, 0x86, 0x2f, 0x43, 0x59, 0xac,
+	0x2a, 0xd3, 0x36, 0xe8, 0x53, 0xbe, 0x7b, 0xe6, 0x55, 0xb1, 0xd0, 0xda, 0x0c, 0x61, 0xdd, 0x3f,
+	0xf4, 0x1d, 0x3b, 0x4a, 0x4d, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x1f, 0xcc, 0x6f, 0xdc, 0xaf, 0x2d,
+	0x1f, 0xde, 0x7c, 0x4e, 0xd5, 0xfe, 0x92, 0x81, 0x1c, 0xdf, 0x2f, 0xd6, 0xa0, 0x3c, 0xf8, 0xac,
+	0xa7, 0x68, 0xad, 0xee, 0xf1, 0xfe, 0xa1, 0x82, 0x24, 0x5c, 0x05, 0xe0, 0xc0, 0xfd, 0xc3, 0x6e,
+	0x63, 0x80, 0x32, 0x71, 0xbb, 0xdd, 0x19, 0xdc, 0xb9, 0x85, 0xb2, 0xb1, 0xc2, 0xb1, 0x00, 0x72,
+	0x49, 0xc2, 0xfb, 0x37, 0x51, 0x1e, 0x23, 0xa8, 0x08, 0x03, 0xed, 0x4f, 0x95, 0xd6, 0x9d, 0x5b,
+	0xa8, 0x90, 0x46, 0xde, 0xbf, 0x89, 0x56, 0xf0, 0x2a, 0x94, 0x38, 0xb2, 0xdf, 0xed, 0x1e, 0xa2,
+	0x62, 0x6c, 0xb3, 0x3f, 0x50, 0xdb, 0x9d, 0x03, 0x54, 0x8a, 0x6d, 0x1e, 0xa8, 0xdd, 0xe3, 0x1e,
+	0x82, 0xd8, 0xc2, 0x91, 0xd2, 0xef, 0x37, 0x0e, 0x14, 0x54, 0x8e, 0x19, 0xfb, 0x9f, 0x0d, 0x94,
+	0x3e, 0xaa, 0xa4, 0xdc, 0x7a, 0xff, 0x26, 0x5a, 0x8d, 0xbb, 0x50, 0x3a, 0xc7, 0x47, 0xa8, 0x8a,
+	0xd7, 0x61, 0x55, 0x74, 0x11, 0x39, 0xb1, 0x36, 0x07, 0xdd, 0xb9, 0x85, 0xd0, 0xcc, 0x11, 0x61,
+	0x65, 0x3d, 0x05, 0xdc, 0xb9, 0x85, 0x70, 0xad, 0x09, 0x79, 0x9e, 0x5d, 0x18, 0x43, 0xf5, 0xb0,
+	0xb1, 0xaf, 0x1c, 0x6a, 0xdd, 0xde, 0xa0, 0xdd, 0xed, 0x34, 0x0e, 0x91, 0x34, 0xc3, 0x54, 0xe5,
+	0x67, 0xc7, 0x6d, 0x55, 0x69, 0xa1, 0x4c, 0x12, 0xeb, 0x29, 0x8d, 0x81, 0xd2, 0x42, 0xd9, 0x9a,
+	0x0e, 0x9b, 0xcb, 0xf6, 0xc9, 0xa5, 0x2b, 0x23, 0x31, 0xc5, 0x99, 0x33, 0xa6, 0x98, 0xdb, 0x5a,
+	0x98, 0xe2, 0x7f, 0x65, 0x60, 0x63, 0xc9, 0x59, 0xb1, 0xb4, 0x93, 0x1f, 0x43, 0x5e, 0xa4, 0xa8,
+	0x38, 0x3d, 0xaf, 0x2d, 0x3d, 0x74, 0x78, 0xc2, 0x2e, 0x9c, 0xa0, 0x5c, 0x2f, 0x59, 0x41, 0x64,
+	0xcf, 0xa8, 0x20, 0x98, 0x89, 0x85, 0x3d, 0xfd, 0x17, 0x0b, 0x7b, 0xba, 0x38, 0xf6, 0xee, 0x9c,
+	0xe7, 0xd8, 0xe3, 0xd8, 0xb7, 0xdb, 0xdb, 0xf3, 0x4b, 0xf6, 0xf6, 0x7b, 0xb0, 0xbe, 0x60, 0xe8,
+	0xdc, 0x7b, 0xec, 0xaf, 0x24, 0x90, 0xcf, 0x0a, 0xce, 0x73, 0x76, 0xba, 0x4c, 0x6a, 0xa7, 0xbb,
+	0x37, 0x1f, 0xc1, 0x2b, 0x67, 0x4f, 0xc2, 0xc2, 0x5c, 0x7f, 0x25, 0xc1, 0x85, 0xe5, 0x95, 0xe2,
+	0x52, 0x1f, 0x3e, 0x82, 0xc2, 0x84, 0x06, 0x63, 0x27, 0xaa, 0x96, 0xde, 0x5e, 0x72, 0x06, 0x33,
+	0xf1, 0xfc, 0x64, 0x87, 0x5a, 0xc9, 0x43, 0x3c, 0x7b, 0x56, 0xb9, 0x27, 0xbc, 0x59, 0xf0, 0xf4,
+	0xd7, 0x19, 0x78, 0x79, 0xa9, 0xf1, 0xa5, 0x8e, 0xbe, 0x06, 0x60, 0xda, 0xee, 0x34, 0x10, 0x15,
+	0x91, 0xd8, 0x60, 0x4b, 0x1c, 0xe1, 0x9b, 0x17, 0xdb, 0x3c, 0xa7, 0x41, 0x2c, 0xcf, 0x72, 0x39,
+	0x08, 0x88, 0x13, 0xee, 0xce, 0x1c, 0xcd, 0x71, 0x47, 0x5f, 0x3f, 0x63, 0xa4, 0x0b, 0x89, 0xf9,
+	0x1e, 0x20, 0xdd, 0x32, 0xa9, 0x1d, 0x68, 0x7e, 0xe0, 0x51, 0x32, 0x31, 0xed, 0x11, 0x3f, 0x41,
+	0x8a, 0x7b, 0xf9, 0x21, 0xb1, 0x7c, 0xaa, 0xae, 0x09, 0x71, 0x3f, 0x92, 0x32, 0x0d, 0x9e, 0x40,
+	0x5e, 0x42, 0xa3, 0x90, 0xd2, 0x10, 0xe2, 0x58, 0xa3, 0xf6, 0xdb, 0x12, 0x94, 0x13, 0x75, 0x35,
+	0xbe, 0x02, 0x95, 0x87, 0xe4, 0x31, 0xd1, 0xa2, 0xbb, 0x92, 0x88, 0x44, 0x99, 0x61, 0xbd, 0xf0,
+	0xbe, 0xf4, 0x1e, 0x6c, 0x72, 0x8a, 0x33, 0x0d, 0xa8, 0xa7, 0xe9, 0x16, 0xf1, 0x7d, 0x1e, 0xb4,
+	0x22, 0xa7, 0x62, 0x26, 0xeb, 0x32, 0x51, 0x33, 0x92, 0xe0, 0xdb, 0xb0, 0xc1, 0x35, 0x26, 0x53,
+	0x2b, 0x30, 0x5d, 0x8b, 0x6a, 0xec, 0xf6, 0xe6, 0xf3, 0x93, 0x24, 0xf6, 0x6c, 0x9d, 0x31, 0x8e,
+	0x42, 0x02, 0xf3, 0xc8, 0xc7, 0x2d, 0x78, 0x8d, 0xab, 0x8d, 0xa8, 0x4d, 0x3d, 0x12, 0x50, 0x8d,
+	0x7e, 0x31, 0x25, 0x96, 0xaf, 0x11, 0xdb, 0xd0, 0xc6, 0xc4, 0x1f, 0xcb, 0x9b, 0xcc, 0xc0, 0x7e,
+	0x46, 0x96, 0xd4, 0x4b, 0x8c, 0x78, 0x10, 0xf2, 0x14, 0x4e, 0x6b, 0xd8, 0xc6, 0xc7, 0xc4, 0x1f,
+	0xe3, 0x3d, 0xb8, 0xc0, 0xad, 0xf8, 0x81, 0x67, 0xda, 0x23, 0x4d, 0x1f, 0x53, 0xfd, 0x91, 0x36,
+	0x0d, 0x86, 0x77, 0xe5, 0x57, 0x92, 0xfd, 0x73, 0x0f, 0xfb, 0x9c, 0xd3, 0x64, 0x94, 0xe3, 0x60,
+	0x78, 0x17, 0xf7, 0xa1, 0xc2, 0x26, 0x63, 0x62, 0x7e, 0x49, 0xb5, 0xa1, 0xe3, 0xf1, 0xa3, 0xb1,
+	0xba, 0x64, 0x6b, 0x4a, 0x44, 0xb0, 0xde, 0x0d, 0x15, 0x8e, 0x1c, 0x83, 0xee, 0xe5, 0xfb, 0x3d,
+	0x45, 0x69, 0xa9, 0xe5, 0xc8, 0xca, 0x7d, 0xc7, 0x63, 0x09, 0x35, 0x72, 0xe2, 0x00, 0x97, 0x45,
+	0x42, 0x8d, 0x9c, 0x28, 0xbc, 0xb7, 0x61, 0x43, 0xd7, 0xc5, 0x98, 0x4d, 0x5d, 0x0b, 0xef, 0x58,
+	0xbe, 0x8c, 0x52, 0xc1, 0xd2, 0xf5, 0x03, 0x41, 0x08, 0x73, 0xdc, 0xc7, 0x1f, 0xc2, 0xcb, 0xb3,
+	0x60, 0x25, 0x15, 0xd7, 0x17, 0x46, 0x39, 0xaf, 0x7a, 0x1b, 0x36, 0xdc, 0xd3, 0x45, 0x45, 0x9c,
+	0xea, 0xd1, 0x3d, 0x9d, 0x57, 0xfb, 0x00, 0x36, 0xdd, 0xb1, 0xbb, 0xa8, 0x77, 0x3d, 0xa9, 0x87,
+	0xdd, 0xb1, 0x3b, 0xaf, 0xf8, 0x16, 0xbf, 0x70, 0x7b, 0x54, 0x27, 0x01, 0x35, 0xe4, 0x8b, 0x49,
+	0x7a, 0x42, 0x80, 0x77, 0x01, 0xe9, 0xba, 0x46, 0x6d, 0x72, 0x62, 0x51, 0x8d, 0x78, 0xd4, 0x26,
+	0xbe, 0x7c, 0x39, 0x49, 0xae, 0xea, 0xba, 0xc2, 0xa5, 0x0d, 0x2e, 0xc4, 0xd7, 0x61, 0xdd, 0x39,
+	0x79, 0xa8, 0x8b, 0x94, 0xd4, 0x5c, 0x8f, 0x0e, 0xcd, 0xa7, 0xf2, 0x9b, 0x3c, 0xbe, 0x6b, 0x4c,
+	0xc0, 0x13, 0xb2, 0xc7, 0x61, 0x7c, 0x0d, 0x90, 0xee, 0x8f, 0x89, 0xe7, 0xf2, 0x3d, 0xd9, 0x77,
+	0x89, 0x4e, 0xe5, 0xb7, 0x04, 0x55, 0xe0, 0x9d, 0x08, 0x66, 0x4b, 0xc2, 0x7f, 0x62, 0x0e, 0x83,
+	0xc8, 0xe2, 0x55, 0xb1, 0x24, 0x38, 0x16, 0x5a, 0xdb, 0x01, 0xc4, 0x42, 0x91, 0xea, 0x78, 0x87,
+	0xd3, 0xaa, 0xee, 0xd8, 0x4d, 0xf6, 0xfb, 0x06, 0xac, 0x32, 0xe6, 0xac, 0xd3, 0x6b, 0xa2, 0x20,
+	0x73, 0xc7, 0x89, 0x1e, 0x6f, 0xc1, 0x05, 0x46, 0x9a, 0xd0, 0x80, 0x18, 0x24, 0x20, 0x09, 0xf6,
+	0xbb, 0x9c, 0xcd, 0xe2, 0x7e, 0x14, 0x0a, 0x53, 0x7e, 0x7a, 0xd3, 0x93, 0xd3, 0x38, 0xb3, 0x6e,
+	0x08, 0x3f, 0x19, 0x16, 0xe5, 0xd6, 0xf7, 0x56, 0x74, 0xd7, 0xf6, 0xa0, 0x92, 0x4c, 0x7c, 0x5c,
+	0x02, 0x91, 0xfa, 0x48, 0x62, 0x55, 0x50, 0xb3, 0xdb, 0x62, 0xf5, 0xcb, 0xe7, 0x0a, 0xca, 0xb0,
+	0x3a, 0xea, 0xb0, 0x3d, 0x50, 0x34, 0xf5, 0xb8, 0x33, 0x68, 0x1f, 0x29, 0x28, 0x9b, 0x28, 0xd8,
+	0x1f, 0xe4, 0x8a, 0x6f, 0xa3, 0xab, 0xb5, 0xaf, 0x33, 0x50, 0x4d, 0xdf, 0xc0, 0xf0, 0x8f, 0xe0,
+	0x62, 0xf4, 0x5c, 0xe2, 0xd3, 0x40, 0x7b, 0x62, 0x7a, 0x7c, 0x45, 0x4e, 0x88, 0x38, 0x1d, 0xe3,
+	0x9c, 0xd8, 0x0c, 0x59, 0x7d, 0x1a, 0x7c, 0x62, 0x7a, 0x6c, 0xbd, 0x4d, 0x48, 0x80, 0x0f, 0xe1,
+	0xb2, 0xed, 0x68, 0x7e, 0x40, 0x6c, 0x83, 0x78, 0x86, 0x36, 0x7b, 0xa8, 0xd2, 0x88, 0xae, 0x53,
+	0xdf, 0x77, 0xc4, 0x49, 0x18, 0x5b, 0x79, 0xd5, 0x76, 0xfa, 0x21, 0x79, 0x76, 0x44, 0x34, 0x42,
+	0xea, 0x5c, 0xfe, 0x66, 0xcf, 0xca, 0xdf, 0x57, 0xa0, 0x34, 0x21, 0xae, 0x46, 0xed, 0xc0, 0x3b,
+	0xe5, 0x75, 0x77, 0x51, 0x2d, 0x4e, 0x88, 0xab, 0xb0, 0xf6, 0x0b, 0xb9, 0xfe, 0x3c, 0xc8, 0x15,
+	0x8b, 0xa8, 0xf4, 0x20, 0x57, 0x2c, 0x21, 0xa8, 0xfd, 0x33, 0x0b, 0x95, 0x64, 0x1d, 0xce, 0xae,
+	0x35, 0x3a, 0x3f, 0xb2, 0x24, 0xbe, 0xa9, 0xbd, 0xf1, 0x8d, 0x55, 0x7b, 0xbd, 0xc9, 0xce, 0xb2,
+	0xbd, 0x82, 0xa8, 0x8e, 0x55, 0xa1, 0xc9, 0xea, 0x08, 0x96, 0x6c, 0x54, 0x54, 0x23, 0x45, 0x35,
+	0x6c, 0xe1, 0x03, 0x28, 0x3c, 0xf4, 0xb9, 0xed, 0x02, 0xb7, 0xfd, 0xe6, 0x37, 0xdb, 0x7e, 0xd0,
+	0xe7, 0xc6, 0x4b, 0x0f, 0xfa, 0x5a, 0xa7, 0xab, 0x1e, 0x35, 0x0e, 0xd5, 0x50, 0x1d, 0x5f, 0x82,
+	0x9c, 0x45, 0xbe, 0x3c, 0x4d, 0x9f, 0x7a, 0x1c, 0x3a, 0xef, 0x24, 0x5c, 0x82, 0xdc, 0x13, 0x4a,
+	0x1e, 0xa5, 0xcf, 0x1a, 0x0e, 0x7d, 0x8f, 0x8b, 0x61, 0x17, 0xf2, 0x3c, 0x5e, 0x18, 0x20, 0x8c,
+	0x18, 0x7a, 0x09, 0x17, 0x21, 0xd7, 0xec, 0xaa, 0x6c, 0x41, 0x20, 0xa8, 0x08, 0x54, 0xeb, 0xb5,
+	0x95, 0xa6, 0x82, 0x32, 0xb5, 0xdb, 0x50, 0x10, 0x41, 0x60, 0x8b, 0x25, 0x0e, 0x03, 0x7a, 0x29,
+	0x6c, 0x86, 0x36, 0xa4, 0x48, 0x7a, 0x7c, 0xb4, 0xaf, 0xa8, 0x28, 0x93, 0x9e, 0xea, 0x1c, 0xca,
+	0xd7, 0x7c, 0xa8, 0x24, 0x0b, 0xf1, 0x17, 0x73, 0xc9, 0xfe, 0x9b, 0x04, 0xe5, 0x44, 0x61, 0xcd,
+	0x2a, 0x22, 0x62, 0x59, 0xce, 0x13, 0x8d, 0x58, 0x26, 0xf1, 0xc3, 0xd4, 0x00, 0x0e, 0x35, 0x18,
+	0x72, 0xde, 0xa9, 0x7b, 0x41, 0x4b, 0x24, 0x8f, 0x0a, 0xb5, 0x3f, 0x4a, 0x80, 0xe6, 0x2b, 0xdb,
+	0x39, 0x37, 0xa5, 0x1f, 0xd2, 0xcd, 0xda, 0x1f, 0x24, 0xa8, 0xa6, 0xcb, 0xd9, 0x39, 0xf7, 0xae,
+	0xfc, 0xa0, 0xee, 0xfd, 0x23, 0x03, 0xab, 0xa9, 0x22, 0xf6, 0xbc, 0xde, 0x7d, 0x01, 0xeb, 0xa6,
+	0x41, 0x27, 0xae, 0x13, 0x50, 0x5b, 0x3f, 0xd5, 0x2c, 0xfa, 0x98, 0x5a, 0x72, 0x8d, 0x6f, 0x1a,
+	0xbb, 0xdf, 0x5c, 0x26, 0xd7, 0xdb, 0x33, 0xbd, 0x43, 0xa6, 0xb6, 0xb7, 0xd1, 0x6e, 0x29, 0x47,
+	0xbd, 0xee, 0x40, 0xe9, 0x34, 0x3f, 0xd3, 0x8e, 0x3b, 0x3f, 0xed, 0x74, 0x3f, 0xe9, 0xa8, 0xc8,
+	0x9c, 0xa3, 0x7d, 0x8f, 0xcb, 0xbe, 0x07, 0x68, 0xde, 0x29, 0x7c, 0x11, 0x96, 0xb9, 0x85, 0x5e,
+	0xc2, 0x1b, 0xb0, 0xd6, 0xe9, 0x6a, 0xfd, 0x76, 0x4b, 0xd1, 0x94, 0xfb, 0xf7, 0x95, 0xe6, 0xa0,
+	0x2f, 0x1e, 0x3e, 0x62, 0xf6, 0x20, 0xb5, 0xc0, 0x6b, 0xbf, 0xcf, 0xc2, 0xc6, 0x12, 0x4f, 0x70,
+	0x23, 0xbc, 0xb2, 0x88, 0x5b, 0xd4, 0x8d, 0xf3, 0x78, 0x5f, 0x67, 0x35, 0x43, 0x8f, 0x78, 0x41,
+	0x78, 0xc3, 0xb9, 0x06, 0x2c, 0x4a, 0x76, 0x60, 0x0e, 0x4d, 0xea, 0x85, 0xef, 0x44, 0xe2, 0x1e,
+	0xb3, 0x36, 0xc3, 0xc5, 0x53, 0xd1, 0xbb, 0x80, 0x5d, 0xc7, 0x37, 0x03, 0xf3, 0x31, 0xd5, 0x4c,
+	0x3b, 0x7a, 0x54, 0x62, 0xf7, 0x9a, 0x9c, 0x8a, 0x22, 0x49, 0xdb, 0x0e, 0x62, 0xb6, 0x4d, 0x47,
+	0x64, 0x8e, 0xcd, 0x36, 0xf3, 0xac, 0x8a, 0x22, 0x49, 0xcc, 0xbe, 0x02, 0x15, 0xc3, 0x99, 0xb2,
+	0x62, 0x4f, 0xf0, 0xd8, 0xd9, 0x21, 0xa9, 0x65, 0x81, 0xc5, 0x94, 0xb0, 0x8c, 0x9f, 0xbd, 0x66,
+	0x55, 0xd4, 0xb2, 0xc0, 0x04, 0xe5, 0x2a, 0xac, 0x91, 0xd1, 0xc8, 0x63, 0xc6, 0x23, 0x43, 0xe2,
+	0x62, 0x52, 0x8d, 0x61, 0x4e, 0xdc, 0x7a, 0x00, 0xc5, 0x28, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0xd0,
+	0x5c, 0x71, 0xdb, 0xce, 0xec, 0x94, 0xd4, 0xa2, 0x1d, 0x09, 0xaf, 0x40, 0xc5, 0xf4, 0xb5, 0xd9,
+	0xe3, 0x7c, 0x66, 0x3b, 0xb3, 0x53, 0x54, 0xcb, 0xa6, 0x1f, 0x3f, 0x6c, 0xd6, 0xbe, 0xca, 0x40,
+	0x35, 0xfd, 0x71, 0x01, 0xb7, 0xa0, 0x68, 0x39, 0x3a, 0xe1, 0xa9, 0x25, 0xbe, 0x6c, 0xed, 0x3c,
+	0xe7, 0x7b, 0x44, 0xfd, 0x30, 0xe4, 0xab, 0xb1, 0xe6, 0xd6, 0xdf, 0x25, 0x28, 0x46, 0x30, 0xbe,
+	0x00, 0x39, 0x97, 0x04, 0x63, 0x6e, 0x2e, 0xbf, 0x9f, 0x41, 0x92, 0xca, 0xdb, 0x0c, 0xf7, 0x5d,
+	0x62, 0xf3, 0x14, 0x08, 0x71, 0xd6, 0x66, 0xf3, 0x6a, 0x51, 0x62, 0xf0, 0x5b, 0x8f, 0x33, 0x99,
+	0x50, 0x3b, 0xf0, 0xa3, 0x79, 0x0d, 0xf1, 0x66, 0x08, 0xe3, 0x77, 0x60, 0x3d, 0xf0, 0x88, 0x69,
+	0xa5, 0xb8, 0x39, 0xce, 0x45, 0x91, 0x20, 0x26, 0xef, 0xc1, 0xa5, 0xc8, 0xae, 0x41, 0x03, 0xa2,
+	0x8f, 0xa9, 0x31, 0x53, 0x2a, 0xf0, 0xd7, 0x8d, 0x8b, 0x21, 0xa1, 0x15, 0xca, 0x23, 0xdd, 0xda,
+	0xd7, 0x12, 0xac, 0x47, 0xf7, 0x34, 0x23, 0x0e, 0xd6, 0x11, 0x00, 0xb1, 0x6d, 0x27, 0x48, 0x86,
+	0x6b, 0x31, 0x95, 0x17, 0xf4, 0xea, 0x8d, 0x58, 0x49, 0x4d, 0x18, 0xd8, 0x9a, 0x00, 0xcc, 0x24,
+	0x67, 0x86, 0xed, 0x32, 0x94, 0xc3, 0x2f, 0x47, 0xfc, 0xf3, 0xa3, 0xb8, 0xd9, 0x83, 0x80, 0xd8,
+	0x85, 0x0e, 0x6f, 0x42, 0xfe, 0x84, 0x8e, 0x4c, 0x3b, 0x7c, 0x0f, 0x16, 0x8d, 0xe8, 0xfd, 0x25,
+	0x17, 0xbf, 0xbf, 0xec, 0xff, 0x46, 0x82, 0x0d, 0xdd, 0x99, 0xcc, 0xfb, 0xbb, 0x8f, 0xe6, 0x9e,
+	0x17, 0xfc, 0x8f, 0xa5, 0xcf, 0x3f, 0x1a, 0x99, 0xc1, 0x78, 0x7a, 0x52, 0xd7, 0x9d, 0xc9, 0xee,
+	0xc8, 0xb1, 0x88, 0x3d, 0x9a, 0x7d, 0x3f, 0xe5, 0x7f, 0xf4, 0x1b, 0x23, 0x6a, 0xdf, 0x18, 0x39,
+	0x89, 0xaf, 0xa9, 0xf7, 0x66, 0x7f, 0xff, 0x27, 0x49, 0x7f, 0xca, 0x64, 0x0f, 0x7a, 0xfb, 0x7f,
+	0xce, 0x6c, 0x1d, 0x88, 0xee, 0x7a, 0x51, 0x78, 0x54, 0x3a, 0xb4, 0xa8, 0xce, 0x86, 0xfc, 0xff,
+	0x00, 0x00, 0x00, 0xff, 0xff, 0x3e, 0xe8, 0xef, 0xc4, 0x9b, 0x1d, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
new file mode 100644
index 0000000..ed08fcb
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
@@ -0,0 +1,883 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//  Based on original Protocol Buffers design by
+//  Sanjay Ghemawat, Jeff Dean, and others.
+//
+// The messages in this file describe the definitions found in .proto files.
+// A valid .proto file can be translated directly to a FileDescriptorProto
+// without any other information (e.g. without reading its imports).
+
+
+syntax = "proto2";
+
+package google.protobuf;
+option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DescriptorProtos";
+option csharp_namespace = "Google.Protobuf.Reflection";
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// descriptor.proto must be optimized for speed because reflection-based
+// algorithms don't work during bootstrapping.
+option optimize_for = SPEED;
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+message FileDescriptorSet {
+  repeated FileDescriptorProto file = 1;
+}
+
+// Describes a complete .proto file.
+message FileDescriptorProto {
+  optional string name = 1;       // file name, relative to root of source tree
+  optional string package = 2;    // e.g. "foo", "foo.bar", etc.
+
+  // Names of files imported by this file.
+  repeated string dependency = 3;
+  // Indexes of the public imported files in the dependency list above.
+  repeated int32 public_dependency = 10;
+  // Indexes of the weak imported files in the dependency list.
+  // For Google-internal migration only. Do not use.
+  repeated int32 weak_dependency = 11;
+
+  // All top-level definitions in this file.
+  repeated DescriptorProto message_type = 4;
+  repeated EnumDescriptorProto enum_type = 5;
+  repeated ServiceDescriptorProto service = 6;
+  repeated FieldDescriptorProto extension = 7;
+
+  optional FileOptions options = 8;
+
+  // This field contains optional information about the original source code.
+  // You may safely remove this entire field without harming runtime
+  // functionality of the descriptors -- the information is needed only by
+  // development tools.
+  optional SourceCodeInfo source_code_info = 9;
+
+  // The syntax of the proto file.
+  // The supported values are "proto2" and "proto3".
+  optional string syntax = 12;
+}
+
+// Describes a message type.
+message DescriptorProto {
+  optional string name = 1;
+
+  repeated FieldDescriptorProto field = 2;
+  repeated FieldDescriptorProto extension = 6;
+
+  repeated DescriptorProto nested_type = 3;
+  repeated EnumDescriptorProto enum_type = 4;
+
+  message ExtensionRange {
+    optional int32 start = 1;
+    optional int32 end = 2;
+
+    optional ExtensionRangeOptions options = 3;
+  }
+  repeated ExtensionRange extension_range = 5;
+
+  repeated OneofDescriptorProto oneof_decl = 8;
+
+  optional MessageOptions options = 7;
+
+  // Range of reserved tag numbers. Reserved tag numbers may not be used by
+  // fields or extension ranges in the same message. Reserved ranges may
+  // not overlap.
+  message ReservedRange {
+    optional int32 start = 1; // Inclusive.
+    optional int32 end = 2;   // Exclusive.
+  }
+  repeated ReservedRange reserved_range = 9;
+  // Reserved field names, which may not be used by fields in the same message.
+  // A given name may only be reserved once.
+  repeated string reserved_name = 10;
+}
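+
+// Illustrative sketch (not part of this file; message and names are
+// hypothetical): the reserved_range and reserved_name fields above are what
+// the parser populates for a declaration such as:
+//
+//   message Foo {
+//     reserved 2, 15, 9 to 11;   // -> ReservedRange entries
+//     reserved "foo", "bar";     // -> reserved_name entries
+//   }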
+
+message ExtensionRangeOptions {
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+// Describes a field within a message.
+message FieldDescriptorProto {
+  enum Type {
+    // 0 is reserved for errors.
+    // Order is weird for historical reasons.
+    TYPE_DOUBLE         = 1;
+    TYPE_FLOAT          = 2;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
+    // negative values are likely.
+    TYPE_INT64          = 3;
+    TYPE_UINT64         = 4;
+    // Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
+    // negative values are likely.
+    TYPE_INT32          = 5;
+    TYPE_FIXED64        = 6;
+    TYPE_FIXED32        = 7;
+    TYPE_BOOL           = 8;
+    TYPE_STRING         = 9;
+    // Tag-delimited aggregate.
+    // Group type is deprecated and not supported in proto3. However, Proto3
+    // implementations should still be able to parse the group wire format and
+    // treat group fields as unknown fields.
+    TYPE_GROUP          = 10;
+    TYPE_MESSAGE        = 11;  // Length-delimited aggregate.
+
+    // New in version 2.
+    TYPE_BYTES          = 12;
+    TYPE_UINT32         = 13;
+    TYPE_ENUM           = 14;
+    TYPE_SFIXED32       = 15;
+    TYPE_SFIXED64       = 16;
+    TYPE_SINT32         = 17;  // Uses ZigZag encoding.
+    TYPE_SINT64         = 18;  // Uses ZigZag encoding.
+  };
+
+  enum Label {
+    // 0 is reserved for errors
+    LABEL_OPTIONAL      = 1;
+    LABEL_REQUIRED      = 2;
+    LABEL_REPEATED      = 3;
+  };
+
+  optional string name = 1;
+  optional int32 number = 3;
+  optional Label label = 4;
+
+  // If type_name is set, this need not be set.  If both this and type_name
+  // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+  optional Type type = 5;
+
+  // For message and enum types, this is the name of the type.  If the name
+  // starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
+  // rules are used to find the type (i.e. first the nested types within this
+  // message are searched, then within the parent, on up to the root
+  // namespace).
+  optional string type_name = 6;
+
+  // For extensions, this is the name of the type being extended.  It is
+  // resolved in the same manner as type_name.
+  optional string extendee = 2;
+
+  // For numeric types, contains the original text representation of the value.
+  // For booleans, "true" or "false".
+  // For strings, contains the default text contents (not escaped in any way).
+  // For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
+  // TODO(kenton):  Base-64 encode?
+  optional string default_value = 7;
+
+  // If set, gives the index of a oneof in the containing type's oneof_decl
+  // list.  This field is a member of that oneof.
+  optional int32 oneof_index = 9;
+
+  // JSON name of this field. The value is set by protocol compiler. If the
+  // user has set a "json_name" option on this field, that option's value
+  // will be used. Otherwise, it's deduced from the field's name by converting
+  // it to camelCase.
+  optional string json_name = 10;
+
+  optional FieldOptions options = 8;
+}
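+
+// Illustrative sketch (not part of this file; names are hypothetical): for a
+// message such as
+//
+//   message Sample {
+//     oneof payload {
+//       string text = 1;
+//       bytes  data = 2;
+//     }
+//   }
+//
+// both fields would carry oneof_index = 0, pointing at the single entry in
+// Sample's oneof_decl list.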
+
+// Describes a oneof.
+message OneofDescriptorProto {
+  optional string name = 1;
+  optional OneofOptions options = 2;
+}
+
+// Describes an enum type.
+message EnumDescriptorProto {
+  optional string name = 1;
+
+  repeated EnumValueDescriptorProto value = 2;
+
+  optional EnumOptions options = 3;
+
+  // Range of reserved numeric values. Reserved values may not be used by
+  // entries in the same enum. Reserved ranges may not overlap.
+  //
+  // Note that this is distinct from DescriptorProto.ReservedRange in that it
+  // is inclusive such that it can appropriately represent the entire int32
+  // domain.
+  message EnumReservedRange {
+    optional int32 start = 1; // Inclusive.
+    optional int32 end = 2;   // Inclusive.
+  }
+
+  // Range of reserved numeric values. Reserved numeric values may not be used
+  // by enum values in the same enum declaration. Reserved ranges may not
+  // overlap.
+  repeated EnumReservedRange reserved_range = 4;
+
+  // Reserved enum value names, which may not be reused. A given name may only
+  // be reserved once.
+  repeated string reserved_name = 5;
+}
+
+// Describes a value within an enum.
+message EnumValueDescriptorProto {
+  optional string name = 1;
+  optional int32 number = 2;
+
+  optional EnumValueOptions options = 3;
+}
+
+// Describes a service.
+message ServiceDescriptorProto {
+  optional string name = 1;
+  repeated MethodDescriptorProto method = 2;
+
+  optional ServiceOptions options = 3;
+}
+
+// Describes a method of a service.
+message MethodDescriptorProto {
+  optional string name = 1;
+
+  // Input and output type names.  These are resolved in the same way as
+  // FieldDescriptorProto.type_name, but must refer to a message type.
+  optional string input_type = 2;
+  optional string output_type = 3;
+
+  optional MethodOptions options = 4;
+
+  // Identifies whether the client streams multiple client messages.
+  optional bool client_streaming = 5 [default=false];
+  // Identifies whether the server streams multiple server messages.
+  optional bool server_streaming = 6 [default=false];
+}
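+
+// Illustrative sketch (not part of this file; names are hypothetical): the
+// streaming flags above correspond to the "stream" keyword in a service
+// definition:
+//
+//   service Chat {
+//     rpc Send(Msg) returns (Ack);                // neither flag set
+//     rpc Watch(Req) returns (stream Event);      // server_streaming = true
+//     rpc Talk(stream Msg) returns (stream Msg);  // both flags set
+//   }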
+
+
+// ===================================================================
+// Options
+
+// Each of the definitions above may have "options" attached.  These are
+// just annotations which may cause code to be generated slightly differently
+// or may contain hints for code that manipulates protocol messages.
+//
+// Clients may define custom options as extensions of the *Options messages.
+// These extensions may not yet be known at parsing time, so the parser cannot
+// store the values in them.  Instead it stores them in a field in the *Options
+// message called uninterpreted_option. This field must have the same name
+// across all *Options messages. We then use this field to populate the
+// extensions when we build a descriptor, at which point all protos have been
+// parsed and so all extensions are known.
+//
+// Extension numbers for custom options may be chosen as follows:
+// * For options which will only be used within a single application or
+//   organization, or for experimental options, use field numbers 50000
+//   through 99999.  It is up to you to ensure that you do not use the
+//   same number for multiple options.
+// * For options which will be published and used publicly by multiple
+//   independent entities, e-mail protobuf-global-extension-registry@google.com
+//   to reserve extension numbers. Simply provide your project name (e.g.
+//   Objective-C plugin) and your project website (if available) -- there's no
+//   need to explain how you intend to use them. Usually you only need one
+//   extension number. You can declare multiple options with only one extension
+//   number by putting them in a sub-message. See the Custom Options section of
+//   the docs for examples:
+//   https://developers.google.com/protocol-buffers/docs/proto#options
+//   If this turns out to be popular, a web service will be set up
+//   to automatically assign option numbers.
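+//
+// As a hedged illustration (names and numbers are hypothetical, not part of
+// this file), a custom option in the experimental 50000-99999 range could be
+// declared and used like this:
+//
+//   extend google.protobuf.MessageOptions {
+//     optional string my_tag = 51234;
+//   }
+//
+//   message Widget {
+//     option (my_tag) = "example";
+//   }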
+
+
+message FileOptions {
+
+  // Sets the Java package where classes generated from this .proto will be
+  // placed.  By default, the proto package is used, but this is often
+  // inappropriate because proto packages do not normally start with backwards
+  // domain names.
+  optional string java_package = 1;
+
+
+  // If set, all the classes from the .proto file are wrapped in a single
+  // outer class with the given name.  This applies to both Proto1
+  // (equivalent to the old "--one_java_file" option) and Proto2 (where
+  // a .proto always translates to a single class, but you may want to
+  // explicitly choose the class name).
+  optional string java_outer_classname = 8;
+
+  // If set true, then the Java code generator will generate a separate .java
+  // file for each top-level message, enum, and service defined in the .proto
+  // file.  Thus, these types will *not* be nested inside the outer class
+  // named by java_outer_classname.  However, the outer class will still be
+  // generated to contain the file's getDescriptor() method as well as any
+  // top-level extensions defined in the file.
+  optional bool java_multiple_files = 10 [default=false];
+
+  // This option does nothing.
+  optional bool java_generate_equals_and_hash = 20 [deprecated=true];
+
+  // If set true, then the Java2 code generator will generate code that
+  // throws an exception whenever an attempt is made to assign a non-UTF-8
+  // byte sequence to a string field.
+  // Message reflection will do the same.
+  // However, an extension field still accepts non-UTF-8 byte sequences.
+  // This option has no effect when used with the lite runtime.
+  optional bool java_string_check_utf8 = 27 [default=false];
+
+
+  // Generated classes can be optimized for speed or code size.
+  enum OptimizeMode {
+    SPEED = 1;        // Generate complete code for parsing, serialization,
+                      // etc.
+    CODE_SIZE = 2;    // Use ReflectionOps to implement these methods.
+    LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+  }
+  optional OptimizeMode optimize_for = 9 [default=SPEED];
+
+  // Sets the Go package where structs generated from this .proto will be
+  // placed. If omitted, the Go package will be derived from the following:
+  //   - The basename of the package import path, if provided.
+  //   - Otherwise, the package statement in the .proto file, if present.
+  //   - Otherwise, the basename of the .proto file, without extension.
+  optional string go_package = 11;
+
+
+
+  // Should generic services be generated in each language?  "Generic" services
+  // are not specific to any particular RPC system.  They are generated by the
+  // main code generators in each language (without additional plugins).
+  // Generic services were the only kind of service generation supported by
+  // early versions of google.protobuf.
+  //
+  // Generic services are now considered deprecated in favor of using plugins
+  // that generate code specific to your particular RPC system.  Therefore,
+  // these default to false.  Old code which depends on generic services should
+  // explicitly set them to true.
+  optional bool cc_generic_services = 16 [default=false];
+  optional bool java_generic_services = 17 [default=false];
+  optional bool py_generic_services = 18 [default=false];
+  optional bool php_generic_services = 42 [default=false];
+
+  // Is this file deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for everything in the file, or it will be completely ignored; at the
+  // very least, this is a formalization for deprecating files.
+  optional bool deprecated = 23 [default=false];
+
+  // Enables the use of arenas for the proto messages in this file. This applies
+  // only to generated classes for C++.
+  optional bool cc_enable_arenas = 31 [default=false];
+
+
+  // Sets the Objective-C class prefix which is prepended to all Objective-C
+  // generated classes from this .proto. There is no default.
+  optional string objc_class_prefix = 36;
+
+  // Namespace for generated classes; defaults to the package.
+  optional string csharp_namespace = 37;
+
+  // By default, Swift generators will take the proto package and CamelCase
+  // it, replacing '.' with underscore, and use that to prefix the
+  // types/symbols defined. When this option is provided, they will use this
+  // value instead to prefix the types/symbols defined.
+  optional string swift_prefix = 39;
+
+  // Sets the php class prefix which is prepended to all php generated classes
+  // from this .proto. Default is empty.
+  optional string php_class_prefix = 40;
+
+  // Use this option to change the namespace of php generated classes. Default
+  // is empty. When this option is empty, the package name will be used for
+  // determining the namespace.
+  optional string php_namespace = 41;
+
+
+  // Use this option to change the namespace of php generated metadata classes.
+  // Default is empty. When this option is empty, the proto file name will be used
+  // for determining the namespace.
+  optional string php_metadata_namespace = 44;
+
+  // Use this option to change the package of ruby generated classes. Default
+  // is empty. When this option is not set, the package name will be used for
+  // determining the ruby package.
+  optional string ruby_package = 45;
+
+  // The parser stores options it doesn't recognize here.
+  // See the documentation for the "Options" section above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message.
+  // See the documentation for the "Options" section above.
+  extensions 1000 to max;
+
+  reserved 38;
+}
+
+message MessageOptions {
+  // Set true to use the old proto1 MessageSet wire format for extensions.
+  // This is provided for backwards-compatibility with the MessageSet wire
+  // format.  You should not use this for any other reason:  It's less
+  // efficient, has fewer features, and is more complicated.
+  //
+  // The message must be defined exactly as follows:
+  //   message Foo {
+  //     option message_set_wire_format = true;
+  //     extensions 4 to max;
+  //   }
+  // Note that the message cannot have any defined fields; MessageSets only
+  // have extensions.
+  //
+  // All extensions of your type must be singular messages; e.g. they cannot
+  // be int32s, enums, or repeated messages.
+  //
+  // Because this is an option, the above two restrictions are not enforced by
+  // the protocol compiler.
+  optional bool message_set_wire_format = 1 [default=false];
+
+  // Disables the generation of the standard "descriptor()" accessor, which can
+  // conflict with a field of the same name.  This is meant to make migration
+  // from proto1 easier; new code should avoid fields named "descriptor".
+  optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+  // Is this message deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the message, or it will be completely ignored; at the very least,
+  // this is a formalization for deprecating messages.
+  optional bool deprecated = 3 [default=false];
+
+  // Whether the message is an automatically generated map entry type for the
+  // maps field.
+  //
+  // For maps fields:
+  //     map<KeyType, ValueType> map_field = 1;
+  // The parsed descriptor looks like:
+  //     message MapFieldEntry {
+  //         option map_entry = true;
+  //         optional KeyType key = 1;
+  //         optional ValueType value = 2;
+  //     }
+  //     repeated MapFieldEntry map_field = 1;
+  //
+  // Implementations may choose not to generate the map_entry=true message, but
+  // use a native map in the target language to hold the keys and values.
+  // The reflection APIs in such implementations still need to work as
+  // if the field is a repeated message field.
+  //
+  // NOTE: Do not set the option in .proto files. Always use the maps syntax
+  // instead. The option should only be implicitly set by the proto compiler
+  // parser.
+  optional bool map_entry = 7;
+
+  reserved 8;  // javalite_serializable
+  reserved 9;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message FieldOptions {
+  // The ctype option instructs the C++ code generator to use a different
+  // representation of the field than it normally would.  See the specific
+  // options below.  This option is not yet implemented in the open source
+  // release -- sorry, we'll try to include it in a future version!
+  optional CType ctype = 1 [default = STRING];
+  enum CType {
+    // Default mode.
+    STRING = 0;
+
+    CORD = 1;
+
+    STRING_PIECE = 2;
+  }
+  // The packed option can be enabled for repeated primitive fields to enable
+  // a more efficient representation on the wire. Rather than repeatedly
+  // writing the tag and type for each element, the entire array is encoded as
+  // a single length-delimited blob. In proto3, only explicitly setting it to
+  // false will avoid using packed encoding.
+  optional bool packed = 2;
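+  // For example (hypothetical field, not part of this file), a sender could
+  // request the packed representation with:
+  //   repeated int32 samples = 4 [packed = true];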
+
+  // The jstype option determines the JavaScript type used for values of the
+  // field.  The option is permitted only for 64 bit integral and fixed types
+  // (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING
+  // is represented as JavaScript string, which avoids loss of precision that
+  // can happen when a large value is converted to a floating point JavaScript number.
+  // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+  // use the JavaScript "number" type.  The behavior of the default option
+  // JS_NORMAL is implementation dependent.
+  //
+  // This option is an enum to permit additional types to be added, e.g.
+  // goog.math.Integer.
+  optional JSType jstype = 6 [default = JS_NORMAL];
+  enum JSType {
+    // Use the default type.
+    JS_NORMAL = 0;
+
+    // Use JavaScript strings.
+    JS_STRING = 1;
+
+    // Use JavaScript numbers.
+    JS_NUMBER = 2;
+  }
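+
+  // For example (hypothetical field, not part of this file):
+  //   fixed64 user_id = 1 [jstype = JS_STRING];
+  // keeps the full 64-bit value intact as a string in generated JavaScript.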
+
+  // Should this field be parsed lazily?  Lazy applies only to message-type
+  // fields.  It means that when the outer message is initially parsed, the
+  // inner message's contents will not be parsed but instead stored in encoded
+  // form.  The inner message will actually be parsed when it is first accessed.
+  //
+  // This is only a hint.  Implementations are free to choose whether to use
+  // eager or lazy parsing regardless of the value of this option.  However,
+  // setting this option true suggests that the protocol author believes that
+  // using lazy parsing on this field is worth the additional bookkeeping
+  // overhead typically needed to implement it.
+  //
+  // This option does not affect the public interface of any generated code;
+  // all method signatures remain the same.  Furthermore, thread-safety of the
+  // interface is not affected by this option; const methods remain safe to
+  // call from multiple threads concurrently, while non-const methods continue
+  // to require exclusive access.
+  //
+  //
+  // Note that implementations may choose not to check required fields within
+  // a lazy sub-message.  That is, calling IsInitialized() on the outer message
+  // may return true even if the inner message has missing required fields.
+  // This is necessary because otherwise the inner message would have to be
+  // parsed in order to perform the check, defeating the purpose of lazy
+  // parsing.  An implementation which chooses not to check required fields
+  // must be consistent about it.  That is, for any particular sub-message, the
+  // implementation must either *always* check its required fields, or *never*
+  // check its required fields, regardless of whether or not the message has
+  // been parsed.
+  optional bool lazy = 5 [default=false];
+
+  // Is this field deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for accessors, or it will be completely ignored; at the very least, this
+  // is a formalization for deprecating fields.
+  optional bool deprecated = 3 [default=false];
+
+  // For Google-internal migration only. Do not use.
+  optional bool weak = 10 [default=false];
+
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+
+  reserved 4;  // removed jtype
+}
+
+message OneofOptions {
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumOptions {
+
+  // Set this option to true to allow mapping different tag names to the same
+  // value.
+  optional bool allow_alias = 2;
+
+  // Is this enum deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum, or it will be completely ignored; at the very least, this
+  // is a formalization for deprecating enums.
+  optional bool deprecated = 3 [default=false];
+
+  reserved 5;  // javanano_as_lite
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message EnumValueOptions {
+  // Is this enum value deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the enum value, or it will be completely ignored; at the very least,
+  // this is a formalization for deprecating enum values.
+  optional bool deprecated = 1 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message ServiceOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this service deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the service, or it will be completely ignored; at the very least,
+  // this is a formalization for deprecating services.
+  optional bool deprecated = 33 [default=false];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+message MethodOptions {
+
+  // Note:  Field numbers 1 through 32 are reserved for Google's internal RPC
+  //   framework.  We apologize for hoarding these numbers to ourselves, but
+  //   we were already using them long before we decided to release Protocol
+  //   Buffers.
+
+  // Is this method deprecated?
+  // Depending on the target platform, this can emit Deprecated annotations
+  // for the method, or it will be completely ignored; at the very least,
+  // this is a formalization for deprecating methods.
+  optional bool deprecated = 33 [default=false];
+
+  // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+  // or neither? An HTTP-based RPC implementation may choose the GET verb for
+  // safe methods, and the PUT verb for idempotent methods, instead of the
+  // default POST.
+  enum IdempotencyLevel {
+    IDEMPOTENCY_UNKNOWN = 0;
+    NO_SIDE_EFFECTS     = 1; // implies idempotent
+    IDEMPOTENT          = 2; // idempotent, but may have side effects
+  }
+  optional IdempotencyLevel idempotency_level =
+      34 [default=IDEMPOTENCY_UNKNOWN];
+
+  // The parser stores options it doesn't recognize here. See above.
+  repeated UninterpretedOption uninterpreted_option = 999;
+
+  // Clients can define custom options in extensions of this message. See above.
+  extensions 1000 to max;
+}
+
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+  // The name of the uninterpreted option.  Each string represents a segment in
+  // a dot-separated name.  is_extension is true iff a segment represents an
+  // extension (denoted with parentheses in options specs in .proto files).
+  // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+  // "foo.(bar.baz).qux".
+  message NamePart {
+    required string name_part = 1;
+    required bool is_extension = 2;
+  }
+  repeated NamePart name = 2;
+
+  // The value of the uninterpreted option, in whatever type the tokenizer
+  // identified it as during parsing. Exactly one of these should be set.
+  optional string identifier_value = 3;
+  optional uint64 positive_int_value = 4;
+  optional int64 negative_int_value = 5;
+  optional double double_value = 6;
+  optional bytes string_value = 7;
+  optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+  // A Location identifies a piece of source code in a .proto file which
+  // corresponds to a particular definition.  This information is intended
+  // to be useful to IDEs, code indexers, documentation generators, and similar
+  // tools.
+  //
+  // For example, say we have a file like:
+  //   message Foo {
+  //     optional string foo = 1;
+  //   }
+  // Let's look at just the field definition:
+  //   optional string foo = 1;
+  //   ^       ^^     ^^  ^  ^^^
+  //   a       bc     de  f  ghi
+  // We have the following locations:
+  //   span   path               represents
+  //   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+  //   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+  //   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+  //   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+  //   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+  //
+  // Notes:
+  // - A location may refer to a repeated field itself (i.e. not to any
+  //   particular index within it).  This is used whenever a set of elements are
+  //   logically enclosed in a single code segment.  For example, an entire
+  //   extend block (possibly containing multiple extension definitions) will
+  //   have an outer location whose path refers to the "extensions" repeated
+  //   field without an index.
+  // - Multiple locations may have the same path.  This happens when a single
+  //   logical declaration is spread out across multiple places.  The most
+  //   obvious example is the "extend" block again -- there may be multiple
+  //   extend blocks in the same scope, each of which will have the same path.
+  // - A location's span is not always a subset of its parent's span.  For
+  //   example, the "extendee" of an extension declaration appears at the
+  //   beginning of the "extend" block and is shared by all extensions within
+  //   the block.
+  // - Just because a location's span is a subset of some other location's span
+  //   does not mean that it is a descendant.  For example, a "group" defines
+  //   both a type and a field in a single declaration.  Thus, the locations
+  //   corresponding to the type and field and their components will overlap.
+  // - Code which tries to interpret locations should probably be designed to
+  //   ignore those that it doesn't understand, as more types of locations could
+  //   be recorded in the future.
+  repeated Location location = 1;
+  message Location {
+    // Identifies which part of the FileDescriptorProto was defined at this
+    // location.
+    //
+    // Each element is a field number or an index.  They form a path from
+    // the root FileDescriptorProto to the place where the definition occurs.  For
+    // example, this path:
+    //   [ 4, 3, 2, 7, 1 ]
+    // refers to:
+    //   file.message_type(3)  // 4, 3
+    //       .field(7)         // 2, 7
+    //       .name()           // 1
+    // This is because FileDescriptorProto.message_type has field number 4:
+    //   repeated DescriptorProto message_type = 4;
+    // and DescriptorProto.field has field number 2:
+    //   repeated FieldDescriptorProto field = 2;
+    // and FieldDescriptorProto.name has field number 1:
+    //   optional string name = 1;
+    //
+    // Thus, the above path gives the location of a field name.  If we removed
+    // the last element:
+    //   [ 4, 3, 2, 7 ]
+    // this path refers to the whole field declaration (from the beginning
+    // of the label to the terminating semicolon).
+    repeated int32 path = 1 [packed=true];
+
+    // Always has exactly three or four elements: start line, start column,
+    // end line (optional, otherwise assumed same as start line), end column.
+    // These are packed into a single field for efficiency.  Note that line
+    // and column numbers are zero-based -- typically you will want to add
+    // 1 to each before displaying to a user.
+    repeated int32 span = 2 [packed=true];
+
+    // If this SourceCodeInfo represents a complete declaration, these are any
+    // comments appearing before and after the declaration which appear to be
+    // attached to the declaration.
+    //
+    // A series of line comments appearing on consecutive lines, with no other
+    // tokens appearing on those lines, will be treated as a single comment.
+    //
+    // leading_detached_comments will keep paragraphs of comments that appear
+    // before (but not connected to) the current element. Each paragraph,
+    // separated by empty lines, will be one comment element in the repeated
+    // field.
+    //
+    // Only the comment content is provided; comment markers (e.g. //) are
+    // stripped out.  For block comments, leading whitespace and an asterisk
+    // will be stripped from the beginning of each line other than the first.
+    // Newlines are included in the output.
+    //
+    // Examples:
+    //
+    //   optional int32 foo = 1;  // Comment attached to foo.
+    //   // Comment attached to bar.
+    //   optional int32 bar = 2;
+    //
+    //   optional string baz = 3;
+    //   // Comment attached to baz.
+    //   // Another line attached to baz.
+    //
+    //   // Comment attached to qux.
+    //   //
+    //   // Another line attached to qux.
+    //   optional double qux = 4;
+    //
+    //   // Detached comment for corge. This is not a leading or trailing comment
+    //   // to qux or corge because there are blank lines separating it from
+    //   // both.
+    //
+    //   // Detached comment for corge paragraph 2.
+    //
+    //   optional string corge = 5;
+    //   /* Block comment attached
+    //    * to corge.  Leading asterisks
+    //    * will be removed. */
+    //   /* Block comment attached to
+    //    * grault. */
+    //   optional int32 grault = 6;
+    //
+    //   // ignored detached comments.
+    optional string leading_comments = 3;
+    optional string trailing_comments = 4;
+    repeated string leading_detached_comments = 6;
+  }
+}
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+message GeneratedCodeInfo {
+  // An Annotation connects some span of text in generated code to an element
+  // of its generating .proto file.
+  repeated Annotation annotation = 1;
+  message Annotation {
+    // Identifies the element in the original source .proto file. This field
+    // is formatted the same as SourceCodeInfo.Location.path.
+    repeated int32 path = 1 [packed=true];
+
+    // Identifies the filesystem path to the original source .proto.
+    optional string source_file = 2;
+
+    // Identifies the starting offset in bytes in the generated code
+    // that relates to the identified object.
+    optional int32 begin = 3;
+
+    // Identifies the ending offset in bytes in the generated code that
+    // relates to the identified object. The end offset should be one past
+    // the last relevant byte (so the length of the text = end - begin).
+    optional int32 end = 4;
+  }
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
new file mode 100644
index 0000000..6f4a902
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/generator.go
@@ -0,0 +1,2806 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+	The code generator for the plugin for the Google protocol buffer compiler.
+	It generates Go code from the protocol buffer description files read by the
+	main routine.
+*/
+package generator
+
+import (
+	"bufio"
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"encoding/hex"
+	"fmt"
+	"go/ast"
+	"go/build"
+	"go/parser"
+	"go/printer"
+	"go/token"
+	"log"
+	"os"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/protoc-gen-go/generator/internal/remap"
+
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	plugin "github.com/golang/protobuf/protoc-gen-go/plugin"
+)
+
+// generatedCodeVersion indicates a version of the generated code.
+// It is incremented whenever an incompatibility between the generated code and
+// proto package is introduced; the generated code references
+// a constant, proto.ProtoPackageIsVersionN (where N is generatedCodeVersion).
+const generatedCodeVersion = 3
+
+// A Plugin provides functionality to add to the output during Go code generation,
+// such as to produce RPC stubs.
+type Plugin interface {
+	// Name identifies the plugin.
+	Name() string
+	// Init is called once after data structures are built but before
+	// code generation begins.
+	Init(g *Generator)
+	// Generate produces the code generated by the plugin for this file,
+	// except for the imports, by calling the generator's methods P, In, and Out.
+	Generate(file *FileDescriptor)
+	// GenerateImports produces the import declarations for this file.
+	// It is called after Generate.
+	GenerateImports(file *FileDescriptor)
+}
+
+var plugins []Plugin
+
+// RegisterPlugin installs a (second-order) plugin to be run when the Go output is generated.
+// It is typically called during initialization.
+func RegisterPlugin(p Plugin) {
+	plugins = append(plugins, p)
+}
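+
+// A minimal plugin sketch (hypothetical, not part of this package): a type
+// implementing Plugin can be registered from an init function of a package
+// compiled into the protoc-gen-go binary.
+//
+//	type noopPlugin struct{ gen *Generator }
+//
+//	func (p *noopPlugin) Name() string               { return "noop" }
+//	func (p *noopPlugin) Init(g *Generator)          { p.gen = g }
+//	func (p *noopPlugin) Generate(f *FileDescriptor) { p.gen.P("// noop plugin output") }
+//	func (p *noopPlugin) GenerateImports(f *FileDescriptor) {}
+//
+//	func init() { RegisterPlugin(&noopPlugin{}) }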
+
+// A GoImportPath is the import path of a Go package. e.g., "google.golang.org/genproto/protobuf".
+type GoImportPath string
+
+func (p GoImportPath) String() string { return strconv.Quote(string(p)) }
+
+// A GoPackageName is the name of a Go package. e.g., "protobuf".
+type GoPackageName string
+
+// Each type we import as a protocol buffer (other than FileDescriptorProto) needs
+// a pointer to the FileDescriptorProto that represents it.  These types achieve that
+// wrapping by placing each Proto inside a struct with the pointer to its File. The
+// structs have the same names as their contents, with "Proto" removed.
+// FileDescriptor is used to store the things that it points to.
+
+// The file and package name methods are common to messages and enums.
+type common struct {
+	file *FileDescriptor // File this object comes from.
+}
+
+// GoImportPath is the import path of the Go package containing the type.
+func (c *common) GoImportPath() GoImportPath {
+	return c.file.importPath
+}
+
+func (c *common) File() *FileDescriptor { return c.file }
+
+func fileIsProto3(file *descriptor.FileDescriptorProto) bool {
+	return file.GetSyntax() == "proto3"
+}
+
+func (c *common) proto3() bool { return fileIsProto3(c.file.FileDescriptorProto) }
+
+// Descriptor represents a protocol buffer message.
+type Descriptor struct {
+	common
+	*descriptor.DescriptorProto
+	parent   *Descriptor            // The containing message, if any.
+	nested   []*Descriptor          // Inner messages, if any.
+	enums    []*EnumDescriptor      // Inner enums, if any.
+	ext      []*ExtensionDescriptor // Extensions, if any.
+	typename []string               // Cached typename vector.
+	index    int                    // The index into the container, whether the file or another message.
+	path     string                 // The SourceCodeInfo path as comma-separated integers.
+	group    bool
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (d *Descriptor) TypeName() []string {
+	if d.typename != nil {
+		return d.typename
+	}
+	n := 0
+	for parent := d; parent != nil; parent = parent.parent {
+		n++
+	}
+	s := make([]string, n)
+	for parent := d; parent != nil; parent = parent.parent {
+		n--
+		s[n] = parent.GetName()
+	}
+	d.typename = s
+	return s
+}
+
+// EnumDescriptor describes an enum. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type EnumDescriptor struct {
+	common
+	*descriptor.EnumDescriptorProto
+	parent   *Descriptor // The containing message, if any.
+	typename []string    // Cached typename vector.
+	index    int         // The index into the container, whether the file or a message.
+	path     string      // The SourceCodeInfo path as comma-separated integers.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *EnumDescriptor) TypeName() (s []string) {
+	if e.typename != nil {
+		return e.typename
+	}
+	name := e.GetName()
+	if e.parent == nil {
+		s = make([]string, 1)
+	} else {
+		pname := e.parent.TypeName()
+		s = make([]string, len(pname)+1)
+		copy(s, pname)
+	}
+	s[len(s)-1] = name
+	e.typename = s
+	return s
+}
+
+// Everything but the last element of the full type name, CamelCased.
+// The values of type Foo.Bar are called Foo_value1... not Foo_Bar_value1... .
+func (e *EnumDescriptor) prefix() string {
+	if e.parent == nil {
+		// If the enum is not part of a message, the prefix is just the type name.
+		return CamelCase(*e.Name) + "_"
+	}
+	typeName := e.TypeName()
+	return CamelCaseSlice(typeName[0:len(typeName)-1]) + "_"
+}
+
+// The integer value of the named constant in this enumerated type.
+func (e *EnumDescriptor) integerValueAsString(name string) string {
+	for _, c := range e.Value {
+		if c.GetName() == name {
+			return fmt.Sprint(c.GetNumber())
+		}
+	}
+	log.Fatal("cannot find value for enum constant")
+	return ""
+}
+
+// ExtensionDescriptor describes an extension. If it's at top level, its parent will be nil.
+// Otherwise it will be the descriptor of the message in which it is defined.
+type ExtensionDescriptor struct {
+	common
+	*descriptor.FieldDescriptorProto
+	parent *Descriptor // The containing message, if any.
+}
+
+// TypeName returns the elements of the dotted type name.
+// The package name is not part of this name.
+func (e *ExtensionDescriptor) TypeName() (s []string) {
+	name := e.GetName()
+	if e.parent == nil {
+		// top-level extension
+		s = make([]string, 1)
+	} else {
+		pname := e.parent.TypeName()
+		s = make([]string, len(pname)+1)
+		copy(s, pname)
+	}
+	s[len(s)-1] = name
+	return s
+}
+
+// DescName returns the variable name used for the generated descriptor.
+func (e *ExtensionDescriptor) DescName() string {
+	// The full type name.
+	typeName := e.TypeName()
+	// Each scope of the extension is individually CamelCased, and all are joined with "_" with an "E_" prefix.
+	for i, s := range typeName {
+		typeName[i] = CamelCase(s)
+	}
+	return "E_" + strings.Join(typeName, "_")
+}
+
+// ImportedDescriptor describes a type that has been publicly imported from another file.
+type ImportedDescriptor struct {
+	common
+	o Object
+}
+
+func (id *ImportedDescriptor) TypeName() []string { return id.o.TypeName() }
+
+// FileDescriptor describes a protocol buffer descriptor file (.proto).
+// It includes slices of all the messages and enums defined within it.
+// Those slices are constructed by WrapTypes.
+type FileDescriptor struct {
+	*descriptor.FileDescriptorProto
+	desc []*Descriptor          // All the messages defined in this file.
+	enum []*EnumDescriptor      // All the enums defined in this file.
+	ext  []*ExtensionDescriptor // All the top-level extensions defined in this file.
+	imp  []*ImportedDescriptor  // All types defined in files publicly imported by this file.
+
+	// Comments, stored as a map of path (comma-separated integers) to the comment.
+	comments map[string]*descriptor.SourceCodeInfo_Location
+
+	// The full list of symbols that are exported,
+	// as a map from the exported object to its symbols.
+	// This is used for supporting public imports.
+	exported map[Object][]symbol
+
+	importPath  GoImportPath  // Import path of this file's package.
+	packageName GoPackageName // Name of this file's Go package.
+
+	proto3 bool // whether to generate proto3 code for this file
+}
+
+// VarName is the variable name we'll use in the generated code to refer
+// to the compressed bytes of this descriptor. It is not exported, so
+// it is only valid inside the generated package.
+func (d *FileDescriptor) VarName() string {
+	h := sha256.Sum256([]byte(d.GetName()))
+	return fmt.Sprintf("fileDescriptor_%s", hex.EncodeToString(h[:8]))
+}
+
+// goPackageOption interprets the file's go_package option.
+// If there is no go_package, it returns ("", "", false).
+// If there's a simple name, it returns ("", pkg, true).
+// If the option implies an import path, it returns (impPath, pkg, true).
+func (d *FileDescriptor) goPackageOption() (impPath GoImportPath, pkg GoPackageName, ok bool) {
+	opt := d.GetOptions().GetGoPackage()
+	if opt == "" {
+		return "", "", false
+	}
+	// A semicolon-delimited suffix delimits the import path and package name.
+	sc := strings.Index(opt, ";")
+	if sc >= 0 {
+		return GoImportPath(opt[:sc]), cleanPackageName(opt[sc+1:]), true
+	}
+	// The presence of a slash implies there's an import path.
+	slash := strings.LastIndex(opt, "/")
+	if slash >= 0 {
+		return GoImportPath(opt), cleanPackageName(opt[slash+1:]), true
+	}
+	return "", cleanPackageName(opt), true
+}
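+
+// For example (hypothetical option values, following the parsing above):
+//   option go_package = "example.com/foo/bar;baz"  -> ("example.com/foo/bar", "baz", true)
+//   option go_package = "example.com/foo/bar"      -> ("example.com/foo/bar", "bar", true)
+//   option go_package = "bar"                      -> ("", "bar", true)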
+
+// goFileName returns the output name for the generated Go file.
+func (d *FileDescriptor) goFileName(pathType pathType) string {
+	name := *d.Name
+	if ext := path.Ext(name); ext == ".proto" || ext == ".protodevel" {
+		name = name[:len(name)-len(ext)]
+	}
+	name += ".pb.go"
+
+	if pathType == pathTypeSourceRelative {
+		return name
+	}
+
+	// Does the file have a "go_package" option?
+	// If it does, it may override the filename.
+	if impPath, _, ok := d.goPackageOption(); ok && impPath != "" {
+		// Replace the existing dirname with the declared import path.
+		_, name = path.Split(name)
+		name = path.Join(string(impPath), name)
+		return name
+	}
+
+	return name
+}
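+
+// For example (hypothetical inputs): with paths=source_relative,
+// "a/b/c.proto" becomes "a/b/c.pb.go"; with a go_package import path of
+// "example.com/foo", it becomes "example.com/foo/c.pb.go".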
+
+func (d *FileDescriptor) addExport(obj Object, sym symbol) {
+	d.exported[obj] = append(d.exported[obj], sym)
+}
+
+// symbol is an interface representing an exported Go symbol.
+type symbol interface {
+	// GenerateAlias should generate an appropriate alias
+	// for the symbol from the named package.
+	GenerateAlias(g *Generator, filename string, pkg GoPackageName)
+}
+
+type messageSymbol struct {
+	sym                         string
+	hasExtensions, isMessageSet bool
+	oneofTypes                  []string
+}
+
+type getterSymbol struct {
+	name     string
+	typ      string
+	typeName string // canonical name in proto world; empty for proto.Message and similar
+	genType  bool   // whether typ contains a generated type (message/group/enum)
+}
+
+func (ms *messageSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
+	g.P("// ", ms.sym, " from public import ", filename)
+	g.P("type ", ms.sym, " = ", pkg, ".", ms.sym)
+	for _, name := range ms.oneofTypes {
+		g.P("type ", name, " = ", pkg, ".", name)
+	}
+}
+
+type enumSymbol struct {
+	name   string
+	proto3 bool // Whether this came from a proto3 file.
+}
+
+func (es enumSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
+	s := es.name
+	g.P("// ", s, " from public import ", filename)
+	g.P("type ", s, " = ", pkg, ".", s)
+	g.P("var ", s, "_name = ", pkg, ".", s, "_name")
+	g.P("var ", s, "_value = ", pkg, ".", s, "_value")
+}
+
+type constOrVarSymbol struct {
+	sym  string
+	typ  string // either "const" or "var"
+	cast string // if non-empty, a type cast is required (used for enums)
+}
+
+func (cs constOrVarSymbol) GenerateAlias(g *Generator, filename string, pkg GoPackageName) {
+	v := string(pkg) + "." + cs.sym
+	if cs.cast != "" {
+		v = cs.cast + "(" + v + ")"
+	}
+	g.P(cs.typ, " ", cs.sym, " = ", v)
+}
+
+// Object is an interface abstracting the abilities shared by enums, messages, extensions and imported objects.
+type Object interface {
+	GoImportPath() GoImportPath
+	TypeName() []string
+	File() *FileDescriptor
+}
+
+// Generator is the type whose methods generate the output, stored in the associated response structure.
+type Generator struct {
+	*bytes.Buffer
+
+	Request  *plugin.CodeGeneratorRequest  // The input.
+	Response *plugin.CodeGeneratorResponse // The output.
+
+	Param             map[string]string // Command-line parameters.
+	PackageImportPath string            // Go import path of the package we're generating code for
+	ImportPrefix      string            // String to prefix to imported package file names.
+	ImportMap         map[string]string // Mapping from .proto file name to import path
+
+	Pkg map[string]string // The names under which we import support packages
+
+	outputImportPath GoImportPath                   // Package we're generating code for.
+	allFiles         []*FileDescriptor              // All files in the tree
+	allFilesByName   map[string]*FileDescriptor     // All files by filename.
+	genFiles         []*FileDescriptor              // Those files we will generate output for.
+	file             *FileDescriptor                // The file we are compiling now.
+	packageNames     map[GoImportPath]GoPackageName // Imported package names in the current file.
+	usedPackages     map[GoImportPath]bool          // Packages used in current file.
+	usedPackageNames map[GoPackageName]bool         // Package names used in the current file.
+	addedImports     map[GoImportPath]bool          // Additional imports to emit.
+	typeNameToObject map[string]Object              // Key is a fully-qualified name in input syntax.
+	init             []string                       // Lines to emit in the init function.
+	indent           string
+	pathType         pathType // How to generate output filenames.
+	writeOutput      bool
+	annotateCode     bool                                       // whether to store annotations
+	annotations      []*descriptor.GeneratedCodeInfo_Annotation // annotations to store
+}
+
+type pathType int
+
+const (
+	pathTypeImport pathType = iota
+	pathTypeSourceRelative
+)
+
+// New creates a new generator and allocates the request and response protobufs.
+func New() *Generator {
+	g := new(Generator)
+	g.Buffer = new(bytes.Buffer)
+	g.Request = new(plugin.CodeGeneratorRequest)
+	g.Response = new(plugin.CodeGeneratorResponse)
+	return g
+}
+
+// Error reports a problem, including an error, and exits the program.
+func (g *Generator) Error(err error, msgs ...string) {
+	s := strings.Join(msgs, " ") + ":" + err.Error()
+	log.Print("protoc-gen-go: error:", s)
+	os.Exit(1)
+}
+
+// Fail reports a problem and exits the program.
+func (g *Generator) Fail(msgs ...string) {
+	s := strings.Join(msgs, " ")
+	log.Print("protoc-gen-go: error:", s)
+	os.Exit(1)
+}
+
+// CommandLineParameters breaks the comma-separated list of key=value pairs
+// in the parameter (a member of the request protobuf) into a key/value map.
+// It then sets file name mappings defined by those entries.
+func (g *Generator) CommandLineParameters(parameter string) {
+	g.Param = make(map[string]string)
+	for _, p := range strings.Split(parameter, ",") {
+		if i := strings.Index(p, "="); i < 0 {
+			g.Param[p] = ""
+		} else {
+			g.Param[p[0:i]] = p[i+1:]
+		}
+	}
+
+	g.ImportMap = make(map[string]string)
+	pluginList := "none" // Default list of plugin names to enable (empty means all).
+	for k, v := range g.Param {
+		switch k {
+		case "import_prefix":
+			g.ImportPrefix = v
+		case "import_path":
+			g.PackageImportPath = v
+		case "paths":
+			switch v {
+			case "import":
+				g.pathType = pathTypeImport
+			case "source_relative":
+				g.pathType = pathTypeSourceRelative
+			default:
+				g.Fail(fmt.Sprintf(`Unknown path type %q: want "import" or "source_relative".`, v))
+			}
+		case "plugins":
+			pluginList = v
+		case "annotate_code":
+			if v == "true" {
+				g.annotateCode = true
+			}
+		default:
+			if len(k) > 0 && k[0] == 'M' {
+				g.ImportMap[k[1:]] = v
+			}
+		}
+	}
+	if pluginList != "" {
+		// Amend the set of plugins.
+		enabled := make(map[string]bool)
+		for _, name := range strings.Split(pluginList, "+") {
+			enabled[name] = true
+		}
+		var nplugins []Plugin
+		for _, p := range plugins {
+			if enabled[p.Name()] {
+				nplugins = append(nplugins, p)
+			}
+		}
+		plugins = nplugins
+	}
+}
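+
+// For illustration (hypothetical values): the parameter string
+//
+//	"plugins=grpc,paths=source_relative,Mfoo.proto=example.com/foo"
+//
+// would leave the generator with
+//
+//	g.Param     == map[string]string{"plugins": "grpc", "paths": "source_relative", "Mfoo.proto": "example.com/foo"}
+//	g.ImportMap == map[string]string{"foo.proto": "example.com/foo"}
+//	g.pathType  == pathTypeSourceRelative
+//
+// and only the plugin registered under the name "grpc" enabled.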
+
+// DefaultPackageName returns the package name printed for the object.
+// If the object's file is in a different package from the one being generated,
+// it returns the name we use for that package, plus ".".
+// Otherwise it returns the empty string.
+func (g *Generator) DefaultPackageName(obj Object) string {
+	importPath := obj.GoImportPath()
+	if importPath == g.outputImportPath {
+		return ""
+	}
+	return string(g.GoPackageName(importPath)) + "."
+}
+
+// GoPackageName returns the name used for a package.
+func (g *Generator) GoPackageName(importPath GoImportPath) GoPackageName {
+	if name, ok := g.packageNames[importPath]; ok {
+		return name
+	}
+	name := cleanPackageName(baseName(string(importPath)))
+	for i, orig := 1, name; g.usedPackageNames[name] || isGoPredeclaredIdentifier[string(name)]; i++ {
+		name = orig + GoPackageName(strconv.Itoa(i))
+	}
+	g.packageNames[importPath] = name
+	g.usedPackageNames[name] = true
+	return name
+}
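+
+// For illustration (hypothetical import paths): if two distinct import paths
+// both end in "timestamp", the first call returns "timestamp" and the second
+// returns "timestamp1"; an import path ending in the predeclared identifier
+// "string" becomes "string1".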
+
+// AddImport adds a package to the generated file's import section.
+// It returns the name used for the package.
+func (g *Generator) AddImport(importPath GoImportPath) GoPackageName {
+	g.addedImports[importPath] = true
+	return g.GoPackageName(importPath)
+}
+
+var globalPackageNames = map[GoPackageName]bool{
+	"fmt":   true,
+	"math":  true,
+	"proto": true,
+}
+
+// RegisterUniquePackageName creates and remembers a guaranteed-unique package
+// name derived from the candidate pkg. The FileDescriptor parameter is unused.
+func RegisterUniquePackageName(pkg string, f *FileDescriptor) string {
+	name := cleanPackageName(pkg)
+	for i, orig := 1, name; globalPackageNames[name]; i++ {
+		name = orig + GoPackageName(strconv.Itoa(i))
+	}
+	globalPackageNames[name] = true
+	return string(name)
+}
+
+var isGoKeyword = map[string]bool{
+	"break":       true,
+	"case":        true,
+	"chan":        true,
+	"const":       true,
+	"continue":    true,
+	"default":     true,
+	"else":        true,
+	"defer":       true,
+	"fallthrough": true,
+	"for":         true,
+	"func":        true,
+	"go":          true,
+	"goto":        true,
+	"if":          true,
+	"import":      true,
+	"interface":   true,
+	"map":         true,
+	"package":     true,
+	"range":       true,
+	"return":      true,
+	"select":      true,
+	"struct":      true,
+	"switch":      true,
+	"type":        true,
+	"var":         true,
+}
+
+var isGoPredeclaredIdentifier = map[string]bool{
+	"append":     true,
+	"bool":       true,
+	"byte":       true,
+	"cap":        true,
+	"close":      true,
+	"complex":    true,
+	"complex128": true,
+	"complex64":  true,
+	"copy":       true,
+	"delete":     true,
+	"error":      true,
+	"false":      true,
+	"float32":    true,
+	"float64":    true,
+	"imag":       true,
+	"int":        true,
+	"int16":      true,
+	"int32":      true,
+	"int64":      true,
+	"int8":       true,
+	"iota":       true,
+	"len":        true,
+	"make":       true,
+	"new":        true,
+	"nil":        true,
+	"panic":      true,
+	"print":      true,
+	"println":    true,
+	"real":       true,
+	"recover":    true,
+	"rune":       true,
+	"string":     true,
+	"true":       true,
+	"uint":       true,
+	"uint16":     true,
+	"uint32":     true,
+	"uint64":     true,
+	"uint8":      true,
+	"uintptr":    true,
+}
+
+func cleanPackageName(name string) GoPackageName {
+	name = strings.Map(badToUnderscore, name)
+	// The identifier must not be a Go keyword: insert _.
+	// (Clashes with predeclared identifiers are resolved in GoPackageName.)
+	if isGoKeyword[name] {
+		name = "_" + name
+	}
+	// Identifier must not begin with digit: insert _.
+	if r, _ := utf8.DecodeRuneInString(name); unicode.IsDigit(r) {
+		name = "_" + name
+	}
+	return GoPackageName(name)
+}
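+
+// For illustration (hypothetical inputs, and assuming badToUnderscore, defined
+// elsewhere in this file, maps non-identifier runes to '_'):
+//
+//	cleanPackageName("my-api.v1") == "my_api_v1" // '-' and '.' replaced
+//	cleanPackageName("type")      == "_type"     // Go keyword
+//	cleanPackageName("2fast")     == "_2fast"    // leading digit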
+
+// defaultGoPackage returns the package name to use,
+// derived from the import path of the package we're building code for.
+func (g *Generator) defaultGoPackage() GoPackageName {
+	p := g.PackageImportPath
+	if i := strings.LastIndex(p, "/"); i >= 0 {
+		p = p[i+1:]
+	}
+	return cleanPackageName(p)
+}
+
+// SetPackageNames sets the package name for this run.
+// The package name must agree across all files being generated.
+// It also defines unique package names for all imported files.
+func (g *Generator) SetPackageNames() {
+	g.outputImportPath = g.genFiles[0].importPath
+
+	defaultPackageNames := make(map[GoImportPath]GoPackageName)
+	for _, f := range g.genFiles {
+		if _, p, ok := f.goPackageOption(); ok {
+			defaultPackageNames[f.importPath] = p
+		}
+	}
+	for _, f := range g.genFiles {
+		if _, p, ok := f.goPackageOption(); ok {
+			// Source file: option go_package = "quux/bar";
+			f.packageName = p
+		} else if p, ok := defaultPackageNames[f.importPath]; ok {
+			// A go_package option in another file in the same package.
+			//
+			// This is a poor choice in general, since every source file should
+			// contain a go_package option. Supported mainly for historical
+			// compatibility.
+			f.packageName = p
+		} else if p := g.defaultGoPackage(); p != "" {
+			// Command-line: import_path=quux/bar.
+			//
+			// The import_path flag sets a package name for files which don't
+			// contain a go_package option.
+			f.packageName = p
+		} else if p := f.GetPackage(); p != "" {
+			// Source file: package quux.bar;
+			f.packageName = cleanPackageName(p)
+		} else {
+			// Source filename.
+			f.packageName = cleanPackageName(baseName(f.GetName()))
+		}
+	}
+
+	// Check that all files have a consistent package name and import path.
+	for _, f := range g.genFiles[1:] {
+		if a, b := g.genFiles[0].importPath, f.importPath; a != b {
+			g.Fail(fmt.Sprintf("inconsistent package import paths: %v, %v", a, b))
+		}
+		if a, b := g.genFiles[0].packageName, f.packageName; a != b {
+			g.Fail(fmt.Sprintf("inconsistent package names: %v, %v", a, b))
+		}
+	}
+
+	// Names of support packages. These never vary (if there are conflicts,
+	// we rename the conflicting package), so this could be removed someday.
+	g.Pkg = map[string]string{
+		"fmt":   "fmt",
+		"math":  "math",
+		"proto": "proto",
+	}
+}
+
+// WrapTypes walks the incoming data, wrapping DescriptorProtos, EnumDescriptorProtos
+// and FileDescriptorProtos into file-referenced objects within the Generator.
+// It also creates the list of files to generate and so should be called before GenerateAllFiles.
+func (g *Generator) WrapTypes() {
+	g.allFiles = make([]*FileDescriptor, 0, len(g.Request.ProtoFile))
+	g.allFilesByName = make(map[string]*FileDescriptor, len(g.Request.ProtoFile))
+	genFileNames := make(map[string]bool)
+	for _, n := range g.Request.FileToGenerate {
+		genFileNames[n] = true
+	}
+	for _, f := range g.Request.ProtoFile {
+		fd := &FileDescriptor{
+			FileDescriptorProto: f,
+			exported:            make(map[Object][]symbol),
+			proto3:              fileIsProto3(f),
+		}
+		// The import path may be set in a number of ways.
+		if substitution, ok := g.ImportMap[f.GetName()]; ok {
+			// Command-line: M=foo.proto=quux/bar.
+			//
+			// Explicit mapping of source file to import path.
+			fd.importPath = GoImportPath(substitution)
+		} else if genFileNames[f.GetName()] && g.PackageImportPath != "" {
+			// Command-line: import_path=quux/bar.
+			//
+			// The import_path flag sets the import path for every file that
+			// we generate code for.
+			fd.importPath = GoImportPath(g.PackageImportPath)
+		} else if p, _, _ := fd.goPackageOption(); p != "" {
+			// Source file: option go_package = "quux/bar";
+			//
+			// The go_package option sets the import path. Most users should use this.
+			fd.importPath = p
+		} else {
+			// Source filename.
+			//
+			// Last resort when nothing else is available.
+			fd.importPath = GoImportPath(path.Dir(f.GetName()))
+		}
+		// We must wrap the descriptors before we wrap the enums
+		fd.desc = wrapDescriptors(fd)
+		g.buildNestedDescriptors(fd.desc)
+		fd.enum = wrapEnumDescriptors(fd, fd.desc)
+		g.buildNestedEnums(fd.desc, fd.enum)
+		fd.ext = wrapExtensions(fd)
+		extractComments(fd)
+		g.allFiles = append(g.allFiles, fd)
+		g.allFilesByName[f.GetName()] = fd
+	}
+	for _, fd := range g.allFiles {
+		fd.imp = wrapImported(fd, g)
+	}
+
+	g.genFiles = make([]*FileDescriptor, 0, len(g.Request.FileToGenerate))
+	for _, fileName := range g.Request.FileToGenerate {
+		fd := g.allFilesByName[fileName]
+		if fd == nil {
+			g.Fail("could not find file named", fileName)
+		}
+		g.genFiles = append(g.genFiles, fd)
+	}
+}
+
+// Scan the descriptors in this file.  For each one, build the slice of nested descriptors
+func (g *Generator) buildNestedDescriptors(descs []*Descriptor) {
+	for _, desc := range descs {
+		if len(desc.NestedType) != 0 {
+			for _, nest := range descs {
+				if nest.parent == desc {
+					desc.nested = append(desc.nested, nest)
+				}
+			}
+			if len(desc.nested) != len(desc.NestedType) {
+				g.Fail("internal error: nesting failure for", desc.GetName())
+			}
+		}
+	}
+}
+
+func (g *Generator) buildNestedEnums(descs []*Descriptor, enums []*EnumDescriptor) {
+	for _, desc := range descs {
+		if len(desc.EnumType) != 0 {
+			for _, enum := range enums {
+				if enum.parent == desc {
+					desc.enums = append(desc.enums, enum)
+				}
+			}
+			if len(desc.enums) != len(desc.EnumType) {
+				g.Fail("internal error: enum nesting failure for", desc.GetName())
+			}
+		}
+	}
+}
+
+// Construct the Descriptor
+func newDescriptor(desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *Descriptor {
+	d := &Descriptor{
+		common:          common{file},
+		DescriptorProto: desc,
+		parent:          parent,
+		index:           index,
+	}
+	if parent == nil {
+		d.path = fmt.Sprintf("%d,%d", messagePath, index)
+	} else {
+		d.path = fmt.Sprintf("%s,%d,%d", parent.path, messageMessagePath, index)
+	}
+
+	// The only way to distinguish a group from a message is whether
+	// the containing message has a TYPE_GROUP field that matches.
+	if parent != nil {
+		parts := d.TypeName()
+		if file.Package != nil {
+			parts = append([]string{*file.Package}, parts...)
+		}
+		exp := "." + strings.Join(parts, ".")
+		for _, field := range parent.Field {
+			if field.GetType() == descriptor.FieldDescriptorProto_TYPE_GROUP && field.GetTypeName() == exp {
+				d.group = true
+				break
+			}
+		}
+	}
+
+	for _, field := range desc.Extension {
+		d.ext = append(d.ext, &ExtensionDescriptor{common{file}, field, d})
+	}
+
+	return d
+}
+
+// Return a slice of all the Descriptors defined within this file
+func wrapDescriptors(file *FileDescriptor) []*Descriptor {
+	sl := make([]*Descriptor, 0, len(file.MessageType)+10)
+	for i, desc := range file.MessageType {
+		sl = wrapThisDescriptor(sl, desc, nil, file, i)
+	}
+	return sl
+}
+
+// Wrap this Descriptor, recursively
+func wrapThisDescriptor(sl []*Descriptor, desc *descriptor.DescriptorProto, parent *Descriptor, file *FileDescriptor, index int) []*Descriptor {
+	sl = append(sl, newDescriptor(desc, parent, file, index))
+	me := sl[len(sl)-1]
+	for i, nested := range desc.NestedType {
+		sl = wrapThisDescriptor(sl, nested, me, file, i)
+	}
+	return sl
+}
+
+// Construct the EnumDescriptor
+func newEnumDescriptor(desc *descriptor.EnumDescriptorProto, parent *Descriptor, file *FileDescriptor, index int) *EnumDescriptor {
+	ed := &EnumDescriptor{
+		common:              common{file},
+		EnumDescriptorProto: desc,
+		parent:              parent,
+		index:               index,
+	}
+	if parent == nil {
+		ed.path = fmt.Sprintf("%d,%d", enumPath, index)
+	} else {
+		ed.path = fmt.Sprintf("%s,%d,%d", parent.path, messageEnumPath, index)
+	}
+	return ed
+}
+
+// Return a slice of all the EnumDescriptors defined within this file
+func wrapEnumDescriptors(file *FileDescriptor, descs []*Descriptor) []*EnumDescriptor {
+	sl := make([]*EnumDescriptor, 0, len(file.EnumType)+10)
+	// Top-level enums.
+	for i, enum := range file.EnumType {
+		sl = append(sl, newEnumDescriptor(enum, nil, file, i))
+	}
+	// Enums within messages. Enums within embedded messages appear in the outer-most message.
+	for _, nested := range descs {
+		for i, enum := range nested.EnumType {
+			sl = append(sl, newEnumDescriptor(enum, nested, file, i))
+		}
+	}
+	return sl
+}
+
+// Return a slice of all the top-level ExtensionDescriptors defined within this file.
+func wrapExtensions(file *FileDescriptor) []*ExtensionDescriptor {
+	var sl []*ExtensionDescriptor
+	for _, field := range file.Extension {
+		sl = append(sl, &ExtensionDescriptor{common{file}, field, nil})
+	}
+	return sl
+}
+
+// Return a slice of all the types that are publicly imported into this file.
+func wrapImported(file *FileDescriptor, g *Generator) (sl []*ImportedDescriptor) {
+	for _, index := range file.PublicDependency {
+		df := g.fileByName(file.Dependency[index])
+		for _, d := range df.desc {
+			if d.GetOptions().GetMapEntry() {
+				continue
+			}
+			sl = append(sl, &ImportedDescriptor{common{file}, d})
+		}
+		for _, e := range df.enum {
+			sl = append(sl, &ImportedDescriptor{common{file}, e})
+		}
+		for _, ext := range df.ext {
+			sl = append(sl, &ImportedDescriptor{common{file}, ext})
+		}
+	}
+	return
+}
+
+func extractComments(file *FileDescriptor) {
+	file.comments = make(map[string]*descriptor.SourceCodeInfo_Location)
+	for _, loc := range file.GetSourceCodeInfo().GetLocation() {
+		if loc.LeadingComments == nil {
+			continue
+		}
+		var p []string
+		for _, n := range loc.Path {
+			p = append(p, strconv.Itoa(int(n)))
+		}
+		file.comments[strings.Join(p, ",")] = loc
+	}
+}
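+
+// For illustration: a location whose path is [4, 0, 2, 1] (the second field of
+// the first message, using descriptor.proto's field numbers: 4 is message_type,
+// 2 is field) is stored under the key "4,0,2,1", the same string later passed
+// to PrintComments.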
+
+// BuildTypeNameMap builds the map from fully qualified type names to objects.
+// The key names for the map come from the input data, which puts a period at the beginning.
+// It should be called after SetPackageNames and before GenerateAllFiles.
+func (g *Generator) BuildTypeNameMap() {
+	g.typeNameToObject = make(map[string]Object)
+	for _, f := range g.allFiles {
+		// The names in this loop are defined by the proto world, not us, so the
+		// package name may be empty.  If so, the dotted package name of X will
+		// be ".X"; otherwise it will be ".pkg.X".
+		dottedPkg := "." + f.GetPackage()
+		if dottedPkg != "." {
+			dottedPkg += "."
+		}
+		for _, enum := range f.enum {
+			name := dottedPkg + dottedSlice(enum.TypeName())
+			g.typeNameToObject[name] = enum
+		}
+		for _, desc := range f.desc {
+			name := dottedPkg + dottedSlice(desc.TypeName())
+			g.typeNameToObject[name] = desc
+		}
+	}
+}
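+
+// For illustration: a message Baz with nested enum Qux in a file declaring
+// "package quux.bar;" is registered under the keys ".quux.bar.Baz" and
+// ".quux.bar.Baz.Qux"; without a package declaration the keys would be
+// ".Baz" and ".Baz.Qux".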
+
+// ObjectNamed, given a fully-qualified input type name as it appears in the input data,
+// returns the descriptor for the message or enum with that name.
+func (g *Generator) ObjectNamed(typeName string) Object {
+	o, ok := g.typeNameToObject[typeName]
+	if !ok {
+		g.Fail("can't find object with type", typeName)
+	}
+	return o
+}
+
+// AnnotatedAtoms is a list of atoms (as consumed by P) that records the file name and proto AST path from which they originated.
+type AnnotatedAtoms struct {
+	source string
+	path   string
+	atoms  []interface{}
+}
+
+// Annotate records the file name and proto AST path of a list of atoms
+// so that a later call to P can emit a link from each atom to its origin.
+func Annotate(file *FileDescriptor, path string, atoms ...interface{}) *AnnotatedAtoms {
+	return &AnnotatedAtoms{source: *file.Name, path: path, atoms: atoms}
+}
+
+// printAtom prints the (atomic, non-annotation) argument to the generated output.
+func (g *Generator) printAtom(v interface{}) {
+	switch v := v.(type) {
+	case string:
+		g.WriteString(v)
+	case *string:
+		g.WriteString(*v)
+	case bool:
+		fmt.Fprint(g, v)
+	case *bool:
+		fmt.Fprint(g, *v)
+	case int:
+		fmt.Fprint(g, v)
+	case *int32:
+		fmt.Fprint(g, *v)
+	case *int64:
+		fmt.Fprint(g, *v)
+	case float64:
+		fmt.Fprint(g, v)
+	case *float64:
+		fmt.Fprint(g, *v)
+	case GoPackageName:
+		g.WriteString(string(v))
+	case GoImportPath:
+		g.WriteString(strconv.Quote(string(v)))
+	default:
+		g.Fail(fmt.Sprintf("unknown type in printer: %T", v))
+	}
+}
+
+// P prints the arguments to the generated output. It handles strings and int32s,
+// plus indirections such as *string. Any inputs of type AnnotatedAtoms may emit
+// annotations in a .meta file in addition to outputting the atoms themselves (if
+// g.annotateCode is true).
+func (g *Generator) P(str ...interface{}) {
+	if !g.writeOutput {
+		return
+	}
+	g.WriteString(g.indent)
+	for _, v := range str {
+		switch v := v.(type) {
+		case *AnnotatedAtoms:
+			begin := int32(g.Len())
+			for _, v := range v.atoms {
+				g.printAtom(v)
+			}
+			if g.annotateCode {
+				end := int32(g.Len())
+				var path []int32
+				for _, token := range strings.Split(v.path, ",") {
+					val, err := strconv.ParseInt(token, 10, 32)
+					if err != nil {
+						g.Fail("could not parse proto AST path: ", err.Error())
+					}
+					path = append(path, int32(val))
+				}
+				g.annotations = append(g.annotations, &descriptor.GeneratedCodeInfo_Annotation{
+					Path:       path,
+					SourceFile: &v.source,
+					Begin:      &begin,
+					End:        &end,
+				})
+			}
+		default:
+			g.printAtom(v)
+		}
+	}
+	g.WriteByte('\n')
+}
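+
+// For illustration (hypothetical atoms): with the indent at one tab stop,
+//
+//	g.P("var ", name, " = ", value)
+//
+// writes a line such as "\tvar foo = 42\n" to the buffer; wrapping the same
+// atoms in Annotate would additionally record a GeneratedCodeInfo_Annotation
+// covering the bytes they produced.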
+
+// addInitf stores the given statement to be printed inside the file's init function.
+// The statement is given as a format specifier and arguments.
+func (g *Generator) addInitf(stmt string, a ...interface{}) {
+	g.init = append(g.init, fmt.Sprintf(stmt, a...))
+}
+
+// In indents the output one tab stop.
+func (g *Generator) In() { g.indent += "\t" }
+
+// Out unindents the output one tab stop.
+func (g *Generator) Out() {
+	if len(g.indent) > 0 {
+		g.indent = g.indent[1:]
+	}
+}
+
+// GenerateAllFiles generates the output for all the files we're outputting.
+func (g *Generator) GenerateAllFiles() {
+	// Initialize the plugins
+	for _, p := range plugins {
+		p.Init(g)
+	}
+	// Generate the output. The generator runs for every file, even the files
+	// that we don't generate output for, so that we can collate the full list
+	// of exported symbols to support public imports.
+	genFileMap := make(map[*FileDescriptor]bool, len(g.genFiles))
+	for _, file := range g.genFiles {
+		genFileMap[file] = true
+	}
+	for _, file := range g.allFiles {
+		g.Reset()
+		g.annotations = nil
+		g.writeOutput = genFileMap[file]
+		g.generate(file)
+		if !g.writeOutput {
+			continue
+		}
+		fname := file.goFileName(g.pathType)
+		g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+			Name:    proto.String(fname),
+			Content: proto.String(g.String()),
+		})
+		if g.annotateCode {
+			// Store the generated code annotations in text, as the protoc plugin protocol requires that
+			// strings contain valid UTF-8.
+			g.Response.File = append(g.Response.File, &plugin.CodeGeneratorResponse_File{
+				Name:    proto.String(file.goFileName(g.pathType) + ".meta"),
+				Content: proto.String(proto.CompactTextString(&descriptor.GeneratedCodeInfo{Annotation: g.annotations})),
+			})
+		}
+	}
+}
+
+// Run all the plugins associated with the file.
+func (g *Generator) runPlugins(file *FileDescriptor) {
+	for _, p := range plugins {
+		p.Generate(file)
+	}
+}
+
+// generate fills the response protocol buffer with the generated output for the
+// given file.
+func (g *Generator) generate(file *FileDescriptor) {
+	g.file = file
+	g.usedPackages = make(map[GoImportPath]bool)
+	g.packageNames = make(map[GoImportPath]GoPackageName)
+	g.usedPackageNames = make(map[GoPackageName]bool)
+	g.addedImports = make(map[GoImportPath]bool)
+	for name := range globalPackageNames {
+		g.usedPackageNames[name] = true
+	}
+
+	g.P("// This is a compile-time assertion to ensure that this generated file")
+	g.P("// is compatible with the proto package it is being compiled against.")
+	g.P("// A compilation error at this line likely means your copy of the")
+	g.P("// proto package needs to be updated.")
+	g.P("const _ = ", g.Pkg["proto"], ".ProtoPackageIsVersion", generatedCodeVersion, " // please upgrade the proto package")
+	g.P()
+
+	for _, td := range g.file.imp {
+		g.generateImported(td)
+	}
+	for _, enum := range g.file.enum {
+		g.generateEnum(enum)
+	}
+	for _, desc := range g.file.desc {
+		// Don't generate virtual messages for maps.
+		if desc.GetOptions().GetMapEntry() {
+			continue
+		}
+		g.generateMessage(desc)
+	}
+	for _, ext := range g.file.ext {
+		g.generateExtension(ext)
+	}
+	g.generateInitFunction()
+	g.generateFileDescriptor(file)
+
+	// Run the plugins before the imports so we know which imports are necessary.
+	g.runPlugins(file)
+
+	// Generate header and imports last, though they appear first in the output.
+	rem := g.Buffer
+	remAnno := g.annotations
+	g.Buffer = new(bytes.Buffer)
+	g.annotations = nil
+	g.generateHeader()
+	g.generateImports()
+	if !g.writeOutput {
+		return
+	}
+	// Adjust the offsets for annotations displaced by the header and imports.
+	for _, anno := range remAnno {
+		*anno.Begin += int32(g.Len())
+		*anno.End += int32(g.Len())
+		g.annotations = append(g.annotations, anno)
+	}
+	g.Write(rem.Bytes())
+
+	// Reformat generated code and patch annotation locations.
+	fset := token.NewFileSet()
+	original := g.Bytes()
+	if g.annotateCode {
+		// make a copy independent of g; we'll need it after Reset.
+		original = append([]byte(nil), original...)
+	}
+	fileAST, err := parser.ParseFile(fset, "", original, parser.ParseComments)
+	if err != nil {
+		// Print out the bad code with line numbers.
+		// This should never happen in practice, but it can while changing generated code,
+		// so consider this a debugging aid.
+		var src bytes.Buffer
+		s := bufio.NewScanner(bytes.NewReader(original))
+		for line := 1; s.Scan(); line++ {
+			fmt.Fprintf(&src, "%5d\t%s\n", line, s.Bytes())
+		}
+		g.Fail("bad Go source code was generated:", err.Error(), "\n"+src.String())
+	}
+	ast.SortImports(fset, fileAST)
+	g.Reset()
+	err = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(g, fset, fileAST)
+	if err != nil {
+		g.Fail("generated Go source code could not be reformatted:", err.Error())
+	}
+	if g.annotateCode {
+		m, err := remap.Compute(original, g.Bytes())
+		if err != nil {
+			g.Fail("formatted generated Go source code could not be mapped back to the original code:", err.Error())
+		}
+		for _, anno := range g.annotations {
+			new, ok := m.Find(int(*anno.Begin), int(*anno.End))
+			if !ok {
+				g.Fail("span in formatted generated Go source code could not be mapped back to the original code")
+			}
+			*anno.Begin = int32(new.Pos)
+			*anno.End = int32(new.End)
+		}
+	}
+}
+
+// Generate the header, including package definition
+func (g *Generator) generateHeader() {
+	g.P("// Code generated by protoc-gen-go. DO NOT EDIT.")
+	if g.file.GetOptions().GetDeprecated() {
+		g.P("// ", g.file.Name, " is a deprecated file.")
+	} else {
+		g.P("// source: ", g.file.Name)
+	}
+	g.P()
+	g.PrintComments(strconv.Itoa(packagePath))
+	g.P()
+	g.P("package ", g.file.packageName)
+	g.P()
+}
+
+// deprecationComment is the standard comment added to deprecated
+// messages, fields, enums, and enum values.
+var deprecationComment = "// Deprecated: Do not use."
+
+// PrintComments prints any comments from the source .proto file.
+// The path is a comma-separated list of integers.
+// It returns an indication of whether any comments were printed.
+// See descriptor.proto for its format.
+func (g *Generator) PrintComments(path string) bool {
+	if !g.writeOutput {
+		return false
+	}
+	if c, ok := g.makeComments(path); ok {
+		g.P(c)
+		return true
+	}
+	return false
+}
+
+// makeComments generates the comment string for the field; there is no "\n" at the end.
+func (g *Generator) makeComments(path string) (string, bool) {
+	loc, ok := g.file.comments[path]
+	if !ok {
+		return "", false
+	}
+	w := new(bytes.Buffer)
+	nl := ""
+	for _, line := range strings.Split(strings.TrimSuffix(loc.GetLeadingComments(), "\n"), "\n") {
+		fmt.Fprintf(w, "%s//%s", nl, line)
+		nl = "\n"
+	}
+	return w.String(), true
+}
+
+func (g *Generator) fileByName(filename string) *FileDescriptor {
+	return g.allFilesByName[filename]
+}
+
+// weak returns whether the ith import of the current file is a weak import.
+func (g *Generator) weak(i int32) bool {
+	for _, j := range g.file.WeakDependency {
+		if j == i {
+			return true
+		}
+	}
+	return false
+}
+
+// Generate the imports
+func (g *Generator) generateImports() {
+	imports := make(map[GoImportPath]GoPackageName)
+	for i, s := range g.file.Dependency {
+		fd := g.fileByName(s)
+		importPath := fd.importPath
+		// Do not import our own package.
+		if importPath == g.file.importPath {
+			continue
+		}
+		// Do not import weak imports.
+		if g.weak(int32(i)) {
+			continue
+		}
+		// Do not import a package twice.
+		if _, ok := imports[importPath]; ok {
+			continue
+		}
+		// We need to import all the dependencies, even if we don't reference them,
+		// because other code and tools depend on having the full transitive closure
+		// of protocol buffer types in the binary.
+		packageName := g.GoPackageName(importPath)
+		if _, ok := g.usedPackages[importPath]; !ok {
+			packageName = "_"
+		}
+		imports[importPath] = packageName
+	}
+	for importPath := range g.addedImports {
+		imports[importPath] = g.GoPackageName(importPath)
+	}
+	// We almost always need a proto import.  Rather than computing when we
+	// do, which is tricky when there's a plugin, just import it and
+	// reference it later. The same argument applies to the fmt and math packages.
+	g.P("import (")
+	g.P(g.Pkg["fmt"] + ` "fmt"`)
+	g.P(g.Pkg["math"] + ` "math"`)
+	g.P(g.Pkg["proto"]+" ", GoImportPath(g.ImportPrefix)+"github.com/golang/protobuf/proto")
+	for importPath, packageName := range imports {
+		g.P(packageName, " ", GoImportPath(g.ImportPrefix)+importPath)
+	}
+	g.P(")")
+	g.P()
+	// TODO: may need to worry about uniqueness across plugins
+	for _, p := range plugins {
+		p.GenerateImports(g.file)
+		g.P()
+	}
+	g.P("// Reference imports to suppress errors if they are not otherwise used.")
+	g.P("var _ = ", g.Pkg["proto"], ".Marshal")
+	g.P("var _ = ", g.Pkg["fmt"], ".Errorf")
+	g.P("var _ = ", g.Pkg["math"], ".Inf")
+	g.P()
+}
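+
+// For illustration (hypothetical paths), the emitted import block looks
+// roughly like:
+//
+//	import (
+//		fmt "fmt"
+//		math "math"
+//		proto "github.com/golang/protobuf/proto"
+//		duration "example.com/gen/duration"
+//		_ "example.com/gen/unreferenced" // dependency kept for registration side effects
+//	)
+//
+// followed by the "var _ = ..." references that keep the fmt, math, and proto
+// imports from triggering "imported and not used" errors.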
+
+func (g *Generator) generateImported(id *ImportedDescriptor) {
+	df := id.o.File()
+	filename := *df.Name
+	if df.importPath == g.file.importPath {
+		// Don't generate type aliases for files in the same Go package as this one.
+		return
+	}
+	if !supportTypeAliases {
+		g.Fail(fmt.Sprintf("%s: public imports require at least go1.9", filename))
+	}
+	g.usedPackages[df.importPath] = true
+
+	for _, sym := range df.exported[id.o] {
+		sym.GenerateAlias(g, filename, g.GoPackageName(df.importPath))
+	}
+
+	g.P()
+}
+
+// Generate the enum definitions for this EnumDescriptor.
+func (g *Generator) generateEnum(enum *EnumDescriptor) {
+	// The full type name
+	typeName := enum.TypeName()
+	// The full type name, CamelCased.
+	ccTypeName := CamelCaseSlice(typeName)
+	ccPrefix := enum.prefix()
+
+	deprecatedEnum := ""
+	if enum.GetOptions().GetDeprecated() {
+		deprecatedEnum = deprecationComment
+	}
+	g.PrintComments(enum.path)
+	g.P("type ", Annotate(enum.file, enum.path, ccTypeName), " int32", deprecatedEnum)
+	g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()})
+	g.P("const (")
+	for i, e := range enum.Value {
+		etorPath := fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)
+		g.PrintComments(etorPath)
+
+		deprecatedValue := ""
+		if e.GetOptions().GetDeprecated() {
+			deprecatedValue = deprecationComment
+		}
+
+		name := ccPrefix + *e.Name
+		g.P(Annotate(enum.file, etorPath, name), " ", ccTypeName, " = ", e.Number, " ", deprecatedValue)
+		g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName})
+	}
+	g.P(")")
+	g.P()
+	g.P("var ", ccTypeName, "_name = map[int32]string{")
+	generated := make(map[int32]bool) // avoid duplicate values
+	for _, e := range enum.Value {
+		duplicate := ""
+		if _, present := generated[*e.Number]; present {
+			duplicate = "// Duplicate value: "
+		}
+		g.P(duplicate, e.Number, ": ", strconv.Quote(*e.Name), ",")
+		generated[*e.Number] = true
+	}
+	g.P("}")
+	g.P()
+	g.P("var ", ccTypeName, "_value = map[string]int32{")
+	for _, e := range enum.Value {
+		g.P(strconv.Quote(*e.Name), ": ", e.Number, ",")
+	}
+	g.P("}")
+	g.P()
+
+	if !enum.proto3() {
+		g.P("func (x ", ccTypeName, ") Enum() *", ccTypeName, " {")
+		g.P("p := new(", ccTypeName, ")")
+		g.P("*p = x")
+		g.P("return p")
+		g.P("}")
+		g.P()
+	}
+
+	g.P("func (x ", ccTypeName, ") String() string {")
+	g.P("return ", g.Pkg["proto"], ".EnumName(", ccTypeName, "_name, int32(x))")
+	g.P("}")
+	g.P()
+
+	if !enum.proto3() {
+		g.P("func (x *", ccTypeName, ") UnmarshalJSON(data []byte) error {")
+		g.P("value, err := ", g.Pkg["proto"], ".UnmarshalJSONEnum(", ccTypeName, `_value, data, "`, ccTypeName, `")`)
+		g.P("if err != nil {")
+		g.P("return err")
+		g.P("}")
+		g.P("*x = ", ccTypeName, "(value)")
+		g.P("return nil")
+		g.P("}")
+		g.P()
+	}
+
+	var indexes []string
+	for m := enum.parent; m != nil; m = m.parent {
+		// XXX: skip groups?
+		indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
+	}
+	indexes = append(indexes, strconv.Itoa(enum.index))
+	g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) {")
+	g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
+	g.P("}")
+	g.P()
+	if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" {
+		g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`)
+		g.P()
+	}
+
+	g.generateEnumRegistration(enum)
+}
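+
+// For illustration, a hypothetical top-level proto3 enum
+//
+//	enum Mood { UNKNOWN = 0; HAPPY = 1; }
+//
+// produces output along these lines (assuming enum.prefix() yields "Mood_"):
+//
+//	type Mood int32
+//
+//	const (
+//		Mood_UNKNOWN Mood = 0
+//		Mood_HAPPY   Mood = 1
+//	)
+//
+//	var Mood_name = map[int32]string{0: "UNKNOWN", 1: "HAPPY"}
+//	var Mood_value = map[string]int32{"UNKNOWN": 0, "HAPPY": 1}
+//
+//	func (x Mood) String() string { return proto.EnumName(Mood_name, int32(x)) }
+//
+// plus an EnumDescriptor method; Enum and UnmarshalJSON are added only for
+// proto2 enums.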
+
+// The tag is a string like "varint,2,opt,name=fieldname,def=7" that
+// identifies details of the field for the protocol buffer marshaling and unmarshaling
+// code.  The fields are:
+//	wire encoding
+//	protocol tag number
+//	opt,req,rep for optional, required, or repeated
+//	packed whether the encoding is "packed" (optional; repeated primitives only)
+//	name= the original declared name
+//	enum= the name of the enum type if it is an enum-typed field.
+//	proto3 if this field is in a proto3 message
+//	def= string representation of the default value, if any.
+// The default value must be in a representation that can be used at run-time
+// to generate the default value. Thus bools become 0 and 1, for instance.
+func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptorProto, wiretype string) string {
+	optrepreq := ""
+	switch {
+	case isOptional(field):
+		optrepreq = "opt"
+	case isRequired(field):
+		optrepreq = "req"
+	case isRepeated(field):
+		optrepreq = "rep"
+	}
+	var defaultValue string
+	if dv := field.DefaultValue; dv != nil { // set means an explicit default
+		defaultValue = *dv
+		// Some types need tweaking.
+		switch *field.Type {
+		case descriptor.FieldDescriptorProto_TYPE_BOOL:
+			if defaultValue == "true" {
+				defaultValue = "1"
+			} else {
+				defaultValue = "0"
+			}
+		case descriptor.FieldDescriptorProto_TYPE_STRING,
+			descriptor.FieldDescriptorProto_TYPE_BYTES:
+			// Nothing to do. Quoting is done for the whole tag.
+		case descriptor.FieldDescriptorProto_TYPE_ENUM:
+			// For enums we need to provide the integer constant.
+			obj := g.ObjectNamed(field.GetTypeName())
+			if id, ok := obj.(*ImportedDescriptor); ok {
+				// It is an enum that was publicly imported.
+				// We need the underlying type.
+				obj = id.o
+			}
+			enum, ok := obj.(*EnumDescriptor)
+			if !ok {
+				log.Printf("obj is a %T", obj)
+				if id, ok := obj.(*ImportedDescriptor); ok {
+					log.Printf("id.o is a %T", id.o)
+				}
+				g.Fail("unknown enum type", CamelCaseSlice(obj.TypeName()))
+			}
+			defaultValue = enum.integerValueAsString(defaultValue)
+		case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+			if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
+				if f, err := strconv.ParseFloat(defaultValue, 32); err == nil {
+					defaultValue = fmt.Sprint(float32(f))
+				}
+			}
+		case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+			if def := defaultValue; def != "inf" && def != "-inf" && def != "nan" {
+				if f, err := strconv.ParseFloat(defaultValue, 64); err == nil {
+					defaultValue = fmt.Sprint(f)
+				}
+			}
+		}
+		defaultValue = ",def=" + defaultValue
+	}
+	enum := ""
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_ENUM {
+		// We avoid using obj.GoPackageName(), because we want to use the
+		// original (proto-world) package name.
+		obj := g.ObjectNamed(field.GetTypeName())
+		if id, ok := obj.(*ImportedDescriptor); ok {
+			obj = id.o
+		}
+		enum = ",enum="
+		if pkg := obj.File().GetPackage(); pkg != "" {
+			enum += pkg + "."
+		}
+		enum += CamelCaseSlice(obj.TypeName())
+	}
+	packed := ""
+	if (field.Options != nil && field.Options.GetPacked()) ||
+		// Per https://developers.google.com/protocol-buffers/docs/proto3#simple:
+		// "In proto3, repeated fields of scalar numeric types use packed encoding by default."
+		(message.proto3() && (field.Options == nil || field.Options.Packed == nil) &&
+			isRepeated(field) && isScalar(field)) {
+		packed = ",packed"
+	}
+	fieldName := field.GetName()
+	name := fieldName
+	if *field.Type == descriptor.FieldDescriptorProto_TYPE_GROUP {
+		// We must use the type name for groups instead of
+		// the field name to preserve capitalization.
+		// type_name in FieldDescriptorProto is fully-qualified,
+		// but we only want the local part.
+		name = *field.TypeName
+		if i := strings.LastIndex(name, "."); i >= 0 {
+			name = name[i+1:]
+		}
+	}
+	if json := field.GetJsonName(); field.Extendee == nil && json != "" && json != name {
+		// TODO: escaping might be needed, in which case
+		// perhaps this should be in its own "json" tag.
+		name += ",json=" + json
+	}
+	name = ",name=" + name
+	if message.proto3() {
+		name += ",proto3"
+	}
+	oneof := ""
+	if field.OneofIndex != nil {
+		oneof = ",oneof"
+	}
+	return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s",
+		wiretype,
+		field.GetNumber(),
+		optrepreq,
+		packed,
+		name,
+		enum,
+		oneof,
+		defaultValue))
+}
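+
+// For illustration: a hypothetical proto2 field
+//
+//	optional int32 region_id = 8;
+//
+// yields the tag "varint,8,opt,name=region_id,json=regionId" (assuming protoc
+// populated json_name with "regionId"), matching the struct-tag example shown
+// on fieldCommon below.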
+
+func needsStar(typ descriptor.FieldDescriptorProto_Type) bool {
+	switch typ {
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		return false
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		return false
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return false
+	}
+	return true
+}
+
+// TypeName is the printed name appropriate for an item. If the object is in the current file,
+// TypeName drops the package name and underscores the rest (nested names are
+// joined with underscores). Otherwise the object is from another package, and
+// the result is the package name followed by the item name.
+// The result always has an initial capital.
+func (g *Generator) TypeName(obj Object) string {
+	return g.DefaultPackageName(obj) + CamelCaseSlice(obj.TypeName())
+}
+
+// GoType returns a string representing the type name, and the wire type
+func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescriptorProto) (typ string, wire string) {
+	// TODO: Options.
+	switch *field.Type {
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+		typ, wire = "float64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_FLOAT:
+		typ, wire = "float32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_INT64:
+		typ, wire = "int64", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_UINT64:
+		typ, wire = "uint64", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_INT32:
+		typ, wire = "int32", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_UINT32:
+		typ, wire = "uint32", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED64:
+		typ, wire = "uint64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_FIXED32:
+		typ, wire = "uint32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		typ, wire = "bool", "varint"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		typ, wire = "string", "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_GROUP:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = "*"+g.TypeName(desc), "group"
+	case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = "*"+g.TypeName(desc), "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_BYTES:
+		typ, wire = "[]byte", "bytes"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		desc := g.ObjectNamed(field.GetTypeName())
+		typ, wire = g.TypeName(desc), "varint"
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED32:
+		typ, wire = "int32", "fixed32"
+	case descriptor.FieldDescriptorProto_TYPE_SFIXED64:
+		typ, wire = "int64", "fixed64"
+	case descriptor.FieldDescriptorProto_TYPE_SINT32:
+		typ, wire = "int32", "zigzag32"
+	case descriptor.FieldDescriptorProto_TYPE_SINT64:
+		typ, wire = "int64", "zigzag64"
+	default:
+		g.Fail("unknown type for", field.GetName())
+	}
+	if isRepeated(field) {
+		typ = "[]" + typ
+	} else if message != nil && message.proto3() {
+		return
+	} else if field.OneofIndex != nil && message != nil {
+		return
+	} else if needsStar(*field.Type) {
+		typ = "*" + typ
+	}
+	return
+}
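+
+// For illustration (hypothetical fields):
+//
+//	repeated string tags = 1;  // typ "[]string", wire "bytes"
+//	optional int32  id   = 2;  // typ "*int32", wire "varint" (proto2)
+//	int32           id   = 2;  // typ "int32", wire "varint" (proto3)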
+
+// RecordTypeUse records that the type with the given fully-qualified proto name
+// is used in the file being generated, so that its package is imported.
+func (g *Generator) RecordTypeUse(t string) {
+	if _, ok := g.typeNameToObject[t]; !ok {
+		return
+	}
+	importPath := g.ObjectNamed(t).GoImportPath()
+	if importPath == g.outputImportPath {
+		// Don't record use of objects in our package.
+		return
+	}
+	g.AddImport(importPath)
+	g.usedPackages[importPath] = true
+}
+
+// Method names that may be generated.  Fields with these names get an
+// underscore appended. Any change to this set is a potential incompatible
+// API change because it changes generated field names.
+var methodNames = [...]string{
+	"Reset",
+	"String",
+	"ProtoMessage",
+	"Marshal",
+	"Unmarshal",
+	"ExtensionRangeArray",
+	"ExtensionMap",
+	"Descriptor",
+}
+
+// Names of messages in the `google.protobuf` package for which
+// we will generate XXX_WellKnownType methods.
+var wellKnownTypes = map[string]bool{
+	"Any":       true,
+	"Duration":  true,
+	"Empty":     true,
+	"Struct":    true,
+	"Timestamp": true,
+
+	"Value":       true,
+	"ListValue":   true,
+	"DoubleValue": true,
+	"FloatValue":  true,
+	"Int64Value":  true,
+	"UInt64Value": true,
+	"Int32Value":  true,
+	"UInt32Value": true,
+	"BoolValue":   true,
+	"StringValue": true,
+	"BytesValue":  true,
+}
+
+// getterDefault finds the default value for the field to return from a getter,
+// regardless of whether it's a built-in default or explicit from the source. Returns e.g. "nil", `""`, "Default_MessageType_FieldName"
+func (g *Generator) getterDefault(field *descriptor.FieldDescriptorProto, goMessageType string) string {
+	if isRepeated(field) {
+		return "nil"
+	}
+	if def := field.GetDefaultValue(); def != "" {
+		defaultConstant := g.defaultConstantName(goMessageType, field.GetName())
+		if *field.Type != descriptor.FieldDescriptorProto_TYPE_BYTES {
+			return defaultConstant
+		}
+		return "append([]byte(nil), " + defaultConstant + "...)"
+	}
+	switch *field.Type {
+	case descriptor.FieldDescriptorProto_TYPE_BOOL:
+		return "false"
+	case descriptor.FieldDescriptorProto_TYPE_STRING:
+		return `""`
+	case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE, descriptor.FieldDescriptorProto_TYPE_BYTES:
+		return "nil"
+	case descriptor.FieldDescriptorProto_TYPE_ENUM:
+		obj := g.ObjectNamed(field.GetTypeName())
+		var enum *EnumDescriptor
+		if id, ok := obj.(*ImportedDescriptor); ok {
+			// The enum type has been publicly imported.
+			enum, _ = id.o.(*EnumDescriptor)
+		} else {
+			enum, _ = obj.(*EnumDescriptor)
+		}
+		if enum == nil {
+			log.Printf("don't know how to generate getter for %s", field.GetName())
+			return "nil"
+		}
+		if len(enum.Value) == 0 {
+			return "0 // empty enum"
+		}
+		first := enum.Value[0].GetName()
+		return g.DefaultPackageName(obj) + enum.prefix() + first
+	default:
+		return "0"
+	}
+}
+
+// defaultConstantName builds the name of the default constant from the message
+// type name and the untouched field name, e.g. "Default_MessageType_FieldName"
+func (g *Generator) defaultConstantName(goMessageType, protoFieldName string) string {
+	return "Default_" + goMessageType + "_" + CamelCase(protoFieldName)
+}
+
+// The different types of fields in a message and how to actually print them
+// Most of the logic for generateMessage is in the methods of these types.
+//
+// Note that the content of the field is irrelevant, a simpleField can contain
+// anything from a scalar to a group (which is just a message).
+//
+// Extension fields (and message sets) are however handled separately.
+//
+// simpleField - a field that is neither weak nor oneof, possibly repeated
+// oneofField - a field containing a list of subfields:
+// - oneofSubField - a field within the oneof
+
+// msgCtx contains the context for the generator functions.
+type msgCtx struct {
+	goName  string      // Go struct name of the message, e.g. MessageName
+	message *Descriptor // The descriptor for the message
+}
+
+// fieldCommon contains data common to all types of fields.
+type fieldCommon struct {
+	goName     string // Go name of field, e.g. "FieldName" or "Descriptor_"
+	protoName  string // Name of field in proto language, e.g. "field_name" or "descriptor"
+	getterName string // Name of the getter, e.g. "GetFieldName" or "GetDescriptor_"
+	goType     string // The Go type as a string, e.g. "*int32" or "*OtherMessage"
+	tags       string // The tag string/annotation for the type, e.g. `protobuf:"varint,8,opt,name=region_id,json=regionId"`
+	fullPath   string // The full path of the field as used by Annotate etc, e.g. "4,0,2,0"
+}
+
+// getProtoName gets the proto name of a field, e.g. "field_name" or "descriptor".
+func (f *fieldCommon) getProtoName() string {
+	return f.protoName
+}
+
+// getGoType returns the Go type of the field as a string, e.g. "*int32".
+func (f *fieldCommon) getGoType() string {
+	return f.goType
+}
+
+// simpleField is not weak, not a oneof, not an extension. Can be required, optional or repeated.
+type simpleField struct {
+	fieldCommon
+	protoTypeName string                               // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+	protoType     descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+	deprecated    string                               // Deprecation comment, if any, e.g. "// Deprecated: Do not use."
+	getterDef     string                               // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+	protoDef      string                               // Default value as defined in the proto file, e.g. "yoshi" or "5"
+	comment       string                               // The full comment for the field, e.g. "// Useful information"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *simpleField) decl(g *Generator, mc *msgCtx) {
+	g.P(f.comment, Annotate(mc.message.file, f.fullPath, f.goName), "\t", f.goType, "\t`", f.tags, "`", f.deprecated)
+}
+
+// getter prints the getter for the field.
+func (f *simpleField) getter(g *Generator, mc *msgCtx) {
+	star := ""
+	tname := f.goType
+	if needsStar(f.protoType) && tname[0] == '*' {
+		tname = tname[1:]
+		star = "*"
+	}
+	if f.deprecated != "" {
+		g.P(f.deprecated)
+	}
+	g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() "+tname+" {")
+	if f.getterDef == "nil" { // Simpler getter
+		g.P("if m != nil {")
+		g.P("return m." + f.goName)
+		g.P("}")
+		g.P("return nil")
+		g.P("}")
+		g.P()
+		return
+	}
+	if mc.message.proto3() {
+		g.P("if m != nil {")
+	} else {
+		g.P("if m != nil && m." + f.goName + " != nil {")
+	}
+	g.P("return " + star + "m." + f.goName)
+	g.P("}")
+	g.P("return ", f.getterDef)
+	g.P("}")
+	g.P()
+}
+
+// setter prints the setter method of the field.
+func (f *simpleField) setter(g *Generator, mc *msgCtx) {
+	// No setter for regular fields yet
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g. "yoshi" or "5".
+func (f *simpleField) getProtoDef() string {
+	return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *simpleField) getProtoTypeName() string {
+	return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *simpleField) getProtoType() descriptor.FieldDescriptorProto_Type {
+	return f.protoType
+}
+
+// oneofSubFields are kept in a slice held by each oneofField. They do not appear in the top-level slice of fields for the message.
+type oneofSubField struct {
+	fieldCommon
+	protoTypeName string                               // Proto type name, empty if primitive, e.g. ".google.protobuf.Duration"
+	protoType     descriptor.FieldDescriptorProto_Type // Actual type enum value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+	oneofTypeName string                               // Type name of the enclosing struct, e.g. "MessageName_FieldName"
+	fieldNumber   int                                  // Actual field number, as defined in proto, e.g. 12
+	getterDef     string                               // Default for getters, e.g. "nil", `""` or "Default_MessageType_FieldName"
+	protoDef      string                               // Default value as defined in the proto file, e.g. "yoshi" or "5"
+	deprecated    string                               // Deprecation comment, if any.
+}
+
+// typedNil prints a nil cast to the pointer type of this field,
+// for use in XXX_OneofWrappers.
+func (f *oneofSubField) typedNil(g *Generator) {
+	g.P("(*", f.oneofTypeName, ")(nil),")
+}
+
+// getProtoDef returns the default value explicitly stated in the proto file, e.g. "yoshi" or "5".
+func (f *oneofSubField) getProtoDef() string {
+	return f.protoDef
+}
+
+// getProtoTypeName returns the protobuf type name for the field as returned by field.GetTypeName(), e.g. ".google.protobuf.Duration".
+func (f *oneofSubField) getProtoTypeName() string {
+	return f.protoTypeName
+}
+
+// getProtoType returns the *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64.
+func (f *oneofSubField) getProtoType() descriptor.FieldDescriptorProto_Type {
+	return f.protoType
+}
+
+// oneofField represents the oneof on top level.
+// The alternative fields within the oneof are represented by oneofSubField.
+type oneofField struct {
+	fieldCommon
+	subFields []*oneofSubField // All the possible oneof fields
+	comment   string           // The full comment for the field, e.g. "// Types that are valid to be assigned to MyOneof:\n"
+}
+
+// decl prints the declaration of the field in the struct (if any).
+func (f *oneofField) decl(g *Generator, mc *msgCtx) {
+	comment := f.comment
+	for _, sf := range f.subFields {
+		comment += "//\t*" + sf.oneofTypeName + "\n"
+	}
+	g.P(comment, Annotate(mc.message.file, f.fullPath, f.goName), " ", f.goType, " `", f.tags, "`")
+}
+
+// getter for a oneof field will print additional discriminators and interfaces for the oneof,
+// also it prints all the getters for the sub fields.
+func (f *oneofField) getter(g *Generator, mc *msgCtx) {
+	// The discriminator type
+	g.P("type ", f.goType, " interface {")
+	g.P(f.goType, "()")
+	g.P("}")
+	g.P()
+	// The subField types, fulfilling the discriminator type contract
+	for _, sf := range f.subFields {
+		g.P("type ", Annotate(mc.message.file, sf.fullPath, sf.oneofTypeName), " struct {")
+		g.P(Annotate(mc.message.file, sf.fullPath, sf.goName), " ", sf.goType, " `", sf.tags, "`")
+		g.P("}")
+		g.P()
+	}
+	for _, sf := range f.subFields {
+		g.P("func (*", sf.oneofTypeName, ") ", f.goType, "() {}")
+		g.P()
+	}
+	// Getter for the oneof field
+	g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, f.fullPath, f.getterName), "() ", f.goType, " {")
+	g.P("if m != nil { return m.", f.goName, " }")
+	g.P("return nil")
+	g.P("}")
+	g.P()
+	// Getters for each oneof
+	for _, sf := range f.subFields {
+		if sf.deprecated != "" {
+			g.P(sf.deprecated)
+		}
+		g.P("func (m *", mc.goName, ") ", Annotate(mc.message.file, sf.fullPath, sf.getterName), "() "+sf.goType+" {")
+		g.P("if x, ok := m.", f.getterName, "().(*", sf.oneofTypeName, "); ok {")
+		g.P("return x.", sf.goName)
+		g.P("}")
+		g.P("return ", sf.getterDef)
+		g.P("}")
+		g.P()
+	}
+}
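+
+// For illustration: for a hypothetical message Shape whose "oneof kind" holds a
+// single message field "circle" (assuming the wrapper names resolve to
+// isShape_Kind and Shape_Circle), the getter above emits roughly:
+//
+//	type isShape_Kind interface {
+//		isShape_Kind()
+//	}
+//
+//	type Shape_Circle struct {
+//		Circle *Circle `protobuf:"bytes,1,opt,name=circle,proto3,oneof"`
+//	}
+//
+//	func (*Shape_Circle) isShape_Kind() {}
+//
+//	func (m *Shape) GetKind() isShape_Kind {
+//		if m != nil { return m.Kind }
+//		return nil
+//	}
+//
+//	func (m *Shape) GetCircle() *Circle {
+//		if x, ok := m.GetKind().(*Shape_Circle); ok {
+//			return x.Circle
+//		}
+//		return nil
+//	}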
+
+// setter prints the setter method of the field.
+func (f *oneofField) setter(g *Generator, mc *msgCtx) {
+	// No setters for oneof yet
+}
+
+// topLevelField interface implemented by all types of fields on the top level (not oneofSubField).
+type topLevelField interface {
+	decl(g *Generator, mc *msgCtx)   // print declaration within the struct
+	getter(g *Generator, mc *msgCtx) // print getter
+	setter(g *Generator, mc *msgCtx) // print setter if applicable
+}
+
+// defField interface implemented by all types of fields that can have defaults (not oneofField, but instead oneofSubField).
+type defField interface {
+	getProtoDef() string                                // default value explicitly stated in the proto file, e.g. "yoshi" or "5"
+	getProtoName() string                               // proto name of a field, e.g. "field_name" or "descriptor"
+	getGoType() string                                  // go type of the field  as a string, e.g. "*int32"
+	getProtoTypeName() string                           // protobuf type name for the field, e.g. ".google.protobuf.Duration"
+	getProtoType() descriptor.FieldDescriptorProto_Type // *field.Type value, e.g. descriptor.FieldDescriptorProto_TYPE_FIXED64
+}
+
+// generateDefaultConstants adds constants for default values if needed, which is
+// only if the default value is explicit in the proto.
+func (g *Generator) generateDefaultConstants(mc *msgCtx, topLevelFields []topLevelField) {
+	// Collect fields that can have defaults
+	dFields := []defField{}
+	for _, pf := range topLevelFields {
+		if f, ok := pf.(*oneofField); ok {
+			for _, osf := range f.subFields {
+				dFields = append(dFields, osf)
+			}
+			continue
+		}
+		dFields = append(dFields, pf.(defField))
+	}
+	for _, df := range dFields {
+		def := df.getProtoDef()
+		if def == "" {
+			continue
+		}
+		fieldname := g.defaultConstantName(mc.goName, df.getProtoName())
+		typename := df.getGoType()
+		if typename[0] == '*' {
+			typename = typename[1:]
+		}
+		kind := "const "
+		switch {
+		case typename == "bool":
+		case typename == "string":
+			def = strconv.Quote(def)
+		case typename == "[]byte":
+			def = "[]byte(" + strconv.Quote(unescape(def)) + ")"
+			kind = "var "
+		case def == "inf", def == "-inf", def == "nan":
+			// These names are known to, and defined by, the protocol language.
+			switch def {
+			case "inf":
+				def = "math.Inf(1)"
+			case "-inf":
+				def = "math.Inf(-1)"
+			case "nan":
+				def = "math.NaN()"
+			}
+			if df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT {
+				def = "float32(" + def + ")"
+			}
+			kind = "var "
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_FLOAT:
+			if f, err := strconv.ParseFloat(def, 32); err == nil {
+				def = fmt.Sprint(float32(f))
+			}
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_DOUBLE:
+			if f, err := strconv.ParseFloat(def, 64); err == nil {
+				def = fmt.Sprint(f)
+			}
+		case df.getProtoType() == descriptor.FieldDescriptorProto_TYPE_ENUM:
+			// Must be an enum.  Need to construct the prefixed name.
+			obj := g.ObjectNamed(df.getProtoTypeName())
+			var enum *EnumDescriptor
+			if id, ok := obj.(*ImportedDescriptor); ok {
+				// The enum type has been publicly imported.
+				enum, _ = id.o.(*EnumDescriptor)
+			} else {
+				enum, _ = obj.(*EnumDescriptor)
+			}
+			if enum == nil {
+				log.Printf("don't know how to generate constant for %s", fieldname)
+				continue
+			}
+			def = g.DefaultPackageName(obj) + enum.prefix() + def
+		}
+		g.P(kind, fieldname, " ", typename, " = ", def)
+		g.file.addExport(mc.message, constOrVarSymbol{fieldname, kind, ""})
+	}
+	g.P()
+}
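+
+// For illustration: a hypothetical proto2 field
+//
+//	optional string name = 1 [default = "yoshi"];
+//
+// in message Character produces
+//
+//	const Default_Character_Name string = "yoshi"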
+
+// generateInternalStructFields just adds the XXX_<something> fields to the message struct.
+func (g *Generator) generateInternalStructFields(mc *msgCtx, topLevelFields []topLevelField) {
+	g.P("XXX_NoUnkeyedLiteral\tstruct{} `json:\"-\"`") // prevent unkeyed struct literals
+	if len(mc.message.ExtensionRange) > 0 {
+		messageset := ""
+		if opts := mc.message.Options; opts != nil && opts.GetMessageSetWireFormat() {
+			messageset = "protobuf_messageset:\"1\" "
+		}
+		g.P(g.Pkg["proto"], ".XXX_InternalExtensions `", messageset, "json:\"-\"`")
+	}
+	g.P("XXX_unrecognized\t[]byte `json:\"-\"`")
+	g.P("XXX_sizecache\tint32 `json:\"-\"`")
+}
+
+// generateOneofFuncs emits the XXX_OneofWrappers method that the proto package
+// uses for marshaling, unmarshaling, and sizing oneof fields.
+func (g *Generator) generateOneofFuncs(mc *msgCtx, topLevelFields []topLevelField) {
+	ofields := []*oneofField{}
+	for _, f := range topLevelFields {
+		if o, ok := f.(*oneofField); ok {
+			ofields = append(ofields, o)
+		}
+	}
+	if len(ofields) == 0 {
+		return
+	}
+
+	// OneofFuncs
+	g.P("// XXX_OneofWrappers is for the internal use of the proto package.")
+	g.P("func (*", mc.goName, ") XXX_OneofWrappers() []interface{} {")
+	g.P("return []interface{}{")
+	for _, of := range ofields {
+		for _, sf := range of.subFields {
+			sf.typedNil(g)
+		}
+	}
+	g.P("}")
+	g.P("}")
+	g.P()
+}
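+
+// For illustration, continuing the hypothetical Shape example above, the
+// emitted wrapper list is:
+//
+//	func (*Shape) XXX_OneofWrappers() []interface{} {
+//		return []interface{}{
+//			(*Shape_Circle)(nil),
+//		}
+//	}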
+
+// generateMessageStruct adds the actual struct with its members (but not methods) to the output.
+func (g *Generator) generateMessageStruct(mc *msgCtx, topLevelFields []topLevelField) {
+	comments := g.PrintComments(mc.message.path)
+
+	// Guarantee deprecation comments appear after user-provided comments.
+	if mc.message.GetOptions().GetDeprecated() {
+		if comments {
+			// Convention: Separate deprecation comments from original
+			// comments with an empty line.
+			g.P("//")
+		}
+		g.P(deprecationComment)
+	}
+
+	g.P("type ", Annotate(mc.message.file, mc.message.path, mc.goName), " struct {")
+	for _, pf := range topLevelFields {
+		pf.decl(g, mc)
+	}
+	g.generateInternalStructFields(mc, topLevelFields)
+	g.P("}")
+}
+
+// generateGetters adds getters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateGetters(mc *msgCtx, topLevelFields []topLevelField) {
+	for _, pf := range topLevelFields {
+		pf.getter(g, mc)
+	}
+}
+
+// generateSetters adds setters for all fields, including oneofs and weak fields when applicable.
+func (g *Generator) generateSetters(mc *msgCtx, topLevelFields []topLevelField) {
+	for _, pf := range topLevelFields {
+		pf.setter(g, mc)
+	}
+}
+
+// generateCommonMethods adds methods to the message that are not on a per field basis.
+func (g *Generator) generateCommonMethods(mc *msgCtx) {
+	// Reset, String and ProtoMessage methods.
+	g.P("func (m *", mc.goName, ") Reset() { *m = ", mc.goName, "{} }")
+	g.P("func (m *", mc.goName, ") String() string { return ", g.Pkg["proto"], ".CompactTextString(m) }")
+	g.P("func (*", mc.goName, ") ProtoMessage() {}")
+	var indexes []string
+	for m := mc.message; m != nil; m = m.parent {
+		indexes = append([]string{strconv.Itoa(m.index)}, indexes...)
+	}
+	g.P("func (*", mc.goName, ") Descriptor() ([]byte, []int) {")
+	g.P("return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "}")
+	g.P("}")
+	g.P()
+	// TODO: Revisit the decision to use a XXX_WellKnownType method
+	// if we change proto.MessageName to work with multiple equivalents.
+	if mc.message.file.GetPackage() == "google.protobuf" && wellKnownTypes[mc.message.GetName()] {
+		g.P("func (*", mc.goName, `) XXX_WellKnownType() string { return "`, mc.message.GetName(), `" }`)
+		g.P()
+	}
+
+	// Extension support methods
+	if len(mc.message.ExtensionRange) > 0 {
+		g.P()
+		g.P("var extRange_", mc.goName, " = []", g.Pkg["proto"], ".ExtensionRange{")
+		for _, r := range mc.message.ExtensionRange {
+			end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends
+			g.P("{Start: ", r.Start, ", End: ", end, "},")
+		}
+		g.P("}")
+		g.P("func (*", mc.goName, ") ExtensionRangeArray() []", g.Pkg["proto"], ".ExtensionRange {")
+		g.P("return extRange_", mc.goName)
+		g.P("}")
+		g.P()
+	}
+
+	// TODO: It does not scale to keep adding another method for every
+	// operation on protos that we want to switch over to using the
+	// table-driven approach. Instead, we should only add a single method
+	// that allows getting access to the *InternalMessageInfo struct and then
+	// calling Unmarshal, Marshal, Merge, Size, and Discard directly on that.
+
+	// Wrapper for table-driven marshaling and unmarshaling.
+	g.P("func (m *", mc.goName, ") XXX_Unmarshal(b []byte) error {")
+	g.P("return xxx_messageInfo_", mc.goName, ".Unmarshal(m, b)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {")
+	g.P("return xxx_messageInfo_", mc.goName, ".Marshal(b, m, deterministic)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Merge(src ", g.Pkg["proto"], ".Message) {")
+	g.P("xxx_messageInfo_", mc.goName, ".Merge(m, src)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_Size() int {") // avoid name clash with "Size" field in some message
+	g.P("return xxx_messageInfo_", mc.goName, ".Size(m)")
+	g.P("}")
+
+	g.P("func (m *", mc.goName, ") XXX_DiscardUnknown() {")
+	g.P("xxx_messageInfo_", mc.goName, ".DiscardUnknown(m)")
+	g.P("}")
+
+	g.P("var xxx_messageInfo_", mc.goName, " ", g.Pkg["proto"], ".InternalMessageInfo")
+	g.P()
+}
+
+// Generate the type, methods and default constant definitions for this Descriptor.
+func (g *Generator) generateMessage(message *Descriptor) {
+	topLevelFields := []topLevelField{}
+	oFields := make(map[int32]*oneofField)
+	// The full type name
+	typeName := message.TypeName()
+	// The full type name, CamelCased.
+	goTypeName := CamelCaseSlice(typeName)
+
+	usedNames := make(map[string]bool)
+	for _, n := range methodNames {
+		usedNames[n] = true
+	}
+
+	// allocNames finds a conflict-free variation of the given strings,
+	// consistently mutating their suffixes.
+	// It returns the same number of strings.
+	allocNames := func(ns ...string) []string {
+	Loop:
+		for {
+			for _, n := range ns {
+				if usedNames[n] {
+					for i := range ns {
+						ns[i] += "_"
+					}
+					continue Loop
+				}
+			}
+			for _, n := range ns {
+				usedNames[n] = true
+			}
+			return ns
+		}
+	}
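+
+	// For illustration: with "String" already reserved as a method name,
+	// allocNames("String", "GetString") returns ["String_", "GetString_"],
+	// appending "_" to every name until none of them conflict.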
+
+	mapFieldTypes := make(map[*descriptor.FieldDescriptorProto]string) // keep track of the map fields to be added later
+
+	// Build a structure more suitable for generating the text in one pass
+	for i, field := range message.Field {
+		// Allocate the getter and the field at the same time so name
+		// collisions create field/method consistent names.
+		// TODO: This allocation occurs based on the order of the fields
+		// in the proto file, meaning that a change in the field
+		// ordering can change generated Method/Field names.
+		base := CamelCase(*field.Name)
+		ns := allocNames(base, "Get"+base)
+		fieldName, fieldGetterName := ns[0], ns[1]
+		typename, wiretype := g.GoType(message, field)
+		jsonName := *field.Name
+		tag := fmt.Sprintf("protobuf:%s json:%q", g.goTag(message, field, wiretype), jsonName+",omitempty")
+
+		oneof := field.OneofIndex != nil
+		if oneof && oFields[*field.OneofIndex] == nil {
+			odp := message.OneofDecl[int(*field.OneofIndex)]
+			base := CamelCase(odp.GetName())
+			fname := allocNames(base)[0]
+
+			// This is the first field of a oneof we haven't seen before.
+			// Generate the union field.
+			oneofFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)
+			c, ok := g.makeComments(oneofFullPath)
+			if ok {
+				c += "\n//\n"
+			}
+			c += "// Types that are valid to be assigned to " + fname + ":\n"
+			// Generate the rest of this comment later,
+			// when we've computed any disambiguation.
+
+			dname := "is" + goTypeName + "_" + fname
+			tag := `protobuf_oneof:"` + odp.GetName() + `"`
+			of := oneofField{
+				fieldCommon: fieldCommon{
+					goName:     fname,
+					getterName: "Get" + fname,
+					goType:     dname,
+					tags:       tag,
+					protoName:  odp.GetName(),
+					fullPath:   oneofFullPath,
+				},
+				comment: c,
+			}
+			topLevelFields = append(topLevelFields, &of)
+			oFields[*field.OneofIndex] = &of
+		}
+
+		if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE {
+			desc := g.ObjectNamed(field.GetTypeName())
+			if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() {
+				// Figure out the Go types and tags for the key and value types.
+				keyField, valField := d.Field[0], d.Field[1]
+				keyType, keyWire := g.GoType(d, keyField)
+				valType, valWire := g.GoType(d, valField)
+				keyTag, valTag := g.goTag(d, keyField, keyWire), g.goTag(d, valField, valWire)
+
+				// We don't use stars, except for message-typed values.
+				// Message and enum types are the only two possibly foreign types used in maps,
+				// so record their use. They are not permitted as map keys.
+				keyType = strings.TrimPrefix(keyType, "*")
+				switch *valField.Type {
+				case descriptor.FieldDescriptorProto_TYPE_ENUM:
+					valType = strings.TrimPrefix(valType, "*")
+					g.RecordTypeUse(valField.GetTypeName())
+				case descriptor.FieldDescriptorProto_TYPE_MESSAGE:
+					g.RecordTypeUse(valField.GetTypeName())
+				default:
+					valType = strings.TrimPrefix(valType, "*")
+				}
+
+				typename = fmt.Sprintf("map[%s]%s", keyType, valType)
+				mapFieldTypes[field] = typename // record for the getter generation
+
+				tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", keyTag, valTag)
+			}
+		}
+
+		fieldDeprecated := ""
+		if field.GetOptions().GetDeprecated() {
+			fieldDeprecated = deprecationComment
+		}
+
+		dvalue := g.getterDefault(field, goTypeName)
+		if oneof {
+			tname := goTypeName + "_" + fieldName
+			// It is possible for this to collide with a message or enum
+			// nested in this message. Check for collisions.
+			for {
+				ok := true
+				for _, desc := range message.nested {
+					if CamelCaseSlice(desc.TypeName()) == tname {
+						ok = false
+						break
+					}
+				}
+				for _, enum := range message.enums {
+					if CamelCaseSlice(enum.TypeName()) == tname {
+						ok = false
+						break
+					}
+				}
+				if !ok {
+					tname += "_"
+					continue
+				}
+				break
+			}
+
+			oneofField := oFields[*field.OneofIndex]
+			tag := "protobuf:" + g.goTag(message, field, wiretype)
+			sf := oneofSubField{
+				fieldCommon: fieldCommon{
+					goName:     fieldName,
+					getterName: fieldGetterName,
+					goType:     typename,
+					tags:       tag,
+					protoName:  field.GetName(),
+					fullPath:   fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i),
+				},
+				protoTypeName: field.GetTypeName(),
+				fieldNumber:   int(*field.Number),
+				protoType:     *field.Type,
+				getterDef:     dvalue,
+				protoDef:      field.GetDefaultValue(),
+				oneofTypeName: tname,
+				deprecated:    fieldDeprecated,
+			}
+			oneofField.subFields = append(oneofField.subFields, &sf)
+			g.RecordTypeUse(field.GetTypeName())
+			continue
+		}
+
+		fieldFullPath := fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)
+		c, ok := g.makeComments(fieldFullPath)
+		if ok {
+			c += "\n"
+		}
+		rf := simpleField{
+			fieldCommon: fieldCommon{
+				goName:     fieldName,
+				getterName: fieldGetterName,
+				goType:     typename,
+				tags:       tag,
+				protoName:  field.GetName(),
+				fullPath:   fieldFullPath,
+			},
+			protoTypeName: field.GetTypeName(),
+			protoType:     *field.Type,
+			deprecated:    fieldDeprecated,
+			getterDef:     dvalue,
+			protoDef:      field.GetDefaultValue(),
+			comment:       c,
+		}
+		var pf topLevelField = &rf
+
+		topLevelFields = append(topLevelFields, pf)
+		g.RecordTypeUse(field.GetTypeName())
+	}
+
+	mc := &msgCtx{
+		goName:  goTypeName,
+		message: message,
+	}
+
+	g.generateMessageStruct(mc, topLevelFields)
+	g.P()
+	g.generateCommonMethods(mc)
+	g.P()
+	g.generateDefaultConstants(mc, topLevelFields)
+	g.P()
+	g.generateGetters(mc, topLevelFields)
+	g.P()
+	g.generateSetters(mc, topLevelFields)
+	g.P()
+	g.generateOneofFuncs(mc, topLevelFields)
+	g.P()
+
+	var oneofTypes []string
+	for _, f := range topLevelFields {
+		if of, ok := f.(*oneofField); ok {
+			for _, osf := range of.subFields {
+				oneofTypes = append(oneofTypes, osf.oneofTypeName)
+			}
+		}
+	}
+
+	opts := message.Options
+	ms := &messageSymbol{
+		sym:           goTypeName,
+		hasExtensions: len(message.ExtensionRange) > 0,
+		isMessageSet:  opts != nil && opts.GetMessageSetWireFormat(),
+		oneofTypes:    oneofTypes,
+	}
+	g.file.addExport(message, ms)
+
+	for _, ext := range message.ext {
+		g.generateExtension(ext)
+	}
+
+	fullName := strings.Join(message.TypeName(), ".")
+	if g.file.Package != nil {
+		fullName = *g.file.Package + "." + fullName
+	}
+
+	g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], goTypeName, fullName)
+	// Register types for native map types.
+	for _, k := range mapFieldKeys(mapFieldTypes) {
+		fullName := strings.TrimPrefix(*k.TypeName, ".")
+		g.addInitf("%s.RegisterMapType((%s)(nil), %q)", g.Pkg["proto"], mapFieldTypes[k], fullName)
+	}
+}
+
+type byTypeName []*descriptor.FieldDescriptorProto
+
+func (a byTypeName) Len() int           { return len(a) }
+func (a byTypeName) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byTypeName) Less(i, j int) bool { return *a[i].TypeName < *a[j].TypeName }
+
+// mapFieldKeys returns the keys of m in a consistent order.
+func mapFieldKeys(m map[*descriptor.FieldDescriptorProto]string) []*descriptor.FieldDescriptorProto {
+	keys := make([]*descriptor.FieldDescriptorProto, 0, len(m))
+	for k := range m {
+		keys = append(keys, k)
+	}
+	sort.Sort(byTypeName(keys))
+	return keys
+}
+
+var escapeChars = [256]byte{
+	'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?',
+}
+
+// unescape reverses the "C" escaping that protoc does for default values of bytes fields.
+// It is best-effort: malformed input is effectively ignored, and seemingly invalid escape
+// sequences are passed through, unmodified, into the decoded result.
+func unescape(s string) string {
+	// NB: Sadly, we can't use strconv.Unquote because protoc will escape both
+	// single and double quotes, but strconv.Unquote only allows one or the
+	// other (based on actual surrounding quotes of its input argument).
+
+	var out []byte
+	for len(s) > 0 {
+		// regular character, or too short to be valid escape
+		if s[0] != '\\' || len(s) < 2 {
+			out = append(out, s[0])
+			s = s[1:]
+		} else if c := escapeChars[s[1]]; c != 0 {
+			// escape sequence
+			out = append(out, c)
+			s = s[2:]
+		} else if s[1] == 'x' || s[1] == 'X' {
+			// hex escape, e.g., "\x80"
+			if len(s) < 4 {
+				// too short to be valid
+				out = append(out, s[:2]...)
+				s = s[2:]
+				continue
+			}
+			v, err := strconv.ParseUint(s[2:4], 16, 8)
+			if err != nil {
+				out = append(out, s[:4]...)
+			} else {
+				out = append(out, byte(v))
+			}
+			s = s[4:]
+		} else if '0' <= s[1] && s[1] <= '7' {
+			// octal escape, can vary from 1 to 3 octal digits; e.g., "\0", "\40", or "\164";
+			// so consume up to 2 more bytes or up to end-of-string
+			n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567"))
+			if n > 3 {
+				n = 3
+			}
+			v, err := strconv.ParseUint(s[1:1+n], 8, 8)
+			if err != nil {
+				out = append(out, s[:1+n]...)
+			} else {
+				out = append(out, byte(v))
+			}
+			s = s[1+n:]
+		} else {
+			// bad escape, just propagate the slash as-is
+			out = append(out, s[0])
+			s = s[1:]
+		}
+	}
+
+	return string(out)
+}
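+
+// For example, unescape(`\x41\101\n`) yields "AA\n": a hex escape, an octal
+// escape, and a named escape, respectively.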
+
+func (g *Generator) generateExtension(ext *ExtensionDescriptor) {
+	ccTypeName := ext.DescName()
+
+	extObj := g.ObjectNamed(*ext.Extendee)
+	var extDesc *Descriptor
+	if id, ok := extObj.(*ImportedDescriptor); ok {
+		// This is extending a publicly imported message.
+		// We need the underlying type for goTag.
+		extDesc = id.o.(*Descriptor)
+	} else {
+		extDesc = extObj.(*Descriptor)
+	}
+	extendedType := "*" + g.TypeName(extObj) // always use the original
+	field := ext.FieldDescriptorProto
+	fieldType, wireType := g.GoType(ext.parent, field)
+	tag := g.goTag(extDesc, field, wireType)
+	g.RecordTypeUse(*ext.Extendee)
+	if n := ext.FieldDescriptorProto.TypeName; n != nil {
+		// foreign extension type
+		g.RecordTypeUse(*n)
+	}
+
+	typeName := ext.TypeName()
+
+	// Special case for proto2 message sets: If this extension is extending
+	// proto2.bridge.MessageSet, and its final name component is "message_set_extension",
+	// then drop that last component.
+	//
+	// TODO: This should be implemented in the text formatter rather than the generator.
+	// In addition, the situation for when to apply this special case is implemented
+	// differently in other languages:
+	// https://github.com/google/protobuf/blob/aff10976/src/google/protobuf/text_format.cc#L1560
+	if extDesc.GetOptions().GetMessageSetWireFormat() && typeName[len(typeName)-1] == "message_set_extension" {
+		typeName = typeName[:len(typeName)-1]
+	}
+
+	// For text formatting, the package must be exactly what the .proto file declares,
+	// ignoring overrides such as the go_package option, and with no dot/underscore mapping.
+	extName := strings.Join(typeName, ".")
+	if g.file.Package != nil {
+		extName = *g.file.Package + "." + extName
+	}
+
+	g.P("var ", ccTypeName, " = &", g.Pkg["proto"], ".ExtensionDesc{")
+	g.P("ExtendedType: (", extendedType, ")(nil),")
+	g.P("ExtensionType: (", fieldType, ")(nil),")
+	g.P("Field: ", field.Number, ",")
+	g.P(`Name: "`, extName, `",`)
+	g.P("Tag: ", tag, ",")
+	g.P(`Filename: "`, g.file.GetName(), `",`)
+
+	g.P("}")
+	g.P()
+
+	g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName())
+
+	g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""})
+}
+
+func (g *Generator) generateInitFunction() {
+	if len(g.init) == 0 {
+		return
+	}
+	g.P("func init() {")
+	for _, l := range g.init {
+		g.P(l)
+	}
+	g.P("}")
+	g.init = nil
+}
+
+func (g *Generator) generateFileDescriptor(file *FileDescriptor) {
+	// Make a copy and trim source_code_info data.
+	// TODO: Trim this more when we know exactly what we need.
+	pb := proto.Clone(file.FileDescriptorProto).(*descriptor.FileDescriptorProto)
+	pb.SourceCodeInfo = nil
+
+	b, err := proto.Marshal(pb)
+	if err != nil {
+		g.Fail(err.Error())
+	}
+
+	var buf bytes.Buffer
+	w, _ := gzip.NewWriterLevel(&buf, gzip.BestCompression)
+	w.Write(b)
+	w.Close()
+	b = buf.Bytes()
+
+	v := file.VarName()
+	g.P()
+	g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }")
+	g.P("var ", v, " = []byte{")
+	g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto")
+	for len(b) > 0 {
+		n := 16
+		if n > len(b) {
+			n = len(b)
+		}
+
+		s := ""
+		for _, c := range b[:n] {
+			s += fmt.Sprintf("0x%02x,", c)
+		}
+		g.P(s)
+
+		b = b[n:]
+	}
+	g.P("}")
+}
+
+func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) {
+	// We always print the full (proto-world) package name here.
+	pkg := enum.File().GetPackage()
+	if pkg != "" {
+		pkg += "."
+	}
+	// The full type name
+	typeName := enum.TypeName()
+	// The full type name, CamelCased.
+	ccTypeName := CamelCaseSlice(typeName)
+	g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName)
+}
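+
+// For a hypothetical enum "E" in proto package "my.pkg", the registered init
+// statement looks roughly like:
+//
+//	proto.RegisterEnum("my.pkg.E", E_name, E_value)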
+
+// And now lots of helper functions.
+
+// Is c an ASCII lower-case letter?
+func isASCIILower(c byte) bool {
+	return 'a' <= c && c <= 'z'
+}
+
+// Is c an ASCII digit?
+func isASCIIDigit(c byte) bool {
+	return '0' <= c && c <= '9'
+}
+
+// CamelCase returns the CamelCased name.
+// If there is an interior underscore followed by a lower case letter,
+// drop the underscore and convert the letter to upper case.
+// There is a remote possibility of this rewrite causing a name collision,
+// but it's so remote we're prepared to pretend it's nonexistent; since the
+// C++ generator lowercases names, it's extremely unlikely that two fields
+// would differ only in capitalization.
+// In short, _my_field_name_2 becomes XMyFieldName_2.
+func CamelCase(s string) string {
+	if s == "" {
+		return ""
+	}
+	t := make([]byte, 0, 32)
+	i := 0
+	if s[0] == '_' {
+		// Need a capital letter; drop the '_'.
+		t = append(t, 'X')
+		i++
+	}
+	// Invariant: if the next letter is lower case, it must be converted
+	// to upper case.
+	// That is, we process a word at a time, where words are marked by _ or
+	// upper case letter. Digits are treated as words.
+	for ; i < len(s); i++ {
+		c := s[i]
+		if c == '_' && i+1 < len(s) && isASCIILower(s[i+1]) {
+			continue // Skip the underscore in s.
+		}
+		if isASCIIDigit(c) {
+			t = append(t, c)
+			continue
+		}
+		// Assume we have a letter now; if not, it's a bogus identifier.
+		// The next word is a sequence of characters that must start upper case.
+		if isASCIILower(c) {
+			c ^= ' ' // Make it a capital letter.
+		}
+		t = append(t, c) // Guaranteed not lower case.
+		// Accept lower case sequence that follows.
+		for i+1 < len(s) && isASCIILower(s[i+1]) {
+			i++
+			t = append(t, s[i])
+		}
+	}
+	return string(t)
+}
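+
+// Illustrative conversions: CamelCase("foo_bar") == "FooBar" and
+// CamelCase("foo__bar") == "Foo_Bar" (the second underscore is kept because
+// it is not followed by a lower-case letter).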
+
+// CamelCaseSlice is like CamelCase, but the argument is a slice of strings to
+// be joined with "_".
+func CamelCaseSlice(elem []string) string { return CamelCase(strings.Join(elem, "_")) }
+
+// dottedSlice turns a sliced name into a dotted name.
+func dottedSlice(elem []string) string { return strings.Join(elem, ".") }
+
+// Is this field optional?
+func isOptional(field *descriptor.FieldDescriptorProto) bool {
+	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+// Is this field required?
+func isRequired(field *descriptor.FieldDescriptorProto) bool {
+	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REQUIRED
+}
+
+// Is this field repeated?
+func isRepeated(field *descriptor.FieldDescriptorProto) bool {
+	return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED
+}
+
+// Is this field a scalar numeric type?
+func isScalar(field *descriptor.FieldDescriptorProto) bool {
+	if field.Type == nil {
+		return false
+	}
+	switch *field.Type {
+	case descriptor.FieldDescriptorProto_TYPE_DOUBLE,
+		descriptor.FieldDescriptorProto_TYPE_FLOAT,
+		descriptor.FieldDescriptorProto_TYPE_INT64,
+		descriptor.FieldDescriptorProto_TYPE_UINT64,
+		descriptor.FieldDescriptorProto_TYPE_INT32,
+		descriptor.FieldDescriptorProto_TYPE_FIXED64,
+		descriptor.FieldDescriptorProto_TYPE_FIXED32,
+		descriptor.FieldDescriptorProto_TYPE_BOOL,
+		descriptor.FieldDescriptorProto_TYPE_UINT32,
+		descriptor.FieldDescriptorProto_TYPE_ENUM,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED32,
+		descriptor.FieldDescriptorProto_TYPE_SFIXED64,
+		descriptor.FieldDescriptorProto_TYPE_SINT32,
+		descriptor.FieldDescriptorProto_TYPE_SINT64:
+		return true
+	default:
+		return false
+	}
+}
+
+// badToUnderscore is the mapping function used to generate Go names from package names,
+// which can be dotted in the input .proto file.  It replaces non-identifier characters such as
+// dot or dash with underscore.
+func badToUnderscore(r rune) rune {
+	if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
+		return r
+	}
+	return '_'
+}
+
+// baseName returns the last path element of the name, with the last dotted suffix removed.
+func baseName(name string) string {
+	// First, find the last element
+	if i := strings.LastIndex(name, "/"); i >= 0 {
+		name = name[i+1:]
+	}
+	// Now drop the suffix
+	if i := strings.LastIndex(name, "."); i >= 0 {
+		name = name[0:i]
+	}
+	return name
+}
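+
+// For example, baseName("path/to/file.proto") returns "file": the directory
+// prefix and the trailing ".proto" suffix are both dropped.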
+
+// The SourceCodeInfo message describes the location of elements of a parsed
+// .proto file by way of a "path", which is a sequence of integers that
+// describe the route from a FileDescriptorProto to the relevant submessage.
+// The path alternates between a field number of a repeated field, and an index
+// into that repeated field. The constants below define the field numbers that
+// are used.
+//
+// See descriptor.proto for more information about this.
+const (
+	// tag numbers in FileDescriptorProto
+	packagePath = 2 // package
+	messagePath = 4 // message_type
+	enumPath    = 5 // enum_type
+	// tag numbers in DescriptorProto
+	messageFieldPath   = 2 // field
+	messageMessagePath = 3 // nested_type
+	messageEnumPath    = 4 // enum_type
+	messageOneofPath   = 8 // oneof_decl
+	// tag numbers in EnumDescriptorProto
+	enumValuePath = 2 // value
+)
+
+var supportTypeAliases bool
+
+func init() {
+	for _, tag := range build.Default.ReleaseTags {
+		if tag == "go1.9" {
+			supportTypeAliases = true
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go
new file mode 100644
index 0000000..a9b6103
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/generator/internal/remap/remap.go
@@ -0,0 +1,117 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package remap handles tracking the locations of Go tokens in a source text
+across a rewrite by the Go formatter.
+*/
+package remap
+
+import (
+	"fmt"
+	"go/scanner"
+	"go/token"
+)
+
+// A Location represents a span of byte offsets in the source text.
+type Location struct {
+	Pos, End int // End is exclusive
+}
+
+// A Map represents a mapping between token locations in an input source text
+// and locations in the corresponding output text.
+type Map map[Location]Location
+
+// Find reports whether the specified span is recorded by m, and if so returns
+// the new location it was mapped to. If the input span was not found, the
+// returned location is the same as the input.
+func (m Map) Find(pos, end int) (Location, bool) {
+	key := Location{
+		Pos: pos,
+		End: end,
+	}
+	if loc, ok := m[key]; ok {
+		return loc, true
+	}
+	return key, false
+}
+
+func (m Map) add(opos, oend, npos, nend int) {
+	m[Location{Pos: opos, End: oend}] = Location{Pos: npos, End: nend}
+}
+
+// Compute constructs a location mapping from input to output.  An error is
+// reported if any of the tokens of output cannot be mapped.
+func Compute(input, output []byte) (Map, error) {
+	itok := tokenize(input)
+	otok := tokenize(output)
+	if len(itok) != len(otok) {
+		return nil, fmt.Errorf("wrong number of tokens, %d ≠ %d", len(itok), len(otok))
+	}
+	m := make(Map)
+	for i, ti := range itok {
+		to := otok[i]
+		if ti.Token != to.Token {
+			return nil, fmt.Errorf("token %d type mismatch: %s ≠ %s", i+1, ti, to)
+		}
+		m.add(ti.pos, ti.end, to.pos, to.end)
+	}
+	return m, nil
+}
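+
+// A typical use (with hypothetical inputs rawSrc and fmtSrc, where fmtSrc is
+// the gofmt output for rawSrc) looks roughly like:
+//
+//	m, err := remap.Compute(rawSrc, fmtSrc)
+//	if err != nil {
+//		// the token streams differ; no mapping is available
+//	} else if loc, ok := m.Find(pos, end); ok {
+//		// loc.Pos and loc.End are byte offsets into fmtSrc
+//	}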
+
+// tokinfo records the span and type of a source token.
+type tokinfo struct {
+	pos, end int
+	token.Token
+}
+
+func tokenize(src []byte) []tokinfo {
+	fs := token.NewFileSet()
+	var s scanner.Scanner
+	s.Init(fs.AddFile("src", fs.Base(), len(src)), src, nil, scanner.ScanComments)
+	var info []tokinfo
+	for {
+		pos, next, lit := s.Scan()
+		switch next {
+		case token.SEMICOLON:
+			continue
+		}
+		info = append(info, tokinfo{
+			pos:   int(pos - 1),
+			end:   int(pos + token.Pos(len(lit)) - 1),
+			Token: next,
+		})
+		if next == token.EOF {
+			break
+		}
+	}
+	return info
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
new file mode 100644
index 0000000..61bfc10
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.go
@@ -0,0 +1,369 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/compiler/plugin.proto
+
+/*
+Package plugin_go is a generated protocol buffer package.
+
+It is generated from these files:
+	google/protobuf/compiler/plugin.proto
+
+It has these top-level messages:
+	Version
+	CodeGeneratorRequest
+	CodeGeneratorResponse
+*/
+package plugin_go
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// The version number of protocol compiler.
+type Version struct {
+	Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"`
+	Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"`
+	Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"`
+	// A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+	// be empty for mainline stable releases.
+	Suffix               *string  `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Version) Reset()                    { *m = Version{} }
+func (m *Version) String() string            { return proto.CompactTextString(m) }
+func (*Version) ProtoMessage()               {}
+func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+func (m *Version) Unmarshal(b []byte) error {
+	return xxx_messageInfo_Version.Unmarshal(m, b)
+}
+func (m *Version) Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Version.Marshal(b, m, deterministic)
+}
+func (dst *Version) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Version.Merge(dst, src)
+}
+func (m *Version) XXX_Size() int {
+	return xxx_messageInfo_Version.Size(m)
+}
+func (m *Version) XXX_DiscardUnknown() {
+	xxx_messageInfo_Version.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Version proto.InternalMessageInfo
+
+func (m *Version) GetMajor() int32 {
+	if m != nil && m.Major != nil {
+		return *m.Major
+	}
+	return 0
+}
+
+func (m *Version) GetMinor() int32 {
+	if m != nil && m.Minor != nil {
+		return *m.Minor
+	}
+	return 0
+}
+
+func (m *Version) GetPatch() int32 {
+	if m != nil && m.Patch != nil {
+		return *m.Patch
+	}
+	return 0
+}
+
+func (m *Version) GetSuffix() string {
+	if m != nil && m.Suffix != nil {
+		return *m.Suffix
+	}
+	return ""
+}
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+type CodeGeneratorRequest struct {
+	// The .proto files that were explicitly listed on the command-line.  The
+	// code generator should generate code only for these files.  Each file's
+	// descriptor will be included in proto_file, below.
+	FileToGenerate []string `protobuf:"bytes,1,rep,name=file_to_generate,json=fileToGenerate" json:"file_to_generate,omitempty"`
+	// The generator parameter passed on the command-line.
+	Parameter *string `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
+	// FileDescriptorProtos for all files in files_to_generate and everything
+	// they import.  The files will appear in topological order, so each file
+	// appears before any file that imports it.
+	//
+	// protoc guarantees that all proto_files will be written after
+	// the fields above, even though this is not technically guaranteed by the
+	// protobuf wire format.  This theoretically could allow a plugin to stream
+	// in the FileDescriptorProtos and handle them one by one rather than read
+	// the entire set into memory at once.  However, as of this writing, this
+	// is not similarly optimized on protoc's end -- it will store all fields in
+	// memory at once before sending them to the plugin.
+	//
+	// Type names of fields and extensions in the FileDescriptorProto are always
+	// fully qualified.
+	ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"`
+	// The version number of protocol compiler.
+	CompilerVersion      *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *CodeGeneratorRequest) Reset()                    { *m = CodeGeneratorRequest{} }
+func (m *CodeGeneratorRequest) String() string            { return proto.CompactTextString(m) }
+func (*CodeGeneratorRequest) ProtoMessage()               {}
+func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+func (m *CodeGeneratorRequest) Unmarshal(b []byte) error {
+	return xxx_messageInfo_CodeGeneratorRequest.Unmarshal(m, b)
+}
+func (m *CodeGeneratorRequest) Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CodeGeneratorRequest.Marshal(b, m, deterministic)
+}
+func (dst *CodeGeneratorRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CodeGeneratorRequest.Merge(dst, src)
+}
+func (m *CodeGeneratorRequest) XXX_Size() int {
+	return xxx_messageInfo_CodeGeneratorRequest.Size(m)
+}
+func (m *CodeGeneratorRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CodeGeneratorRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CodeGeneratorRequest proto.InternalMessageInfo
+
+func (m *CodeGeneratorRequest) GetFileToGenerate() []string {
+	if m != nil {
+		return m.FileToGenerate
+	}
+	return nil
+}
+
+func (m *CodeGeneratorRequest) GetParameter() string {
+	if m != nil && m.Parameter != nil {
+		return *m.Parameter
+	}
+	return ""
+}
+
+func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorProto {
+	if m != nil {
+		return m.ProtoFile
+	}
+	return nil
+}
+
+func (m *CodeGeneratorRequest) GetCompilerVersion() *Version {
+	if m != nil {
+		return m.CompilerVersion
+	}
+	return nil
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+type CodeGeneratorResponse struct {
+	// Error message.  If non-empty, code generation failed.  The plugin process
+	// should exit with status code zero even if it reports an error in this way.
+	//
+	// This should be used to indicate errors in .proto files which prevent the
+	// code generator from generating correct code.  Errors which indicate a
+	// problem in protoc itself -- such as the input CodeGeneratorRequest being
+	// unparseable -- should be reported by writing a message to stderr and
+	// exiting with a non-zero status code.
+	Error                *string                       `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+	File                 []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                      `json:"-"`
+	XXX_unrecognized     []byte                        `json:"-"`
+	XXX_sizecache        int32                         `json:"-"`
+}
+
+func (m *CodeGeneratorResponse) Reset()                    { *m = CodeGeneratorResponse{} }
+func (m *CodeGeneratorResponse) String() string            { return proto.CompactTextString(m) }
+func (*CodeGeneratorResponse) ProtoMessage()               {}
+func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+func (m *CodeGeneratorResponse) Unmarshal(b []byte) error {
+	return xxx_messageInfo_CodeGeneratorResponse.Unmarshal(m, b)
+}
+func (m *CodeGeneratorResponse) Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CodeGeneratorResponse.Marshal(b, m, deterministic)
+}
+func (dst *CodeGeneratorResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CodeGeneratorResponse.Merge(dst, src)
+}
+func (m *CodeGeneratorResponse) XXX_Size() int {
+	return xxx_messageInfo_CodeGeneratorResponse.Size(m)
+}
+func (m *CodeGeneratorResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CodeGeneratorResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CodeGeneratorResponse proto.InternalMessageInfo
+
+func (m *CodeGeneratorResponse) GetError() string {
+	if m != nil && m.Error != nil {
+		return *m.Error
+	}
+	return ""
+}
+
+func (m *CodeGeneratorResponse) GetFile() []*CodeGeneratorResponse_File {
+	if m != nil {
+		return m.File
+	}
+	return nil
+}
+
+// Represents a single generated file.
+type CodeGeneratorResponse_File struct {
+	// The file name, relative to the output directory.  The name must not
+	// contain "." or ".." components and must be relative, not be absolute (so,
+	// the file cannot lie outside the output directory).  "/" must be used as
+	// the path separator, not "\".
+	//
+	// If the name is omitted, the content will be appended to the previous
+	// file.  This allows the generator to break large files into small chunks,
+	// and allows the generated text to be streamed back to protoc so that large
+	// files need not reside completely in memory at one time.  Note that as of
+	// this writing protoc does not optimize for this -- it will read the entire
+	// CodeGeneratorResponse before writing files to disk.
+	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// If non-empty, indicates that the named file should already exist, and the
+	// content here is to be inserted into that file at a defined insertion
+	// point.  This feature allows a code generator to extend the output
+	// produced by another code generator.  The original generator may provide
+	// insertion points by placing special annotations in the file that look
+	// like:
+	//   @@protoc_insertion_point(NAME)
+	// The annotation can have arbitrary text before and after it on the line,
+	// which allows it to be placed in a comment.  NAME should be replaced with
+	// an identifier naming the point -- this is what other generators will use
+	// as the insertion_point.  Code inserted at this point will be placed
+	// immediately above the line containing the insertion point (thus multiple
+	// insertions to the same point will come out in the order they were added).
+	// The double-@ is intended to make it unlikely that the generated code
+	// could contain things that look like insertion points by accident.
+	//
+	// For example, the C++ code generator places the following line in the
+	// .pb.h files that it generates:
+	//   // @@protoc_insertion_point(namespace_scope)
+	// This line appears within the scope of the file's package namespace, but
+	// outside of any particular class.  Another plugin can then specify the
+	// insertion_point "namespace_scope" to generate additional classes or
+	// other declarations that should be placed in this scope.
+	//
+	// Note that if the line containing the insertion point begins with
+	// whitespace, the same whitespace will be added to every line of the
+	// inserted text.  This is useful for languages like Python, where
+	// indentation matters.  In these languages, the insertion point comment
+	// should be indented the same amount as any inserted code will need to be
+	// in order to work correctly in that context.
+	//
+	// The code generator that generates the initial file and the one which
+	// inserts into it must both run as part of a single invocation of protoc.
+	// Code generators are executed in the order in which they appear on the
+	// command line.
+	//
+	// If |insertion_point| is present, |name| must also be present.
+	InsertionPoint *string `protobuf:"bytes,2,opt,name=insertion_point,json=insertionPoint" json:"insertion_point,omitempty"`
+	// The file contents.
+	Content              *string  `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *CodeGeneratorResponse_File) Reset()                    { *m = CodeGeneratorResponse_File{} }
+func (m *CodeGeneratorResponse_File) String() string            { return proto.CompactTextString(m) }
+func (*CodeGeneratorResponse_File) ProtoMessage()               {}
+func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} }
+func (m *CodeGeneratorResponse_File) Unmarshal(b []byte) error {
+	return xxx_messageInfo_CodeGeneratorResponse_File.Unmarshal(m, b)
+}
+func (m *CodeGeneratorResponse_File) Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CodeGeneratorResponse_File.Marshal(b, m, deterministic)
+}
+func (dst *CodeGeneratorResponse_File) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CodeGeneratorResponse_File.Merge(dst, src)
+}
+func (m *CodeGeneratorResponse_File) XXX_Size() int {
+	return xxx_messageInfo_CodeGeneratorResponse_File.Size(m)
+}
+func (m *CodeGeneratorResponse_File) XXX_DiscardUnknown() {
+	xxx_messageInfo_CodeGeneratorResponse_File.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CodeGeneratorResponse_File proto.InternalMessageInfo
+
+func (m *CodeGeneratorResponse_File) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *CodeGeneratorResponse_File) GetInsertionPoint() string {
+	if m != nil && m.InsertionPoint != nil {
+		return *m.InsertionPoint
+	}
+	return ""
+}
+
+func (m *CodeGeneratorResponse_File) GetContent() string {
+	if m != nil && m.Content != nil {
+		return *m.Content
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version")
+	proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest")
+	proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse")
+	proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File")
+}
+
+func init() { proto.RegisterFile("google/protobuf/compiler/plugin.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+	// 417 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcf, 0x6a, 0x14, 0x41,
+	0x10, 0xc6, 0x19, 0x77, 0x63, 0x98, 0x8a, 0x64, 0x43, 0x13, 0xa5, 0x09, 0x39, 0x8c, 0x8b, 0xe2,
+	0x5c, 0x32, 0x0b, 0xc1, 0x8b, 0x78, 0x4b, 0x44, 0x3d, 0x78, 0x58, 0x1a, 0xf1, 0x20, 0xc8, 0x30,
+	0x99, 0xd4, 0x74, 0x5a, 0x66, 0xba, 0xc6, 0xee, 0x1e, 0xf1, 0x49, 0x7d, 0x0f, 0xdf, 0x40, 0xfa,
+	0xcf, 0x24, 0xb2, 0xb8, 0xa7, 0xee, 0xef, 0x57, 0xd5, 0xd5, 0x55, 0x1f, 0x05, 0x2f, 0x25, 0x91,
+	0xec, 0x71, 0x33, 0x1a, 0x72, 0x74, 0x33, 0x75, 0x9b, 0x96, 0x86, 0x51, 0xf5, 0x68, 0x36, 0x63,
+	0x3f, 0x49, 0xa5, 0xab, 0x10, 0x60, 0x3c, 0xa6, 0x55, 0x73, 0x5a, 0x35, 0xa7, 0x9d, 0x15, 0xbb,
+	0x05, 0x6e, 0xd1, 0xb6, 0x46, 0x8d, 0x8e, 0x4c, 0xcc, 0x5e, 0xb7, 0x70, 0xf8, 0x05, 0x8d, 0x55,
+	0xa4, 0xd9, 0x29, 0x1c, 0x0c, 0xcd, 0x77, 0x32, 0x3c, 0x2b, 0xb2, 0xf2, 0x40, 0x44, 0x11, 0xa8,
+	0xd2, 0x64, 0xf8, 0xa3, 0x44, 0xbd, 0xf0, 0x74, 0x6c, 0x5c, 0x7b, 0xc7, 0x17, 0x91, 0x06, 0xc1,
+	0x9e, 0xc1, 0x63, 0x3b, 0x75, 0x9d, 0xfa, 0xc5, 0x97, 0x45, 0x56, 0xe6, 0x22, 0xa9, 0xf5, 0x9f,
+	0x0c, 0x4e, 0xaf, 0xe9, 0x16, 0x3f, 0xa0, 0x46, 0xd3, 0x38, 0x32, 0x02, 0x7f, 0x4c, 0x68, 0x1d,
+	0x2b, 0xe1, 0xa4, 0x53, 0x3d, 0xd6, 0x8e, 0x6a, 0x19, 0x63, 0xc8, 0xb3, 0x62, 0x51, 0xe6, 0xe2,
+	0xd8, 0xf3, 0xcf, 0x94, 0x5e, 0x20, 0x3b, 0x87, 0x7c, 0x6c, 0x4c, 0x33, 0xa0, 0xc3, 0xd8, 0x4a,
+	0x2e, 0x1e, 0x00, 0xbb, 0x06, 0x08, 0xe3, 0xd4, 0xfe, 0x15, 0x5f, 0x15, 0x8b, 0xf2, 0xe8, 0xf2,
+	0x45, 0xb5, 0x6b, 0xcb, 0x7b, 0xd5, 0xe3, 0xbb, 0x7b, 0x03, 0xb6, 0x1e, 0x8b, 0x3c, 0x44, 0x7d,
+	0x84, 0x7d, 0x82, 0x93, 0xd9, 0xb8, 0xfa, 0x67, 0xf4, 0x24, 0x8c, 0x77, 0x74, 0xf9, 0xbc, 0xda,
+	0xe7, 0x70, 0x95, 0xcc, 0x13, 0xab, 0x99, 0x24, 0xb0, 0xfe, 0x9d, 0xc1, 0xd3, 0x9d, 0x99, 0xed,
+	0x48, 0xda, 0xa2, 0xf7, 0x0e, 0x8d, 0x49, 0x3e, 0xe7, 0x22, 0x0a, 0xf6, 0x11, 0x96, 0xff, 0x34,
+	0xff, 0x7a, 0xff, 0x8f, 0xff, 0x2d, 0x1a, 0x66, 0x13, 0xa1, 0xc2, 0xd9, 0x37, 0x58, 0x86, 0x79,
+	0x18, 0x2c, 0x75, 0x33, 0x60, 0xfa, 0x26, 0xdc, 0xd9, 0x2b, 0x58, 0x29, 0x6d, 0xd1, 0x38, 0x45,
+	0xba, 0x1e, 0x49, 0x69, 0x97, 0xcc, 0x3c, 0xbe, 0xc7, 0x5b, 0x4f, 0x19, 0x87, 0xc3, 0x96, 0xb4,
+	0x43, 0xed, 0xf8, 0x2a, 0x24, 0xcc, 0xf2, 0x4a, 0xc2, 0x79, 0x4b, 0xc3, 0xde, 0xfe, 0xae, 0x9e,
+	0x6c, 0xc3, 0x6e, 0x06, 0x7b, 0xed, 0xd7, 0x37, 0x52, 0xb9, 0xbb, 0xe9, 0xc6, 0x87, 0x37, 0x92,
+	0xfa, 0x46, 0xcb, 0x87, 0x65, 0x0c, 0x97, 0xf6, 0x42, 0xa2, 0xbe, 0x90, 0x94, 0x56, 0xfa, 0x6d,
+	0x3c, 0x6a, 0x49, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x15, 0x40, 0xc5, 0xfe, 0x02, 0x00,
+	0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
new file mode 100644
index 0000000..8953d0f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.pb.golden
@@ -0,0 +1,83 @@
+// Code generated by protoc-gen-go.
+// source: google/protobuf/compiler/plugin.proto
+// DO NOT EDIT!
+
+package google_protobuf_compiler
+
+import proto "github.com/golang/protobuf/proto"
+import "math"
+import google_protobuf "github.com/golang/protobuf/protoc-gen-go/descriptor"
+
+// Reference proto and math imports to suppress error if they are not otherwise used.
+var _ = proto.GetString
+var _ = math.Inf
+
+type CodeGeneratorRequest struct {
+	FileToGenerate   []string                               `protobuf:"bytes,1,rep,name=file_to_generate" json:"file_to_generate,omitempty"`
+	Parameter        *string                                `protobuf:"bytes,2,opt,name=parameter" json:"parameter,omitempty"`
+	ProtoFile        []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file" json:"proto_file,omitempty"`
+	XXX_unrecognized []byte                                 `json:"-"`
+}
+
+func (this *CodeGeneratorRequest) Reset()         { *this = CodeGeneratorRequest{} }
+func (this *CodeGeneratorRequest) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorRequest) ProtoMessage()       {}
+
+func (this *CodeGeneratorRequest) GetParameter() string {
+	if this != nil && this.Parameter != nil {
+		return *this.Parameter
+	}
+	return ""
+}
+
+type CodeGeneratorResponse struct {
+	Error            *string                       `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+	File             []*CodeGeneratorResponse_File `protobuf:"bytes,15,rep,name=file" json:"file,omitempty"`
+	XXX_unrecognized []byte                        `json:"-"`
+}
+
+func (this *CodeGeneratorResponse) Reset()         { *this = CodeGeneratorResponse{} }
+func (this *CodeGeneratorResponse) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorResponse) ProtoMessage()       {}
+
+func (this *CodeGeneratorResponse) GetError() string {
+	if this != nil && this.Error != nil {
+		return *this.Error
+	}
+	return ""
+}
+
+type CodeGeneratorResponse_File struct {
+	Name             *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	InsertionPoint   *string `protobuf:"bytes,2,opt,name=insertion_point" json:"insertion_point,omitempty"`
+	Content          *string `protobuf:"bytes,15,opt,name=content" json:"content,omitempty"`
+	XXX_unrecognized []byte  `json:"-"`
+}
+
+func (this *CodeGeneratorResponse_File) Reset()         { *this = CodeGeneratorResponse_File{} }
+func (this *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(this) }
+func (*CodeGeneratorResponse_File) ProtoMessage()       {}
+
+func (this *CodeGeneratorResponse_File) GetName() string {
+	if this != nil && this.Name != nil {
+		return *this.Name
+	}
+	return ""
+}
+
+func (this *CodeGeneratorResponse_File) GetInsertionPoint() string {
+	if this != nil && this.InsertionPoint != nil {
+		return *this.InsertionPoint
+	}
+	return ""
+}
+
+func (this *CodeGeneratorResponse_File) GetContent() string {
+	if this != nil && this.Content != nil {
+		return *this.Content
+	}
+	return ""
+}
+
+func init() {
+}
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
new file mode 100644
index 0000000..5b55745
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/plugin/plugin.proto
@@ -0,0 +1,167 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+//
+// WARNING:  The plugin interface is currently EXPERIMENTAL and is subject to
+//   change.
+//
+// protoc (aka the Protocol Compiler) can be extended via plugins.  A plugin is
+// just a program that reads a CodeGeneratorRequest from stdin and writes a
+// CodeGeneratorResponse to stdout.
+//
+// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead
+// of dealing with the raw protocol defined here.
+//
+// A plugin executable needs only to be placed somewhere in the path.  The
+// plugin should be named "protoc-gen-$NAME", and will then be used when the
+// flag "--${NAME}_out" is passed to protoc.
+
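+// A minimal plugin sketch in Go, assuming the generated plugin_go and proto
+// packages (error handling elided for brevity):
+//
+//	data, _ := ioutil.ReadAll(os.Stdin)
+//	req := &plugin_go.CodeGeneratorRequest{}
+//	proto.Unmarshal(data, req)
+//	resp := &plugin_go.CodeGeneratorResponse{}
+//	// ... populate resp.File from req ...
+//	out, _ := proto.Marshal(resp)
+//	os.Stdout.Write(out)
+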
+syntax = "proto2";
+package google.protobuf.compiler;
+option java_package = "com.google.protobuf.compiler";
+option java_outer_classname = "PluginProtos";
+
+option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";
+
+import "google/protobuf/descriptor.proto";
+
+// The version number of protocol compiler.
+message Version {
+  optional int32 major = 1;
+  optional int32 minor = 2;
+  optional int32 patch = 3;
+  // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should
+  // be empty for mainline stable releases.
+  optional string suffix = 4;
+}
+
+// An encoded CodeGeneratorRequest is written to the plugin's stdin.
+message CodeGeneratorRequest {
+  // The .proto files that were explicitly listed on the command-line.  The
+  // code generator should generate code only for these files.  Each file's
+  // descriptor will be included in proto_file, below.
+  repeated string file_to_generate = 1;
+
+  // The generator parameter passed on the command-line.
+  optional string parameter = 2;
+
+  // FileDescriptorProtos for all files in files_to_generate and everything
+  // they import.  The files will appear in topological order, so each file
+  // appears before any file that imports it.
+  //
+  // protoc guarantees that all proto_files will be written after
+  // the fields above, even though this is not technically guaranteed by the
+  // protobuf wire format.  This theoretically could allow a plugin to stream
+  // in the FileDescriptorProtos and handle them one by one rather than read
+  // the entire set into memory at once.  However, as of this writing, this
+  // is not similarly optimized on protoc's end -- it will store all fields in
+  // memory at once before sending them to the plugin.
+  //
+  // Type names of fields and extensions in the FileDescriptorProto are always
+  // fully qualified.
+  repeated FileDescriptorProto proto_file = 15;
+
+  // The version number of protocol compiler.
+  optional Version compiler_version = 3;
+}
+
+// The plugin writes an encoded CodeGeneratorResponse to stdout.
+message CodeGeneratorResponse {
+  // Error message.  If non-empty, code generation failed.  The plugin process
+  // should exit with status code zero even if it reports an error in this way.
+  //
+  // This should be used to indicate errors in .proto files which prevent the
+  // code generator from generating correct code.  Errors which indicate a
+  // problem in protoc itself -- such as the input CodeGeneratorRequest being
+  // unparseable -- should be reported by writing a message to stderr and
+  // exiting with a non-zero status code.
+  optional string error = 1;
+
+  // Represents a single generated file.
+  message File {
+    // The file name, relative to the output directory.  The name must not
+    // contain "." or ".." components and must be relative, not be absolute (so,
+    // the file cannot lie outside the output directory).  "/" must be used as
+    // the path separator, not "\".
+    //
+    // If the name is omitted, the content will be appended to the previous
+    // file.  This allows the generator to break large files into small chunks,
+    // and allows the generated text to be streamed back to protoc so that large
+    // files need not reside completely in memory at one time.  Note that as of
+    // this writing protoc does not optimize for this -- it will read the entire
+    // CodeGeneratorResponse before writing files to disk.
+    optional string name = 1;
+
+    // If non-empty, indicates that the named file should already exist, and the
+    // content here is to be inserted into that file at a defined insertion
+    // point.  This feature allows a code generator to extend the output
+    // produced by another code generator.  The original generator may provide
+    // insertion points by placing special annotations in the file that look
+    // like:
+    //   @@protoc_insertion_point(NAME)
+    // The annotation can have arbitrary text before and after it on the line,
+    // which allows it to be placed in a comment.  NAME should be replaced with
+    // an identifier naming the point -- this is what other generators will use
+    // as the insertion_point.  Code inserted at this point will be placed
+    // immediately above the line containing the insertion point (thus multiple
+    // insertions to the same point will come out in the order they were added).
+    // The double-@ is intended to make it unlikely that the generated code
+    // could contain things that look like insertion points by accident.
+    //
+    // For example, the C++ code generator places the following line in the
+    // .pb.h files that it generates:
+    //   // @@protoc_insertion_point(namespace_scope)
+    // This line appears within the scope of the file's package namespace, but
+    // outside of any particular class.  Another plugin can then specify the
+    // insertion_point "namespace_scope" to generate additional classes or
+    // other declarations that should be placed in this scope.
+    //
+    // Note that if the line containing the insertion point begins with
+    // whitespace, the same whitespace will be added to every line of the
+    // inserted text.  This is useful for languages like Python, where
+    // indentation matters.  In these languages, the insertion point comment
+    // should be indented the same amount as any inserted code will need to be
+    // in order to work correctly in that context.
+    //
+    // The code generator that generates the initial file and the one which
+    // inserts into it must both run as part of a single invocation of protoc.
+    // Code generators are executed in the order in which they appear on the
+    // command line.
+    //
+    // If |insertion_point| is present, |name| must also be present.
+    optional string insertion_point = 2;
+
+    // The file contents.
+    optional string content = 15;
+  }
+  repeated File file = 15;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 0000000..70276e8
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,141 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements functions to marshal proto.Message to/from
+// google.protobuf.Any message.
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes/any"
+)
+
+const googleApis = "type.googleapis.com/"
+
+// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
+//
+// Note that regular type assertions should be done using the Is
+// function. AnyMessageName is provided for less common use cases like filtering a
+// sequence of Any messages based on a set of allowed message type names.
+func AnyMessageName(any *any.Any) (string, error) {
+	if any == nil {
+		return "", fmt.Errorf("message is nil")
+	}
+	slash := strings.LastIndex(any.TypeUrl, "/")
+	if slash < 0 {
+		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+	}
+	return any.TypeUrl[slash+1:], nil
+}
+
+// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
+func MarshalAny(pb proto.Message) (*any.Any, error) {
+	value, err := proto.Marshal(pb)
+	if err != nil {
+		return nil, err
+	}
+	return &any.Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in a google.protobuf.Any
+// message. The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//
+//   var x ptypes.DynamicAny
+//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+//   fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct {
+	proto.Message
+}
+
+// Empty returns a new proto.Message of the type specified in a
+// google.protobuf.Any message. It returns an error if the corresponding
+// message type isn't linked in.
+func Empty(any *any.Any) (proto.Message, error) {
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return nil, err
+	}
+
+	t := proto.MessageType(aname)
+	if t == nil {
+		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
+	}
+	return reflect.New(t.Elem()).Interface().(proto.Message), nil
+}
+
+// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
+// message and places the decoded result in pb. It returns an error if the type
+// of the contents of the Any message does not match the type of the pb message.
+//
+// pb can be a proto.Message, or a *DynamicAny.
+func UnmarshalAny(any *any.Any, pb proto.Message) error {
+	if d, ok := pb.(*DynamicAny); ok {
+		if d.Message == nil {
+			var err error
+			d.Message, err = Empty(any)
+			if err != nil {
+				return err
+			}
+		}
+		return UnmarshalAny(any, d.Message)
+	}
+
+	aname, err := AnyMessageName(any)
+	if err != nil {
+		return err
+	}
+
+	mname := proto.MessageName(pb)
+	if aname != mname {
+		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
+	}
+	return proto.Unmarshal(any.Value, pb)
+}
+
+// Is returns true if any value contains a given message type.
+func Is(any *any.Any, pb proto.Message) bool {
+	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
+	// but it avoids scanning TypeUrl for the slash.
+	if any == nil {
+		return false
+	}
+	name := proto.MessageName(pb)
+	prefix := len(any.TypeUrl) - len(name)
+	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
+}
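
Taken together, MarshalAny, Is, UnmarshalAny, and Empty form a small round-trip API. A minimal, self-contained sketch of typical usage, packing the Duration well-known type from this same vendor tree:

```go
package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a concrete message into a google.protobuf.Any.
	in := &durpb.Duration{Seconds: 3, Nanos: 1}
	a, err := ptypes.MarshalAny(in)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Is checks the contained type without unmarshaling the payload.
	fmt.Println(ptypes.Is(a, &durpb.Duration{})) // true

	// Unpack into a value of the expected type; a mismatch is an error.
	out := &durpb.Duration{}
	if err := ptypes.UnmarshalAny(a, out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Seconds, out.Nanos) // 3 1
}
```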
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 0000000..78ee523
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,200 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/any.proto
+
+package any
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// The protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+//  Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+//  Example 4: Pack and unpack a message in Go
+//
+//      foo := &pb.Foo{...}
+//      any, err := ptypes.MarshalAny(foo)
+//      ...
+//      foo := &pb.Foo{}
+//      if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//        ...
+//      }
+//
+// The pack methods provided by the protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+type Any struct {
+	// A URL/resource name that uniquely identifies the type of the serialized
+	// protocol buffer message. The last segment of the URL's path must represent
+	// the fully qualified name of the type (as in
+	// `path/google.protobuf.Duration`). The name should be in a canonical form
+	// (e.g., leading "." is not accepted).
+	//
+	// In practice, teams usually precompile into the binary all types that they
+	// expect it to use in the context of Any. However, for URLs which use the
+	// scheme `http`, `https`, or no scheme, one can optionally set up a type
+	// server that maps type URLs to message definitions as follows:
+	//
+	// * If no scheme is provided, `https` is assumed.
+	// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+	//   value in binary format, or produce an error.
+	// * Applications are allowed to cache lookup results based on the
+	//   URL, or have them precompiled into a binary to avoid any
+	//   lookup. Therefore, binary compatibility needs to be preserved
+	//   on changes to types. (Use versioned type names to manage
+	//   breaking changes.)
+	//
+	// Note: this functionality is not currently available in the official
+	// protobuf release, and it is not used for type URLs beginning with
+	// type.googleapis.com.
+	//
+	// Schemes other than `http`, `https` (or the empty scheme) might be
+	// used with implementation specific semantics.
+	//
+	TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
+	// Must be a valid serialized protocol buffer of the above specified type.
+	Value                []byte   `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Any) Reset()         { *m = Any{} }
+func (m *Any) String() string { return proto.CompactTextString(m) }
+func (*Any) ProtoMessage()    {}
+func (*Any) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b53526c13ae22eb4, []int{0}
+}
+
+func (*Any) XXX_WellKnownType() string { return "Any" }
+
+func (m *Any) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Any.Unmarshal(m, b)
+}
+func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Any.Marshal(b, m, deterministic)
+}
+func (m *Any) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Any.Merge(m, src)
+}
+func (m *Any) XXX_Size() int {
+	return xxx_messageInfo_Any.Size(m)
+}
+func (m *Any) XXX_DiscardUnknown() {
+	xxx_messageInfo_Any.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Any proto.InternalMessageInfo
+
+func (m *Any) GetTypeUrl() string {
+	if m != nil {
+		return m.TypeUrl
+	}
+	return ""
+}
+
+func (m *Any) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*Any)(nil), "google.protobuf.Any")
+}
+
+func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
+
+var fileDescriptor_b53526c13ae22eb4 = []byte{
+	// 185 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
+	0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
+	0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
+	0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
+	0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
+	0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
+	0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
+	0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
+	0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
+	0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
+	0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
new file mode 100644
index 0000000..4932942
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -0,0 +1,154 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/any";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "AnyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// The protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+//     Foo foo = ...;
+//     Any any;
+//     any.PackFrom(foo);
+//     ...
+//     if (any.UnpackTo(&foo)) {
+//       ...
+//     }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+//     Foo foo = ...;
+//     Any any = Any.pack(foo);
+//     ...
+//     if (any.is(Foo.class)) {
+//       foo = any.unpack(Foo.class);
+//     }
+//
+//  Example 3: Pack and unpack a message in Python.
+//
+//     foo = Foo(...)
+//     any = Any()
+//     any.Pack(foo)
+//     ...
+//     if any.Is(Foo.DESCRIPTOR):
+//       any.Unpack(foo)
+//       ...
+//
+//  Example 4: Pack and unpack a message in Go
+//
+//      foo := &pb.Foo{...}
+//      any, err := ptypes.MarshalAny(foo)
+//      ...
+//      foo := &pb.Foo{}
+//      if err := ptypes.UnmarshalAny(any, foo); err != nil {
+//        ...
+//      }
+//
+// The pack methods provided by the protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+//     package google.profile;
+//     message Person {
+//       string first_name = 1;
+//       string last_name = 2;
+//     }
+//
+//     {
+//       "@type": "type.googleapis.com/google.profile.Person",
+//       "firstName": <string>,
+//       "lastName": <string>
+//     }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field. Example (for message [google.protobuf.Duration][]):
+//
+//     {
+//       "@type": "type.googleapis.com/google.protobuf.Duration",
+//       "value": "1.212s"
+//     }
+//
+message Any {
+  // A URL/resource name that uniquely identifies the type of the serialized
+  // protocol buffer message. The last segment of the URL's path must represent
+  // the fully qualified name of the type (as in
+  // `path/google.protobuf.Duration`). The name should be in a canonical form
+  // (e.g., leading "." is not accepted).
+  //
+  // In practice, teams usually precompile into the binary all types that they
+  // expect it to use in the context of Any. However, for URLs which use the
+  // scheme `http`, `https`, or no scheme, one can optionally set up a type
+  // server that maps type URLs to message definitions as follows:
+  //
+  // * If no scheme is provided, `https` is assumed.
+  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
+  //   value in binary format, or produce an error.
+  // * Applications are allowed to cache lookup results based on the
+  //   URL, or have them precompiled into a binary to avoid any
+  //   lookup. Therefore, binary compatibility needs to be preserved
+  //   on changes to types. (Use versioned type names to manage
+  //   breaking changes.)
+  //
+  // Note: this functionality is not currently available in the official
+  // protobuf release, and it is not used for type URLs beginning with
+  // type.googleapis.com.
+  //
+  // Schemes other than `http`, `https` (or the empty scheme) might be
+  // used with implementation specific semantics.
+  //
+  string type_url = 1;
+
+  // Must be a valid serialized protocol buffer of the above specified type.
+  bytes value = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 0000000..c0d595d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,35 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package ptypes contains code for interacting with well-known types.
+*/
+package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 0000000..26d1ca2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,102 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	durpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+const (
+	// Range of a durpb.Duration in seconds, as specified in
+	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the durpb.Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid durpb.Duration
+// may still be too large to fit into a time.Duration (the range of durpb.Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *durpb.Duration) error {
+	if d == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %v: seconds out of range", d)
+	}
+	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %v: nanos out of range", d)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+		return fmt.Errorf("duration: %v: seconds and nanos have different signs", d)
+	}
+	return nil
+}
+
+// Duration converts a durpb.Duration to a time.Duration. Duration
+// returns an error if the durpb.Duration is invalid or is too large to be
+// represented in a time.Duration.
+func Duration(p *durpb.Duration) (time.Duration, error) {
+	if err := validateDuration(p); err != nil {
+		return 0, err
+	}
+	d := time.Duration(p.Seconds) * time.Second
+	if int64(d/time.Second) != p.Seconds {
+		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+	}
+	if p.Nanos != 0 {
+		d += time.Duration(p.Nanos) * time.Nanosecond
+		if (d < 0) != (p.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", p)
+		}
+	}
+	return d, nil
+}
+
+// DurationProto converts a time.Duration to a durpb.Duration.
+func DurationProto(d time.Duration) *durpb.Duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &durpb.Duration{
+		Seconds: secs,
+		Nanos:   int32(nanos),
+	}
+}
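
As a quick illustration of the conversion pair above, this sketch round-trips a time.Duration through its protobuf form:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	// DurationProto splits a time.Duration into whole seconds plus nanos.
	d := 90*time.Second + 500*time.Millisecond
	pb := ptypes.DurationProto(d)
	fmt.Println(pb.Seconds, pb.Nanos) // 90 500000000

	// Duration validates the proto and reassembles the time.Duration.
	back, err := ptypes.Duration(pb)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(back == d) // true
}
```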
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 0000000..0d681ee
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,161 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/duration.proto
+
+package duration
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+type Duration struct {
+	// Signed seconds of the span of time. Must be from -315,576,000,000
+	// to +315,576,000,000 inclusive. Note: these bounds are computed from:
+	// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	// Signed fractions of a second at nanosecond resolution of the span
+	// of time. Durations less than one second are represented with a 0
+	// `seconds` field and a positive or negative `nanos` field. For durations
+	// of one second or more, a non-zero value for the `nanos` field must be
+	// of the same sign as the `seconds` field. Must be from -999,999,999
+	// to +999,999,999 inclusive.
+	Nanos                int32    `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Duration) Reset()         { *m = Duration{} }
+func (m *Duration) String() string { return proto.CompactTextString(m) }
+func (*Duration) ProtoMessage()    {}
+func (*Duration) Descriptor() ([]byte, []int) {
+	return fileDescriptor_23597b2ebd7ac6c5, []int{0}
+}
+
+func (*Duration) XXX_WellKnownType() string { return "Duration" }
+
+func (m *Duration) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Duration.Unmarshal(m, b)
+}
+func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
+}
+func (m *Duration) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Duration.Merge(m, src)
+}
+func (m *Duration) XXX_Size() int {
+	return xxx_messageInfo_Duration.Size(m)
+}
+func (m *Duration) XXX_DiscardUnknown() {
+	xxx_messageInfo_Duration.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Duration proto.InternalMessageInfo
+
+func (m *Duration) GetSeconds() int64 {
+	if m != nil {
+		return m.Seconds
+	}
+	return 0
+}
+
+func (m *Duration) GetNanos() int32 {
+	if m != nil {
+		return m.Nanos
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
+}
+
+func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
+
+var fileDescriptor_23597b2ebd7ac6c5 = []byte{
+	// 190 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
+	0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
+	0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
+	0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
+	0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
+	0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
+	0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
+	0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
+	0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
+	0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
+	0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
new file mode 100644
index 0000000..975fce4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
@@ -0,0 +1,117 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/duration";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DurationProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Duration represents a signed, fixed-length span of time represented
+// as a count of seconds and fractions of seconds at nanosecond
+// resolution. It is independent of any calendar and concepts like "day"
+// or "month". It is related to Timestamp in that the difference between
+// two Timestamp values is a Duration and it can be added or subtracted
+// from a Timestamp. Range is approximately +-10,000 years.
+//
+// # Examples
+//
+// Example 1: Compute Duration from two Timestamps in pseudo code.
+//
+//     Timestamp start = ...;
+//     Timestamp end = ...;
+//     Duration duration = ...;
+//
+//     duration.seconds = end.seconds - start.seconds;
+//     duration.nanos = end.nanos - start.nanos;
+//
+//     if (duration.seconds < 0 && duration.nanos > 0) {
+//       duration.seconds += 1;
+//       duration.nanos -= 1000000000;
+//     } else if (duration.seconds > 0 && duration.nanos < 0) {
+//       duration.seconds -= 1;
+//       duration.nanos += 1000000000;
+//     }
+//
+// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
+//
+//     Timestamp start = ...;
+//     Duration duration = ...;
+//     Timestamp end = ...;
+//
+//     end.seconds = start.seconds + duration.seconds;
+//     end.nanos = start.nanos + duration.nanos;
+//
+//     if (end.nanos < 0) {
+//       end.seconds -= 1;
+//       end.nanos += 1000000000;
+//     } else if (end.nanos >= 1000000000) {
+//       end.seconds += 1;
+//       end.nanos -= 1000000000;
+//     }
+//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+//     td = datetime.timedelta(days=3, minutes=10)
+//     duration = Duration()
+//     duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
+//
+message Duration {
+
+  // Signed seconds of the span of time. Must be from -315,576,000,000
+  // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+  // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
+  int64 seconds = 1;
+
+  // Signed fractions of a second at nanosecond resolution of the span
+  // of time. Durations less than one second are represented with a 0
+  // `seconds` field and a positive or negative `nanos` field. For durations
+  // of one second or more, a non-zero value for the `nanos` field must be
+  // of the same sign as the `seconds` field. Must be from -999,999,999
+  // to +999,999,999 inclusive.
+  int32 nanos = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
new file mode 100644
index 0000000..b4eb03e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -0,0 +1,83 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/empty.proto
+
+package empty
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+// The JSON representation for `Empty` is an empty JSON object `{}`.
+type Empty struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Empty) Reset()         { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage()    {}
+func (*Empty) Descriptor() ([]byte, []int) {
+	return fileDescriptor_900544acb223d5b8, []int{0}
+}
+
+func (*Empty) XXX_WellKnownType() string { return "Empty" }
+
+func (m *Empty) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Empty.Unmarshal(m, b)
+}
+func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
+}
+func (m *Empty) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Empty.Merge(m, src)
+}
+func (m *Empty) XXX_Size() int {
+	return xxx_messageInfo_Empty.Size(m)
+}
+func (m *Empty) XXX_DiscardUnknown() {
+	xxx_messageInfo_Empty.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Empty proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
+}
+
+func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) }
+
+var fileDescriptor_900544acb223d5b8 = []byte{
+	// 148 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
+	0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
+	0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36,
+	0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf,
+	0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c,
+	0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10,
+	0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40,
+	0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6,
+	0xb7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
new file mode 100644
index 0000000..03cacd2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto
@@ -0,0 +1,52 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option go_package = "github.com/golang/protobuf/ptypes/empty";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "EmptyProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+option cc_enable_arenas = true;
+
+// A generic empty message that you can re-use to avoid defining duplicated
+// empty messages in your APIs. A typical example is to use it as the request
+// or the response type of an API method. For instance:
+//
+//     service Foo {
+//       rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
+//     }
+//
+// The JSON representation for `Empty` is an empty JSON object `{}`.
+message Empty {}
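
As a hedged sketch of the request/response pattern described above, a hypothetical handler can use the generated Go type as a placeholder payload on both sides:

```go
package main

import (
	"context"
	"fmt"

	"github.com/golang/protobuf/ptypes/empty"
)

// Ping is a hypothetical handler in the shape of a gRPC method: neither the
// request nor the response carries data beyond the call itself.
func Ping(ctx context.Context, _ *empty.Empty) (*empty.Empty, error) {
	return &empty.Empty{}, nil
}

func main() {
	resp, err := Ping(context.Background(), &empty.Empty{})
	fmt.Println(resp != nil, err) // true <nil>
}
```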
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
new file mode 100644
index 0000000..33daa73
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go
@@ -0,0 +1,336 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/struct.proto
+
+package structpb
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+//  The JSON representation for `NullValue` is JSON `null`.
+type NullValue int32
+
+const (
+	// Null value.
+	NullValue_NULL_VALUE NullValue = 0
+)
+
+var NullValue_name = map[int32]string{
+	0: "NULL_VALUE",
+}
+
+var NullValue_value = map[string]int32{
+	"NULL_VALUE": 0,
+}
+
+func (x NullValue) String() string {
+	return proto.EnumName(NullValue_name, int32(x))
+}
+
+func (NullValue) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_df322afd6c9fb402, []int{0}
+}
+
+func (NullValue) XXX_WellKnownType() string { return "NullValue" }
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS, a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is a JSON object.
+type Struct struct {
+	// Unordered map of dynamically typed values.
+	Fields               map[string]*Value `protobuf:"bytes,1,rep,name=fields,proto3" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *Struct) Reset()         { *m = Struct{} }
+func (m *Struct) String() string { return proto.CompactTextString(m) }
+func (*Struct) ProtoMessage()    {}
+func (*Struct) Descriptor() ([]byte, []int) {
+	return fileDescriptor_df322afd6c9fb402, []int{0}
+}
+
+func (*Struct) XXX_WellKnownType() string { return "Struct" }
+
+func (m *Struct) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Struct.Unmarshal(m, b)
+}
+func (m *Struct) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Struct.Marshal(b, m, deterministic)
+}
+func (m *Struct) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Struct.Merge(m, src)
+}
+func (m *Struct) XXX_Size() int {
+	return xxx_messageInfo_Struct.Size(m)
+}
+func (m *Struct) XXX_DiscardUnknown() {
+	xxx_messageInfo_Struct.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Struct proto.InternalMessageInfo
+
+func (m *Struct) GetFields() map[string]*Value {
+	if m != nil {
+		return m.Fields
+	}
+	return nil
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of a value is expected to set one of these
+// variants; the absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is a JSON value.
+type Value struct {
+	// The kind of value.
+	//
+	// Types that are valid to be assigned to Kind:
+	//	*Value_NullValue
+	//	*Value_NumberValue
+	//	*Value_StringValue
+	//	*Value_BoolValue
+	//	*Value_StructValue
+	//	*Value_ListValue
+	Kind                 isValue_Kind `protobuf_oneof:"kind"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *Value) Reset()         { *m = Value{} }
+func (m *Value) String() string { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage()    {}
+func (*Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_df322afd6c9fb402, []int{1}
+}
+
+func (*Value) XXX_WellKnownType() string { return "Value" }
+
+func (m *Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Value.Unmarshal(m, b)
+}
+func (m *Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Value.Marshal(b, m, deterministic)
+}
+func (m *Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Value.Merge(m, src)
+}
+func (m *Value) XXX_Size() int {
+	return xxx_messageInfo_Value.Size(m)
+}
+func (m *Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Value proto.InternalMessageInfo
+
+type isValue_Kind interface {
+	isValue_Kind()
+}
+
+type Value_NullValue struct {
+	NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
+}
+
+type Value_NumberValue struct {
+	NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"`
+}
+
+type Value_StringValue struct {
+	StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type Value_BoolValue struct {
+	BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type Value_StructValue struct {
+	StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,proto3,oneof"`
+}
+
+type Value_ListValue struct {
+	ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,proto3,oneof"`
+}
+
+func (*Value_NullValue) isValue_Kind() {}
+
+func (*Value_NumberValue) isValue_Kind() {}
+
+func (*Value_StringValue) isValue_Kind() {}
+
+func (*Value_BoolValue) isValue_Kind() {}
+
+func (*Value_StructValue) isValue_Kind() {}
+
+func (*Value_ListValue) isValue_Kind() {}
+
+func (m *Value) GetKind() isValue_Kind {
+	if m != nil {
+		return m.Kind
+	}
+	return nil
+}
+
+func (m *Value) GetNullValue() NullValue {
+	if x, ok := m.GetKind().(*Value_NullValue); ok {
+		return x.NullValue
+	}
+	return NullValue_NULL_VALUE
+}
+
+func (m *Value) GetNumberValue() float64 {
+	if x, ok := m.GetKind().(*Value_NumberValue); ok {
+		return x.NumberValue
+	}
+	return 0
+}
+
+func (m *Value) GetStringValue() string {
+	if x, ok := m.GetKind().(*Value_StringValue); ok {
+		return x.StringValue
+	}
+	return ""
+}
+
+func (m *Value) GetBoolValue() bool {
+	if x, ok := m.GetKind().(*Value_BoolValue); ok {
+		return x.BoolValue
+	}
+	return false
+}
+
+func (m *Value) GetStructValue() *Struct {
+	if x, ok := m.GetKind().(*Value_StructValue); ok {
+		return x.StructValue
+	}
+	return nil
+}
+
+func (m *Value) GetListValue() *ListValue {
+	if x, ok := m.GetKind().(*Value_ListValue); ok {
+		return x.ListValue
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Value) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*Value_NullValue)(nil),
+		(*Value_NumberValue)(nil),
+		(*Value_StringValue)(nil),
+		(*Value_BoolValue)(nil),
+		(*Value_StructValue)(nil),
+		(*Value_ListValue)(nil),
+	}
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is a JSON array.
+type ListValue struct {
+	// Repeated field of dynamically typed values.
+	Values               []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ListValue) Reset()         { *m = ListValue{} }
+func (m *ListValue) String() string { return proto.CompactTextString(m) }
+func (*ListValue) ProtoMessage()    {}
+func (*ListValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_df322afd6c9fb402, []int{2}
+}
+
+func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
+
+func (m *ListValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ListValue.Unmarshal(m, b)
+}
+func (m *ListValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ListValue.Marshal(b, m, deterministic)
+}
+func (m *ListValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ListValue.Merge(m, src)
+}
+func (m *ListValue) XXX_Size() int {
+	return xxx_messageInfo_ListValue.Size(m)
+}
+func (m *ListValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_ListValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ListValue proto.InternalMessageInfo
+
+func (m *ListValue) GetValues() []*Value {
+	if m != nil {
+		return m.Values
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
+	proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
+	proto.RegisterMapType((map[string]*Value)(nil), "google.protobuf.Struct.FieldsEntry")
+	proto.RegisterType((*Value)(nil), "google.protobuf.Value")
+	proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
+}
+
+func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor_df322afd6c9fb402) }
+
+var fileDescriptor_df322afd6c9fb402 = []byte{
+	// 417 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40,
+	0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09,
+	0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94,
+	0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 0xfa,
+	0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff,
+	0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc,
+	0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15,
+	0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d,
+	0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce,
+	0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39,
+	0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab,
+	0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84,
+	0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48,
+	0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f,
+	0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59,
+	0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a,
+	0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64,
+	0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92,
+	0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25,
+	0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37,
+	0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6,
+	0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4,
+	0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda,
+	0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9,
+	0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53,
+	0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00,
+	0x00,
+}
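
Because each Value kind is a oneof wrapper type, building a Struct by hand is verbose but mechanical. A minimal sketch constructing the JSON document {"name": "alice", "tags": ["a", "b"]} with the types generated above:

```go
package main

import (
	"fmt"

	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// Each field wraps its Go value in the matching Value_* oneof type.
	s := &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"name": {Kind: &structpb.Value_StringValue{StringValue: "alice"}},
			"tags": {Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{
				Values: []*structpb.Value{
					{Kind: &structpb.Value_StringValue{StringValue: "a"}},
					{Kind: &structpb.Value_StringValue{StringValue: "b"}},
				},
			}}},
		},
	}

	// The generated getters unwrap the oneof, returning zero values when the
	// kind does not match.
	fmt.Println(s.Fields["name"].GetStringValue())                // alice
	fmt.Println(len(s.Fields["tags"].GetListValue().GetValues())) // 2
}
```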
diff --git a/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
new file mode 100644
index 0000000..7d7808e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
@@ -0,0 +1,96 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "StructProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS, a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is a JSON object.
+message Struct {
+  // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of a `Value` is expected to set one of these
+// variants; absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is a JSON value.
+message Value {
+  // The kind of value.
+  oneof kind {
+    // Represents a null value.
+    NullValue null_value = 1;
+    // Represents a double value.
+    double number_value = 2;
+    // Represents a string value.
+    string string_value = 3;
+    // Represents a boolean value.
+    bool bool_value = 4;
+    // Represents a structured value.
+    Struct struct_value = 5;
+    // Represents a repeated `Value`.
+    ListValue list_value = 6;
+  }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+  // Null value.
+  NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is a JSON array.
+message ListValue {
+  // Repeated field of dynamically typed values.
+  repeated Value values = 1;
+}
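+
+// Illustrative usage (not part of the upstream file): in Go, the generated
+// structpb package maps these messages onto oneof wrapper types, so a
+// Struct with one string field can be built roughly as:
+//
+//	s := &structpb.Struct{Fields: map[string]*structpb.Value{
+//		"greeting": {Kind: &structpb.Value_StringValue{StringValue: "hello"}},
+//	}}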
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 0000000..8da0df0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,132 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package ptypes
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	tspb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
+func validateTimestamp(ts *tspb.Timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// Timestamp converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
+	ts := &tspb.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
+// Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *tspb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
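+
+// Illustrative usage (not part of the upstream file): round-tripping a
+// time.Time through its proto form with the helpers above:
+//
+//	ts, err := TimestampProto(time.Now()) // time.Time -> *tspb.Timestamp
+//	if err != nil {
+//		// the time fell outside [0001-01-01, 10000-01-01)
+//	}
+//	t, _ := Timestamp(ts) // *tspb.Timestamp -> time.Time, always in UTC
+//	fmt.Println(TimestampString(ts), t)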
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 0000000..31cd846
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,179 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
+
+package timestamp
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(time(NULL));
+//     timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+//     struct timeval tv;
+//     gettimeofday(&tv, NULL);
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(tv.tv_sec);
+//     timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+//     FILETIME ft;
+//     GetSystemTimeAsFileTime(&ft);
+//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+//     timestamp = Timestamp()
+//     timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+type Timestamp struct {
+	// Represents seconds of UTC time since Unix epoch
+	// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+	// 9999-12-31T23:59:59Z inclusive.
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	// Non-negative fractions of a second at nanosecond resolution. Negative
+	// second values with fractions must still have non-negative nanos values
+	// that count forward in time. Must be from 0 to 999,999,999
+	// inclusive.
+	Nanos                int32    `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Timestamp) Reset()         { *m = Timestamp{} }
+func (m *Timestamp) String() string { return proto.CompactTextString(m) }
+func (*Timestamp) ProtoMessage()    {}
+func (*Timestamp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_292007bbfe81227e, []int{0}
+}
+
+func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
+
+func (m *Timestamp) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Timestamp.Unmarshal(m, b)
+}
+func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
+}
+func (m *Timestamp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Timestamp.Merge(m, src)
+}
+func (m *Timestamp) XXX_Size() int {
+	return xxx_messageInfo_Timestamp.Size(m)
+}
+func (m *Timestamp) XXX_DiscardUnknown() {
+	xxx_messageInfo_Timestamp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Timestamp proto.InternalMessageInfo
+
+func (m *Timestamp) GetSeconds() int64 {
+	if m != nil {
+		return m.Seconds
+	}
+	return 0
+}
+
+func (m *Timestamp) GetNanos() int32 {
+	if m != nil {
+		return m.Nanos
+	}
+	return 0
+}
+
+func init() {
+	proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
+}
+
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
+
+var fileDescriptor_292007bbfe81227e = []byte{
+	// 191 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
+	0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
+	0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
+	0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
+	0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
+	0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
+	0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
+	0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
+	0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
+	0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
+	0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
new file mode 100644
index 0000000..eafb3fa
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -0,0 +1,135 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/timestamp";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "TimestampProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// A Timestamp represents a point in time independent of any time zone
+// or calendar, represented as seconds and fractions of seconds at
+// nanosecond resolution in UTC Epoch time. It is encoded using the
+// Proleptic Gregorian Calendar which extends the Gregorian calendar
+// backwards to year one. It is encoded assuming all minutes are 60
+// seconds long, i.e. leap seconds are "smeared" so that no leap second
+// table is needed for interpretation. Range is from
+// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
+// By restricting to that range, we ensure that we can convert to
+// and from RFC 3339 date strings.
+// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
+//
+// # Examples
+//
+// Example 1: Compute Timestamp from POSIX `time()`.
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(time(NULL));
+//     timestamp.set_nanos(0);
+//
+// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
+//
+//     struct timeval tv;
+//     gettimeofday(&tv, NULL);
+//
+//     Timestamp timestamp;
+//     timestamp.set_seconds(tv.tv_sec);
+//     timestamp.set_nanos(tv.tv_usec * 1000);
+//
+// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
+//
+//     FILETIME ft;
+//     GetSystemTimeAsFileTime(&ft);
+//     UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
+//
+//     // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
+//     // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
+//     Timestamp timestamp;
+//     timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
+//     timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
+//
+// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
+//
+//     long millis = System.currentTimeMillis();
+//
+//     Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
+//         .setNanos((int) ((millis % 1000) * 1000000)).build();
+//
+//
+// Example 5: Compute Timestamp from current time in Python.
+//
+//     timestamp = Timestamp()
+//     timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required. A proto3 JSON serializer should always use UTC (as indicated by
+// "Z") when printing the Timestamp type and a proto3 JSON parser should be
+// able to accept both UTC and other timezones (as indicated by an offset).
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--
+// ) to obtain a formatter capable of generating timestamps in this format.
+//
+//
+message Timestamp {
+
+  // Represents seconds of UTC time since Unix epoch
+  // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
+  // 9999-12-31T23:59:59Z inclusive.
+  int64 seconds = 1;
+
+  // Non-negative fractions of a second at nanosecond resolution. Negative
+  // second values with fractions must still have non-negative nanos values
+  // that count forward in time. Must be from 0 to 999,999,999
+  // inclusive.
+  int32 nanos = 2;
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
new file mode 100644
index 0000000..add19a1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
@@ -0,0 +1,461 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
+
+package wrappers
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is a JSON number.
+type DoubleValue struct {
+	// The double value.
+	Value                float64  `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DoubleValue) Reset()         { *m = DoubleValue{} }
+func (m *DoubleValue) String() string { return proto.CompactTextString(m) }
+func (*DoubleValue) ProtoMessage()    {}
+func (*DoubleValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{0}
+}
+
+func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
+
+func (m *DoubleValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DoubleValue.Unmarshal(m, b)
+}
+func (m *DoubleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DoubleValue.Marshal(b, m, deterministic)
+}
+func (m *DoubleValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DoubleValue.Merge(m, src)
+}
+func (m *DoubleValue) XXX_Size() int {
+	return xxx_messageInfo_DoubleValue.Size(m)
+}
+func (m *DoubleValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_DoubleValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DoubleValue proto.InternalMessageInfo
+
+func (m *DoubleValue) GetValue() float64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is a JSON number.
+type FloatValue struct {
+	// The float value.
+	Value                float32  `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *FloatValue) Reset()         { *m = FloatValue{} }
+func (m *FloatValue) String() string { return proto.CompactTextString(m) }
+func (*FloatValue) ProtoMessage()    {}
+func (*FloatValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{1}
+}
+
+func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
+
+func (m *FloatValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FloatValue.Unmarshal(m, b)
+}
+func (m *FloatValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FloatValue.Marshal(b, m, deterministic)
+}
+func (m *FloatValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FloatValue.Merge(m, src)
+}
+func (m *FloatValue) XXX_Size() int {
+	return xxx_messageInfo_FloatValue.Size(m)
+}
+func (m *FloatValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_FloatValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FloatValue proto.InternalMessageInfo
+
+func (m *FloatValue) GetValue() float32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is a JSON string.
+type Int64Value struct {
+	// The int64 value.
+	Value                int64    `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Int64Value) Reset()         { *m = Int64Value{} }
+func (m *Int64Value) String() string { return proto.CompactTextString(m) }
+func (*Int64Value) ProtoMessage()    {}
+func (*Int64Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{2}
+}
+
+func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
+
+func (m *Int64Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Int64Value.Unmarshal(m, b)
+}
+func (m *Int64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Int64Value.Marshal(b, m, deterministic)
+}
+func (m *Int64Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Int64Value.Merge(m, src)
+}
+func (m *Int64Value) XXX_Size() int {
+	return xxx_messageInfo_Int64Value.Size(m)
+}
+func (m *Int64Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_Int64Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int64Value proto.InternalMessageInfo
+
+func (m *Int64Value) GetValue() int64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is a JSON string.
+type UInt64Value struct {
+	// The uint64 value.
+	Value                uint64   `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UInt64Value) Reset()         { *m = UInt64Value{} }
+func (m *UInt64Value) String() string { return proto.CompactTextString(m) }
+func (*UInt64Value) ProtoMessage()    {}
+func (*UInt64Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{3}
+}
+
+func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
+
+func (m *UInt64Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UInt64Value.Unmarshal(m, b)
+}
+func (m *UInt64Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UInt64Value.Marshal(b, m, deterministic)
+}
+func (m *UInt64Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UInt64Value.Merge(m, src)
+}
+func (m *UInt64Value) XXX_Size() int {
+	return xxx_messageInfo_UInt64Value.Size(m)
+}
+func (m *UInt64Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_UInt64Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UInt64Value proto.InternalMessageInfo
+
+func (m *UInt64Value) GetValue() uint64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is a JSON number.
+type Int32Value struct {
+	// The int32 value.
+	Value                int32    `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Int32Value) Reset()         { *m = Int32Value{} }
+func (m *Int32Value) String() string { return proto.CompactTextString(m) }
+func (*Int32Value) ProtoMessage()    {}
+func (*Int32Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{4}
+}
+
+func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
+
+func (m *Int32Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Int32Value.Unmarshal(m, b)
+}
+func (m *Int32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Int32Value.Marshal(b, m, deterministic)
+}
+func (m *Int32Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Int32Value.Merge(m, src)
+}
+func (m *Int32Value) XXX_Size() int {
+	return xxx_messageInfo_Int32Value.Size(m)
+}
+func (m *Int32Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_Int32Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Int32Value proto.InternalMessageInfo
+
+func (m *Int32Value) GetValue() int32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is a JSON number.
+type UInt32Value struct {
+	// The uint32 value.
+	Value                uint32   `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UInt32Value) Reset()         { *m = UInt32Value{} }
+func (m *UInt32Value) String() string { return proto.CompactTextString(m) }
+func (*UInt32Value) ProtoMessage()    {}
+func (*UInt32Value) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{5}
+}
+
+func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
+
+func (m *UInt32Value) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UInt32Value.Unmarshal(m, b)
+}
+func (m *UInt32Value) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UInt32Value.Marshal(b, m, deterministic)
+}
+func (m *UInt32Value) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UInt32Value.Merge(m, src)
+}
+func (m *UInt32Value) XXX_Size() int {
+	return xxx_messageInfo_UInt32Value.Size(m)
+}
+func (m *UInt32Value) XXX_DiscardUnknown() {
+	xxx_messageInfo_UInt32Value.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UInt32Value proto.InternalMessageInfo
+
+func (m *UInt32Value) GetValue() uint32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` or `false`.
+type BoolValue struct {
+	// The bool value.
+	Value                bool     `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *BoolValue) Reset()         { *m = BoolValue{} }
+func (m *BoolValue) String() string { return proto.CompactTextString(m) }
+func (*BoolValue) ProtoMessage()    {}
+func (*BoolValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{6}
+}
+
+func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" }
+
+func (m *BoolValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_BoolValue.Unmarshal(m, b)
+}
+func (m *BoolValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_BoolValue.Marshal(b, m, deterministic)
+}
+func (m *BoolValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BoolValue.Merge(m, src)
+}
+func (m *BoolValue) XXX_Size() int {
+	return xxx_messageInfo_BoolValue.Size(m)
+}
+func (m *BoolValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_BoolValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BoolValue proto.InternalMessageInfo
+
+func (m *BoolValue) GetValue() bool {
+	if m != nil {
+		return m.Value
+	}
+	return false
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is a JSON string.
+type StringValue struct {
+	// The string value.
+	Value                string   `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StringValue) Reset()         { *m = StringValue{} }
+func (m *StringValue) String() string { return proto.CompactTextString(m) }
+func (*StringValue) ProtoMessage()    {}
+func (*StringValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{7}
+}
+
+func (*StringValue) XXX_WellKnownType() string { return "StringValue" }
+
+func (m *StringValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StringValue.Unmarshal(m, b)
+}
+func (m *StringValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StringValue.Marshal(b, m, deterministic)
+}
+func (m *StringValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StringValue.Merge(m, src)
+}
+func (m *StringValue) XXX_Size() int {
+	return xxx_messageInfo_StringValue.Size(m)
+}
+func (m *StringValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_StringValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StringValue proto.InternalMessageInfo
+
+func (m *StringValue) GetValue() string {
+	if m != nil {
+		return m.Value
+	}
+	return ""
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is a JSON string.
+type BytesValue struct {
+	// The bytes value.
+	Value                []byte   `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *BytesValue) Reset()         { *m = BytesValue{} }
+func (m *BytesValue) String() string { return proto.CompactTextString(m) }
+func (*BytesValue) ProtoMessage()    {}
+func (*BytesValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_5377b62bda767935, []int{8}
+}
+
+func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" }
+
+func (m *BytesValue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_BytesValue.Unmarshal(m, b)
+}
+func (m *BytesValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_BytesValue.Marshal(b, m, deterministic)
+}
+func (m *BytesValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_BytesValue.Merge(m, src)
+}
+func (m *BytesValue) XXX_Size() int {
+	return xxx_messageInfo_BytesValue.Size(m)
+}
+func (m *BytesValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_BytesValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_BytesValue proto.InternalMessageInfo
+
+func (m *BytesValue) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue")
+	proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue")
+	proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value")
+	proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value")
+	proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value")
+	proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value")
+	proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue")
+	proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue")
+	proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue")
+}
+
+func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor_5377b62bda767935) }
+
+var fileDescriptor_5377b62bda767935 = []byte{
+	// 259 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+	0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c,
+	0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca,
+	0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c,
+	0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5,
+	0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13,
+	0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8,
+	0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca,
+	0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a,
+	0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d,
+	0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24,
+	0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f,
+	0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c,
+	0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e,
+	0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b,
+	0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe,
+	0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
new file mode 100644
index 0000000..0194763
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
@@ -0,0 +1,118 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc.  All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Wrappers for primitive (non-message) types. These types are useful
+// for embedding primitives in the `google.protobuf.Any` type and for places
+// where we need to distinguish between the absence of a primitive
+// typed field and its default value.
+
+syntax = "proto3";
+
+package google.protobuf;
+
+option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
+option go_package = "github.com/golang/protobuf/ptypes/wrappers";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "WrappersProto";
+option java_multiple_files = true;
+option objc_class_prefix = "GPB";
+
+// Wrapper message for `double`.
+//
+// The JSON representation for `DoubleValue` is a JSON number.
+message DoubleValue {
+  // The double value.
+  double value = 1;
+}
+
+// Wrapper message for `float`.
+//
+// The JSON representation for `FloatValue` is a JSON number.
+message FloatValue {
+  // The float value.
+  float value = 1;
+}
+
+// Wrapper message for `int64`.
+//
+// The JSON representation for `Int64Value` is a JSON string.
+message Int64Value {
+  // The int64 value.
+  int64 value = 1;
+}
+
+// Wrapper message for `uint64`.
+//
+// The JSON representation for `UInt64Value` is a JSON string.
+message UInt64Value {
+  // The uint64 value.
+  uint64 value = 1;
+}
+
+// Wrapper message for `int32`.
+//
+// The JSON representation for `Int32Value` is a JSON number.
+message Int32Value {
+  // The int32 value.
+  int32 value = 1;
+}
+
+// Wrapper message for `uint32`.
+//
+// The JSON representation for `UInt32Value` is a JSON number.
+message UInt32Value {
+  // The uint32 value.
+  uint32 value = 1;
+}
+
+// Wrapper message for `bool`.
+//
+// The JSON representation for `BoolValue` is JSON `true` or `false`.
+message BoolValue {
+  // The bool value.
+  bool value = 1;
+}
+
+// Wrapper message for `string`.
+//
+// The JSON representation for `StringValue` is a JSON string.
+message StringValue {
+  // The string value.
+  string value = 1;
+}
+
+// Wrapper message for `bytes`.
+//
+// The JSON representation for `BytesValue` is a JSON string.
+message BytesValue {
+  // The bytes value.
+  bytes value = 1;
+}
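+
+// Illustrative note (not part of the upstream file): a field declared as
+// `google.protobuf.Int32Value limit = 1;` maps to `*wrappers.Int32Value` in
+// the generated Go code, so a nil pointer means "unset" while
+// `&wrappers.Int32Value{Value: 0}` means "explicitly zero".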
diff --git a/vendor/github.com/golang/snappy/.gitignore b/vendor/github.com/golang/snappy/.gitignore
new file mode 100644
index 0000000..042091d
--- /dev/null
+++ b/vendor/github.com/golang/snappy/.gitignore
@@ -0,0 +1,16 @@
+cmd/snappytool/snappytool
+testdata/bench
+
+# These explicitly listed benchmark data files are for an obsolete version of
+# snappy_test.go.
+testdata/alice29.txt
+testdata/asyoulik.txt
+testdata/fireworks.jpeg
+testdata/geo.protodata
+testdata/html
+testdata/html_x_4
+testdata/kppkn.gtb
+testdata/lcet10.txt
+testdata/paper-100k.pdf
+testdata/plrabn12.txt
+testdata/urls.10K
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000..bcfa195
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+#	Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..931ae31
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people.  For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+#     http://code.google.com/legal/individual-cla-v1.0.html
+#     http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+#     Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman <kaib@golang.org>
+Marc-Antoine Ruel <maruel@chromium.org>
+Nigel Tao <nigeltao@golang.org>
+Rob Pike <r@golang.org>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Russ Cox <rsc@golang.org>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000..6050c10
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 0000000..cea1287
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy;
+note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8         2.19GB/s ± 0%  html
+_UFlat1-8         1.41GB/s ± 0%  urls
+_UFlat2-8         23.5GB/s ± 2%  jpg
+_UFlat3-8         1.91GB/s ± 0%  jpg_200
+_UFlat4-8         14.0GB/s ± 1%  pdf
+_UFlat5-8         1.97GB/s ± 0%  html4
+_UFlat6-8          814MB/s ± 0%  txt1
+_UFlat7-8          785MB/s ± 0%  txt2
+_UFlat8-8          857MB/s ± 0%  txt3
+_UFlat9-8          719MB/s ± 1%  txt4
+_UFlat10-8        2.84GB/s ± 0%  pb
+_UFlat11-8        1.05GB/s ± 0%  gaviota
+
+_ZFlat0-8         1.04GB/s ± 0%  html
+_ZFlat1-8          534MB/s ± 0%  urls
+_ZFlat2-8         15.7GB/s ± 1%  jpg
+_ZFlat3-8          740MB/s ± 3%  jpg_200
+_ZFlat4-8         9.20GB/s ± 1%  pdf
+_ZFlat5-8          991MB/s ± 0%  html4
+_ZFlat6-8          379MB/s ± 0%  txt1
+_ZFlat7-8          352MB/s ± 0%  txt2
+_ZFlat8-8          396MB/s ± 1%  txt3
+_ZFlat9-8          327MB/s ± 1%  txt4
+_ZFlat10-8        1.33GB/s ± 1%  pb
+_ZFlat11-8         605MB/s ± 1%  gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8          621MB/s ± 2%  html
+_UFlat1-8          494MB/s ± 1%  urls
+_UFlat2-8         23.2GB/s ± 1%  jpg
+_UFlat3-8         1.12GB/s ± 1%  jpg_200
+_UFlat4-8         4.35GB/s ± 1%  pdf
+_UFlat5-8          609MB/s ± 0%  html4
+_UFlat6-8          296MB/s ± 0%  txt1
+_UFlat7-8          288MB/s ± 0%  txt2
+_UFlat8-8          309MB/s ± 1%  txt3
+_UFlat9-8          280MB/s ± 1%  txt4
+_UFlat10-8         753MB/s ± 0%  pb
+_UFlat11-8         400MB/s ± 0%  gaviota
+
+_ZFlat0-8          409MB/s ± 1%  html
+_ZFlat1-8          250MB/s ± 1%  urls
+_ZFlat2-8         12.3GB/s ± 1%  jpg
+_ZFlat3-8          132MB/s ± 0%  jpg_200
+_ZFlat4-8         2.92GB/s ± 0%  pdf
+_ZFlat5-8          405MB/s ± 1%  html4
+_ZFlat6-8          179MB/s ± 1%  txt1
+_ZFlat7-8          170MB/s ± 1%  txt2
+_ZFlat8-8          189MB/s ± 1%  txt3
+_ZFlat9-8          164MB/s ± 1%  txt4
+_ZFlat10-8         479MB/s ± 1%  pb
+_ZFlat11-8         270MB/s ± 1%  gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0     2.4GB/s  html
+BM_UFlat/1     1.4GB/s  urls
+BM_UFlat/2    21.8GB/s  jpg
+BM_UFlat/3     1.5GB/s  jpg_200
+BM_UFlat/4    13.3GB/s  pdf
+BM_UFlat/5     2.1GB/s  html4
+BM_UFlat/6     1.0GB/s  txt1
+BM_UFlat/7   959.4MB/s  txt2
+BM_UFlat/8     1.0GB/s  txt3
+BM_UFlat/9   864.5MB/s  txt4
+BM_UFlat/10    2.9GB/s  pb
+BM_UFlat/11    1.2GB/s  gaviota
+
+BM_ZFlat/0   944.3MB/s  html (22.31 %)
+BM_ZFlat/1   501.6MB/s  urls (47.78 %)
+BM_ZFlat/2    14.3GB/s  jpg (99.95 %)
+BM_ZFlat/3   538.3MB/s  jpg_200 (73.00 %)
+BM_ZFlat/4     8.3GB/s  pdf (83.30 %)
+BM_ZFlat/5   903.5MB/s  html4 (22.52 %)
+BM_ZFlat/6   336.0MB/s  txt1 (57.88 %)
+BM_ZFlat/7   312.3MB/s  txt2 (61.91 %)
+BM_ZFlat/8   353.1MB/s  txt3 (54.99 %)
+BM_ZFlat/9   289.9MB/s  txt4 (66.26 %)
+BM_ZFlat/10    1.2GB/s  pb (19.68 %)
+BM_ZFlat/11  527.4MB/s  gaviota (37.72 %)
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000..72efb03
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+var (
+	// ErrCorrupt reports that the input is invalid.
+	ErrCorrupt = errors.New("snappy: corrupt input")
+	// ErrTooLarge reports that the uncompressed length is too large.
+	ErrTooLarge = errors.New("snappy: decoded block is too large")
+	// ErrUnsupported reports that the input isn't supported.
+	ErrUnsupported = errors.New("snappy: unsupported input")
+
+	errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+	v, _, err := decodedLen(src)
+	return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+	v, n := binary.Uvarint(src)
+	if n <= 0 || v > 0xffffffff {
+		return 0, 0, ErrCorrupt
+	}
+
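+	// wordSize is 64 on 64-bit platforms and 32 on 32-bit ones:
+	// ^uint(0)>>32 is 1 only when uint is 64 bits wide.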
+	const wordSize = 32 << (^uint(0) >> 32 & 1)
+	if wordSize == 32 && v > 0x7fffffff {
+		return 0, 0, ErrTooLarge
+	}
+	return int(v), n, nil
+}
+
+const (
+	decodeErrCodeCorrupt                  = 1
+	decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+	dLen, s, err := decodedLen(src)
+	if err != nil {
+		return nil, err
+	}
+	if dLen <= len(dst) {
+		dst = dst[:dLen]
+	} else {
+		dst = make([]byte, dLen)
+	}
+	switch decode(dst, src[s:]) {
+	case 0:
+		return dst, nil
+	case decodeErrCodeUnsupportedLiteralLength:
+		return nil, errUnsupportedLiteralLength
+	}
+	return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+	return &Reader{
+		r:       r,
+		decoded: make([]byte, maxBlockSize),
+		buf:     make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+	}
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+	r       io.Reader
+	err     error
+	decoded []byte
+	buf     []byte
+	// decoded[i:j] contains decoded bytes that have not yet been passed on.
+	i, j       int
+	readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+	r.r = reader
+	r.err = nil
+	r.i = 0
+	r.j = 0
+	r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+	if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+		if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+			r.err = ErrCorrupt
+		}
+		return false
+	}
+	return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+	if r.err != nil {
+		return 0, r.err
+	}
+	for {
+		if r.i < r.j {
+			n := copy(p, r.decoded[r.i:r.j])
+			r.i += n
+			return n, nil
+		}
+		if !r.readFull(r.buf[:4], true) {
+			return 0, r.err
+		}
+		chunkType := r.buf[0]
+		if !r.readHeader {
+			if chunkType != chunkTypeStreamIdentifier {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.readHeader = true
+		}
+		chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+		if chunkLen > len(r.buf) {
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+
+		// The chunk types are specified at
+		// https://github.com/google/snappy/blob/master/framing_format.txt
+		switch chunkType {
+		case chunkTypeCompressedData:
+			// Section 4.2. Compressed data (chunk type 0x00).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:chunkLen]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			buf = buf[checksumSize:]
+
+			n, err := DecodedLen(buf)
+			if err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if _, err := Decode(r.decoded, buf); err != nil {
+				r.err = err
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeUncompressedData:
+			// Section 4.3. Uncompressed data (chunk type 0x01).
+			if chunkLen < checksumSize {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			buf := r.buf[:checksumSize]
+			if !r.readFull(buf, false) {
+				return 0, r.err
+			}
+			checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+			// Read directly into r.decoded instead of via r.buf.
+			n := chunkLen - checksumSize
+			if n > len(r.decoded) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.decoded[:n], false) {
+				return 0, r.err
+			}
+			if crc(r.decoded[:n]) != checksum {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			r.i, r.j = 0, n
+			continue
+
+		case chunkTypeStreamIdentifier:
+			// Section 4.1. Stream identifier (chunk type 0xff).
+			if chunkLen != len(magicBody) {
+				r.err = ErrCorrupt
+				return 0, r.err
+			}
+			if !r.readFull(r.buf[:len(magicBody)], false) {
+				return 0, r.err
+			}
+			for i := 0; i < len(magicBody); i++ {
+				if r.buf[i] != magicBody[i] {
+					r.err = ErrCorrupt
+					return 0, r.err
+				}
+			}
+			continue
+		}
+
+		if chunkType <= 0x7f {
+			// Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+			r.err = ErrUnsupported
+			return 0, r.err
+		}
+		// Section 4.4 Padding (chunk type 0xfe).
+		// Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+		if !r.readFull(r.buf[:chunkLen], false) {
+			return 0, r.err
+		}
+	}
+}
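+
+// Illustrative usage (not part of the upstream file), assuming `block` holds
+// a Snappy block and `framed` a stream in the framing format:
+//
+//	dst, err := Decode(nil, block)          // block format; nil dst is allowed
+//	r := NewReader(bytes.NewReader(framed)) // framing format, via io.Reader
+//	n, err := io.Copy(ioutil.Discard, r)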
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100644
index 0000000..fcd192b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 0000000..e6179f6
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+//	- AX	scratch
+//	- BX	scratch
+//	- CX	length or x
+//	- DX	offset
+//	- SI	&src[s]
+//	- DI	&dst[d]
+//	+ R8	dst_base
+//	+ R9	dst_len
+//	+ R10	dst_base + dst_len
+//	+ R11	src_base
+//	+ R12	src_len
+//	+ R13	src_base + src_len
+//	- R14	used by doCopy
+//	- R15	used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8,  and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+	// Initialize SI, DI and R8-R13.
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, DI
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, SI
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+loop:
+	// for s < len(src)
+	CMPQ SI, R13
+	JEQ  end
+
+	// CX = uint32(src[s])
+	//
+	// switch src[s] & 0x03
+	MOVBLZX (SI), CX
+	MOVL    CX, BX
+	ANDL    $3, BX
+	CMPL    BX, $1
+	JAE     tagCopy
+
+	// ----------------------------------------
+	// The code below handles literal tags.
+
+	// case tagLiteral:
+	// x := uint32(src[s] >> 2)
+	// switch
+	SHRL $2, CX
+	CMPL CX, $60
+	JAE  tagLit60Plus
+
+	// case x < 60:
+	// s++
+	INCQ SI
+
+doLit:
+	// This is the end of the inner "switch", when we have a literal tag.
+	//
+	// We assume that CX == x and x fits in a uint32, where x is the variable
+	// used in the pure Go decode_other.go code.
+
+	// length = int(x) + 1
+	//
+	// Unlike the pure Go code, we don't need to check if length <= 0 because
+	// CX can hold 64 bits, so the increment cannot overflow.
+	INCQ CX
+
+	// Prepare to check if copying length bytes will run past the end of dst or
+	// src.
+	//
+	// AX = len(dst) - d
+	// BX = len(src) - s
+	MOVQ R10, AX
+	SUBQ DI, AX
+	MOVQ R13, BX
+	SUBQ SI, BX
+
+	// !!! Try a faster technique for short (16 or fewer bytes) copies.
+	//
+	// if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+	//   goto callMemmove // Fall back on calling runtime·memmove.
+	// }
+	//
+	// The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+	// against 21 instead of 16, because it cannot assume that all of its input
+	// is contiguous in memory and so it needs to leave enough source bytes to
+	// read the next tag without refilling buffers, but Go's Decode assumes
+	// contiguousness (the src argument is a []byte).
+	CMPQ CX, $16
+	JGT  callMemmove
+	CMPQ AX, $16
+	JLT  callMemmove
+	CMPQ BX, $16
+	JLT  callMemmove
+
+	// !!! Implement the copy from src to dst as a 16-byte load and store.
+	// (Decode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only length bytes, but that's
+	// OK. If the input is a valid Snappy encoding then subsequent iterations
+	// will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+	// non-nil error), so the overrun will be ignored.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(SI), X0
+	MOVOU X0, 0(DI)
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+callMemmove:
+	// if length > len(dst)-d || length > len(src)-s { etc }
+	CMPQ CX, AX
+	JGT  errCorrupt
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// copy(dst[d:], src[s:s+length])
+	//
+	// This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+	// DI, SI and CX as arguments. Coincidentally, we also need to spill those
+	// three registers to the stack, to save local variables across the CALL.
+	MOVQ DI, 0(SP)
+	MOVQ SI, 8(SP)
+	MOVQ CX, 16(SP)
+	MOVQ DI, 24(SP)
+	MOVQ SI, 32(SP)
+	MOVQ CX, 40(SP)
+	CALL runtime·memmove(SB)
+
+	// Restore local variables: unspill registers from the stack and
+	// re-calculate R8-R13.
+	MOVQ 24(SP), DI
+	MOVQ 32(SP), SI
+	MOVQ 40(SP), CX
+	MOVQ dst_base+0(FP), R8
+	MOVQ dst_len+8(FP), R9
+	MOVQ R8, R10
+	ADDQ R9, R10
+	MOVQ src_base+24(FP), R11
+	MOVQ src_len+32(FP), R12
+	MOVQ R11, R13
+	ADDQ R12, R13
+
+	// d += length
+	// s += length
+	ADDQ CX, DI
+	ADDQ CX, SI
+	JMP  loop
+
+tagLit60Plus:
+	// !!! This fragment does the
+	//
+	// s += x - 58; if uint(s) > uint(len(src)) { etc }
+	//
+	// checks. In the asm version, we code it once instead of once per switch case.
+	ADDQ CX, SI
+	SUBQ $58, SI
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// case x == 60:
+	CMPL CX, $61
+	JEQ  tagLit61
+	JA   tagLit62Plus
+
+	// x = uint32(src[s-1])
+	MOVBLZX -1(SI), CX
+	JMP     doLit
+
+tagLit61:
+	// case x == 61:
+	// x = uint32(src[s-2]) | uint32(src[s-1])<<8
+	MOVWLZX -2(SI), CX
+	JMP     doLit
+
+tagLit62Plus:
+	CMPL CX, $62
+	JA   tagLit63
+
+	// case x == 62:
+	// x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+	MOVWLZX -3(SI), CX
+	MOVBLZX -1(SI), BX
+	SHLL    $16, BX
+	ORL     BX, CX
+	JMP     doLit
+
+tagLit63:
+	// case x == 63:
+	// x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+	MOVL -4(SI), CX
+	JMP  doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+	// case tagCopy4:
+	// s += 5
+	ADDQ $5, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-5])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+	MOVLQZX -4(SI), DX
+	JMP     doCopy
+
+tagCopy2:
+	// case tagCopy2:
+	// s += 3
+	ADDQ $3, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// length = 1 + int(src[s-3])>>2
+	SHRQ $2, CX
+	INCQ CX
+
+	// offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+	MOVWQZX -2(SI), DX
+	JMP     doCopy
+
+tagCopy:
+	// We have a copy tag. We assume that:
+	//	- BX == src[s] & 0x03
+	//	- CX == src[s]
+	CMPQ BX, $2
+	JEQ  tagCopy2
+	JA   tagCopy4
+
+	// case tagCopy1:
+	// s += 2
+	ADDQ $2, SI
+
+	// if uint(s) > uint(len(src)) { etc }
+	MOVQ SI, BX
+	SUBQ R11, BX
+	CMPQ BX, R12
+	JA   errCorrupt
+
+	// offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+	MOVQ    CX, DX
+	ANDQ    $0xe0, DX
+	SHLQ    $3, DX
+	MOVBQZX -1(SI), BX
+	ORQ     BX, DX
+
+	// length = 4 + int(src[s-2])>>2&0x7
+	SHRQ $2, CX
+	ANDQ $7, CX
+	ADDQ $4, CX
+
+doCopy:
+	// This is the end of the outer "switch", when we have a copy tag.
+	//
+	// We assume that:
+	//	- CX == length && CX > 0
+	//	- DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE  errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT  errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT  errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	//	- R14 = len(dst)-d
+	//	- R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT  slowForwardCopy
+	CMPQ DX, $8
+	JLT  slowForwardCopy
+	CMPQ R14, $16
+	JLT  slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP  loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes.  However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset> and <d> patterns marked as
+	// intervals:
+	//
+	//    abxxxxxxxxxxxx
+	//    [------]           d-offset
+	//      [------]         d
+	//
+	// a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+	// once, after which we can move <d> two bytes without moving <d-offset>:
+	//
+	//    ababxxxxxxxxxx
+	//    [------]           d-offset
+	//        [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT  verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+	//
+	// for offset < 8 {
+	//   copy 8 bytes from dst[d-offset:] to dst[d:]
+	//   length -= offset
+	//   d      += offset
+	//   offset += offset
+	//   // The two previous lines together mean that d-offset, and therefore
+	//   // R15, is unchanged.
+	// }
+	CMPQ DX, $8
+	JGE  fixUpSlowForwardCopy
+	MOVQ (R15), BX
+	MOVQ BX, (DI)
+	SUBQ DX, CX
+	ADDQ DX, DI
+	ADDQ DX, DX
+	JMP  makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+	// !!! Add length (which might be negative now) to d (implied by DI being
+	// &dst[d]) so that d ends up at the right place when we jump back to the
+	// top of the loop. Before we do that, though, we save DI to AX so that, if
+	// length is positive, copying the remaining length bytes will write to the
+	// right place.
+	MOVQ DI, AX
+	ADDQ CX, DI
+
+finishSlowForwardCopy:
+	// !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+	// length means that we overrun, but as above, that will be fixed up by
+	// subsequent iterations of the outermost loop.
+	CMPQ CX, $0
+	JLE  loop
+	MOVQ (R15), BX
+	MOVQ BX, (AX)
+	ADDQ $8, R15
+	ADDQ $8, AX
+	SUBQ $8, CX
+	JMP  finishSlowForwardCopy
+
+verySlowForwardCopy:
+	// verySlowForwardCopy is a simple implementation of forward copy. In C
+	// parlance, this is a do/while loop instead of a while loop, since we know
+	// that length > 0. In Go syntax:
+	//
+	// for {
+	//   dst[d] = dst[d - offset]
+	//   d++
+	//   length--
+	//   if length == 0 {
+	//     break
+	//   }
+	// }
+	MOVB (R15), BX
+	MOVB BX, (DI)
+	INCQ R15
+	INCQ DI
+	DECQ CX
+	JNZ  verySlowForwardCopy
+	JMP  loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+	// This is the end of the "for s < len(src)".
+	//
+	// if d != len(dst) { etc }
+	CMPQ DI, R10
+	JNE  errCorrupt
+
+	// return 0
+	MOVQ $0, ret+48(FP)
+	RET
+
+errCorrupt:
+	// return decodeErrCodeCorrupt
+	MOVQ $1, ret+48(FP)
+	RET
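The slow forward copy above leans on the same property as the pure Go fallback: copying forwards, one byte at a time, replicates a short repeating pattern rather than corrupting it. A standalone sketch of that behavior (the buffer contents are made up for illustration):

```go
package main

import "fmt"

func main() {
	// A copy with offset 1 and length 5: each byte read was written by the
	// previous iteration, so one seed byte fans out across the destination.
	dst := []byte{'a', 0, 0, 0, 0, 0}
	d, offset, length := 1, 1, 5
	for end := d + length; d != end; d++ {
		dst[d] = dst[d-offset]
	}
	fmt.Println(string(dst)) // "aaaaaa"
}
```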
diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go
new file mode 100644
index 0000000..8c9f204
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_other.go
@@ -0,0 +1,101 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+// decode writes the decoding of src to dst. It assumes that the varint-encoded
+// length of the decompressed bytes has already been read, and that len(dst)
+// equals that length.
+//
+// It returns 0 on success or a decodeErrCodeXxx error code on failure.
+func decode(dst, src []byte) int {
+	var d, s, offset, length int
+	for s < len(src) {
+		switch src[s] & 0x03 {
+		case tagLiteral:
+			x := uint32(src[s] >> 2)
+			switch {
+			case x < 60:
+				s++
+			case x == 60:
+				s += 2
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-1])
+			case x == 61:
+				s += 3
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-2]) | uint32(src[s-1])<<8
+			case x == 62:
+				s += 4
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+			case x == 63:
+				s += 5
+				if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+					return decodeErrCodeCorrupt
+				}
+				x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+			}
+			length = int(x) + 1
+			if length <= 0 {
+				return decodeErrCodeUnsupportedLiteralLength
+			}
+			if length > len(dst)-d || length > len(src)-s {
+				return decodeErrCodeCorrupt
+			}
+			copy(dst[d:], src[s:s+length])
+			d += length
+			s += length
+			continue
+
+		case tagCopy1:
+			s += 2
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 4 + int(src[s-2])>>2&0x7
+			offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+
+		case tagCopy2:
+			s += 3
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-3])>>2
+			offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+
+		case tagCopy4:
+			s += 5
+			if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line.
+				return decodeErrCodeCorrupt
+			}
+			length = 1 + int(src[s-5])>>2
+			offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+		}
+
+		if offset <= 0 || d < offset || length > len(dst)-d {
+			return decodeErrCodeCorrupt
+		}
+		// Copy from an earlier sub-slice of dst to a later sub-slice. Unlike
+		// the built-in copy function, this byte-by-byte copy always runs
+		// forwards, even if the slices overlap. Conceptually, this is:
+		//
+		// d += forwardCopy(dst[d:d+length], dst[d-offset:])
+		for end := d + length; d != end; d++ {
+			dst[d] = dst[d-offset]
+		}
+	}
+	if d != len(dst) {
+		return decodeErrCodeCorrupt
+	}
+	return 0
+}
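To see the tag handling above on concrete bytes, here is a hand-assembled block run through the exported Decode. The byte values are traced against the switch cases above; they are illustrative, not test vectors shipped with the package:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	// 0x06: varint-encoded decoded length (6 bytes).
	// 0x00: literal tag, (len-1)<<2 = 0, so a 1-byte literal: 'a'.
	// 0x05 0x01: tagCopy1, length = 4 + ((0x05>>2)&7) = 5, offset = 1.
	block := []byte{0x06, 0x00, 'a', 0x05, 0x01}
	out, err := snappy.Decode(nil, block)
	fmt.Printf("%q %v\n", out, err) // "aaaaaa" <nil>
}
```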
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000..8d393e9
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+	"encoding/binary"
+	"errors"
+	"io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+	if n := MaxEncodedLen(len(src)); n < 0 {
+		panic(ErrTooLarge)
+	} else if len(dst) < n {
+		dst = make([]byte, n)
+	}
+
+	// The block starts with the varint-encoded length of the decompressed bytes.
+	d := binary.PutUvarint(dst, uint64(len(src)))
+
+	for len(src) > 0 {
+		p := src
+		src = nil
+		if len(p) > maxBlockSize {
+			p, src = p[:maxBlockSize], p[maxBlockSize:]
+		}
+		if len(p) < minNonLiteralBlockSize {
+			d += emitLiteral(dst[d:], p)
+		} else {
+			d += encodeBlock(dst[d:], p)
+		}
+	}
+	return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+	n := uint64(srcLen)
+	if n > 0xffffffff {
+		return -1
+	}
+	// Compressed data can be defined as:
+	//    compressed := item* literal*
+	//    item       := literal* copy
+	//
+	// The trailing literal sequence has a space blowup of at most 62/60
+	// since a literal of length 60 needs one tag byte + one extra byte
+	// for length information.
+	//
+	// Item blowup is trickier to measure. Suppose the "copy" op copies
+	// 4 bytes of data. Because of a special check in the encoding code,
+	// we produce a 4-byte copy only if the offset is < 65536. Therefore
+	// the copy op takes 3 bytes to encode, and this type of item leads
+	// to at most the 62/60 blowup for representing literals.
+	//
+	// Suppose the "copy" op copies 5 bytes of data. If the offset is big
+	// enough, it will take 5 bytes to encode the copy op. Therefore the
+	// worst case here is a one-byte literal followed by a five-byte copy.
+	// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+	//
+	// This last factor dominates the blowup, so the final estimate is:
+	n = 32 + n + n/6
+	if n > 0xffffffff {
+		return -1
+	}
+	return int(n)
+}
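The worst-case arithmetic above can be sanity-checked against the framing constants: for the 64 KiB maximum block size, 32 + 65536 + 65536/6 = 76490, which is exactly the hard-coded maxEncodedLenOfMaxBlockSize in snappy.go further down. For example:

```go
package main

import (
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	fmt.Println(snappy.MaxEncodedLen(65536)) // 76490 = 32 + 65536 + 10922
}
```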
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for a few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+	return &Writer{
+		w:    w,
+		ibuf: make([]byte, 0, maxBlockSize),
+		obuf: make([]byte, obufLen),
+	}
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+	w   io.Writer
+	err error
+
+	// ibuf is a buffer for the incoming (uncompressed) bytes.
+	//
+	// Its use is optional. For backwards compatibility, Writers created by the
+	// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+	// therefore do not need to be Flush'ed or Close'd.
+	ibuf []byte
+
+	// obuf is a buffer for the outgoing (compressed) bytes.
+	obuf []byte
+
+	// wroteStreamHeader is whether we have written the stream header.
+	wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+	w.w = writer
+	w.err = nil
+	if w.ibuf != nil {
+		w.ibuf = w.ibuf[:0]
+	}
+	w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+	if w.ibuf == nil {
+		// Do not buffer incoming bytes. This does not perform or compress well
+		// if the caller of Writer.Write writes many small slices. This
+		// behavior is therefore deprecated, but still supported for backwards
+		// compatibility with code that doesn't explicitly Flush or Close.
+		return w.write(p)
+	}
+
+	// The remainder of this method is based on bufio.Writer.Write from the
+	// standard library.
+
+	for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+		var n int
+		if len(w.ibuf) == 0 {
+			// Large write, empty buffer.
+			// Write directly from p to avoid copy.
+			n, _ = w.write(p)
+		} else {
+			n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+			w.ibuf = w.ibuf[:len(w.ibuf)+n]
+			w.Flush()
+		}
+		nRet += n
+		p = p[n:]
+	}
+	if w.err != nil {
+		return nRet, w.err
+	}
+	n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+	w.ibuf = w.ibuf[:len(w.ibuf)+n]
+	nRet += n
+	return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+	for len(p) > 0 {
+		obufStart := len(magicChunk)
+		if !w.wroteStreamHeader {
+			w.wroteStreamHeader = true
+			copy(w.obuf, magicChunk)
+			obufStart = 0
+		}
+
+		var uncompressed []byte
+		if len(p) > maxBlockSize {
+			uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+		} else {
+			uncompressed, p = p, nil
+		}
+		checksum := crc(uncompressed)
+
+		// Compress the buffer, discarding the result if the improvement
+		// isn't at least 12.5%.
+		compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+		chunkType := uint8(chunkTypeCompressedData)
+		chunkLen := 4 + len(compressed)
+		obufEnd := obufHeaderLen + len(compressed)
+		if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+			chunkType = chunkTypeUncompressedData
+			chunkLen = 4 + len(uncompressed)
+			obufEnd = obufHeaderLen
+		}
+
+		// Fill in the per-chunk header that comes before the body.
+		w.obuf[len(magicChunk)+0] = chunkType
+		w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+		w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+		w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+		w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+		w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+		w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+		w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+		if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+			w.err = err
+			return nRet, err
+		}
+		if chunkType == chunkTypeUncompressedData {
+			if _, err := w.w.Write(uncompressed); err != nil {
+				w.err = err
+				return nRet, err
+			}
+		}
+		nRet += len(uncompressed)
+	}
+	return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+	if w.err != nil {
+		return w.err
+	}
+	if len(w.ibuf) == 0 {
+		return nil
+	}
+	w.write(w.ibuf)
+	w.ibuf = w.ibuf[:0]
+	return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+	w.Flush()
+	ret := w.err
+	if w.err == nil {
+		w.err = errClosed
+	}
+	return ret
+}
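Close poisons the Writer with errClosed, but Reset clears both the error state and the stream-header flag, so a single Writer can be reused across streams as its doc comment promises. A sketch:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/golang/snappy"
)

func main() {
	var first, second bytes.Buffer

	w := snappy.NewBufferedWriter(&first)
	w.Write([]byte("stream one"))
	w.Close()

	w.Reset(&second) // clears err (including errClosed) and wroteStreamHeader
	w.Write([]byte("stream two"))
	w.Close()

	fmt.Println(first.Len(), second.Len()) // two independent framed streams
}
```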
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100644
index 0000000..150d91b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 0000000..adfd979
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+//	4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	len(lit)
+//	- BX	n
+//	- DX	return value
+//	- DI	&dst[i]
+//	- R10	&lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ lit_base+24(FP), R10
+	MOVQ lit_len+32(FP), AX
+	MOVQ AX, DX
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  oneByte
+	CMPL BX, $256
+	JLT  twoBytes
+
+threeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	ADDQ $3, DX
+	JMP  memmove
+
+twoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	ADDQ $2, DX
+	JMP  memmove
+
+oneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+	ADDQ $1, DX
+
+memmove:
+	MOVQ DX, ret+48(FP)
+
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	CALL runtime·memmove(SB)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- AX	length
+//	- SI	&dst[0]
+//	- DI	&dst[i]
+//	- R11	offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+	MOVQ dst_base+0(FP), DI
+	MOVQ DI, SI
+	MOVQ offset+24(FP), R11
+	MOVQ length+32(FP), AX
+
+loop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  step1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  loop0
+
+step1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  step2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+step2:
+	// if length >= 12 || offset >= 2048 { goto step3 }
+	CMPL AX, $12
+	JGE  step3
+	CMPL R11, $2048
+	JGE  step3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+step3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+	// Return the number of bytes written.
+	SUBQ SI, DI
+	MOVQ DI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+//	- DX	&src[0]
+//	- SI	&src[j]
+//	- R13	&src[len(src) - 8]
+//	- R14	&src[len(src)]
+//	- R15	&src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+	MOVQ src_base+0(FP), DX
+	MOVQ src_len+8(FP), R14
+	MOVQ i+24(FP), R15
+	MOVQ j+32(FP), SI
+	ADDQ DX, R14
+	ADDQ DX, R15
+	ADDQ DX, SI
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+cmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   cmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  bsf
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  cmp8
+
+bsf:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+cmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  extendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  extendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  cmp1
+
+extendMatchEnd:
+	// Convert from &src[ret] to ret.
+	SUBQ DX, SI
+	MOVQ SI, ret+40(FP)
+	RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+//	- AX	.	.
+//	- BX	.	.
+//	- CX	56	shift (note that amd64 shifts by a non-immediate count must use CX).
+//	- DX	64	&src[0], tableSize
+//	- SI	72	&src[s]
+//	- DI	80	&dst[d]
+//	- R9	88	sLimit
+//	- R10	.	&src[nextEmit]
+//	- R11	96	prevHash, currHash, nextHash, offset
+//	- R12	104	&src[base], skip
+//	- R13	.	&src[nextS], &src[len(src) - 8]
+//	- R14	.	len(src), bytesBetweenHashLookups, &src[len(src)], x
+//	- R15	112	candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+	MOVQ dst_base+0(FP), DI
+	MOVQ src_base+24(FP), SI
+	MOVQ src_len+32(FP), R14
+
+	// shift, tableSize := uint32(32-8), 1<<8
+	MOVQ $24, CX
+	MOVQ $256, DX
+
+calcShift:
+	// for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+	//	shift--
+	// }
+	CMPQ DX, $16384
+	JGE  varTable
+	CMPQ DX, R14
+	JGE  varTable
+	SUBQ $1, CX
+	SHLQ $1, DX
+	JMP  calcShift
+
+varTable:
+	// var table [maxTableSize]uint16
+	//
+	// In the asm code, unlike the Go code, we can zero-initialize only the
+	// first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+	// writes 16 bytes, so we can do only tableSize/8 writes instead of the
+	// 2048 writes that would zero-initialize all of table's 32768 bytes.
+	SHRQ $3, DX
+	LEAQ table-32768(SP), BX
+	PXOR X0, X0
+
+memclr:
+	MOVOU X0, 0(BX)
+	ADDQ  $16, BX
+	SUBQ  $1, DX
+	JNZ   memclr
+
+	// !!! DX = &src[0]
+	MOVQ SI, DX
+
+	// sLimit := len(src) - inputMargin
+	MOVQ R14, R9
+	SUBQ $15, R9
+
+	// !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+	// change for the rest of the function.
+	MOVQ CX, 56(SP)
+	MOVQ DX, 64(SP)
+	MOVQ R9, 88(SP)
+
+	// nextEmit := 0
+	MOVQ DX, R10
+
+	// s := 1
+	ADDQ $1, SI
+
+	// nextHash := hash(load32(src, s), shift)
+	MOVL  0(SI), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+outer:
+	// for { etc }
+
+	// skip := 32
+	MOVQ $32, R12
+
+	// nextS := s
+	MOVQ SI, R13
+
+	// candidate := 0
+	MOVQ $0, R15
+
+inner0:
+	// for { etc }
+
+	// s := nextS
+	MOVQ R13, SI
+
+	// bytesBetweenHashLookups := skip >> 5
+	MOVQ R12, R14
+	SHRQ $5, R14
+
+	// nextS = s + bytesBetweenHashLookups
+	ADDQ R14, R13
+
+	// skip += bytesBetweenHashLookups
+	ADDQ R14, R12
+
+	// if nextS > sLimit { goto emitRemainder }
+	MOVQ R13, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JA   emitRemainder
+
+	// candidate = int(table[nextHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[nextHash] = uint16(s)
+	MOVQ SI, AX
+	SUBQ DX, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// nextHash = hash(load32(src, nextS), shift)
+	MOVL  0(R13), R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// if load32(src, s) != load32(src, candidate) { continue } break
+	MOVL 0(SI), AX
+	MOVL (DX)(R15*1), BX
+	CMPL AX, BX
+	JNE  inner0
+
+fourByteMatch:
+	// As per the encode_other.go code:
+	//
+	// A 4-byte match has been found. We'll later see etc.
+
+	// !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+	// on inputMargin in encode.go.
+	MOVQ SI, AX
+	SUBQ R10, AX
+	CMPQ AX, $16
+	JLE  emitLiteralFastPath
+
+	// ----------------------------------------
+	// Begin inline of the emitLiteral call.
+	//
+	// d += emitLiteral(dst[d:], src[nextEmit:s])
+
+	MOVL AX, BX
+	SUBL $1, BX
+
+	CMPL BX, $60
+	JLT  inlineEmitLiteralOneByte
+	CMPL BX, $256
+	JLT  inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+	MOVB $0xf4, 0(DI)
+	MOVW BX, 1(DI)
+	ADDQ $3, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+	MOVB $0xf0, 0(DI)
+	MOVB BX, 1(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+	SHLB $2, BX
+	MOVB BX, 0(DI)
+	ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+	// Spill local variables (registers) onto the stack; call; unspill.
+	//
+	// copy(dst[i:], lit)
+	//
+	// This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+	// DI, R10 and AX as arguments.
+	MOVQ DI, 0(SP)
+	MOVQ R10, 8(SP)
+	MOVQ AX, 16(SP)
+	ADDQ AX, DI              // Finish the "d +=" part of "d += emitLiteral(etc)".
+	MOVQ SI, 72(SP)
+	MOVQ DI, 80(SP)
+	MOVQ R15, 112(SP)
+	CALL runtime·memmove(SB)
+	MOVQ 56(SP), CX
+	MOVQ 64(SP), DX
+	MOVQ 72(SP), SI
+	MOVQ 80(SP), DI
+	MOVQ 88(SP), R9
+	MOVQ 112(SP), R15
+	JMP  inner1
+
+inlineEmitLiteralEnd:
+	// End inline of the emitLiteral call.
+	// ----------------------------------------
+
+emitLiteralFastPath:
+	// !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+	MOVB AX, BX
+	SUBB $1, BX
+	SHLB $2, BX
+	MOVB BX, (DI)
+	ADDQ $1, DI
+
+	// !!! Implement the copy from lit to dst as a 16-byte load and store.
+	// (Encode's documentation says that dst and src must not overlap.)
+	//
+	// This always copies 16 bytes, instead of only len(lit) bytes, but that's
+	// OK. Subsequent iterations will fix up the overrun.
+	//
+	// Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+	// 16-byte loads and stores. This technique probably wouldn't be as
+	// effective on architectures that are fussier about alignment.
+	MOVOU 0(R10), X0
+	MOVOU X0, 0(DI)
+	ADDQ  AX, DI
+
+inner1:
+	// for { etc }
+
+	// base := s
+	MOVQ SI, R12
+
+	// !!! offset := base - candidate
+	MOVQ R12, R11
+	SUBQ R15, R11
+	SUBQ DX, R11
+
+	// ----------------------------------------
+	// Begin inline of the extendMatch call.
+	//
+	// s = extendMatch(src, candidate+4, s+4)
+
+	// !!! R14 = &src[len(src)]
+	MOVQ src_len+32(FP), R14
+	ADDQ DX, R14
+
+	// !!! R13 = &src[len(src) - 8]
+	MOVQ R14, R13
+	SUBQ $8, R13
+
+	// !!! R15 = &src[candidate + 4]
+	ADDQ $4, R15
+	ADDQ DX, R15
+
+	// !!! s += 4
+	ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+	// As long as we are 8 or more bytes before the end of src, we can load and
+	// compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+	CMPQ SI, R13
+	JA   inlineExtendMatchCmp1
+	MOVQ (R15), AX
+	MOVQ (SI), BX
+	CMPQ AX, BX
+	JNE  inlineExtendMatchBSF
+	ADDQ $8, R15
+	ADDQ $8, SI
+	JMP  inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+	// If those 8 bytes were not equal, XOR the two 8 byte values, and return
+	// the index of the first byte that differs. The BSF instruction finds the
+	// least significant 1 bit, the amd64 architecture is little-endian, and
+	// the shift by 3 converts a bit index to a byte index.
+	XORQ AX, BX
+	BSFQ BX, BX
+	SHRQ $3, BX
+	ADDQ BX, SI
+	JMP  inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+	// In src's tail, compare 1 byte at a time.
+	CMPQ SI, R14
+	JAE  inlineExtendMatchEnd
+	MOVB (R15), AX
+	MOVB (SI), BX
+	CMPB AX, BX
+	JNE  inlineExtendMatchEnd
+	ADDQ $1, R15
+	ADDQ $1, SI
+	JMP  inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+	// End inline of the extendMatch call.
+	// ----------------------------------------
+
+	// ----------------------------------------
+	// Begin inline of the emitCopy call.
+	//
+	// d += emitCopy(dst[d:], base-candidate, s-base)
+
+	// !!! length := s - base
+	MOVQ SI, AX
+	SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+	// for length >= 68 { etc }
+	CMPL AX, $68
+	JLT  inlineEmitCopyStep1
+
+	// Emit a length 64 copy, encoded as 3 bytes.
+	MOVB $0xfe, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $64, AX
+	JMP  inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+	// if length > 64 { etc }
+	CMPL AX, $64
+	JLE  inlineEmitCopyStep2
+
+	// Emit a length 60 copy, encoded as 3 bytes.
+	MOVB $0xee, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+	SUBL $60, AX
+
+inlineEmitCopyStep2:
+	// if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+	CMPL AX, $12
+	JGE  inlineEmitCopyStep3
+	CMPL R11, $2048
+	JGE  inlineEmitCopyStep3
+
+	// Emit the remaining copy, encoded as 2 bytes.
+	MOVB R11, 1(DI)
+	SHRL $8, R11
+	SHLB $5, R11
+	SUBB $4, AX
+	SHLB $2, AX
+	ORB  AX, R11
+	ORB  $1, R11
+	MOVB R11, 0(DI)
+	ADDQ $2, DI
+	JMP  inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+	// Emit the remaining copy, encoded as 3 bytes.
+	SUBL $1, AX
+	SHLB $2, AX
+	ORB  $2, AX
+	MOVB AX, 0(DI)
+	MOVW R11, 1(DI)
+	ADDQ $3, DI
+
+inlineEmitCopyEnd:
+	// End inline of the emitCopy call.
+	// ----------------------------------------
+
+	// nextEmit = s
+	MOVQ SI, R10
+
+	// if s >= sLimit { goto emitRemainder }
+	MOVQ SI, AX
+	SUBQ DX, AX
+	CMPQ AX, R9
+	JAE  emitRemainder
+
+	// As per the encode_other.go code:
+	//
+	// We could immediately etc.
+
+	// x := load64(src, s-1)
+	MOVQ -1(SI), R14
+
+	// prevHash := hash(uint32(x>>0), shift)
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// table[prevHash] = uint16(s-1)
+	MOVQ SI, AX
+	SUBQ DX, AX
+	SUBQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// currHash := hash(uint32(x>>8), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// candidate = int(table[currHash])
+	// XXX: MOVWQZX table-32768(SP)(R11*2), R15
+	// XXX: 4e 0f b7 7c 5c 78       movzwq 0x78(%rsp,%r11,2),%r15
+	BYTE $0x4e
+	BYTE $0x0f
+	BYTE $0xb7
+	BYTE $0x7c
+	BYTE $0x5c
+	BYTE $0x78
+
+	// table[currHash] = uint16(s)
+	ADDQ $1, AX
+
+	// XXX: MOVW AX, table-32768(SP)(R11*2)
+	// XXX: 66 42 89 44 5c 78       mov    %ax,0x78(%rsp,%r11,2)
+	BYTE $0x66
+	BYTE $0x42
+	BYTE $0x89
+	BYTE $0x44
+	BYTE $0x5c
+	BYTE $0x78
+
+	// if uint32(x>>8) == load32(src, candidate) { continue }
+	MOVL (DX)(R15*1), BX
+	CMPL R14, BX
+	JEQ  inner1
+
+	// nextHash = hash(uint32(x>>16), shift)
+	SHRQ  $8, R14
+	MOVL  R14, R11
+	IMULL $0x1e35a7bd, R11
+	SHRL  CX, R11
+
+	// s++
+	ADDQ $1, SI
+
+	// break out of the inner1 for loop, i.e. continue the outer loop.
+	JMP outer
+
+emitRemainder:
+	// if nextEmit < len(src) { etc }
+	MOVQ src_len+32(FP), AX
+	ADDQ DX, AX
+	CMPQ R10, AX
+	JEQ  encodeBlockEnd
+
+	// d += emitLiteral(dst[d:], src[nextEmit:])
+	//
+	// Push args.
+	MOVQ DI, 0(SP)
+	MOVQ $0, 8(SP)   // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ $0, 16(SP)  // Unnecessary, as the callee ignores it, but conservative.
+	MOVQ R10, 24(SP)
+	SUBQ R10, AX
+	MOVQ AX, 32(SP)
+	MOVQ AX, 40(SP)  // Unnecessary, as the callee ignores it, but conservative.
+
+	// Spill local variables (registers) onto the stack; call; unspill.
+	MOVQ DI, 80(SP)
+	CALL ·emitLiteral(SB)
+	MOVQ 80(SP), DI
+
+	// Finish the "d +=" part of "d += emitLiteral(etc)".
+	ADDQ 48(SP), DI
+
+encodeBlockEnd:
+	MOVQ dst_base+0(FP), AX
+	SUBQ AX, DI
+	MOVQ DI, d+48(FP)
+	RET
diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go
new file mode 100644
index 0000000..dbcae90
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_other.go
@@ -0,0 +1,238 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !amd64 appengine !gc noasm
+
+package snappy
+
+func load32(b []byte, i int) uint32 {
+	b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+}
+
+func load64(b []byte, i int) uint64 {
+	b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line.
+	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
+		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+}
+
+// emitLiteral writes a literal chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
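The 68/64/60 thresholds above are easiest to see by counting output bytes. The helper below mirrors emitCopy's branch structure but only tallies encoded length; the name emitCopyLen is made up for this hand-traced sketch, it is not the vendored function:

```go
package main

import "fmt"

// emitCopyLen counts the bytes emitCopy would write for a given op,
// following the same branches as the vendored implementation.
func emitCopyLen(offset, length int) int {
	i := 0
	for length >= 68 {
		i += 3 // length 64 tagCopy2
		length -= 64
	}
	if length > 64 {
		i += 3 // length 60 tagCopy2
		length -= 60
	}
	if length >= 12 || offset >= 2048 {
		return i + 3 // remaining copy as tagCopy2
	}
	return i + 2 // remaining copy as tagCopy1
}

func main() {
	// A length-67 copy splits into a length-60 tagCopy2 plus a length-7
	// tagCopy1: 3+2 = 5 bytes, one byte shorter than two tagCopy2 ops.
	fmt.Println(emitCopyLen(1, 67)) // 5
}
```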
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
+
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se. However, it matches the C++ algorithm,
+	// and in the asm versions of this code, we can get away with zeroing only
+	// the first tableSize elements.
+	var table [maxTableSize]uint16
+
+	// sLimit is when to stop looking for offset/length copies. The inputMargin
+	// lets us use a fast path for emitLiteral in the main loop, while we are
+	// looking for copies.
+	sLimit := len(src) - inputMargin
+
+	// nextEmit is where in src the next emitLiteral should start from.
+	nextEmit := 0
+
+	// The encoded form must start with a literal, as there are no previous
+	// bytes to copy, so we start looking for hash matches at s == 1.
+	s := 1
+	nextHash := hash(load32(src, s), shift)
+
+	for {
+		// Copied from the C++ snappy implementation:
+		//
+		// Heuristic match skipping: If 32 bytes are scanned with no matches
+		// found, start looking only at every other byte. If 32 more bytes are
+		// scanned (or skipped), look at every third byte, etc.. When a match
+		// is found, immediately go back to looking at every byte. This is a
+		// small loss (~5% performance, ~0.1% density) for compressible data
+		// due to more bookkeeping, but for non-compressible data (such as
+		// JPEG) it's a huge win since the compressor quickly "realizes" the
+		// data is incompressible and doesn't bother looking for matches
+		// everywhere.
+		//
+		// The "skip" variable keeps track of how many bytes there are since
+		// the last match; dividing it by 32 (ie. right-shifting by five) gives
+		// the number of bytes to move ahead for each iteration.
+		skip := 32
+
+		nextS := s
+		candidate := 0
+		for {
+			s = nextS
+			bytesBetweenHashLookups := skip >> 5
+			nextS = s + bytesBetweenHashLookups
+			skip += bytesBetweenHashLookups
+			if nextS > sLimit {
+				goto emitRemainder
+			}
+			candidate = int(table[nextHash&tableMask])
+			table[nextHash&tableMask] = uint16(s)
+			nextHash = hash(load32(src, nextS), shift)
+			if load32(src, s) == load32(src, candidate) {
+				break
+			}
+		}
+
+		// A 4-byte match has been found. We'll later see if more than 4 bytes
+		// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
+		// them as literal bytes.
+		d += emitLiteral(dst[d:], src[nextEmit:s])
+
+		// Call emitCopy, and then see if another emitCopy could be our next
+		// move. Repeat until we find no match for the input immediately after
+		// what was consumed by the last emitCopy call.
+		//
+		// If we exit this loop normally then we need to call emitLiteral next,
+		// though we don't yet know how big the literal will be. We handle that
+		// by proceeding to the next iteration of the main loop. We also can
+		// exit this loop via goto if we get close to exhausting the input.
+		for {
+			// Invariant: we have a 4-byte match at s, and no need to emit any
+			// literal bytes prior to s.
+			base := s
+
+			// Extend the 4-byte match as long as possible.
+			//
+			// This is an inlined version of:
+			//	s = extendMatch(src, candidate+4, s+4)
+			s += 4
+			for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 {
+			}
+
+			d += emitCopy(dst[d:], base-candidate, s-base)
+			nextEmit = s
+			if s >= sLimit {
+				goto emitRemainder
+			}
+
+			// We could immediately start working at s now, but to improve
+			// compression we first update the hash table at s-1 and at s. If
+			// another emitCopy is not our next move, also calculate nextHash
+			// at s+1. At least on GOARCH=amd64, these three hash calculations
+			// are faster as one load64 call (with some shifts) instead of
+			// three load32 calls.
+			x := load64(src, s-1)
+			prevHash := hash(uint32(x>>0), shift)
+			table[prevHash&tableMask] = uint16(s - 1)
+			currHash := hash(uint32(x>>8), shift)
+			candidate = int(table[currHash&tableMask])
+			table[currHash&tableMask] = uint16(s)
+			if uint32(x>>8) != load32(src, candidate) {
+				nextHash = hash(uint32(x>>16), shift)
+				s++
+				break
+			}
+		}
+	}
+
+emitRemainder:
+	if nextEmit < len(src) {
+		d += emitLiteral(dst[d:], src[nextEmit:])
+	}
+	return d
+}
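The match-skipping heuristic inside encodeBlock accelerates gradually: the per-lookup step is skip>>5, so it starts at 1 and grows by one each time skip accumulates another 32. A standalone trace of that bookkeeping, separate from the package code:

```go
package main

import "fmt"

func main() {
	// Mirrors the inner loop's skip arithmetic for 100 fruitless lookups.
	skip, s := 32, 1
	for i := 0; i < 100; i++ {
		step := skip >> 5
		s += step
		skip += step
	}
	fmt.Println("scanned to s =", s, "current step =", skip>>5)
}
```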
diff --git a/vendor/github.com/golang/snappy/go.mod b/vendor/github.com/golang/snappy/go.mod
new file mode 100644
index 0000000..f6406bb
--- /dev/null
+++ b/vendor/github.com/golang/snappy/go.mod
@@ -0,0 +1 @@
+module github.com/golang/snappy
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000..ece692e
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,98 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the Snappy compression format. It aims for very
+// high speeds and reasonable compression.
+//
+// There are actually two Snappy formats: block and stream. They are related,
+// but different: trying to decompress block-compressed data as a Snappy stream
+// will fail, and vice versa. The block format is the Decode and Encode
+// functions and the stream format is the Reader and Writer types.
+//
+// The block format, the more common case, is used when the complete size (the
+// number of bytes) of the original data is known upfront, at the time
+// compression starts. The stream format, also known as the framing format, is
+// for when that isn't always true.
+//
+// The canonical, C++ implementation is at https://github.com/google/snappy and
+// it only implements the block format.
+package snappy // import "github.com/golang/snappy"
+
+import (
+	"hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+  - If m < 60, the next 1 + m bytes are literal bytes.
+  - Otherwise, let n be the little-endian unsigned integer denoted by the next
+    m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+  - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+    The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+    of the offset. The next byte is bits 0-7 of the offset.
+  - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+    The length is 1 + m. The offset is the little-endian unsigned integer
+    denoted by the next 2 bytes.
+  - For l == 3, this tag is a legacy format that is no longer issued by most
+    encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+    [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+    integer denoted by the next 4 bytes.
+*/
+const (
+	tagLiteral = 0x00
+	tagCopy1   = 0x01
+	tagCopy2   = 0x02
+	tagCopy4   = 0x03
+)
+
+const (
+	checksumSize    = 4
+	chunkHeaderSize = 4
+	magicChunk      = "\xff\x06\x00\x00" + magicBody
+	magicBody       = "sNaPpY"
+
+	// maxBlockSize is the maximum size of the input to encodeBlock. It is not
+	// part of the wire format per se, but some parts of the encoder assume
+	// that an offset fits into a uint16.
+	//
+	// Also, for the framing format (Writer type instead of Encode function),
+	// https://github.com/google/snappy/blob/master/framing_format.txt says
+	// that "the uncompressed data in a chunk must be no longer than 65536
+	// bytes".
+	maxBlockSize = 65536
+
+	// maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+	// hard coded to be a const instead of a variable, so that obufLen can also
+	// be a const. Their equivalence is confirmed by
+	// TestMaxEncodedLenOfMaxBlockSize.
+	maxEncodedLenOfMaxBlockSize = 76490
+
+	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
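The masked checksum above follows section 3 of the framing format: CRC-32C over the data, rotated right by 15 bits, plus a fixed constant, so that data containing embedded CRCs still checksums well. A minimal standalone sketch of the same computation (the `maskedCRC` name is ours, not part of the package):

```go
package main

import (
	"fmt"
	"hash/crc32"
)

// maskedCRC mirrors the unexported crc above: CRC-32C (Castagnoli),
// rotated right by 15 bits, plus the magic constant from the spec.
func maskedCRC(b []byte) uint32 {
	c := crc32.Checksum(b, crc32.MakeTable(crc32.Castagnoli))
	return c>>15 | c<<17 + 0xa282ead8
}

func main() {
	fmt.Printf("%#08x\n", maskedCRC([]byte("sNaPpY")))
}
```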
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
new file mode 100644
index 0000000..3645162
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
@@ -0,0 +1,27 @@
+Copyright (c) 2015, Gengo, Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+    * Redistributions of source code must retain the above copyright notice,
+      this list of conditions and the following disclaimer.
+
+    * Redistributions in binary form must reproduce the above copyright notice,
+      this list of conditions and the following disclaimer in the documentation
+      and/or other materials provided with the distribution.
+
+    * Neither the name of Gengo, Inc. nor the names of its
+      contributors may be used to endorse or promote products derived from this
+      software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
new file mode 100644
index 0000000..76cafe6
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel
@@ -0,0 +1,22 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library")
+
+package(default_visibility = ["//visibility:public"])
+
+proto_library(
+    name = "internal_proto",
+    srcs = ["stream_chunk.proto"],
+    deps = ["@com_google_protobuf//:any_proto"],
+)
+
+go_proto_library(
+    name = "internal_go_proto",
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
+    proto = ":internal_proto",
+)
+
+go_library(
+    name = "go_default_library",
+    embed = [":internal_go_proto"],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/internal",
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go
new file mode 100644
index 0000000..8858f06
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.pb.go
@@ -0,0 +1,118 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: internal/stream_chunk.proto
+
+package internal
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+import any "github.com/golang/protobuf/ptypes/any"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+// StreamError is a response type which is returned when
+// a streaming rpc returns an error.
+type StreamError struct {
+	GrpcCode             int32      `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"`
+	HttpCode             int32      `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"`
+	Message              string     `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"`
+	HttpStatus           string     `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"`
+	Details              []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
+}
+
+func (m *StreamError) Reset()         { *m = StreamError{} }
+func (m *StreamError) String() string { return proto.CompactTextString(m) }
+func (*StreamError) ProtoMessage()    {}
+func (*StreamError) Descriptor() ([]byte, []int) {
+	return fileDescriptor_stream_chunk_a2afb657504565d7, []int{0}
+}
+func (m *StreamError) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_StreamError.Unmarshal(m, b)
+}
+func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_StreamError.Marshal(b, m, deterministic)
+}
+func (dst *StreamError) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StreamError.Merge(dst, src)
+}
+func (m *StreamError) XXX_Size() int {
+	return xxx_messageInfo_StreamError.Size(m)
+}
+func (m *StreamError) XXX_DiscardUnknown() {
+	xxx_messageInfo_StreamError.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StreamError proto.InternalMessageInfo
+
+func (m *StreamError) GetGrpcCode() int32 {
+	if m != nil {
+		return m.GrpcCode
+	}
+	return 0
+}
+
+func (m *StreamError) GetHttpCode() int32 {
+	if m != nil {
+		return m.HttpCode
+	}
+	return 0
+}
+
+func (m *StreamError) GetMessage() string {
+	if m != nil {
+		return m.Message
+	}
+	return ""
+}
+
+func (m *StreamError) GetHttpStatus() string {
+	if m != nil {
+		return m.HttpStatus
+	}
+	return ""
+}
+
+func (m *StreamError) GetDetails() []*any.Any {
+	if m != nil {
+		return m.Details
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
+}
+
+func init() {
+	proto.RegisterFile("internal/stream_chunk.proto", fileDescriptor_stream_chunk_a2afb657504565d7)
+}
+
+var fileDescriptor_stream_chunk_a2afb657504565d7 = []byte{
+	// 223 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x90, 0x41, 0x4e, 0xc3, 0x30,
+	0x10, 0x45, 0x15, 0x4a, 0x69, 0x3b, 0xd9, 0x45, 0x5d, 0x18, 0xba, 0x20, 0x62, 0x95, 0x95, 0x23,
+	0xc1, 0x09, 0x00, 0x71, 0x81, 0x74, 0xc7, 0xa6, 0x9a, 0x26, 0x83, 0x13, 0x91, 0xd8, 0xd1, 0x78,
+	0x22, 0x94, 0x6b, 0x71, 0xc2, 0xca, 0x8e, 0xb2, 0xf4, 0x7b, 0x7f, 0xbe, 0xbe, 0x0c, 0xa7, 0xce,
+	0x0a, 0xb1, 0xc5, 0xbe, 0xf4, 0xc2, 0x84, 0xc3, 0xa5, 0x6e, 0x27, 0xfb, 0xab, 0x47, 0x76, 0xe2,
+	0xb2, 0xa3, 0xe1, 0xb1, 0xd6, 0x06, 0x85, 0xfe, 0x70, 0xd6, 0x3c, 0x59, 0xe9, 0x06, 0x7a, 0x7a,
+	0x34, 0xce, 0x99, 0x9e, 0xca, 0x98, 0xb9, 0x4e, 0x3f, 0x25, 0xda, 0x79, 0x39, 0x78, 0xf9, 0x4f,
+	0x20, 0x3d, 0xc7, 0x9e, 0x2f, 0x66, 0xc7, 0xd9, 0x09, 0x0e, 0xa1, 0xe2, 0x52, 0xbb, 0x86, 0x54,
+	0x92, 0x27, 0xc5, 0xb6, 0xda, 0x07, 0xf0, 0xe9, 0x1a, 0x0a, 0xb2, 0x15, 0x19, 0x17, 0x79, 0xb7,
+	0xc8, 0x00, 0xa2, 0x54, 0xb0, 0x1b, 0xc8, 0x7b, 0x34, 0xa4, 0x36, 0x79, 0x52, 0x1c, 0xaa, 0xf5,
+	0x99, 0x3d, 0x43, 0x1a, 0xcf, 0xbc, 0xa0, 0x4c, 0x5e, 0xdd, 0x47, 0x0b, 0x01, 0x9d, 0x23, 0xc9,
+	0x34, 0xec, 0x1a, 0x12, 0xec, 0x7a, 0xaf, 0xb6, 0xf9, 0xa6, 0x48, 0x5f, 0x8f, 0x7a, 0x59, 0xac,
+	0xd7, 0xc5, 0xfa, 0xdd, 0xce, 0xd5, 0x1a, 0xfa, 0x80, 0xef, 0xfd, 0xfa, 0x09, 0xd7, 0x87, 0x18,
+	0x79, 0xbb, 0x05, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x7d, 0xa5, 0x18, 0x17, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto
new file mode 100644
index 0000000..55f42ce
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/stream_chunk.proto
@@ -0,0 +1,15 @@
+syntax = "proto3";
+package grpc.gateway.runtime;
+option go_package = "internal";
+
+import "google/protobuf/any.proto";
+
+// StreamError is a response type which is returned when
+// a streaming rpc returns an error.
+message StreamError {
+	int32 grpc_code = 1;
+	int32 http_code = 2;
+	string message = 3;
+	string http_status = 4;
+	repeated google.protobuf.Any details = 5;
+}
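The generated StreamError message carries both the gRPC code and its HTTP mapping. The sketch below is illustrative only: Go's internal-package rule means this import compiles only from code inside the grpc-gateway module itself, and the field values are made up.

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/internal"
	"google.golang.org/grpc/codes"
)

func main() {
	// Populate the generated message the same way the runtime's stream
	// error path does (see streamChunk in runtime/handler.go below).
	serr := &internal.StreamError{
		GrpcCode:   int32(codes.NotFound),
		HttpCode:   http.StatusNotFound,
		Message:    "thing not found",
		HttpStatus: http.StatusText(http.StatusNotFound),
	}
	fmt.Println(serr.GetGrpcCode(), serr.GetHttpStatus())
}
```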
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
new file mode 100644
index 0000000..c99f83e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel
@@ -0,0 +1,80 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "context.go",
+        "convert.go",
+        "doc.go",
+        "errors.go",
+        "fieldmask.go",
+        "handler.go",
+        "marshal_json.go",
+        "marshal_jsonpb.go",
+        "marshal_proto.go",
+        "marshaler.go",
+        "marshaler_registry.go",
+        "mux.go",
+        "pattern.go",
+        "proto2_convert.go",
+        "proto_errors.go",
+        "query.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime",
+    deps = [
+        "//internal:go_default_library",
+        "//utilities:go_default_library",
+        "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@com_github_golang_protobuf//protoc-gen-go/generator:go_default_library_gen",
+        "@io_bazel_rules_go//proto/wkt:any_go_proto",
+        "@io_bazel_rules_go//proto/wkt:duration_go_proto",
+        "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
+        "@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
+        "@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
+        "@org_golang_google_grpc//codes:go_default_library",
+        "@org_golang_google_grpc//grpclog:go_default_library",
+        "@org_golang_google_grpc//metadata:go_default_library",
+        "@org_golang_google_grpc//status:go_default_library",
+    ],
+)
+
+go_test(
+    name = "go_default_test",
+    size = "small",
+    srcs = [
+        "context_test.go",
+        "errors_test.go",
+        "fieldmask_test.go",
+        "handler_test.go",
+        "marshal_json_test.go",
+        "marshal_jsonpb_test.go",
+        "marshal_proto_test.go",
+        "marshaler_registry_test.go",
+        "mux_test.go",
+        "pattern_test.go",
+        "query_test.go",
+    ],
+    embed = [":go_default_library"],
+    deps = [
+        "//examples/proto/examplepb:go_default_library",
+        "//internal:go_default_library",
+        "//utilities:go_default_library",
+        "@com_github_golang_protobuf//jsonpb:go_default_library_gen",
+        "@com_github_golang_protobuf//proto:go_default_library",
+        "@com_github_golang_protobuf//ptypes:go_default_library_gen",
+        "@go_googleapis//google/rpc:errdetails_go_proto",
+        "@io_bazel_rules_go//proto/wkt:duration_go_proto",
+        "@io_bazel_rules_go//proto/wkt:empty_go_proto",
+        "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
+        "@io_bazel_rules_go//proto/wkt:struct_go_proto",
+        "@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
+        "@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
+        "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//codes:go_default_library",
+        "@org_golang_google_grpc//metadata:go_default_library",
+        "@org_golang_google_grpc//status:go_default_library",
+    ],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
new file mode 100644
index 0000000..896057e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
@@ -0,0 +1,210 @@
+package runtime
+
+import (
+	"context"
+	"encoding/base64"
+	"fmt"
+	"net"
+	"net/http"
+	"net/textproto"
+	"strconv"
+	"strings"
+	"time"
+
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// MetadataHeaderPrefix is the http prefix that represents custom metadata
+// parameters to or from a gRPC call.
+const MetadataHeaderPrefix = "Grpc-Metadata-"
+
+// MetadataPrefix is prepended to permanent HTTP header keys (as specified
+// by the IANA) when added to the gRPC context.
+const MetadataPrefix = "grpcgateway-"
+
+// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
+// HTTP headers in a response handled by grpc-gateway
+const MetadataTrailerPrefix = "Grpc-Trailer-"
+
+const metadataGrpcTimeout = "Grpc-Timeout"
+const metadataHeaderBinarySuffix = "-Bin"
+
+const xForwardedFor = "X-Forwarded-For"
+const xForwardedHost = "X-Forwarded-Host"
+
+var (
+	// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
+	// header isn't present. If the value is 0 the sent `context` will not have a timeout.
+	DefaultContextTimeout = 0 * time.Second
+)
+
+func decodeBinHeader(v string) ([]byte, error) {
+	if len(v)%4 == 0 {
+		// Input was padded, or padding was not necessary.
+		return base64.StdEncoding.DecodeString(v)
+	}
+	return base64.RawStdEncoding.DecodeString(v)
+}
+
+/*
+AnnotateContext adds context information such as metadata from the request.
+
+At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
+except that the forwarded destination is not another HTTP service but rather
+a gRPC service.
+*/
+func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
+	var pairs []string
+	timeout := DefaultContextTimeout
+	if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
+		var err error
+		timeout, err = timeoutDecode(tm)
+		if err != nil {
+			return nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
+		}
+	}
+
+	for key, vals := range req.Header {
+		for _, val := range vals {
+			key = textproto.CanonicalMIMEHeaderKey(key)
+			// For backwards-compatibility, pass through 'authorization' header with no prefix.
+			if key == "Authorization" {
+				pairs = append(pairs, "authorization", val)
+			}
+			if h, ok := mux.incomingHeaderMatcher(key); ok {
+				// Handle "-bin" metadata in gRPC: since gRPC base64-encodes binary
+				// headers before sending them to the server, we need to decode them first.
+				if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
+					b, err := decodeBinHeader(val)
+					if err != nil {
+						return nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
+					}
+
+					val = string(b)
+				}
+				pairs = append(pairs, h, val)
+			}
+		}
+	}
+	if host := req.Header.Get(xForwardedHost); host != "" {
+		pairs = append(pairs, strings.ToLower(xForwardedHost), host)
+	} else if req.Host != "" {
+		pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
+	}
+
+	if addr := req.RemoteAddr; addr != "" {
+		if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
+			if fwd := req.Header.Get(xForwardedFor); fwd == "" {
+				pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
+			} else {
+				pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
+			}
+		} else {
+			grpclog.Infof("invalid remote addr: %s", addr)
+		}
+	}
+
+	if timeout != 0 {
+		ctx, _ = context.WithTimeout(ctx, timeout)
+	}
+	if len(pairs) == 0 {
+		return ctx, nil
+	}
+	md := metadata.Pairs(pairs...)
+	for _, mda := range mux.metadataAnnotators {
+		md = metadata.Join(md, mda(ctx, req))
+	}
+	return metadata.NewOutgoingContext(ctx, md), nil
+}
+
+// ServerMetadata consists of metadata sent from gRPC server.
+type ServerMetadata struct {
+	HeaderMD  metadata.MD
+	TrailerMD metadata.MD
+}
+
+type serverMetadataKey struct{}
+
+// NewServerMetadataContext creates a new context with ServerMetadata
+func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
+	return context.WithValue(ctx, serverMetadataKey{}, md)
+}
+
+// ServerMetadataFromContext returns the ServerMetadata in ctx
+func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
+	md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
+	return
+}
+
+func timeoutDecode(s string) (time.Duration, error) {
+	size := len(s)
+	if size < 2 {
+		return 0, fmt.Errorf("timeout string is too short: %q", s)
+	}
+	d, ok := timeoutUnitToDuration(s[size-1])
+	if !ok {
+		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
+	}
+	t, err := strconv.ParseInt(s[:size-1], 10, 64)
+	if err != nil {
+		return 0, err
+	}
+	return d * time.Duration(t), nil
+}
+
+func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
+	switch u {
+	case 'H':
+		return time.Hour, true
+	case 'M':
+		return time.Minute, true
+	case 'S':
+		return time.Second, true
+	case 'm':
+		return time.Millisecond, true
+	case 'u':
+		return time.Microsecond, true
+	case 'n':
+		return time.Nanosecond, true
+	default:
+	}
+	return
+}
+
+// isPermanentHTTPHeader checks whether hdr belongs to the list of
+// permanent request headers maintained by IANA.
+// http://www.iana.org/assignments/message-headers/message-headers.xml
+func isPermanentHTTPHeader(hdr string) bool {
+	switch hdr {
+	case
+		"Accept",
+		"Accept-Charset",
+		"Accept-Language",
+		"Accept-Ranges",
+		"Authorization",
+		"Cache-Control",
+		"Content-Type",
+		"Cookie",
+		"Date",
+		"Expect",
+		"From",
+		"Host",
+		"If-Match",
+		"If-Modified-Since",
+		"If-None-Match",
+		"If-Schedule-Tag-Match",
+		"If-Unmodified-Since",
+		"Max-Forwards",
+		"Origin",
+		"Pragma",
+		"Referer",
+		"User-Agent",
+		"Via",
+		"Warning":
+		return true
+	}
+	return false
+}
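Grpc-Timeout header values are a base-10 integer followed by a single-letter unit, so "100m" means 100 milliseconds. Since timeoutDecode is unexported, here is a re-sketch of the same parsing for reference; parseGrpcTimeout is our name, not part of the package:

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseGrpcTimeout mirrors timeoutDecode above: a base-10 integer
// followed by one of H, M, S, m, u, n.
func parseGrpcTimeout(s string) (time.Duration, error) {
	if len(s) < 2 {
		return 0, fmt.Errorf("timeout string is too short: %q", s)
	}
	units := map[byte]time.Duration{
		'H': time.Hour, 'M': time.Minute, 'S': time.Second,
		'm': time.Millisecond, 'u': time.Microsecond, 'n': time.Nanosecond,
	}
	d, ok := units[s[len(s)-1]]
	if !ok {
		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
	}
	t, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
	if err != nil {
		return 0, err
	}
	return d * time.Duration(t), nil
}

func main() {
	d, _ := parseGrpcTimeout("100m")
	fmt.Println(d) // 100ms
}
```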
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
new file mode 100644
index 0000000..a5b3bd6
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
@@ -0,0 +1,312 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/ptypes/duration"
+	"github.com/golang/protobuf/ptypes/timestamp"
+	"github.com/golang/protobuf/ptypes/wrappers"
+)
+
+// String just returns the given string.
+// It is just for compatibility with other types.
+func String(val string) (string, error) {
+	return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+	return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+	return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+	s := strings.Split(val, sep)
+	values := make([]bool, len(s))
+	for i, v := range s {
+		value, err := Bool(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+	return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+	s := strings.Split(val, sep)
+	values := make([]float64, len(s))
+	for i, v := range s {
+		value, err := Float64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+	f, err := strconv.ParseFloat(val, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+	s := strings.Split(val, sep)
+	values := make([]float32, len(s))
+	for i, v := range s {
+		value, err := Float32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+	return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+	s := strings.Split(val, sep)
+	values := make([]int64, len(s))
+	for i, v := range s {
+		value, err := Int64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+	i, err := strconv.ParseInt(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Int32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+	return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint64, len(s))
+	for i, v := range s {
+		value, err := Uint64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+	i, err := strconv.ParseUint(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint32, len(s))
+	for i, v := range s {
+		value, err := Uint32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// A byte sequence may be encoded in standard or URL-safe base64 (both are tried).
+func Bytes(val string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(val)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(val)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return b, nil
+}
+
+// BytesSlice converts 'val' where individual byte sequences, encoded in standard
+// or URL-safe base64, are separated by 'sep' into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+	s := strings.Split(val, sep)
+	values := make([][]byte, len(s))
+	for i, v := range s {
+		value, err := Bytes(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
+func Timestamp(val string) (*timestamp.Timestamp, error) {
+	var r timestamp.Timestamp
+	err := jsonpb.UnmarshalString(val, &r)
+	return &r, err
+}
+
+// Duration converts the given string into a duration.Duration.
+func Duration(val string) (*duration.Duration, error) {
+	var r duration.Duration
+	err := jsonpb.UnmarshalString(val, &r)
+	return &r, err
+}
+
+// Enum converts the given string into an int32 that should be cast to the
+// correct enum proto type.
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+	e, ok := enumValMap[val]
+	if ok {
+		return e, nil
+	}
+
+	i, err := Int32(val)
+	if err != nil {
+		return 0, fmt.Errorf("%s is not valid", val)
+	}
+	for _, v := range enumValMap {
+		if v == i {
+			return i, nil
+		}
+	}
+	return 0, fmt.Errorf("%s is not valid", val)
+}
+
+// EnumSlice converts 'val' where individual enums are separated by 'sep'
+// into an int32 slice. Each individual int32 should be cast to the
+// correct enum proto type.
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Enum(v, enumValMap)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+/*
+	Support for google.protobuf.wrappers on top of primitive types
+*/
+
+// StringValue well-known type support as wrapper around string type
+func StringValue(val string) (*wrappers.StringValue, error) {
+	return &wrappers.StringValue{Value: val}, nil
+}
+
+// FloatValue well-known type support as wrapper around float32 type
+func FloatValue(val string) (*wrappers.FloatValue, error) {
+	parsedVal, err := Float32(val)
+	return &wrappers.FloatValue{Value: parsedVal}, err
+}
+
+// DoubleValue well-known type support as wrapper around float64 type
+func DoubleValue(val string) (*wrappers.DoubleValue, error) {
+	parsedVal, err := Float64(val)
+	return &wrappers.DoubleValue{Value: parsedVal}, err
+}
+
+// BoolValue well-known type support as wrapper around bool type
+func BoolValue(val string) (*wrappers.BoolValue, error) {
+	parsedVal, err := Bool(val)
+	return &wrappers.BoolValue{Value: parsedVal}, err
+}
+
+// Int32Value well-known type support as wrapper around int32 type
+func Int32Value(val string) (*wrappers.Int32Value, error) {
+	parsedVal, err := Int32(val)
+	return &wrappers.Int32Value{Value: parsedVal}, err
+}
+
+// UInt32Value well-known type support as wrapper around uint32 type
+func UInt32Value(val string) (*wrappers.UInt32Value, error) {
+	parsedVal, err := Uint32(val)
+	return &wrappers.UInt32Value{Value: parsedVal}, err
+}
+
+// Int64Value well-known type support as wrapper around int64 type
+func Int64Value(val string) (*wrappers.Int64Value, error) {
+	parsedVal, err := Int64(val)
+	return &wrappers.Int64Value{Value: parsedVal}, err
+}
+
+// UInt64Value well-known type support as wrapper around uint64 type
+func UInt64Value(val string) (*wrappers.UInt64Value, error) {
+	parsedVal, err := Uint64(val)
+	return &wrappers.UInt64Value{Value: parsedVal}, err
+}
+
+// BytesValue well-known type support as wrapper around the []byte type
+func BytesValue(val string) (*wrappers.BytesValue, error) {
+	parsedVal, err := Bytes(val)
+	return &wrappers.BytesValue{Value: parsedVal}, err
+}
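The converters above are what generated gateway stubs call when binding path and query parameters to typed fields. A quick usage sketch, assuming only this vendored runtime package:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Comma-separated query parameter into a typed slice.
	ids, err := runtime.Int64Slice("1,2,3", ",")
	fmt.Println(ids, err) // [1 2 3] <nil>

	// Padded standard base64, as accepted by Bytes above.
	b, err := runtime.Bytes("aGVsbG8=")
	fmt.Println(string(b), err) // hello <nil>

	// Well-known wrapper types.
	v, err := runtime.BoolValue("true")
	fmt.Println(v.GetValue(), err) // true <nil>
}
```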
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
new file mode 100644
index 0000000..b6e5ddf
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
@@ -0,0 +1,5 @@
+/*
+Package runtime contains runtime helper functions used by
+servers which protoc-gen-grpc-gateway generates.
+*/
+package runtime
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
new file mode 100644
index 0000000..41d54ef
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
@@ -0,0 +1,145 @@
+package runtime
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes/any"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
+// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
+func HTTPStatusFromCode(code codes.Code) int {
+	switch code {
+	case codes.OK:
+		return http.StatusOK
+	case codes.Canceled:
+		return http.StatusRequestTimeout
+	case codes.Unknown:
+		return http.StatusInternalServerError
+	case codes.InvalidArgument:
+		return http.StatusBadRequest
+	case codes.DeadlineExceeded:
+		return http.StatusGatewayTimeout
+	case codes.NotFound:
+		return http.StatusNotFound
+	case codes.AlreadyExists:
+		return http.StatusConflict
+	case codes.PermissionDenied:
+		return http.StatusForbidden
+	case codes.Unauthenticated:
+		return http.StatusUnauthorized
+	case codes.ResourceExhausted:
+		return http.StatusTooManyRequests
+	case codes.FailedPrecondition:
+		return http.StatusPreconditionFailed
+	case codes.Aborted:
+		return http.StatusConflict
+	case codes.OutOfRange:
+		return http.StatusBadRequest
+	case codes.Unimplemented:
+		return http.StatusNotImplemented
+	case codes.Internal:
+		return http.StatusInternalServerError
+	case codes.Unavailable:
+		return http.StatusServiceUnavailable
+	case codes.DataLoss:
+		return http.StatusInternalServerError
+	}
+
+	grpclog.Infof("Unknown gRPC error code: %v", code)
+	return http.StatusInternalServerError
+}
+
+var (
+	// HTTPError replies to the request with the error.
+	// You can set a custom function to this variable to customize error format.
+	HTTPError = DefaultHTTPError
+	// OtherErrorHandler handles the following errors used by the gateway: StatusMethodNotAllowed, StatusNotFound and StatusBadRequest
+	OtherErrorHandler = DefaultOtherErrorHandler
+)
+
+type errorBody struct {
+	Error string `protobuf:"bytes,1,name=error" json:"error"`
+	// This is to make the error more compatible with users that expect errors to be Status objects:
+	// https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto
+	// It should be the exact same message as the Error field.
+	Message string     `protobuf:"bytes,1,name=message" json:"message"`
+	Code    int32      `protobuf:"varint,2,name=code" json:"code"`
+	Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"`
+}
+
+// Make this also conform to proto.Message for builtin JSONPb Marshaler
+func (e *errorBody) Reset()         { *e = errorBody{} }
+func (e *errorBody) String() string { return proto.CompactTextString(e) }
+func (*errorBody) ProtoMessage()    {}
+
+// DefaultHTTPError is the default implementation of HTTPError.
+// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
+// If otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body returned by this function is a JSON object,
+// which contains a member whose key is "error" and whose value is err.Error().
+func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
+	const fallback = `{"error": "failed to marshal error message"}`
+
+	s, ok := status.FromError(err)
+	if !ok {
+		s = status.New(codes.Unknown, err.Error())
+	}
+
+	w.Header().Del("Trailer")
+
+	contentType := marshaler.ContentType()
+	// Check the marshaler at run time in order to keep backwards compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshaler interface to be able to remove this check.
+	if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
+		pb := s.Proto()
+		contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
+	}
+	w.Header().Set("Content-Type", contentType)
+
+	body := &errorBody{
+		Error:   s.Message(),
+		Message: s.Message(),
+		Code:    int32(s.Code()),
+		Details: s.Proto().GetDetails(),
+	}
+
+	buf, merr := marshaler.Marshal(body)
+	if merr != nil {
+		grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
+		w.WriteHeader(http.StatusInternalServerError)
+		if _, err := io.WriteString(w, fallback); err != nil {
+			grpclog.Infof("Failed to write response: %v", err)
+		}
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+	handleForwardResponseTrailerHeader(w, md)
+	st := HTTPStatusFromCode(s.Code())
+	w.WriteHeader(st)
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	handleForwardResponseTrailer(w, md)
+}
+
+// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
+// It simply writes a string representation of the given error into "w".
+func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
+	http.Error(w, msg, code)
+}
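HTTPError is a package variable, so callers can swap in their own formatter while still delegating to DefaultHTTPError. A minimal sketch of that pattern; the X-Error-Source header is purely illustrative:

```go
package main

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Wrap the default handler; the literal's signature matches the
	// HTTPError package variable above.
	orig := runtime.HTTPError
	runtime.HTTPError = func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler,
		w http.ResponseWriter, r *http.Request, err error) {
		w.Header().Set("X-Error-Source", "grpc-gateway") // made-up header
		orig(ctx, mux, m, w, r, err)
	}
}
```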
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
new file mode 100644
index 0000000..e1cf7a9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
@@ -0,0 +1,70 @@
+package runtime
+
+import (
+	"encoding/json"
+	"io"
+	"strings"
+
+	"github.com/golang/protobuf/protoc-gen-go/generator"
+	"google.golang.org/genproto/protobuf/field_mask"
+)
+
+// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
+func FieldMaskFromRequestBody(r io.Reader) (*field_mask.FieldMask, error) {
+	fm := &field_mask.FieldMask{}
+	var root interface{}
+	if err := json.NewDecoder(r).Decode(&root); err != nil {
+		if err == io.EOF {
+			return fm, nil
+		}
+		return nil, err
+	}
+
+	queue := []fieldMaskPathItem{{node: root}}
+	for len(queue) > 0 {
+		// dequeue an item
+		item := queue[0]
+		queue = queue[1:]
+
+		if m, ok := item.node.(map[string]interface{}); ok {
+			// if the item is an object, then enqueue all of its children
+			for k, v := range m {
+				queue = append(queue, fieldMaskPathItem{path: append(item.path, generator.CamelCase(k)), node: v})
+			}
+		} else if len(item.path) > 0 {
+			// otherwise, it's a leaf node so print its path
+			fm.Paths = append(fm.Paths, strings.Join(item.path, "."))
+		}
+	}
+
+	return fm, nil
+}
+
+// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
+type fieldMaskPathItem struct {
+	// the list of prior fields leading up to node
+	path []string
+
+	// a generic decoded json object for the current item to inspect for further path extraction
+	node interface{}
+}
+
+// CamelCaseFieldMask updates the given FieldMask by converting all of its paths to CamelCase, using the same heuristic
+// that's used for naming protobuf fields in Go.
+func CamelCaseFieldMask(mask *field_mask.FieldMask) {
+	if mask == nil || mask.Paths == nil {
+		return
+	}
+
+	var newPaths []string
+	for _, path := range mask.Paths {
+		lowerCasedParts := strings.Split(path, ".")
+		var camelCasedParts []string
+		for _, part := range lowerCasedParts {
+			camelCasedParts = append(camelCasedParts, generator.CamelCase(part))
+		}
+		newPaths = append(newPaths, strings.Join(camelCasedParts, "."))
+	}
+
+	mask.Paths = newPaths
+}
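FieldMaskFromRequestBody walks the decoded JSON breadth-first and emits one CamelCased path per leaf. A small sketch of the observable behavior; note that map iteration order makes the path order nondeterministic:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	body := strings.NewReader(`{"name": "x", "config": {"retry_count": 3}}`)
	fm, err := runtime.FieldMaskFromRequestBody(body)
	if err != nil {
		panic(err)
	}
	// Paths come back CamelCased, e.g. Name and Config.RetryCount,
	// in no particular order.
	fmt.Println(fm.Paths)
}
```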
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
new file mode 100644
index 0000000..1fc63f7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
@@ -0,0 +1,215 @@
+package runtime
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/textproto"
+
+	"context"
+	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/grpc-ecosystem/grpc-gateway/internal"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// ForwardResponseStream forwards the stream from gRPC server to REST client.
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+	f, ok := w.(http.Flusher)
+	if !ok {
+		grpclog.Infof("Flush not supported in %T", w)
+		http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+		http.Error(w, "unexpected error", http.StatusInternalServerError)
+		return
+	}
+	handleForwardResponseServerMetadata(w, mux, md)
+
+	w.Header().Set("Transfer-Encoding", "chunked")
+	w.Header().Set("Content-Type", marshaler.ContentType())
+	if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+
+	var delimiter []byte
+	if d, ok := marshaler.(Delimited); ok {
+		delimiter = d.Delimiter()
+	} else {
+		delimiter = []byte("\n")
+	}
+
+	var wroteHeader bool
+	for {
+		resp, err := recv()
+		if err == io.EOF {
+			return
+		}
+		if err != nil {
+			handleForwardResponseStreamError(wroteHeader, marshaler, w, err)
+			return
+		}
+		if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+			handleForwardResponseStreamError(wroteHeader, marshaler, w, err)
+			return
+		}
+
+		buf, err := marshaler.Marshal(streamChunk(resp, nil))
+		if err != nil {
+			grpclog.Infof("Failed to marshal response chunk: %v", err)
+			handleForwardResponseStreamError(wroteHeader, marshaler, w, err)
+			return
+		}
+		if _, err = w.Write(buf); err != nil {
+			grpclog.Infof("Failed to send response chunk: %v", err)
+			return
+		}
+		wroteHeader = true
+		if _, err = w.Write(delimiter); err != nil {
+			grpclog.Infof("Failed to send delimiter chunk: %v", err)
+			return
+		}
+		f.Flush()
+	}
+}
+
+func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+	for k, vs := range md.HeaderMD {
+		if h, ok := mux.outgoingHeaderMatcher(k); ok {
+			for _, v := range vs {
+				w.Header().Add(h, v)
+			}
+		}
+	}
+}
+
+func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
+	for k := range md.TrailerMD {
+		tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
+		w.Header().Add("Trailer", tKey)
+	}
+}
+
+func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
+	for k, vs := range md.TrailerMD {
+		tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
+		for _, v := range vs {
+			w.Header().Add(tKey, v)
+		}
+	}
+}
+
+// responseBody interface contains the method for getting the field to marshal to the response body.
+// This method is generated for the response struct from the value of `response_body` in the `google.api.HttpRule`.
+type responseBody interface {
+	XXX_ResponseBody() interface{}
+}
+
+// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+	handleForwardResponseTrailerHeader(w, md)
+
+	contentType := marshaler.ContentType()
+	// Check the marshaler at run time in order to keep backwards compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshaler interface to be able to remove this check.
+	if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
+		contentType = httpBodyMarshaler.ContentTypeFromMessage(resp)
+	}
+	w.Header().Set("Content-Type", contentType)
+
+	if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+	var buf []byte
+	var err error
+	if rb, ok := resp.(responseBody); ok {
+		buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
+	} else {
+		buf, err = marshaler.Marshal(resp)
+	}
+	if err != nil {
+		grpclog.Infof("Marshal error: %v", err)
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+
+	if _, err = w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	handleForwardResponseTrailer(w, md)
+}
+
+func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
+	if len(opts) == 0 {
+		return nil
+	}
+	for _, opt := range opts {
+		if err := opt(ctx, w, resp); err != nil {
+			grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
+			return err
+		}
+	}
+	return nil
+}
+
+func handleForwardResponseStreamError(wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, err error) {
+	buf, merr := marshaler.Marshal(streamChunk(nil, err))
+	if merr != nil {
+		grpclog.Infof("Failed to marshal an error: %v", merr)
+		return
+	}
+	if !wroteHeader {
+		s, ok := status.FromError(err)
+		if !ok {
+			s = status.New(codes.Unknown, err.Error())
+		}
+		w.WriteHeader(HTTPStatusFromCode(s.Code()))
+	}
+	if _, werr := w.Write(buf); werr != nil {
+		grpclog.Infof("Failed to notify error to client: %v", werr)
+		return
+	}
+}
+
+func streamChunk(result proto.Message, err error) map[string]proto.Message {
+	if err != nil {
+		grpcCode := codes.Unknown
+		grpcMessage := err.Error()
+		var grpcDetails []*any.Any
+		if s, ok := status.FromError(err); ok {
+			grpcCode = s.Code()
+			grpcMessage = s.Message()
+			grpcDetails = s.Proto().GetDetails()
+		}
+		httpCode := HTTPStatusFromCode(grpcCode)
+		return map[string]proto.Message{
+			"error": &internal.StreamError{
+				GrpcCode:   int32(grpcCode),
+				HttpCode:   int32(httpCode),
+				Message:    grpcMessage,
+				HttpStatus: http.StatusText(httpCode),
+				Details:    grpcDetails,
+			},
+		}
+	}
+	if result == nil {
+		return streamChunk(nil, fmt.Errorf("empty response"))
+	}
+	return map[string]proto.Message{"result": result}
+}
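On the wire, each streamed message is wrapped in {"result": ...} and a terminal failure in {"error": ...}, one JSON object per delimiter (newline for the JSON marshalers above). A client-side sketch that splits such a stream, with a made-up payload:

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Two chunks as ForwardResponseStream would emit them.
	stream := `{"result":{"id":"1"}}
{"error":{"grpc_code":5,"http_code":404,"message":"not found","http_status":"Not Found"}}
`
	sc := bufio.NewScanner(strings.NewReader(stream))
	for sc.Scan() {
		var chunk map[string]json.RawMessage
		if err := json.Unmarshal(sc.Bytes(), &chunk); err != nil {
			panic(err)
		}
		if e, ok := chunk["error"]; ok {
			fmt.Println("stream failed:", string(e))
			continue
		}
		fmt.Println("message:", string(chunk["result"]))
	}
}
```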
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
new file mode 100644
index 0000000..f55285b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
@@ -0,0 +1,43 @@
+package runtime
+
+import (
+	"google.golang.org/genproto/googleapis/api/httpbody"
+)
+
+// SetHTTPBodyMarshaler overwrites the default marshaler with the HTTPBodyMarshaler
+func SetHTTPBodyMarshaler(serveMux *ServeMux) {
+	serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
+		Marshaler: &JSONPb{OrigName: true},
+	}
+}
+
+// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
+// google.api.HttpBody message as the full response body if it is
+// the actual message used as the response. If not, then this will
+// simply fall back to the Marshaler specified as its default Marshaler.
+type HTTPBodyMarshaler struct {
+	Marshaler
+}
+
+// ContentType implementation to keep backwards compatibility with the Marshaler interface
+func (h *HTTPBodyMarshaler) ContentType() string {
+	return h.ContentTypeFromMessage(nil)
+}
+
+// ContentTypeFromMessage returns the content type specified by "v" when it is a
+// google.api.HttpBody message; otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
+	if httpBody, ok := v.(*httpbody.HttpBody); ok {
+		return httpBody.GetContentType()
+	}
+	return h.Marshaler.ContentType()
+}
+
+// Marshal marshals "v" by returning the body bytes if v is a
+// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
+	if httpBody, ok := v.(*httpbody.HttpBody); ok {
+		return httpBody.Data, nil
+	}
+	return h.Marshaler.Marshal(v)
+}
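SetHTTPBodyMarshaler swaps the wildcard marshaler so that handlers returning google.api.HttpBody control the raw body and Content-Type themselves. A usage sketch, assuming runtime.NewServeMux from the adjoining mux.go:

```go
package main

import (
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/genproto/googleapis/api/httpbody"
)

func main() {
	mux := runtime.NewServeMux()
	runtime.SetHTTPBodyMarshaler(mux)

	// A service can now respond with raw bytes plus an explicit
	// content type instead of a JSON-encoded message:
	body := &httpbody.HttpBody{
		ContentType: "image/png",
		Data:        []byte("\x89PNG..."), // placeholder bytes
	}
	_ = body
}
```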
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
new file mode 100644
index 0000000..f9d3a58
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
@@ -0,0 +1,45 @@
+package runtime
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
+// with the standard "encoding/json" package of Go.
+// Although it is generally faster for simple proto messages than JSONPb,
+// it does not support advanced features of protobuf, e.g. maps or oneofs.
+//
+// The NewEncoder and NewDecoder types return *json.Encoder and
+// *json.Decoder respectively.
+type JSONBuiltin struct{}
+
+// ContentType always returns "application/json".
+func (*JSONBuiltin) ContentType() string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON
+func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// Unmarshal unmarshals JSON data into "v".
+func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
+	return json.Unmarshal(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
+	return json.NewDecoder(r)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
+	return json.NewEncoder(w)
+}
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONBuiltin) Delimiter() []byte {
+	return []byte("\n")
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
new file mode 100644
index 0000000..3530ddd
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
@@ -0,0 +1,242 @@
+package runtime
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+)
+
+// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
+// with the "github.com/golang/protobuf/jsonpb".
+// It supports fully functionality of protobuf unlike JSONBuiltin.
+//
+// The NewDecoder method returns a DecoderWrapper, so the underlying
+// *json.Decoder methods can be used.
+type JSONPb jsonpb.Marshaler
+
+// ContentType always returns "application/json".
+func (*JSONPb) ContentType() string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
+	if _, ok := v.(proto.Message); !ok {
+		return j.marshalNonProtoField(v)
+	}
+
+	var buf bytes.Buffer
+	if err := j.marshalTo(&buf, v); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		buf, err := j.marshalNonProtoField(v)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(buf)
+		return err
+	}
+	return (*jsonpb.Marshaler)(j).Marshal(w, p)
+}
+
+var (
+	// protoMessageType is stored to prevent constant lookup of the same type at runtime.
+	protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+)
+
+// marshalNonProtoField marshals a non-message field of a protobuf message.
+// This function does not correctly marshal arbitrary data structures into JSON,
+// but it is only capable of marshaling non-message field values of protobuf,
+// i.e. primitive types, enums; pointers to primitives or enums; maps from
+// integer/string types to primitives/enums/pointers to messages.
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
+	if v == nil {
+		return []byte("null"), nil
+	}
+	rv := reflect.ValueOf(v)
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			return []byte("null"), nil
+		}
+		rv = rv.Elem()
+	}
+
+	if rv.Kind() == reflect.Slice {
+		if rv.IsNil() {
+			if j.EmitDefaults {
+				return []byte("[]"), nil
+			}
+			return []byte("null"), nil
+		}
+
+		if rv.Type().Elem().Implements(protoMessageType) {
+			var buf bytes.Buffer
+			err := buf.WriteByte('[')
+			if err != nil {
+				return nil, err
+			}
+			for i := 0; i < rv.Len(); i++ {
+				if i != 0 {
+					err = buf.WriteByte(',')
+					if err != nil {
+						return nil, err
+					}
+				}
+				if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
+					return nil, err
+				}
+			}
+			err = buf.WriteByte(']')
+			if err != nil {
+				return nil, err
+			}
+
+			return buf.Bytes(), nil
+		}
+	}
+
+	if rv.Kind() == reflect.Map {
+		m := make(map[string]*json.RawMessage)
+		for _, k := range rv.MapKeys() {
+			buf, err := j.Marshal(rv.MapIndex(k).Interface())
+			if err != nil {
+				return nil, err
+			}
+			m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
+		}
+		if j.Indent != "" {
+			return json.MarshalIndent(m, "", j.Indent)
+		}
+		return json.Marshal(m)
+	}
+	if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
+		return json.Marshal(enum.String())
+	}
+	return json.Marshal(rv.Interface())
+}
+
+// Unmarshal unmarshals JSON "data" into "v"
+func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
+	return unmarshalJSONPb(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
+	d := json.NewDecoder(r)
+	return DecoderWrapper{Decoder: d}
+}
+
+// DecoderWrapper is a wrapper around a *json.Decoder that adds
+// support for protos to the Decode method.
+type DecoderWrapper struct {
+	*json.Decoder
+}
+
+// Decode wraps the embedded decoder's Decode method to support
+// protos using a jsonpb.Unmarshaler.
+func (d DecoderWrapper) Decode(v interface{}) error {
+	return decodeJSONPb(d.Decoder, v)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
+	return EncoderFunc(func(v interface{}) error { return j.marshalTo(w, v) })
+}
+
+func unmarshalJSONPb(data []byte, v interface{}) error {
+	d := json.NewDecoder(bytes.NewReader(data))
+	return decodeJSONPb(d, v)
+}
+
+func decodeJSONPb(d *json.Decoder, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		return decodeNonProtoField(d, v)
+	}
+	unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
+	return unmarshaler.UnmarshalNext(d, p)
+}
+
+func decodeNonProtoField(d *json.Decoder, v interface{}) error {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return fmt.Errorf("%T is not a pointer", v)
+	}
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			rv.Set(reflect.New(rv.Type().Elem()))
+		}
+		if rv.Type().ConvertibleTo(typeProtoMessage) {
+			unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
+			return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
+		}
+		rv = rv.Elem()
+	}
+	if rv.Kind() == reflect.Map {
+		if rv.IsNil() {
+			rv.Set(reflect.MakeMap(rv.Type()))
+		}
+		conv, ok := convFromType[rv.Type().Key().Kind()]
+		if !ok {
+			return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
+		}
+
+		m := make(map[string]*json.RawMessage)
+		if err := d.Decode(&m); err != nil {
+			return err
+		}
+		for k, v := range m {
+			result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
+			if err := result[1].Interface(); err != nil {
+				return err.(error)
+			}
+			bk := result[0]
+			bv := reflect.New(rv.Type().Elem())
+			if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
+				return err
+			}
+			rv.SetMapIndex(bk, bv.Elem())
+		}
+		return nil
+	}
+	if _, ok := rv.Interface().(protoEnum); ok {
+		var repr interface{}
+		if err := d.Decode(&repr); err != nil {
+			return err
+		}
+		switch repr.(type) {
+		case string:
+			// TODO(yugui) Should use proto.StructProperties?
+			return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
+		case float64:
+			rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
+			return nil
+		default:
+			return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
+		}
+	}
+	return d.Decode(v)
+}
+
+type protoEnum interface {
+	fmt.Stringer
+	EnumDescriptor() ([]byte, []int)
+}
+
+var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONPb) Delimiter() []byte {
+	return []byte("\n")
+}
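Because JSONPb is a defined type over jsonpb.Marshaler, its fields (OrigName, EmitDefaults, Indent, EnumsAsInts) configure output directly, and non-proto values are routed through marshalNonProtoField above. A short sketch:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// jsonpb.Marshaler fields carry over to JSONPb unchanged.
	m := &runtime.JSONPb{OrigName: true, EmitDefaults: true, Indent: "  "}

	// A plain map is not a proto.Message, so this exercises the
	// marshalNonProtoField path.
	buf, err := m.Marshal(map[string]int32{"a": 1})
	fmt.Println(string(buf), err)
}
```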
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
new file mode 100644
index 0000000..f65d1a2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
@@ -0,0 +1,62 @@
+package runtime
+
+import (
+	"io"
+
+	"errors"
+	"github.com/golang/protobuf/proto"
+	"io/ioutil"
+)
+
+// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes
+type ProtoMarshaller struct{}
+
+// ContentType always returns "application/octet-stream".
+func (*ProtoMarshaller) ContentType() string {
+	return "application/octet-stream"
+}
+
+// Marshal marshals "value" into Proto
+func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return nil, errors.New("unable to marshal non proto field")
+	}
+	return proto.Marshal(message)
+}
+
+// Unmarshal unmarshals proto "data" into "value"
+func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return errors.New("unable to unmarshal non proto field")
+	}
+	return proto.Unmarshal(data, message)
+}
+
+// NewDecoder returns a Decoder which reads proto stream from "reader".
+func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
+	return DecoderFunc(func(value interface{}) error {
+		buffer, err := ioutil.ReadAll(reader)
+		if err != nil {
+			return err
+		}
+		return marshaller.Unmarshal(buffer, value)
+	})
+}
+
+// NewEncoder returns an Encoder which writes proto stream into "writer".
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
+	return EncoderFunc(func(value interface{}) error {
+		buffer, err := marshaller.Marshal(value)
+		if err != nil {
+			return err
+		}
+		_, err = writer.Write(buffer)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	})
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
new file mode 100644
index 0000000..98fe6e8
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
@@ -0,0 +1,48 @@
+package runtime
+
+import (
+	"io"
+)
+
+// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
+type Marshaler interface {
+	// Marshal marshals "v" into byte sequence.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal unmarshals "data" into "v".
+	// "v" must be a pointer value.
+	Unmarshal(data []byte, v interface{}) error
+	// NewDecoder returns a Decoder which reads byte sequence from "r".
+	NewDecoder(r io.Reader) Decoder
+	// NewEncoder returns an Encoder which writes byte sequence into "w".
+	NewEncoder(w io.Writer) Encoder
+	// ContentType returns the Content-Type which this marshaler is responsible for.
+	ContentType() string
+}
+
+// Decoder decodes a byte sequence
+type Decoder interface {
+	Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into byte sequence.
+type Encoder interface {
+	Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
+// EncoderFunc adapts an encoder function into Encoder
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+	// Delimiter returns the record separator for the stream.
+	Delimiter() []byte
+}
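
Because DecoderFunc and EncoderFunc are plain function adapters, any closure with the matching signature satisfies Decoder or Encoder. A small sketch wrapping a json.Decoder (the payload is illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Adapt a closure around json.Decoder into a runtime.Decoder.
	dec := runtime.DecoderFunc(func(v interface{}) error {
		return json.NewDecoder(strings.NewReader(`{"name":"x"}`)).Decode(v)
	})

	var out map[string]string
	if err := dec.Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["name"]) // x
}
```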
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
new file mode 100644
index 0000000..5cc53ae
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
@@ -0,0 +1,91 @@
+package runtime
+
+import (
+	"errors"
+	"net/http"
+)
+
+// MIMEWildcard is the fallback MIME type used for requests which do not match
+// a registered MIME type.
+const MIMEWildcard = "*"
+
+var (
+	acceptHeader      = http.CanonicalHeaderKey("Accept")
+	contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
+
+	defaultMarshaler = &JSONPb{OrigName: true}
+)
+
+// MarshalerForRequest returns the inbound/outbound marshalers for this request.
+// It checks the registry on the ServeMux for the MIME types set by the Content-Type
+// header (inbound) and the Accept header (outbound). If multiple values are set,
+// it chooses the first one that exactly matches an entry in the registry.
+// If the Content-Type has no match (or is empty), the inbound marshaler falls back
+// to the one registered for the "*" wildcard; if the Accept header has no match,
+// the outbound marshaler defaults to the inbound one.
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
+	for _, acceptVal := range r.Header[acceptHeader] {
+		if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
+			outbound = m
+			break
+		}
+	}
+
+	for _, contentTypeVal := range r.Header[contentTypeHeader] {
+		if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok {
+			inbound = m
+			break
+		}
+	}
+
+	if inbound == nil {
+		inbound = mux.marshalers.mimeMap[MIMEWildcard]
+	}
+	if outbound == nil {
+		outbound = inbound
+	}
+
+	return inbound, outbound
+}
+
+// marshalerRegistry is a mapping from MIME types to Marshalers.
+type marshalerRegistry struct {
+	mimeMap map[string]Marshaler
+}
+
+// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
+// MIME type).
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
+	if len(mime) == 0 {
+		return errors.New("empty MIME type")
+	}
+
+	m.mimeMap[mime] = marshaler
+
+	return nil
+}
+
+// makeMarshalerMIMERegistry returns a new registry of marshalers.
+// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
+//
+// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
+// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with an "application/json" Content-Type.
+// "*" can be used to match any Content-Type.
+// This can be attached to a ServeMux with the marshaler option.
+func makeMarshalerMIMERegistry() marshalerRegistry {
+	return marshalerRegistry{
+		mimeMap: map[string]Marshaler{
+			MIMEWildcard: defaultMarshaler,
+		},
+	}
+}
+
+// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
+// Marshalers to a MIME type in mux.
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
+	return func(mux *ServeMux) {
+		if err := mux.marshalers.add(mime, marshaler); err != nil {
+			panic(err)
+		}
+	}
+}
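
A sketch of how this registry is typically populated at mux construction time, with the wildcard entry acting as the fallback (the MIME strings are examples, not requirements):

```go
package main

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	mux := runtime.NewServeMux(
		// Raw proto bytes for octet-stream requests.
		runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
		// Wildcard fallback for every other Content-Type/Accept value.
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{OrigName: true}),
	)
	http.ListenAndServe(":8080", mux)
}
```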
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
new file mode 100644
index 0000000..ec81e55
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
@@ -0,0 +1,268 @@
+package runtime
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/textproto"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// A HandlerFunc handles a specific pair of path pattern and HTTP method.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
+
+// ServeMux is a request multiplexer for grpc-gateway.
+// It matches HTTP requests to patterns and invokes the corresponding handler.
+type ServeMux struct {
+	// handlers maps HTTP method to a list of handlers.
+	handlers                  map[string][]handler
+	forwardResponseOptions    []func(context.Context, http.ResponseWriter, proto.Message) error
+	marshalers                marshalerRegistry
+	incomingHeaderMatcher     HeaderMatcherFunc
+	outgoingHeaderMatcher     HeaderMatcherFunc
+	metadataAnnotators        []func(context.Context, *http.Request) metadata.MD
+	protoErrorHandler         ProtoErrorHandlerFunc
+	disablePathLengthFallback bool
+}
+
+// ServeMuxOption is an option that can be given to a ServeMux on construction.
+type ServeMuxOption func(*ServeMux)
+
+// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
+//
+// forwardResponseOption is an option that will be called on the relevant context.Context,
+// http.ResponseWriter, and proto.Message before every forwarded response.
+//
+// The message may be nil in the case where just a header is being sent.
+func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
+	}
+}
+
+// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
+type HeaderMatcherFunc func(string) (string, bool)
+
+// DefaultHeaderMatcher is used to pass HTTP request headers to/from the gRPC context. It adds permanent HTTP header
+// keys (as specified by the IANA) to the gRPC context with the grpcgateway- prefix. HTTP headers that start with
+// 'Grpc-Metadata-' are mapped to gRPC metadata after the 'Grpc-Metadata-' prefix is removed.
+func DefaultHeaderMatcher(key string) (string, bool) {
+	key = textproto.CanonicalMIMEHeaderKey(key)
+	if isPermanentHTTPHeader(key) {
+		return MetadataPrefix + key, true
+	} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
+		return key[len(MetadataHeaderPrefix):], true
+	}
+	return "", false
+}
+
+// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming requests to the gateway.
+//
+// This matcher will be called with each header in the http.Request. If the matcher returns true, that header will be
+// passed to the gRPC context. To transform the header before passing it to the gRPC context, the matcher should
+// return the modified header.
+func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.incomingHeaderMatcher = fn
+	}
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing responses from the gateway.
+//
+// This matcher will be called with each header in the response header metadata. If the matcher returns true, that
+// header will be passed to the http response returned from the gateway. To transform the header before passing it to
+// the response, the matcher should return the modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.outgoingHeaderMatcher = fn
+	}
+}
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from the http.Request and modify the gRPC context. A common use
+// case is reading a token from a cookie and adding it to the gRPC context.
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+	}
+}
+
+// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to handle an error as general proto message defined by gRPC.
+// The response including body and status is not backward compatible with the default error handler.
+// When this option is used, HTTPError and OtherErrorHandler are overwritten on initialization.
+func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.protoErrorHandler = fn
+	}
+}
+
+// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
+func WithDisablePathLengthFallback() ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.disablePathLengthFallback = true
+	}
+}
+
+// NewServeMux returns a new ServeMux whose internal mapping is empty.
+func NewServeMux(opts ...ServeMuxOption) *ServeMux {
+	serveMux := &ServeMux{
+		handlers:               make(map[string][]handler),
+		forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
+		marshalers:             makeMarshalerMIMERegistry(),
+	}
+
+	for _, opt := range opts {
+		opt(serveMux)
+	}
+
+	if serveMux.protoErrorHandler != nil {
+		HTTPError = serveMux.protoErrorHandler
+		// OtherErrorHandler is no longer used when protoErrorHandler is set.
+		// Overwritten by a special error handler to return Unknown.
+		OtherErrorHandler = func(w http.ResponseWriter, r *http.Request, _ string, _ int) {
+			ctx := context.Background()
+			_, outboundMarshaler := MarshalerForRequest(serveMux, r)
+			sterr := status.Error(codes.Unknown, "unexpected use of OtherErrorHandler")
+			serveMux.protoErrorHandler(ctx, serveMux, outboundMarshaler, w, r, sterr)
+		}
+	}
+
+	if serveMux.incomingHeaderMatcher == nil {
+		serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
+	}
+
+	if serveMux.outgoingHeaderMatcher == nil {
+		serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
+			return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
+		}
+	}
+
+	return serveMux
+}
+
+// Handle associates "h" to the pair of HTTP method and path pattern.
+func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
+	s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
+}
+
+// ServeHTTP dispatches the request to the first handler whose pattern matches r.Method and r.URL.Path.
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
+	path := r.URL.Path
+	if !strings.HasPrefix(path, "/") {
+		if s.protoErrorHandler != nil {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest))
+			s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+		} else {
+			OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
+		}
+		return
+	}
+
+	components := strings.Split(path[1:], "/")
+	l := len(components)
+	var verb string
+	if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
+		if s.protoErrorHandler != nil {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
+			s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+		} else {
+			OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+		}
+		return
+	} else if idx > 0 {
+		c := components[l-1]
+		components[l-1], verb = c[:idx], c[idx+1:]
+	}
+
+	if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
+		r.Method = strings.ToUpper(override)
+		if err := r.ParseForm(); err != nil {
+			if s.protoErrorHandler != nil {
+				_, outboundMarshaler := MarshalerForRequest(s, r)
+				sterr := status.Error(codes.InvalidArgument, err.Error())
+				s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+			} else {
+				OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
+			}
+			return
+		}
+	}
+	for _, h := range s.handlers[r.Method] {
+		pathParams, err := h.pat.Match(components, verb)
+		if err != nil {
+			continue
+		}
+		h.h(w, r, pathParams)
+		return
+	}
+
+	// Look up other methods to handle fallback from GET to POST and
+	// to determine whether it is MethodNotAllowed or NotFound.
+	for m, handlers := range s.handlers {
+		if m == r.Method {
+			continue
+		}
+		for _, h := range handlers {
+			pathParams, err := h.pat.Match(components, verb)
+			if err != nil {
+				continue
+			}
+			// X-HTTP-Method-Override is optional. Always allow fallback to POST.
+			if s.isPathLengthFallback(r) {
+				if err := r.ParseForm(); err != nil {
+					if s.protoErrorHandler != nil {
+						_, outboundMarshaler := MarshalerForRequest(s, r)
+						sterr := status.Error(codes.InvalidArgument, err.Error())
+						s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+					} else {
+						OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
+					}
+					return
+				}
+				h.h(w, r, pathParams)
+				return
+			}
+			if s.protoErrorHandler != nil {
+				_, outboundMarshaler := MarshalerForRequest(s, r)
+				sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusMethodNotAllowed))
+				s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+			} else {
+				OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
+			}
+			return
+		}
+	}
+
+	if s.protoErrorHandler != nil {
+		_, outboundMarshaler := MarshalerForRequest(s, r)
+		sterr := status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
+		s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+	} else {
+		OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+	}
+}
+
+// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
+	return s.forwardResponseOptions
+}
+
+func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
+	return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
+}
+
+type handler struct {
+	pat Pattern
+	h   HandlerFunc
+}
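
To make the header-matching hooks concrete, a custom incoming matcher can forward one extra header and defer everything else to DefaultHeaderMatcher; a sketch (the header name is an arbitrary example):

```go
package main

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func main() {
	mux := runtime.NewServeMux(
		runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) {
			switch key {
			case "X-Request-Id":
				// Forward this header to gRPC metadata under its own name.
				return key, true
			default:
				// Defer to the permanent-header / Grpc-Metadata- rules.
				return runtime.DefaultHeaderMatcher(key)
			}
		}),
	)
	_ = mux // register handlers and serve as usual
}
```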
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
new file mode 100644
index 0000000..f16a84a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go
@@ -0,0 +1,227 @@
+package runtime
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/grpc-ecosystem/grpc-gateway/utilities"
+	"google.golang.org/grpc/grpclog"
+)
+
+var (
+	// ErrNotMatch indicates that the given HTTP request path does not match to the pattern.
+	ErrNotMatch = errors.New("not match to the path pattern")
+	// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
+	ErrInvalidPattern = errors.New("invalid pattern")
+)
+
+type op struct {
+	code    utilities.OpCode
+	operand int
+}
+
+// Pattern is a template pattern of HTTP request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
+type Pattern struct {
+	// ops is a list of operations
+	ops []op
+	// pool is a constant pool indexed by the operands or vars.
+	pool []string
+	// vars is a list of variable names to be bound by this pattern
+	vars []string
+	// stacksize is the max depth of the stack
+	stacksize int
+	// tailLen is the length of the fixed-size segments after a deep wildcard
+	tailLen int
+	// verb is the VERB part of the path pattern. It is empty if the pattern does not have a VERB part.
+	verb string
+}
+
+// NewPattern returns a new Pattern from the given definition values.
+// "ops" is a sequence of op codes. "pool" is a constant pool.
+// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
+// "version" must be 1 for now.
+// It returns an error if the given definition is invalid.
+func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
+	if version != 1 {
+		grpclog.Infof("unsupported version: %d", version)
+		return Pattern{}, ErrInvalidPattern
+	}
+
+	l := len(ops)
+	if l%2 != 0 {
+		grpclog.Infof("odd number of ops codes: %d", l)
+		return Pattern{}, ErrInvalidPattern
+	}
+
+	var (
+		typedOps        []op
+		stack, maxstack int
+		tailLen         int
+		pushMSeen       bool
+		vars            []string
+	)
+	for i := 0; i < l; i += 2 {
+		op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush:
+			if pushMSeen {
+				tailLen++
+			}
+			stack++
+		case utilities.OpPushM:
+			if pushMSeen {
+				grpclog.Infof("pushM appears twice")
+				return Pattern{}, ErrInvalidPattern
+			}
+			pushMSeen = true
+			stack++
+		case utilities.OpLitPush:
+			if op.operand < 0 || len(pool) <= op.operand {
+				grpclog.Infof("negative literal index: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			if pushMSeen {
+				tailLen++
+			}
+			stack++
+		case utilities.OpConcatN:
+			if op.operand <= 0 {
+				grpclog.Infof("negative concat size: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			stack -= op.operand
+			if stack < 0 {
+				grpclog.Print("stack underflow")
+				return Pattern{}, ErrInvalidPattern
+			}
+			stack++
+		case utilities.OpCapture:
+			if op.operand < 0 || len(pool) <= op.operand {
+				grpclog.Infof("variable name index out of bound: %d", op.operand)
+				return Pattern{}, ErrInvalidPattern
+			}
+			v := pool[op.operand]
+			op.operand = len(vars)
+			vars = append(vars, v)
+			stack--
+			if stack < 0 {
+				grpclog.Infof("stack underflow")
+				return Pattern{}, ErrInvalidPattern
+			}
+		default:
+			grpclog.Infof("invalid opcode: %d", op.code)
+			return Pattern{}, ErrInvalidPattern
+		}
+
+		if maxstack < stack {
+			maxstack = stack
+		}
+		typedOps = append(typedOps, op)
+	}
+	return Pattern{
+		ops:       typedOps,
+		pool:      pool,
+		vars:      vars,
+		stacksize: maxstack,
+		tailLen:   tailLen,
+		verb:      verb,
+	}, nil
+}
+
+// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
+func MustPattern(p Pattern, err error) Pattern {
+	if err != nil {
+		grpclog.Fatalf("Pattern initialization failed: %v", err)
+	}
+	return p
+}
+
+// Match examines components to determine if they match the Pattern.
+// If they match, the function returns a mapping from field paths to their captured values.
+// Otherwise, the function returns an error.
+func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
+	if p.verb != verb {
+		return nil, ErrNotMatch
+	}
+
+	var pos int
+	stack := make([]string, 0, p.stacksize)
+	captured := make([]string, len(p.vars))
+	l := len(components)
+	for _, op := range p.ops {
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush, utilities.OpLitPush:
+			if pos >= l {
+				return nil, ErrNotMatch
+			}
+			c := components[pos]
+			if op.code == utilities.OpLitPush {
+				if lit := p.pool[op.operand]; c != lit {
+					return nil, ErrNotMatch
+				}
+			}
+			stack = append(stack, c)
+			pos++
+		case utilities.OpPushM:
+			end := len(components)
+			if end < pos+p.tailLen {
+				return nil, ErrNotMatch
+			}
+			end -= p.tailLen
+			stack = append(stack, strings.Join(components[pos:end], "/"))
+			pos = end
+		case utilities.OpConcatN:
+			n := op.operand
+			l := len(stack) - n
+			stack = append(stack[:l], strings.Join(stack[l:], "/"))
+		case utilities.OpCapture:
+			n := len(stack) - 1
+			captured[op.operand] = stack[n]
+			stack = stack[:n]
+		}
+	}
+	if pos < l {
+		return nil, ErrNotMatch
+	}
+	bindings := make(map[string]string)
+	for i, val := range captured {
+		bindings[p.vars[i]] = val
+	}
+	return bindings, nil
+}
+
+// Verb returns the verb part of the Pattern.
+func (p Pattern) Verb() string { return p.verb }
+
+func (p Pattern) String() string {
+	var stack []string
+	for _, op := range p.ops {
+		switch op.code {
+		case utilities.OpNop:
+			continue
+		case utilities.OpPush:
+			stack = append(stack, "*")
+		case utilities.OpLitPush:
+			stack = append(stack, p.pool[op.operand])
+		case utilities.OpPushM:
+			stack = append(stack, "**")
+		case utilities.OpConcatN:
+			n := op.operand
+			l := len(stack) - n
+			stack = append(stack[:l], strings.Join(stack[l:], "/"))
+		case utilities.OpCapture:
+			n := len(stack) - 1
+			stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
+		}
+	}
+	segs := strings.Join(stack, "/")
+	if p.verb != "" {
+		return fmt.Sprintf("/%s:%s", segs, p.verb)
+	}
+	return "/" + segs
+}
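
To make the opcode machinery concrete, here is a hand-compiled sketch of the path template /v1/books/{id}. In practice protoc-gen-grpc-gateway emits these op sequences, so the literal values below are illustrative only:

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	pat := runtime.MustPattern(runtime.NewPattern(1,
		[]int{
			int(utilities.OpLitPush), 0, // literal "v1"
			int(utilities.OpLitPush), 1, // literal "books"
			int(utilities.OpPush), 0, // one wildcard segment
			int(utilities.OpConcatN), 1, // collapse it into a single string
			int(utilities.OpCapture), 2, // bind it to the variable "id"
		},
		[]string{"v1", "books", "id"}, // constant pool
		"",                            // no verb
	))

	params, err := pat.Match([]string{"v1", "books", "42"}, "")
	fmt.Println(params, err) // map[id:42] <nil>
}
```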
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
new file mode 100644
index 0000000..a3151e2
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go
@@ -0,0 +1,80 @@
+package runtime
+
+import (
+	"github.com/golang/protobuf/proto"
+)
+
+// StringP returns a pointer to a string whose pointee is the same as the given string value.
+func StringP(val string) (*string, error) {
+	return proto.String(val), nil
+}
+
+// BoolP parses the given string representation of a boolean value,
+// and returns a pointer to a bool whose value is the same as the parsed value.
+func BoolP(val string) (*bool, error) {
+	b, err := Bool(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Bool(b), nil
+}
+
+// Float64P parses the given string representation of a floating point number,
+// and returns a pointer to a float64 whose value is the same as the parsed number.
+func Float64P(val string) (*float64, error) {
+	f, err := Float64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float64(f), nil
+}
+
+// Float32P parses the given string representation of a floating point number,
+// and returns a pointer to a float32 whose value is the same as the parsed number.
+func Float32P(val string) (*float32, error) {
+	f, err := Float32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to an int64 whose value is the same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+	i, err := Int64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to an int32 whose value is the same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+	i, err := Int32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int32(i), nil
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is the same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+	i, err := Uint64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint64(i), nil
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is the same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+	i, err := Uint32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint32(i), nil
+}
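
A trivial usage sketch (the input string is arbitrary):

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Parse "42" into a *int32 suitable for a proto2 optional field.
	p, err := runtime.Int32P("42")
	if err != nil {
		panic(err)
	}
	fmt.Println(*p) // 42
}
```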
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
new file mode 100644
index 0000000..b7fa32e
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
@@ -0,0 +1,70 @@
+package runtime
+
+import (
+	"io"
+	"net/http"
+
+	"context"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
+type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
+
+var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
+
+// DefaultHTTPProtoErrorHandler is an implementation of HTTPError.
+// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
+// If otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body returned by this function is a Status message marshaled by a Marshaler.
+//
+// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead.
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
+	// return Internal when Marshal failed
+	const fallback = `{"code": 13, "message": "failed to marshal error message"}`
+
+	s, ok := status.FromError(err)
+	if !ok {
+		s = status.New(codes.Unknown, err.Error())
+	}
+
+	w.Header().Del("Trailer")
+
+	contentType := marshaler.ContentType()
+	// Check the marshaler at run time in order to keep backwards compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshaler interface to be able to remove this check.
+	if httpBodyMarshaler, ok := marshaler.(*HTTPBodyMarshaler); ok {
+		pb := s.Proto()
+		contentType = httpBodyMarshaler.ContentTypeFromMessage(pb)
+	}
+	w.Header().Set("Content-Type", contentType)
+
+	buf, merr := marshaler.Marshal(s.Proto())
+	if merr != nil {
+		grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
+		w.WriteHeader(http.StatusInternalServerError)
+		if _, err := io.WriteString(w, fallback); err != nil {
+			grpclog.Infof("Failed to write response: %v", err)
+		}
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+	handleForwardResponseTrailerHeader(w, md)
+	st := HTTPStatusFromCode(s.Code())
+	w.WriteHeader(st)
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	handleForwardResponseTrailer(w, md)
+}
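
A sketch of opting in through the option, as the comment above prescribes (rather than assigning to HTTPError directly):

```go
package main

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func main() {
	// Error responses are now Status messages marshaled by the outbound marshaler.
	mux := runtime.NewServeMux(
		runtime.WithProtoErrorHandler(runtime.DefaultHTTPProtoErrorHandler),
	)
	_ = mux
}
```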
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
new file mode 100644
index 0000000..bb9359f
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
@@ -0,0 +1,392 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net/url"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/grpc-ecosystem/grpc-gateway/utilities"
+	"google.golang.org/grpc/grpclog"
+)
+
+// PopulateQueryParameters populates "values" into "msg".
+// A value is ignored if its key starts with one of the elements in "filter".
+func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
+	// Compile the bracketed-key pattern once instead of on every iteration.
+	re := regexp.MustCompile(`^(.*)\[(.*)\]$`)
+	for key, values := range values {
+		match := re.FindStringSubmatch(key)
+		if len(match) == 3 {
+			key = match[1]
+			values = append([]string{match[2]}, values...)
+		}
+		fieldPath := strings.Split(key, ".")
+		if filter.HasCommonPrefix(fieldPath) {
+			continue
+		}
+		if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// PopulateFieldFromPath sets a value in a nested Protobuf structure.
+// It instantiates missing protobuf fields as it goes.
+func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
+	fieldPath := strings.Split(fieldPathString, ".")
+	return populateFieldValueFromPath(msg, fieldPath, []string{value})
+}
+
+func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
+	m := reflect.ValueOf(msg)
+	if m.Kind() != reflect.Ptr {
+		return fmt.Errorf("unexpected type %T: %v", msg, msg)
+	}
+	var props *proto.Properties
+	m = m.Elem()
+	for i, fieldName := range fieldPath {
+		isLast := i == len(fieldPath)-1
+		if !isLast && m.Kind() != reflect.Struct {
+			return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
+		}
+		var f reflect.Value
+		var err error
+		f, props, err = fieldByProtoName(m, fieldName)
+		if err != nil {
+			return err
+		} else if !f.IsValid() {
+			grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
+			return nil
+		}
+
+		switch f.Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
+			if !isLast {
+				return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
+			}
+			m = f
+		case reflect.Slice:
+			if !isLast {
+				return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
+			}
+			// Handle []byte
+			if f.Type().Elem().Kind() == reflect.Uint8 {
+				m = f
+				break
+			}
+			return populateRepeatedField(f, values, props)
+		case reflect.Ptr:
+			if f.IsNil() {
+				m = reflect.New(f.Type().Elem())
+				f.Set(m.Convert(f.Type()))
+			}
+			m = f.Elem()
+			continue
+		case reflect.Struct:
+			m = f
+			continue
+		case reflect.Map:
+			if !isLast {
+				return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
+			}
+			return populateMapField(f, values, props)
+		default:
+			return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
+		}
+	}
+	switch len(values) {
+	case 0:
+		return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
+	case 1:
+	default:
+		grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
+	}
+	return populateField(m, values[0], props)
+}
+
+// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
+// "m" must be a struct value. It returns zero reflect.Value if no such field found.
+func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
+	props := proto.GetProperties(m.Type())
+
+	// look up field name in oneof map
+	if op, ok := props.OneofTypes[name]; ok {
+		v := reflect.New(op.Type.Elem())
+		field := m.Field(op.Field)
+		if !field.IsNil() {
+			return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
+		}
+		field.Set(v)
+		return v.Elem().Field(0), op.Prop, nil
+	}
+
+	for _, p := range props.Prop {
+		if p.OrigName == name {
+			return m.FieldByName(p.Name), p, nil
+		}
+		if p.JSONName == name {
+			return m.FieldByName(p.Name), p, nil
+		}
+	}
+	return reflect.Value{}, nil, nil
+}
+
+func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
+	if len(values) != 2 {
+		return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name)
+	}
+
+	key, value := values[0], values[1]
+	keyType := f.Type().Key()
+	valueType := f.Type().Elem()
+	if f.IsNil() {
+		f.Set(reflect.MakeMap(f.Type()))
+	}
+
+	keyConv, ok := convFromType[keyType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
+	}
+	valueConv, ok := convFromType[valueType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
+	}
+
+	keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
+	if err := keyV[1].Interface(); err != nil {
+		return err.(error)
+	}
+	valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
+	if err := valueV[1].Interface(); err != nil {
+		return err.(error)
+	}
+
+	f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
+
+	return nil
+}
+
+func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
+	elemType := f.Type().Elem()
+
+	// is the destination field a slice of an enumeration type?
+	if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
+		return populateFieldEnumRepeated(f, values, enumValMap)
+	}
+
+	conv, ok := convFromType[elemType.Kind()]
+	if !ok {
+		return fmt.Errorf("unsupported field type %s", elemType)
+	}
+	f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
+	for i, v := range values {
+		result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
+		if err := result[1].Interface(); err != nil {
+			return err.(error)
+		}
+		f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
+	}
+	return nil
+}
+
+func populateField(f reflect.Value, value string, props *proto.Properties) error {
+	i := f.Addr().Interface()
+
+	// Handle protobuf well known types
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	if wkt, ok := i.(wkt); ok {
+		switch wkt.XXX_WellKnownType() {
+		case "Timestamp":
+			if value == "null" {
+				f.Field(0).SetInt(0)
+				f.Field(1).SetInt(0)
+				return nil
+			}
+
+			t, err := time.Parse(time.RFC3339Nano, value)
+			if err != nil {
+				return fmt.Errorf("bad Timestamp: %v", err)
+			}
+			f.Field(0).SetInt(int64(t.Unix()))
+			f.Field(1).SetInt(int64(t.Nanosecond()))
+			return nil
+		case "Duration":
+			if value == "null" {
+				f.Field(0).SetInt(0)
+				f.Field(1).SetInt(0)
+				return nil
+			}
+			d, err := time.ParseDuration(value)
+			if err != nil {
+				return fmt.Errorf("bad Duration: %v", err)
+			}
+
+			ns := d.Nanoseconds()
+			s := ns / 1e9
+			ns %= 1e9
+			f.Field(0).SetInt(s)
+			f.Field(1).SetInt(ns)
+			return nil
+		case "DoubleValue":
+			fallthrough
+		case "FloatValue":
+			float64Val, err := strconv.ParseFloat(value, 64)
+			if err != nil {
+				return fmt.Errorf("bad DoubleValue: %s", value)
+			}
+			f.Field(0).SetFloat(float64Val)
+			return nil
+		case "Int64Value":
+			fallthrough
+		case "Int32Value":
+			int64Val, err := strconv.ParseInt(value, 10, 64)
+			if err != nil {
+				return fmt.Errorf("bad DoubleValue: %s", value)
+			}
+			f.Field(0).SetInt(int64Val)
+			return nil
+		case "UInt64Value":
+			fallthrough
+		case "UInt32Value":
+			uint64Val, err := strconv.ParseUint(value, 10, 64)
+			if err != nil {
+				return fmt.Errorf("bad DoubleValue: %s", value)
+			}
+			f.Field(0).SetUint(uint64Val)
+			return nil
+		case "BoolValue":
+			if value == "true" {
+				f.Field(0).SetBool(true)
+			} else if value == "false" {
+				f.Field(0).SetBool(false)
+			} else {
+				return fmt.Errorf("bad BoolValue: %s", value)
+			}
+			return nil
+		case "StringValue":
+			f.Field(0).SetString(value)
+			return nil
+		case "BytesValue":
+			bytesVal, err := base64.StdEncoding.DecodeString(value)
+			if err != nil {
+				return fmt.Errorf("bad BytesValue: %s", value)
+			}
+			f.Field(0).SetBytes(bytesVal)
+			return nil
+		}
+	}
+
+	// Handle google well known types
+	if gwkt, ok := i.(proto.Message); ok {
+		switch proto.MessageName(gwkt) {
+		case "google.protobuf.FieldMask":
+			p := f.Field(0)
+			for _, v := range strings.Split(value, ",") {
+				if v != "" {
+					p.Set(reflect.Append(p, reflect.ValueOf(v)))
+				}
+			}
+			return nil
+		}
+	}
+
+	// Handle Time and Duration stdlib types
+	switch t := i.(type) {
+	case *time.Time:
+		pt, err := time.Parse(time.RFC3339Nano, value)
+		if err != nil {
+			return fmt.Errorf("bad Timestamp: %v", err)
+		}
+		*t = pt
+		return nil
+	case *time.Duration:
+		d, err := time.ParseDuration(value)
+		if err != nil {
+			return fmt.Errorf("bad Duration: %v", err)
+		}
+		*t = d
+		return nil
+	}
+
+	// is the destination field an enumeration type?
+	if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
+		return populateFieldEnum(f, value, enumValMap)
+	}
+
+	conv, ok := convFromType[f.Kind()]
+	if !ok {
+		return fmt.Errorf("field type %T is not supported in query parameters", i)
+	}
+	result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
+	if err := result[1].Interface(); err != nil {
+		return err.(error)
+	}
+	f.Set(result[0].Convert(f.Type()))
+	return nil
+}
+
+func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
+	// see if it's an enumeration string
+	if enumVal, ok := enumValMap[value]; ok {
+		return reflect.ValueOf(enumVal).Convert(t), nil
+	}
+
+	// check for an integer that matches an enumeration value
+	eVal, err := strconv.Atoi(value)
+	if err != nil {
+		return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
+	}
+	for _, v := range enumValMap {
+		if v == int32(eVal) {
+			return reflect.ValueOf(eVal).Convert(t), nil
+		}
+	}
+	return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
+}
+
+func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
+	cval, err := convertEnum(value, f.Type(), enumValMap)
+	if err != nil {
+		return err
+	}
+	f.Set(cval)
+	return nil
+}
+
+func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
+	elemType := f.Type().Elem()
+	f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
+	for i, v := range values {
+		result, err := convertEnum(v, elemType, enumValMap)
+		if err != nil {
+			return err
+		}
+		f.Index(i).Set(result)
+	}
+	return nil
+}
+
+var (
+	convFromType = map[reflect.Kind]reflect.Value{
+		reflect.String:  reflect.ValueOf(String),
+		reflect.Bool:    reflect.ValueOf(Bool),
+		reflect.Float64: reflect.ValueOf(Float64),
+		reflect.Float32: reflect.ValueOf(Float32),
+		reflect.Int64:   reflect.ValueOf(Int64),
+		reflect.Int32:   reflect.ValueOf(Int32),
+		reflect.Uint64:  reflect.ValueOf(Uint64),
+		reflect.Uint32:  reflect.ValueOf(Uint32),
+		reflect.Slice:   reflect.ValueOf(Bytes),
+	}
+)
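
A usage sketch under stated assumptions: `pb.ListBooksRequest` and its import path are hypothetical stand-ins for a generated message with `name` (string) and `page_size` (int32) fields:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"

	pb "example.com/bookstore" // hypothetical generated package
)

func main() {
	req := new(pb.ListBooksRequest) // hypothetical generated message
	values := url.Values{
		"name":      {"shelves/1"},
		"page_size": {"10"},
	}
	// An empty filter: no field paths are excluded from population.
	if err := runtime.PopulateQueryParameters(req, values, utilities.NewDoubleArray(nil)); err != nil {
		panic(err)
	}
	fmt.Println(req)
}
```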
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
new file mode 100644
index 0000000..7109d79
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel
@@ -0,0 +1,21 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+package(default_visibility = ["//visibility:public"])
+
+go_library(
+    name = "go_default_library",
+    srcs = [
+        "doc.go",
+        "pattern.go",
+        "readerfactory.go",
+        "trie.go",
+    ],
+    importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities",
+)
+
+go_test(
+    name = "go_default_test",
+    size = "small",
+    srcs = ["trie_test.go"],
+    embed = [":go_default_library"],
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go
new file mode 100644
index 0000000..cf79a4d
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go
@@ -0,0 +1,2 @@
+// Package utilities provides members for internal use in grpc-gateway.
+package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
new file mode 100644
index 0000000..dfe7de4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// An OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+	// OpNop does nothing
+	OpNop = OpCode(iota)
+	// OpPush pushes a component to the stack
+	OpPush
+	// OpLitPush pushes a component to the stack if it matches the literal
+	OpLitPush
+	// OpPushM concatenates the remaining components and pushes them to the stack
+	OpPushM
+	// OpConcatN pops N items from the stack, concatenates them, and pushes the result back to the stack
+	OpConcatN
+	// OpCapture pops an item and binds it to the variable
+	OpCapture
+	// OpEnd is the least positive invalid opcode.
+	OpEnd
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
new file mode 100644
index 0000000..6dd3854
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
@@ -0,0 +1,20 @@
+package utilities
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+)
+
+// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
+// at the start of the stream.
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return func() io.Reader {
+		return bytes.NewReader(b)
+	}, nil
+}
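
A short sketch of the rewind behavior (the payload is illustrative):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	factory, err := utilities.IOReaderFactory(strings.NewReader("payload"))
	if err != nil {
		panic(err)
	}

	// Each call yields an independent reader positioned at the start.
	a, _ := ioutil.ReadAll(factory())
	b, _ := ioutil.ReadAll(factory())
	fmt.Println(string(a), string(b)) // payload payload
}
```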
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
new file mode 100644
index 0000000..c2b7b30
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
@@ -0,0 +1,177 @@
+package utilities
+
+import (
+	"sort"
+)
+
+// DoubleArray is a Double Array implementation of a trie on sequences of strings.
+type DoubleArray struct {
+	// Encoding keeps an encoding from string to int
+	Encoding map[string]int
+	// Base is the base array of Double Array
+	Base []int
+	// Check is the check array of Double Array
+	Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
+func NewDoubleArray(seqs [][]string) *DoubleArray {
+	da := &DoubleArray{Encoding: make(map[string]int)}
+	if len(seqs) == 0 {
+		return da
+	}
+
+	encoded := registerTokens(da, seqs)
+	sort.Sort(byLex(encoded))
+
+	root := node{row: -1, col: -1, left: 0, right: len(encoded)}
+	addSeqs(da, encoded, 0, root)
+
+	for i := len(da.Base); i > 0; i-- {
+		if da.Check[i-1] != 0 {
+			da.Base = da.Base[:i]
+			da.Check = da.Check[:i]
+			break
+		}
+	}
+	return da
+}
+
+func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
+	var result [][]int
+	for _, seq := range seqs {
+		var encoded []int
+		for _, token := range seq {
+			if _, ok := da.Encoding[token]; !ok {
+				da.Encoding[token] = len(da.Encoding)
+			}
+			encoded = append(encoded, da.Encoding[token])
+		}
+		result = append(result, encoded)
+	}
+	for i := range result {
+		result[i] = append(result[i], len(da.Encoding))
+	}
+	return result
+}
+
+type node struct {
+	row, col    int
+	left, right int
+}
+
+func (n node) value(seqs [][]int) int {
+	return seqs[n.row][n.col]
+}
+
+func (n node) children(seqs [][]int) []*node {
+	var result []*node
+	lastVal := int(-1)
+	last := new(node)
+	for i := n.left; i < n.right; i++ {
+		if lastVal == seqs[i][n.col+1] {
+			continue
+		}
+		last.right = i
+		last = &node{
+			row:  i,
+			col:  n.col + 1,
+			left: i,
+		}
+		result = append(result, last)
+	}
+	last.right = n.right
+	return result
+}
+
+func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
+	ensureSize(da, pos)
+
+	children := n.children(seqs)
+	var i int
+	for i = 1; ; i++ {
+		ok := func() bool {
+			for _, child := range children {
+				code := child.value(seqs)
+				j := i + code
+				ensureSize(da, j)
+				if da.Check[j] != 0 {
+					return false
+				}
+			}
+			return true
+		}()
+		if ok {
+			break
+		}
+	}
+	da.Base[pos] = i
+	for _, child := range children {
+		code := child.value(seqs)
+		j := i + code
+		da.Check[j] = pos + 1
+	}
+	terminator := len(da.Encoding)
+	for _, child := range children {
+		code := child.value(seqs)
+		if code == terminator {
+			continue
+		}
+		j := i + code
+		addSeqs(da, seqs, j, *child)
+	}
+}
+
+func ensureSize(da *DoubleArray, i int) {
+	for i >= len(da.Base) {
+		da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
+		da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
+	}
+}
+
+type byLex [][]int
+
+func (l byLex) Len() int      { return len(l) }
+func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l byLex) Less(i, j int) bool {
+	si := l[i]
+	sj := l[j]
+	var k int
+	for k = 0; k < len(si) && k < len(sj); k++ {
+		if si[k] < sj[k] {
+			return true
+		}
+		if si[k] > sj[k] {
+			return false
+		}
+	}
+	if k < len(sj) {
+		return true
+	}
+	return false
+}
+
+// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
+func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
+	if len(da.Base) == 0 {
+		return false
+	}
+
+	var i int
+	for _, t := range seq {
+		code, ok := da.Encoding[t]
+		if !ok {
+			break
+		}
+		j := da.Base[i] + code
+		if len(da.Check) <= j || da.Check[j] != i+1 {
+			break
+		}
+		i = j
+	}
+	j := da.Base[i] + len(da.Encoding)
+	if len(da.Check) <= j || da.Check[j] != i+1 {
+		return false
+	}
+	return true
+}
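
A small sketch of building the trie and querying prefixes (the token sequences are arbitrary examples):

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	// Register two token sequences.
	da := utilities.NewDoubleArray([][]string{
		{"foo", "bar"},
		{"baz"},
	})

	fmt.Println(da.HasCommonPrefix([]string{"foo", "bar", "qux"})) // true: ["foo" "bar"] is a registered prefix
	fmt.Println(da.HasCommonPrefix([]string{"foo"}))               // false: "foo" alone is not registered
	fmt.Println(da.HasCommonPrefix([]string{"baz", "anything"}))   // true
}
```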
diff --git a/vendor/github.com/mongodb/mongo-go-driver/LICENSE b/vendor/github.com/mongodb/mongo-go-driver/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/mongodb/mongo-go-driver/THIRD-PARTY-NOTICES b/vendor/github.com/mongodb/mongo-go-driver/THIRD-PARTY-NOTICES
new file mode 100644
index 0000000..6e6cd4b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/THIRD-PARTY-NOTICES
@@ -0,0 +1,445 @@
+---------------------------------------------------------------------
+License notice for gopkg.in/mgo.v2/bson
+---------------------------------------------------------------------
+
+BSON library for Go
+
+Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+---------------------------------------------------------------------
+License notice for JSON and CSV code from github.com/golang/go
+---------------------------------------------------------------------
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for github.com/buger/jsonparser
+----------------------------------------------------------------------
+
+MIT License
+
+Copyright (c) 2016 Leonid Bugaev
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for github.com/davecgh/go-spew
+----------------------------------------------------------------------
+
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for github.com/golang/snappy
+----------------------------------------------------------------------
+
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for github.com/google/go-cmp
+----------------------------------------------------------------------
+
+Copyright (c) 2017 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for github.com/kr/pretty
+----------------------------------------------------------------------
+
+The MIT License (MIT)
+
+Copyright 2012 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for github.com/kr/text
+----------------------------------------------------------------------
+
+Copyright 2012 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for github.com/montanaflynn/stats
+----------------------------------------------------------------------
+
+The MIT License (MIT)
+
+Copyright (c) 2014-2015 Montana Flynn (https://anonfunction.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for github.com/pmezard/go-difflib
+----------------------------------------------------------------------
+
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+    The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for github.com/stretchr/testify
+----------------------------------------------------------------------
+
+Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
+
+Please consider promoting this project if you find it useful.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of the Software,
+and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
+OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for github.com/tidwall/pretty
+----------------------------------------------------------------------
+
+The MIT License (MIT)
+
+Copyright (c) 2017 Josh Baker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for golang.org/x/crypto
+----------------------------------------------------------------------
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for golang.org/x/net
+----------------------------------------------------------------------
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for golang.org/x/sync
+----------------------------------------------------------------------
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for gopkg.in/yaml.v2
+----------------------------------------------------------------------
+
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bson.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bson.go
new file mode 100644
index 0000000..44cdec8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bson.go
@@ -0,0 +1,60 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
+// See THIRD-PARTY-NOTICES for original license terms.
+
+// +build go1.9
+
+package bson
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// Zeroer allows custom struct types to implement a report of zero
+// state. All struct types that don't implement Zeroer or where IsZero
+// returns false are considered to be not zero.
+type Zeroer interface {
+	IsZero() bool
+}
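As a hedged illustration (the type and field names below are invented, not part of the driver), a struct can opt into custom zero-ness so codecs that consult Zeroer can treat it as empty:

```go
// OptionalID reports itself as zero until explicitly set, letting
// encoders that check Zeroer skip it during omitempty-style handling.
type OptionalID struct {
	ID  string
	Set bool
}

// IsZero implements Zeroer.
func (o OptionalID) IsZero() bool { return !o.Set }
```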
+
+// D represents a BSON Document. This type can be used to represent BSON in a concise and readable
+// manner. It should generally be used when serializing to BSON. For deserializing, the Raw or
+// Document types should be used.
+//
+// Example usage:
+//
+// 		bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
+//
+// This type should be used in situations where order matters, such as MongoDB commands. If the
+// order is not important, a map is more comfortable and concise.
+type D = primitive.D
+
+// E represents a BSON element for a D. It is usually used inside a D.
+type E = primitive.E
+
+// M is an unordered, concise representation of a BSON Document. It should generally be used to
+// serialize BSON when the order of the elements of a BSON document do not matter. If the element
+// order matters, use a D instead.
+//
+// Example usage:
+//
+// 		bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
+//
+// This type is handled in the encoders as a regular map[string]interface{}. The elements will be
+// serialized in an undefined, random order, and the order will be different each time.
+type M = primitive.M
+
+// An A represents a BSON array. This type can be used to represent a BSON array in a concise and
+// readable manner. It should generally be used when serializing to BSON. For deserializing, the
+// RawArray or Array types should be used.
+//
+// Example usage:
+//
+// 		bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}}
+//
+type A = primitive.A
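A minimal sketch of choosing between these aliases, assuming the driver's top-level bson.Marshal (defined elsewhere in this package):

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/bson"
)

func main() {
	// D preserves element order; required for commands, where order matters.
	cmd := bson.D{{"ping", 1}}

	// M is unordered; convenient for filters, where order is irrelevant.
	filter := bson.M{"name": "alice", "age": 30}

	raw, err := bson.Marshal(cmd)
	if err != nil {
		panic(err)
	}
	fmt.Printf("ping command marshals to %d bytes; filter has %d keys\n", len(raw), len(filter))
}
```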
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bson_1_8.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bson_1_8.go
new file mode 100644
index 0000000..beac40b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bson_1_8.go
@@ -0,0 +1,91 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// +build !go1.9
+
+package bson
+
+import (
+	"math"
+	"strconv"
+	"strings"
+)
+
+// Zeroer allows custom struct types to implement a report of zero
+// state. All struct types that don't implement Zeroer or where IsZero
+// returns false are considered to be not zero.
+type Zeroer interface {
+	IsZero() bool
+}
+
+// D represents a BSON Document. This type can be used to represent BSON in a concise and readable
+// manner. It should generally be used when serializing to BSON. For deserializing, the Raw or
+// Document types should be used.
+//
+// Example usage:
+//
+// 		primitive.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
+//
+// This type should be used in situations where order matters, such as MongoDB commands. If the
+// order is not important, a map is more comfortable and concise.
+type D []E
+
+// Map creates a map from the elements of the D.
+func (d D) Map() M {
+	m := make(M, len(d))
+	for _, e := range d {
+		m[e.Key] = e.Value
+	}
+	return m
+}
+
+// E represents a BSON element for a D. It is usually used inside a D.
+type E struct {
+	Key   string
+	Value interface{}
+}
+
+// M is an unordered, concise representation of a BSON Document. It should generally be used to
+// serialize BSON when the order of the elements of a BSON document do not matter. If the element
+// order matters, use a D instead.
+//
+// Example usage:
+//
+// 		primitive.M{"foo": "bar", "hello": "world", "pi": 3.14159}
+//
+// This type is handled in the encoders as a regular map[string]interface{}. The elements will be
+// serialized in an undefined, random order, and the order will be different each time.
+type M map[string]interface{}
+
+// An A represents a BSON array. This type can be used to represent a BSON array in a concise and
+// readable manner. It should generally be used when serializing to BSON. For deserializing, the
+// RawArray or Array types should be used.
+//
+// Example usage:
+//
+// 		primitive.A{"bar", "world", 3.14159, primitive.D{{"qux", 12345}}}
+//
+type A []interface{}
+
+func formatDouble(f float64) string {
+	var s string
+	if math.IsInf(f, 1) {
+		s = "Infinity"
+	} else if math.IsInf(f, -1) {
+		s = "-Infinity"
+	} else if math.IsNaN(f) {
+		s = "NaN"
+	} else {
+		// Print exactly one decimal place for integers; otherwise, print as many as
+		// necessary to perfectly represent the value.
+		s = strconv.FormatFloat(f, 'G', -1, 64)
+		if !strings.ContainsRune(s, '.') {
+			s += ".0"
+		}
+	}
+
+	return s
+}
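To make the branch behavior concrete, here is a standalone copy of the helper with its observable outputs (a sketch for illustration; the real function above is unexported and built only for pre-1.9 Go):

```go
package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// formatDouble mirrors the unexported helper above: special-case the
// non-finite values, then force a trailing ".0" onto integral floats.
func formatDouble(f float64) string {
	switch {
	case math.IsInf(f, 1):
		return "Infinity"
	case math.IsInf(f, -1):
		return "-Infinity"
	case math.IsNaN(f):
		return "NaN"
	}
	s := strconv.FormatFloat(f, 'G', -1, 64)
	if !strings.ContainsRune(s, '.') {
		s += ".0"
	}
	return s
}

func main() {
	fmt.Println(formatDouble(3))           // 3.0
	fmt.Println(formatDouble(3.25))        // 3.25
	fmt.Println(formatDouble(math.Inf(1))) // Infinity
	fmt.Println(formatDouble(math.NaN()))  // NaN
}
```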
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/bsoncodec.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/bsoncodec.go
new file mode 100644
index 0000000..4c5530c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/bsoncodec.go
@@ -0,0 +1,163 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+// Marshaler is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+type Marshaler interface {
+	MarshalBSON() ([]byte, error)
+}
+
+// ValueMarshaler is an interface implemented by types that can marshal
+// themselves into a BSON value as bytes. The type must be the valid type for
+// the bytes returned. The bytes and byte type together must be valid if the
+// error is nil.
+type ValueMarshaler interface {
+	MarshalBSONValue() (bsontype.Type, []byte, error)
+}
+
+// Unmarshaler is an interface implemented by types that can unmarshal a BSON
+// document representation of themselves. The BSON bytes can be assumed to be
+// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data
+// after returning.
+type Unmarshaler interface {
+	UnmarshalBSON([]byte) error
+}
+
+// ValueUnmarshaler is an interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+type ValueUnmarshaler interface {
+	UnmarshalBSONValue(bsontype.Type, []byte) error
+}
+
+// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be
+// encoded by the ValueEncoder.
+type ValueEncoderError struct {
+	Name     string
+	Types    []reflect.Type
+	Kinds    []reflect.Kind
+	Received reflect.Value
+}
+
+func (vee ValueEncoderError) Error() string {
+	typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds))
+	for _, t := range vee.Types {
+		typeKinds = append(typeKinds, t.String())
+	}
+	for _, k := range vee.Kinds {
+		if k == reflect.Map {
+			typeKinds = append(typeKinds, "map[string]*")
+			continue
+		}
+		typeKinds = append(typeKinds, k.String())
+	}
+	received := vee.Received.Kind().String()
+	if vee.Received.IsValid() {
+		received = vee.Received.Type().String()
+	}
+	return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received)
+}
+
+// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be
+// decoded by the ValueDecoder.
+type ValueDecoderError struct {
+	Name     string
+	Types    []reflect.Type
+	Kinds    []reflect.Kind
+	Received reflect.Value
+}
+
+func (vde ValueDecoderError) Error() string {
+	typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds))
+	for _, t := range vde.Types {
+		typeKinds = append(typeKinds, t.String())
+	}
+	for _, k := range vde.Kinds {
+		if k == reflect.Map {
+			typeKinds = append(typeKinds, "map[string]*")
+			continue
+		}
+		typeKinds = append(typeKinds, k.String())
+	}
+	received := vde.Received.Kind().String()
+	if vde.Received.IsValid() {
+		received = vde.Received.Type().String()
+	}
+	return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received)
+}
+
+// EncodeContext is the contextual information required for a Codec to encode a
+// value.
+type EncodeContext struct {
+	*Registry
+	MinSize bool
+}
+
+// DecodeContext is the contextual information required for a Codec to decode a
+// value.
+type DecodeContext struct {
+	*Registry
+	Truncate bool
+	// Ancestor is the type of a containing document. This is mainly used to determine what type
+	// should be used when decoding an embedded document into an empty interface. For example, if
+	// Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface
+	// will be decoded into a bson.M.
+	Ancestor reflect.Type
+}
+
+// ValueCodec is the interface that groups the methods to encode and decode
+// values.
+type ValueCodec interface {
+	ValueEncoder
+	ValueDecoder
+}
+
+// ValueEncoder is the interface implemented by types that can handle the encoding of a value.
+type ValueEncoder interface {
+	EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error
+}
+
+// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be
+// used as a ValueEncoder.
+type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error
+
+// EncodeValue implements the ValueEncoder interface.
+func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	return fn(ec, vw, val)
+}
+
+// ValueDecoder is the interface implemented by types that can handle the decoding of a value.
+type ValueDecoder interface {
+	DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error
+}
+
+// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be
+// used as a ValueDecoder.
+type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error
+
+// DecodeValue implements the ValueDecoder interface.
+func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	return fn(dc, vr, val)
+}
+
+// CodecZeroer is the interface implemented by Codecs that can also determine if
+// a value of the type that would be encoded is zero.
+type CodecZeroer interface {
+	IsTypeZero(interface{}) bool
+}
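For example, the adapter lets a plain function serve as a ValueEncoder. A hedged sketch follows: myString and encodeMyString are invented for illustration, and NewRegistryBuilder, RegisterEncoder, and Build are assumed from this package's registry code.

```go
package main

import (
	"reflect"
	"strings"

	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
)

// myString is a custom type we want stored as an upper-cased BSON string.
type myString string

var tMyString = reflect.TypeOf(myString(""))

// encodeMyString matches the ValueEncoderFunc signature.
func encodeMyString(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
	if !val.IsValid() || val.Type() != tMyString {
		return bsoncodec.ValueEncoderError{Name: "encodeMyString", Types: []reflect.Type{tMyString}, Received: val}
	}
	return vw.WriteString(strings.ToUpper(val.String()))
}

func main() {
	rb := bsoncodec.NewRegistryBuilder()
	rb.RegisterEncoder(tMyString, bsoncodec.ValueEncoderFunc(encodeMyString))
	_ = rb.Build() // the resulting *Registry plugs into the driver's marshalling
}
```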
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/default_value_decoders.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/default_value_decoders.go
new file mode 100644
index 0000000..645f89e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/default_value_decoders.go
@@ -0,0 +1,1014 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+var defaultValueDecoders DefaultValueDecoders
+
+// DefaultValueDecoders is a namespace type for the default ValueDecoders used
+// when creating a registry.
+type DefaultValueDecoders struct{}
+
+// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with
+// the provided RegistryBuilder.
+//
+// There is no support for decoding map[string]interface{} because there is no decoder for
+// interface{}, so users must either register this decoder themselves or use the
+// EmptyInterfaceDecoder available in the bson package.
+func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) {
+	if rb == nil {
+		panic(errors.New("argument to RegisterDefaultDecoders must not be nil"))
+	}
+
+	rb.
+		RegisterDecoder(tBinary, ValueDecoderFunc(dvd.BinaryDecodeValue)).
+		RegisterDecoder(tUndefined, ValueDecoderFunc(dvd.UndefinedDecodeValue)).
+		RegisterDecoder(tDateTime, ValueDecoderFunc(dvd.DateTimeDecodeValue)).
+		RegisterDecoder(tNull, ValueDecoderFunc(dvd.NullDecodeValue)).
+		RegisterDecoder(tRegex, ValueDecoderFunc(dvd.RegexDecodeValue)).
+		RegisterDecoder(tDBPointer, ValueDecoderFunc(dvd.DBPointerDecodeValue)).
+		RegisterDecoder(tTimestamp, ValueDecoderFunc(dvd.TimestampDecodeValue)).
+		RegisterDecoder(tMinKey, ValueDecoderFunc(dvd.MinKeyDecodeValue)).
+		RegisterDecoder(tMaxKey, ValueDecoderFunc(dvd.MaxKeyDecodeValue)).
+		RegisterDecoder(tJavaScript, ValueDecoderFunc(dvd.JavaScriptDecodeValue)).
+		RegisterDecoder(tSymbol, ValueDecoderFunc(dvd.SymbolDecodeValue)).
+		RegisterDecoder(tByteSlice, ValueDecoderFunc(dvd.ByteSliceDecodeValue)).
+		RegisterDecoder(tTime, ValueDecoderFunc(dvd.TimeDecodeValue)).
+		RegisterDecoder(tEmpty, ValueDecoderFunc(dvd.EmptyInterfaceDecodeValue)).
+		RegisterDecoder(tOID, ValueDecoderFunc(dvd.ObjectIDDecodeValue)).
+		RegisterDecoder(tDecimal, ValueDecoderFunc(dvd.Decimal128DecodeValue)).
+		RegisterDecoder(tJSONNumber, ValueDecoderFunc(dvd.JSONNumberDecodeValue)).
+		RegisterDecoder(tURL, ValueDecoderFunc(dvd.URLDecodeValue)).
+		RegisterDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)).
+		RegisterDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue)).
+		RegisterDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)).
+		RegisterDecoder(tCodeWithScope, ValueDecoderFunc(dvd.CodeWithScopeDecodeValue)).
+		RegisterDefaultDecoder(reflect.Bool, ValueDecoderFunc(dvd.BooleanDecodeValue)).
+		RegisterDefaultDecoder(reflect.Int, ValueDecoderFunc(dvd.IntDecodeValue)).
+		RegisterDefaultDecoder(reflect.Int8, ValueDecoderFunc(dvd.IntDecodeValue)).
+		RegisterDefaultDecoder(reflect.Int16, ValueDecoderFunc(dvd.IntDecodeValue)).
+		RegisterDefaultDecoder(reflect.Int32, ValueDecoderFunc(dvd.IntDecodeValue)).
+		RegisterDefaultDecoder(reflect.Int64, ValueDecoderFunc(dvd.IntDecodeValue)).
+		RegisterDefaultDecoder(reflect.Uint, ValueDecoderFunc(dvd.UintDecodeValue)).
+		RegisterDefaultDecoder(reflect.Uint8, ValueDecoderFunc(dvd.UintDecodeValue)).
+		RegisterDefaultDecoder(reflect.Uint16, ValueDecoderFunc(dvd.UintDecodeValue)).
+		RegisterDefaultDecoder(reflect.Uint32, ValueDecoderFunc(dvd.UintDecodeValue)).
+		RegisterDefaultDecoder(reflect.Uint64, ValueDecoderFunc(dvd.UintDecodeValue)).
+		RegisterDefaultDecoder(reflect.Float32, ValueDecoderFunc(dvd.FloatDecodeValue)).
+		RegisterDefaultDecoder(reflect.Float64, ValueDecoderFunc(dvd.FloatDecodeValue)).
+		RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)).
+		RegisterDefaultDecoder(reflect.Map, ValueDecoderFunc(dvd.MapDecodeValue)).
+		RegisterDefaultDecoder(reflect.Slice, ValueDecoderFunc(dvd.SliceDecodeValue)).
+		RegisterDefaultDecoder(reflect.String, ValueDecoderFunc(dvd.StringDecodeValue)).
+		RegisterDefaultDecoder(reflect.Struct, &StructCodec{cache: make(map[reflect.Type]*structDescription), parser: DefaultStructTagParser}).
+		RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()).
+		RegisterTypeMapEntry(bsontype.Double, tFloat64).
+		RegisterTypeMapEntry(bsontype.String, tString).
+		RegisterTypeMapEntry(bsontype.Array, tA).
+		RegisterTypeMapEntry(bsontype.Binary, tBinary).
+		RegisterTypeMapEntry(bsontype.Undefined, tUndefined).
+		RegisterTypeMapEntry(bsontype.ObjectID, tOID).
+		RegisterTypeMapEntry(bsontype.Boolean, tBool).
+		RegisterTypeMapEntry(bsontype.DateTime, tDateTime).
+		RegisterTypeMapEntry(bsontype.Regex, tRegex).
+		RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer).
+		RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript).
+		RegisterTypeMapEntry(bsontype.Symbol, tSymbol).
+		RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope).
+		RegisterTypeMapEntry(bsontype.Int32, tInt32).
+		RegisterTypeMapEntry(bsontype.Int64, tInt64).
+		RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp).
+		RegisterTypeMapEntry(bsontype.Decimal128, tDecimal).
+		RegisterTypeMapEntry(bsontype.MinKey, tMinKey).
+		RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey).
+		RegisterTypeMapEntry(bsontype.Type(0), tD)
+}
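A sketch of how these registrations are typically consumed, assuming NewRegistryBuilder and Build from this package's registry implementation:

```go
package main

import "github.com/mongodb/mongo-go-driver/bson/bsoncodec"

func main() {
	// Wire the default decoders above into a fresh registry; lookups then
	// fall through type entries, kind entries, and the bsontype map.
	rb := bsoncodec.NewRegistryBuilder()
	bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
	reg := rb.Build()
	_ = reg // hand to a Decoder / DecodeContext elsewhere in the driver
}
```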
+
+// BooleanDecodeValue is the ValueDecoderFunc for bool types.
+func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if vr.Type() != bsontype.Boolean {
+		return fmt.Errorf("cannot decode %v into a boolean", vr.Type())
+	}
+	if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool {
+		return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
+	}
+
+	b, err := vr.ReadBoolean()
+	val.SetBool(b)
+	return err
+}
+
+// IntDecodeValue is the ValueDecoderFunc for int types.
+func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	var i64 int64
+	var err error
+	switch vr.Type() {
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return err
+		}
+		i64 = int64(i32)
+	case bsontype.Int64:
+		i64, err = vr.ReadInt64()
+		if err != nil {
+			return err
+		}
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return err
+		}
+		if !dc.Truncate && math.Floor(f64) != f64 {
+			return errors.New("IntDecodeValue can only truncate float64 to an integer type when truncation is enabled")
+		}
+		if f64 > float64(math.MaxInt64) {
+			return fmt.Errorf("%g overflows int64", f64)
+		}
+		i64 = int64(f64)
+	default:
+		return fmt.Errorf("cannot decode %v into an integer type", vr.Type())
+	}
+
+	if !val.CanSet() {
+		return ValueDecoderError{
+			Name:     "IntDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
+			Received: val,
+		}
+	}
+
+	switch val.Kind() {
+	case reflect.Int8:
+		if i64 < math.MinInt8 || i64 > math.MaxInt8 {
+			return fmt.Errorf("%d overflows int8", i64)
+		}
+	case reflect.Int16:
+		if i64 < math.MinInt16 || i64 > math.MaxInt16 {
+			return fmt.Errorf("%d overflows int16", i64)
+		}
+	case reflect.Int32:
+		if i64 < math.MinInt32 || i64 > math.MaxInt32 {
+			return fmt.Errorf("%d overflows int32", i64)
+		}
+	case reflect.Int64:
+	case reflect.Int:
+		if int64(int(i64)) != i64 { // Can we fit this inside of an int
+			return fmt.Errorf("%d overflows int", i64)
+		}
+	default:
+		return ValueDecoderError{
+			Name:     "IntDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
+			Received: val,
+		}
+	}
+
+	val.SetInt(i64)
+	return nil
+}
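The platform-width check above is the standard round-trip trick; a tiny standalone demonstration:

```go
package main

import "fmt"

// fitsInInt mirrors the check in IntDecodeValue: convert to the
// platform-sized int and back, and see whether the value survived.
func fitsInInt(i64 int64) bool {
	return int64(int(i64)) == i64
}

func main() {
	fmt.Println(fitsInInt(42))      // true on every platform
	fmt.Println(fitsInInt(1 << 40)) // true on 64-bit, false on 32-bit ints
}
```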
+
+// UintDecodeValue is the ValueDecoderFunc for uint types.
+func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	var i64 int64
+	var err error
+	switch vr.Type() {
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return err
+		}
+		i64 = int64(i32)
+	case bsontype.Int64:
+		i64, err = vr.ReadInt64()
+		if err != nil {
+			return err
+		}
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return err
+		}
+		if !dc.Truncate && math.Floor(f64) != f64 {
+			return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled")
+		}
+		if f64 > float64(math.MaxInt64) {
+			return fmt.Errorf("%g overflows int64", f64)
+		}
+		i64 = int64(f64)
+	default:
+		return fmt.Errorf("cannot decode %v into an integer type", vr.Type())
+	}
+
+	if !val.CanSet() {
+		return ValueDecoderError{
+			Name:     "UintDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+			Received: val,
+		}
+	}
+
+	switch val.Kind() {
+	case reflect.Uint8:
+		if i64 < 0 || i64 > math.MaxUint8 {
+			return fmt.Errorf("%d overflows uint8", i64)
+		}
+	case reflect.Uint16:
+		if i64 < 0 || i64 > math.MaxUint16 {
+			return fmt.Errorf("%d overflows uint16", i64)
+		}
+	case reflect.Uint32:
+		if i64 < 0 || i64 > math.MaxUint32 {
+			return fmt.Errorf("%d overflows uint32", i64)
+		}
+	case reflect.Uint64:
+		if i64 < 0 {
+			return fmt.Errorf("%d overflows uint64", i64)
+		}
+	case reflect.Uint:
+		if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint
+			return fmt.Errorf("%d overflows uint", i64)
+		}
+	default:
+		return ValueDecoderError{
+			Name:     "UintDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+			Received: val,
+		}
+	}
+
+	val.SetUint(uint64(i64))
+	return nil
+}
+
+// FloatDecodeValue is the ValueDecoderFunc for float types.
+func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	var f float64
+	var err error
+	switch vr.Type() {
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return err
+		}
+		f = float64(i32)
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return err
+		}
+		f = float64(i64)
+	case bsontype.Double:
+		f, err = vr.ReadDouble()
+		if err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("cannot decode %v into a float32 or float64 type", vr.Type())
+	}
+
+	if !val.CanSet() {
+		return ValueDecoderError{Name: "FloatDecodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val}
+	}
+
+	switch val.Kind() {
+	case reflect.Float32:
+		if !ec.Truncate && float64(float32(f)) != f {
+			return errors.New("FloatDecodeValue can only convert float64 to float32 when truncation is allowed")
+		}
+	case reflect.Float64:
+	default:
+		return ValueDecoderError{Name: "FloatDecodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val}
+	}
+
+	val.SetFloat(f)
+	return nil
+}
+
+// StringDecodeValue is the ValueDecoderFunc for string types.
+func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	var str string
+	var err error
+	switch vr.Type() {
+	// TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed.
+	case bsontype.String:
+		str, err = vr.ReadString()
+		if err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("cannot decode %v into a string type", vr.Type())
+	}
+	if !val.CanSet() || val.Kind() != reflect.String {
+		return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val}
+	}
+
+	val.SetString(str)
+	return nil
+}
+
+// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type.
+func (DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tJavaScript {
+		return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
+	}
+
+	if vr.Type() != bsontype.JavaScript {
+		return fmt.Errorf("cannot decode %v into a primitive.JavaScript", vr.Type())
+	}
+
+	js, err := vr.ReadJavascript()
+	if err != nil {
+		return err
+	}
+
+	val.SetString(js)
+	return nil
+}
+
+// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type.
+func (DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tSymbol {
+		return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tSymbol}, Received: val}
+	}
+
+	if vr.Type() != bsontype.Symbol {
+		return fmt.Errorf("cannot decode %v into a primitive.Symbol", vr.Type())
+	}
+
+	symbol, err := vr.ReadSymbol()
+	if err != nil {
+		return err
+	}
+
+	val.SetString(symbol)
+	return nil
+}
+
+// BinaryDecodeValue is the ValueDecoderFunc for Binary.
+func (DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tBinary {
+		return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val}
+	}
+
+	if vr.Type() != bsontype.Binary {
+		return fmt.Errorf("cannot decode %v into a Binary", vr.Type())
+	}
+
+	data, subtype, err := vr.ReadBinary()
+	if err != nil {
+		return err
+	}
+
+	val.Set(reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}))
+	return nil
+}
+
+// UndefinedDecodeValue is the ValueDecoderFunc for Undefined.
+func (DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tUndefined {
+		return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val}
+	}
+
+	if vr.Type() != bsontype.Undefined {
+		return fmt.Errorf("cannot decode %v into an Undefined", vr.Type())
+	}
+
+	val.Set(reflect.ValueOf(primitive.Undefined{}))
+	return vr.ReadUndefined()
+}
+
+// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID.
+func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tOID {
+		return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val}
+	}
+
+	if vr.Type() != bsontype.ObjectID {
+		return fmt.Errorf("cannot decode %v into an ObjectID", vr.Type())
+	}
+	oid, err := vr.ReadObjectID()
+	val.Set(reflect.ValueOf(oid))
+	return err
+}
+
+// DateTimeDecodeValue is the ValueDecoderFunc for DateTime.
+func (DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tDateTime {
+		return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val}
+	}
+
+	if vr.Type() != bsontype.DateTime {
+		return fmt.Errorf("cannot decode %v into a DateTime", vr.Type())
+	}
+
+	dt, err := vr.ReadDateTime()
+	if err != nil {
+		return err
+	}
+
+	val.Set(reflect.ValueOf(primitive.DateTime(dt)))
+	return nil
+}
+
+// NullDecodeValue is the ValueDecoderFunc for Null.
+func (DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tNull {
+		return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val}
+	}
+
+	if vr.Type() != bsontype.Null {
+		return fmt.Errorf("cannot decode %v into a Null", vr.Type())
+	}
+
+	val.Set(reflect.ValueOf(primitive.Null{}))
+	return vr.ReadNull()
+}
+
+// RegexDecodeValue is the ValueDecoderFunc for Regex.
+func (DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tRegex {
+		return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val}
+	}
+
+	if vr.Type() != bsontype.Regex {
+		return fmt.Errorf("cannot decode %v into a Regex", vr.Type())
+	}
+
+	pattern, options, err := vr.ReadRegex()
+	if err != nil {
+		return err
+	}
+
+	val.Set(reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}))
+	return nil
+}
+
+// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer.
+func (DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tDBPointer {
+		return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
+	}
+
+	if vr.Type() != bsontype.DBPointer {
+		return fmt.Errorf("cannot decode %v into a DBPointer", vr.Type())
+	}
+
+	ns, pointer, err := vr.ReadDBPointer()
+	if err != nil {
+		return err
+	}
+
+	val.Set(reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}))
+	return nil
+}
+
+// TimestampDecodeValue is the ValueDecoderFunc for Timestamp.
+func (DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tTimestamp {
+		return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
+	}
+
+	if vr.Type() != bsontype.Timestamp {
+		return fmt.Errorf("cannot decode %v into a Timestamp", vr.Type())
+	}
+
+	t, incr, err := vr.ReadTimestamp()
+	if err != nil {
+		return err
+	}
+
+	val.Set(reflect.ValueOf(primitive.Timestamp{T: t, I: incr}))
+	return nil
+}
+
+// MinKeyDecodeValue is the ValueDecoderFunc for MinKey.
+func (DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tMinKey {
+		return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val}
+	}
+
+	if vr.Type() != bsontype.MinKey {
+		return fmt.Errorf("cannot decode %v into a MinKey", vr.Type())
+	}
+
+	val.Set(reflect.ValueOf(primitive.MinKey{}))
+	return vr.ReadMinKey()
+}
+
+// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey.
+func (DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tMaxKey {
+		return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
+	}
+
+	if vr.Type() != bsontype.MaxKey {
+		return fmt.Errorf("cannot decode %v into a MaxKey", vr.Type())
+	}
+
+	val.Set(reflect.ValueOf(primitive.MaxKey{}))
+	return vr.ReadMaxKey()
+}
+
+// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128.
+func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if vr.Type() != bsontype.Decimal128 {
+		return fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type())
+	}
+
+	if !val.CanSet() || val.Type() != tDecimal {
+		return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val}
+	}
+	d128, err := vr.ReadDecimal128()
+	val.Set(reflect.ValueOf(d128))
+	return err
+}
+
+// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number.
+func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tJSONNumber {
+		return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
+	}
+
+	switch vr.Type() {
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return err
+		}
+		val.Set(reflect.ValueOf(json.Number(strconv.FormatFloat(f64, 'g', -1, 64))))
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return err
+		}
+		val.Set(reflect.ValueOf(json.Number(strconv.FormatInt(int64(i32), 10))))
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return err
+		}
+		val.Set(reflect.ValueOf(json.Number(strconv.FormatInt(i64, 10))))
+	default:
+		return fmt.Errorf("cannot decode %v into a json.Number", vr.Type())
+	}
+
+	return nil
+}
+
+// URLDecodeValue is the ValueDecoderFunc for url.URL.
+func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if vr.Type() != bsontype.String {
+		return fmt.Errorf("cannot decode %v into a *url.URL", vr.Type())
+	}
+
+	str, err := vr.ReadString()
+	if err != nil {
+		return err
+	}
+
+	u, err := url.Parse(str)
+	if err != nil {
+		return err
+	}
+
+	if !val.CanSet() || val.Type() != tURL {
+		return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val}
+	}
+
+	val.Set(reflect.ValueOf(u).Elem())
+	return nil
+}
+
+// TimeDecodeValue is the ValueDecoderFunc for time.Time.
+func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if vr.Type() != bsontype.DateTime {
+		return fmt.Errorf("cannot decode %v into a time.Time", vr.Type())
+	}
+
+	dt, err := vr.ReadDateTime()
+	if err != nil {
+		return err
+	}
+
+	if !val.CanSet() || val.Type() != tTime {
+		return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+
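+	// dt holds milliseconds since the Unix epoch; split it into the seconds and
+	// nanoseconds that time.Unix expects.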
+	val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000)))
+	return nil
+}
+
+// ByteSliceDecodeValue is the ValueDecoderFunc for []byte.
+func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null {
+		return fmt.Errorf("cannot decode %v into a []byte", vr.Type())
+	}
+
+	if !val.CanSet() || val.Type() != tByteSlice {
+		return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+	}
+
+	if vr.Type() == bsontype.Null {
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	}
+
+	data, subtype, err := vr.ReadBinary()
+	if err != nil {
+		return err
+	}
+	if subtype != 0x00 {
+		return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype)
+	}
+
+	val.Set(reflect.ValueOf(data))
+	return nil
+}
+
+// MapDecodeValue is the ValueDecoderFunc for map[string]* types.
+func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
+		return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	switch vr.Type() {
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+	case bsontype.Null:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	default:
+		return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type())
+	}
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeMap(val.Type()))
+	}
+
+	eType := val.Type().Elem()
+	decoder, err := dc.LookupDecoder(eType)
+	if err != nil {
+		return err
+	}
+
+	if eType == tEmpty {
+		dc.Ancestor = val.Type()
+	}
+
+	keyType := val.Type().Key()
+	for {
+		key, vr, err := dr.ReadElement()
+		if err == bsonrw.ErrEOD {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		elem := reflect.New(eType).Elem()
+
+		err = decoder.DecodeValue(dc, vr, elem)
+		if err != nil {
+			return err
+		}
+
+		val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem)
+	}
+	return nil
+}
+
+// ArrayDecodeValue is the ValueDecoderFunc for array types.
+func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Array {
+		return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
+	}
+
+	switch vr.Type() {
+	case bsontype.Array:
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+		if val.Type().Elem() != tE {
+			return fmt.Errorf("cannot decode document into %s", val.Type())
+		}
+	default:
+		return fmt.Errorf("cannot decode %v into an array", vr.Type())
+	}
+
+	var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
+	switch val.Type().Elem() {
+	case tE:
+		elemsFunc = dvd.decodeD
+	default:
+		elemsFunc = dvd.decodeDefault
+	}
+
+	elems, err := elemsFunc(dc, vr, val)
+	if err != nil {
+		return err
+	}
+
+	if len(elems) > val.Len() {
+		return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type())
+	}
+
+	for idx, elem := range elems {
+		val.Index(idx).Set(elem)
+	}
+
+	return nil
+}
+
+// SliceDecodeValue is the ValueDecoderFunc for slice types.
+func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Slice {
+		return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+	}
+
+	switch vr.Type() {
+	case bsontype.Array:
+	case bsontype.Null:
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+		if val.Type().Elem() != tE {
+			return fmt.Errorf("cannot decode document into %s", val.Type())
+		}
+	default:
+		return fmt.Errorf("cannot decode %v into a slice", vr.Type())
+	}
+
+	var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
+	switch val.Type().Elem() {
+	case tE:
+		dc.Ancestor = val.Type()
+		elemsFunc = dvd.decodeD
+	default:
+		elemsFunc = dvd.decodeDefault
+	}
+
+	elems, err := elemsFunc(dc, vr, val)
+	if err != nil {
+		return err
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(val.Type(), 0, len(elems)))
+	}
+
+	val.SetLen(0)
+	val.Set(reflect.Append(val, elems...))
+
+	return nil
+}
+
+// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations.
+func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) {
+		return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+	}
+
+	if val.Kind() == reflect.Ptr && val.IsNil() {
+		if !val.CanSet() {
+			return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+		}
+		val.Set(reflect.New(val.Type().Elem()))
+	}
+
+	if !val.Type().Implements(tValueUnmarshaler) {
+		if !val.CanAddr() {
+			return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+		}
+		val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
+	}
+
+	t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr)
+	if err != nil {
+		return err
+	}
+
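+	// Call UnmarshalBSONValue via reflection, converting val to the interface
+	// type first so the method lookup succeeds for any implementer.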
+	fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue")
+	errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0]
+	if !errVal.IsNil() {
+		return errVal.Interface().(error)
+	}
+	return nil
+}
+
+// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations.
+func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) {
+		return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+	}
+
+	if val.Kind() == reflect.Ptr && val.IsNil() {
+		if !val.CanSet() {
+			return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+		}
+		val.Set(reflect.New(val.Type().Elem()))
+	}
+
+	if !val.Type().Implements(tUnmarshaler) {
+		if !val.CanAddr() {
+			return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+		}
+		val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
+	}
+
+	_, src, err := bsonrw.Copier{}.CopyValueToBytes(vr)
+	if err != nil {
+		return err
+	}
+
+	fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON")
+	errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0]
+	if !errVal.IsNil() {
+		return errVal.Interface().(error)
+	}
+	return nil
+}
+
+// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}.
+func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tEmpty {
+		return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+	}
+
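+	// Pick the Go type to decode into: consult the registry's type map first,
+	// then fall back to the ancestor type (or primitive.D) for embedded
+	// documents, and short-circuit BSON null to the zero value.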
+	rtype, err := dc.LookupTypeMapEntry(vr.Type())
+	if err != nil {
+		switch vr.Type() {
+		case bsontype.EmbeddedDocument:
+			if dc.Ancestor != nil {
+				rtype = dc.Ancestor
+				break
+			}
+			rtype = tD
+		case bsontype.Null:
+			val.Set(reflect.Zero(val.Type()))
+			return vr.ReadNull()
+		default:
+			return err
+		}
+	}
+
+	decoder, err := dc.LookupDecoder(rtype)
+	if err != nil {
+		return err
+	}
+
+	elem := reflect.New(rtype).Elem()
+	err = decoder.DecodeValue(dc, vr, elem)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document.
+func (DefaultValueDecoders) CoreDocumentDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tCoreDocument {
+		return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(val.Type(), 0, 0))
+	}
+
+	val.SetLen(0)
+
+	cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr)
+	val.Set(reflect.ValueOf(cdoc))
+	return err
+}
+
+func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) {
+	elems := make([]reflect.Value, 0)
+
+	ar, err := vr.ReadArray()
+	if err != nil {
+		return nil, err
+	}
+
+	eType := val.Type().Elem()
+
+	decoder, err := dc.LookupDecoder(eType)
+	if err != nil {
+		return nil, err
+	}
+
+	for {
+		vr, err := ar.ReadValue()
+		if err == bsonrw.ErrEOA {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		elem := reflect.New(eType).Elem()
+
+		err = decoder.DecodeValue(dc, vr, elem)
+		if err != nil {
+			return nil, err
+		}
+		elems = append(elems, elem)
+	}
+
+	return elems, nil
+}
+
+// CodeWithScopeDecodeValue is the ValueDecoderFunc for CodeWithScope.
+func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tCodeWithScope {
+		return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}
+	}
+
+	if vr.Type() != bsontype.CodeWithScope {
+		return fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vr.Type())
+	}
+
+	code, dr, err := vr.ReadCodeWithScope()
+	if err != nil {
+		return err
+	}
+
+	scope := reflect.New(tD).Elem()
+
+	elems, err := dvd.decodeElemsFromDocumentReader(dc, dr)
+	if err != nil {
+		return err
+	}
+
+	scope.Set(reflect.MakeSlice(tD, 0, len(elems)))
+	scope.Set(reflect.Append(scope, elems...))
+
+	val.Set(reflect.ValueOf(primitive.CodeWithScope{Code: primitive.JavaScript(code), Scope: scope.Interface().(primitive.D)}))
+	return nil
+}
+
+func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) {
+	switch vr.Type() {
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+	default:
+		return nil, fmt.Errorf("cannot decode %v into a D", vr.Type())
+	}
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return nil, err
+	}
+
+	return dvd.decodeElemsFromDocumentReader(dc, dr)
+}
+
+func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) {
+	decoder, err := dc.LookupDecoder(tEmpty)
+	if err != nil {
+		return nil, err
+	}
+
+	elems := make([]reflect.Value, 0)
+	for {
+		key, vr, err := dr.ReadElement()
+		if err == bsonrw.ErrEOD {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		val := reflect.New(tEmpty).Elem()
+		err = decoder.DecodeValue(dc, vr, val)
+		if err != nil {
+			return nil, err
+		}
+
+		elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()}))
+	}
+
+	return elems, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/default_value_encoders.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/default_value_encoders.go
new file mode 100644
index 0000000..61295bd
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/default_value_encoders.go
@@ -0,0 +1,648 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+var defaultValueEncoders DefaultValueEncoders
+
+var bvwPool = bsonrw.NewBSONValueWriterPool()
+
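+// sliceWriterPool recycles SliceWriter buffers; CodeWithScopeEncodeValue uses it
+// to avoid allocating a new scratch buffer for every scope document.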
+var sliceWriterPool = sync.Pool{
+	New: func() interface{} {
+		sw := make(bsonrw.SliceWriter, 0, 0)
+		return &sw
+	},
+}
+
+func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error {
+	vw, err := dw.WriteDocumentElement(e.Key)
+	if err != nil {
+		return err
+	}
+
+	if e.Value == nil {
+		return vw.WriteNull()
+	}
+	encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value))
+	if err != nil {
+		return err
+	}
+
+	err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// DefaultValueEncoders is a namespace type for the default ValueEncoders used
+// when creating a registry.
+type DefaultValueEncoders struct{}
+
+// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with
+// the provided RegistryBuilder.
+func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) {
+	if rb == nil {
+		panic(errors.New("argument to RegisterDefaultEncoders must not be nil"))
+	}
+	rb.
+		RegisterEncoder(tByteSlice, ValueEncoderFunc(dve.ByteSliceEncodeValue)).
+		RegisterEncoder(tTime, ValueEncoderFunc(dve.TimeEncodeValue)).
+		RegisterEncoder(tEmpty, ValueEncoderFunc(dve.EmptyInterfaceEncodeValue)).
+		RegisterEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)).
+		RegisterEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)).
+		RegisterEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)).
+		RegisterEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)).
+		RegisterEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)).
+		RegisterEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)).
+		RegisterEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)).
+		RegisterEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)).
+		RegisterEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)).
+		RegisterEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)).
+		RegisterEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)).
+		RegisterEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)).
+		RegisterEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)).
+		RegisterEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)).
+		RegisterEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)).
+		RegisterEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)).
+		RegisterEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)).
+		RegisterEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)).
+		RegisterEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)).
+		RegisterEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)).
+		RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Uint, ValueEncoderFunc(dve.UintEncodeValue)).
+		RegisterDefaultEncoder(reflect.Uint8, ValueEncoderFunc(dve.UintEncodeValue)).
+		RegisterDefaultEncoder(reflect.Uint16, ValueEncoderFunc(dve.UintEncodeValue)).
+		RegisterDefaultEncoder(reflect.Uint32, ValueEncoderFunc(dve.UintEncodeValue)).
+		RegisterDefaultEncoder(reflect.Uint64, ValueEncoderFunc(dve.UintEncodeValue)).
+		RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)).
+		RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)).
+		RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)).
+		RegisterDefaultEncoder(reflect.Map, ValueEncoderFunc(dve.MapEncodeValue)).
+		RegisterDefaultEncoder(reflect.Slice, ValueEncoderFunc(dve.SliceEncodeValue)).
+		RegisterDefaultEncoder(reflect.String, ValueEncoderFunc(dve.StringEncodeValue)).
+		RegisterDefaultEncoder(reflect.Struct, &StructCodec{cache: make(map[reflect.Type]*structDescription), parser: DefaultStructTagParser}).
+		RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec())
+}
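+
+// A minimal usage sketch (illustrative, not part of this file's API surface):
+// registering the default encoders on a fresh builder before building a Registry.
+//
+//  rb := NewRegistryBuilder()
+//  DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
+//  reg := rb.Build()
+//  _ = reg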
+
+// BooleanEncodeValue is the ValueEncoderFunc for bool types.
+func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Bool {
+		return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
+	}
+	return vw.WriteBoolean(val.Bool())
+}
+
+func fitsIn32Bits(i int64) bool {
+	return math.MinInt32 <= i && i <= math.MaxInt32
+}
+
+// IntEncodeValue is the ValueEncoderFunc for int types.
+func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	switch val.Kind() {
+	case reflect.Int8, reflect.Int16, reflect.Int32:
+		return vw.WriteInt32(int32(val.Int()))
+	case reflect.Int:
+		i64 := val.Int()
+		if fitsIn32Bits(i64) {
+			return vw.WriteInt32(int32(i64))
+		}
+		return vw.WriteInt64(i64)
+	case reflect.Int64:
+		i64 := val.Int()
+		if ec.MinSize && fitsIn32Bits(i64) {
+			return vw.WriteInt32(int32(i64))
+		}
+		return vw.WriteInt64(i64)
+	}
+
+	return ValueEncoderError{
+		Name:     "IntEncodeValue",
+		Kinds:    []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
+		Received: val,
+	}
+}
+
+// UintEncodeValue is the ValueEncoderFunc for uint types.
+func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	switch val.Kind() {
+	case reflect.Uint8, reflect.Uint16:
+		return vw.WriteInt32(int32(val.Uint()))
+	case reflect.Uint, reflect.Uint32, reflect.Uint64:
+		u64 := val.Uint()
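+		// With MinSize set, prefer the smaller BSON int32 representation when the
+		// value fits.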
+		if ec.MinSize && u64 <= math.MaxInt32 {
+			return vw.WriteInt32(int32(u64))
+		}
+		if u64 > math.MaxInt64 {
+			return fmt.Errorf("%d overflows int64", u64)
+		}
+		return vw.WriteInt64(int64(u64))
+	}
+
+	return ValueEncoderError{
+		Name:     "UintEncodeValue",
+		Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+		Received: val,
+	}
+}
+
+// FloatEncodeValue is the ValueEncoderFunc for float types.
+func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	switch val.Kind() {
+	case reflect.Float32, reflect.Float64:
+		return vw.WriteDouble(val.Float())
+	}
+
+	return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val}
+}
+
+// StringEncodeValue is the ValueEncoderFunc for string types.
+func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.Kind() != reflect.String {
+		return ValueEncoderError{
+			Name:     "StringEncodeValue",
+			Kinds:    []reflect.Kind{reflect.String},
+			Received: val,
+		}
+	}
+
+	return vw.WriteString(val.String())
+}
+
+// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID.
+func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tOID {
+		return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val}
+	}
+	return vw.WriteObjectID(val.Interface().(primitive.ObjectID))
+}
+
+// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128.
+func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDecimal {
+		return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val}
+	}
+	return vw.WriteDecimal128(val.Interface().(primitive.Decimal128))
+}
+
+// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number.
+func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tJSONNumber {
+		return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
+	}
+	jsnum := val.Interface().(json.Number)
+
+	// Attempt int first, then float64
+	if i64, err := jsnum.Int64(); err == nil {
+		return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64))
+	}
+
+	f64, err := jsnum.Float64()
+	if err != nil {
+		return err
+	}
+
+	return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64))
+}
+
+// URLEncodeValue is the ValueEncoderFunc for url.URL.
+func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tURL {
+		return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val}
+	}
+	u := val.Interface().(url.URL)
+	return vw.WriteString(u.String())
+}
+
+// TimeEncodeValue is the ValueEncoderFunc for time.Time.
+func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTime {
+		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+	tt := val.Interface().(time.Time)
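+	// BSON datetimes store milliseconds since the Unix epoch.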
+	return vw.WriteDateTime(tt.Unix()*1000 + int64(tt.Nanosecond()/1e6))
+}
+
+// ByteSliceEncodeValue is the ValueEncoderFunc for []byte.
+func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tByteSlice {
+		return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+	}
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	return vw.WriteBinary(val.Interface().([]byte))
+}
+
+// MapEncodeValue is the ValueEncoderFunc for map[string]* types.
+func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
+		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	if val.IsNil() {
+		// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
+		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
+		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
+		// so just continue. The operations on a map reflection value are valid, so we can call
+		// MapKeys within mapEncodeValue without a problem.
+		err := vw.WriteNull()
+		if err == nil {
+			return nil
+		}
+	}
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return dve.mapEncodeValue(ec, dw, val, nil)
+}
+
+// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
+// true if the provided key exists; it is mainly used for inline maps in the
+// struct codec.
+func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error {
+
+	encoder, err := ec.LookupEncoder(val.Type().Elem())
+	if err != nil {
+		return err
+	}
+
+	keys := val.MapKeys()
+	for _, key := range keys {
+		if collisionFn != nil && collisionFn(key.String()) {
+			return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key)
+		}
+		vw, err := dw.WriteDocumentElement(key.String())
+		if err != nil {
+			return err
+		}
+
+		if enc, ok := encoder.(ValueEncoder); ok {
+			err = enc.EncodeValue(ec, vw, val.MapIndex(key))
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		err = encoder.EncodeValue(ec, vw, val.MapIndex(key))
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// ArrayEncodeValue is the ValueEncoderFunc for array types.
+func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Array {
+		return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
+	}
+
+	// If the array's element type is primitive.E, treat it as a document instead of as an array.
+	if val.Type().Elem() == tE {
+		dw, err := vw.WriteDocument()
+		if err != nil {
+			return err
+		}
+
+		for idx := 0; idx < val.Len(); idx++ {
+			e := val.Index(idx).Interface().(primitive.E)
+			err = encodeElement(ec, dw, e)
+			if err != nil {
+				return err
+			}
+		}
+
+		return dw.WriteDocumentEnd()
+	}
+
+	aw, err := vw.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	encoder, err := ec.LookupEncoder(val.Type().Elem())
+	if err != nil {
+		return err
+	}
+
+	for idx := 0; idx < val.Len(); idx++ {
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		err = encoder.EncodeValue(ec, vw, val.Index(idx))
+		if err != nil {
+			return err
+		}
+	}
+	return aw.WriteArrayEnd()
+}
+
+// SliceEncodeValue is the ValueEncoderFunc for slice types.
+func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Slice {
+		return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	// If the slice is convertible to primitive.D, treat it as a document instead of as an array.
+	if val.Type().ConvertibleTo(tD) {
+		d := val.Convert(tD).Interface().(primitive.D)
+
+		dw, err := vw.WriteDocument()
+		if err != nil {
+			return err
+		}
+
+		for _, e := range d {
+			err = encodeElement(ec, dw, e)
+			if err != nil {
+				return err
+			}
+		}
+
+		return dw.WriteDocumentEnd()
+	}
+
+	aw, err := vw.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	encoder, err := ec.LookupEncoder(val.Type().Elem())
+	if err != nil {
+		return err
+	}
+
+	for idx := 0; idx < val.Len(); idx++ {
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		err = encoder.EncodeValue(ec, vw, val.Index(idx))
+		if err != nil {
+			return err
+		}
+	}
+	return aw.WriteArrayEnd()
+}
+
+// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}.
+func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tEmpty {
+		return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	encoder, err := ec.LookupEncoder(val.Elem().Type())
+	if err != nil {
+		return err
+	}
+
+	return encoder.EncodeValue(ec, vw, val.Elem())
+}
+
+// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations.
+func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || !val.Type().Implements(tValueMarshaler) {
+		return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val}
+	}
+
+	fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue")
+	returns := fn.Call(nil)
+	if !returns[2].IsNil() {
+		return returns[2].Interface().(error)
+	}
+	t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte)
+	return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data)
+}
+
+// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations.
+func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || !val.Type().Implements(tMarshaler) {
+		return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val}
+	}
+
+	fn := val.Convert(tMarshaler).MethodByName("MarshalBSON")
+	returns := fn.Call(nil)
+	if !returns[1].IsNil() {
+		return returns[1].Interface().(error)
+	}
+	data := returns[0].Interface().([]byte)
+	return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data)
+}
+
+// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations.
+func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || !val.Type().Implements(tProxy) {
+		return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val}
+	}
+
+	fn := val.Convert(tProxy).MethodByName("ProxyBSON")
+	returns := fn.Call(nil)
+	if !returns[1].IsNil() {
+		return returns[1].Interface().(error)
+	}
+	data := returns[0]
+	var encoder ValueEncoder
+	var err error
+	if data.Elem().IsValid() {
+		encoder, err = ec.LookupEncoder(data.Elem().Type())
+	} else {
+		encoder, err = ec.LookupEncoder(nil)
+	}
+	if err != nil {
+		return err
+	}
+	return encoder.EncodeValue(ec, vw, data.Elem())
+}
+
+// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type.
+func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tJavaScript {
+		return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
+	}
+
+	return vw.WriteJavascript(val.String())
+}
+
+// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type.
+func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tSymbol {
+		return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val}
+	}
+
+	return vw.WriteSymbol(val.String())
+}
+
+// BinaryEncodeValue is the ValueEncoderFunc for Binary.
+func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tBinary {
+		return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val}
+	}
+	b := val.Interface().(primitive.Binary)
+
+	return vw.WriteBinaryWithSubtype(b.Data, b.Subtype)
+}
+
+// UndefinedEncodeValue is the ValueEncoderFunc for Undefined.
+func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tUndefined {
+		return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val}
+	}
+
+	return vw.WriteUndefined()
+}
+
+// DateTimeEncodeValue is the ValueEncoderFunc for DateTime.
+func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDateTime {
+		return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val}
+	}
+
+	return vw.WriteDateTime(val.Int())
+}
+
+// NullEncodeValue is the ValueEncoderFunc for Null.
+func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tNull {
+		return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val}
+	}
+
+	return vw.WriteNull()
+}
+
+// RegexEncodeValue is the ValueEncoderFunc for Regex.
+func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tRegex {
+		return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val}
+	}
+
+	regex := val.Interface().(primitive.Regex)
+
+	return vw.WriteRegex(regex.Pattern, regex.Options)
+}
+
+// DBPointerEncodeValue is the ValueEncoderFunc for DBPointer.
+func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDBPointer {
+		return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
+	}
+
+	dbp := val.Interface().(primitive.DBPointer)
+
+	return vw.WriteDBPointer(dbp.DB, dbp.Pointer)
+}
+
+// TimestampEncodeValue is the ValueEncoderFunc for Timestamp.
+func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTimestamp {
+		return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
+	}
+
+	ts := val.Interface().(primitive.Timestamp)
+
+	return vw.WriteTimestamp(ts.T, ts.I)
+}
+
+// MinKeyEncodeValue is the ValueEncoderFunc for MinKey.
+func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tMinKey {
+		return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val}
+	}
+
+	return vw.WriteMinKey()
+}
+
+// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey.
+func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tMaxKey {
+		return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
+	}
+
+	return vw.WriteMaxKey()
+}
+
+// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document.
+func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tCoreDocument {
+		return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
+	}
+
+	cdoc := val.Interface().(bsoncore.Document)
+
+	return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc)
+}
+
+// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope.
+func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tCodeWithScope {
+		return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}
+	}
+
+	cws := val.Interface().(primitive.CodeWithScope)
+
+	dw, err := vw.WriteCodeWithScope(string(cws.Code))
+	if err != nil {
+		return err
+	}
+
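+	// Borrow a pooled SliceWriter (and value writer) so encoding the scope does
+	// not allocate a fresh buffer on every call; reset its length before use.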
+	sw := sliceWriterPool.Get().(*bsonrw.SliceWriter)
+	defer sliceWriterPool.Put(sw)
+	*sw = (*sw)[:0]
+
+	scopeVW := bvwPool.Get(sw)
+	defer bvwPool.Put(scopeVW)
+
+	encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope))
+	if err != nil {
+		return err
+	}
+
+	err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope))
+	if err != nil {
+		return err
+	}
+
+	err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw)
+	if err != nil {
+		return err
+	}
+	return dw.WriteDocumentEnd()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/doc.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/doc.go
new file mode 100644
index 0000000..978511c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/doc.go
@@ -0,0 +1,61 @@
+// Package bsoncodec provides a system for encoding values to BSON representations and decoding
+// values from BSON representations. This package considers both binary BSON and ExtendedJSON as
+// BSON representations. The types in this package enable a flexible system for handling this
+// encoding and decoding.
+//
+// The codec system is composed of two parts:
+//
+// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON
+// representations.
+//
+// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for
+// retrieving them.
+//
+// ValueEncoders and ValueDecoders
+//
+// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON.
+// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the
+// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc
+// is provided to allow use of a function with the correct signature as a ValueEncoder. An
+// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and
+// to provide configuration information.
+//
+// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that
+// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to
+// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext
+// instance is provided and serves similar functionality to the EncodeContext.
+//
+// Registry and RegistryBuilder
+//
+// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. For looking up
+// ValueEncoders and Decoders the Registry first attempts to find a ValueEncoder or ValueDecoder for
+// the type provided; if one cannot be found it then checks to see if a registered ValueEncoder or
+// ValueDecoder exists for an interface the type implements. Finally, the reflect.Kind of the type
+// is used to lookup a default ValueEncoder or ValueDecoder for that kind. If no ValueEncoder or
+// ValueDecoder can be found, an error is returned.
+//
+// The Registry also holds a type map. This allows users to retrieve the Go type that should be used
+// when decoding a BSON value into an empty interface. This is primarily only used for the empty
+// interface ValueDecoder.
+//
+// A RegistryBuilder is used to construct a Registry. The Register methods are used to associate
+// either a reflect.Type or a reflect.Kind with a ValueEncoder or ValueDecoder. A RegistryBuilder
+// returned from NewRegistryBuilder contains no registered ValueEncoders or ValueDecoders and
+// contains an empty type map.
+//
+// The RegisterTypeMapEntry method handles associating a BSON type with a Go type. For example, if
+// you want to decode BSON int64 and int32 values into Go int instances, you would do the following:
+//
+//  var regbuilder *RegistryBuilder = ...
+//  intType := reflect.TypeOf(int(0))
+//  regbuilder.RegisterTypeMapEntry(bsontype.Int64, intType).RegisterTypeMapEntry(bsontype.Int32, intType)
+//
+// DefaultValueEncoders and DefaultValueDecoders
+//
+// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and
+// ValueDecoders for handling a wide range of Go types, including all of the types within the
+// primitive package. To make registering these codecs easier, a helper method on each type is
+// provided. For the DefaultValueEncoders type the method is called RegisterDefaultEncoders, and for
+// the DefaultValueDecoders type the method is called RegisterDefaultDecoders; the latter also
+// handles registering type map entries for each BSON type.
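+//
+// For example, a minimal sketch of building a registry with the default codecs
+// registered (the variable names here are illustrative):
+//
+//  rb := NewRegistryBuilder()
+//  DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
+//  DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
+//  registry := rb.Build()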
+package bsoncodec
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/mode.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/mode.go
new file mode 100644
index 0000000..fbd9f0a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/mode.go
@@ -0,0 +1,65 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import "fmt"
+
+type mode int
+
+const (
+	_ mode = iota
+	mTopLevel
+	mDocument
+	mArray
+	mValue
+	mElement
+	mCodeWithScope
+	mSpacer
+)
+
+func (m mode) String() string {
+	var str string
+
+	switch m {
+	case mTopLevel:
+		str = "TopLevel"
+	case mDocument:
+		str = "DocumentMode"
+	case mArray:
+		str = "ArrayMode"
+	case mValue:
+		str = "ValueMode"
+	case mElement:
+		str = "ElementMode"
+	case mCodeWithScope:
+		str = "CodeWithScopeMode"
+	case mSpacer:
+		str = "CodeWithScopeSpacerFrame"
+	default:
+		str = "UnknownMode"
+	}
+
+	return str
+}
+
+// TransitionError is an error returned when an invalid transition in a
+// ValueReader or ValueWriter state machine occurs.
+type TransitionError struct {
+	parent      mode
+	current     mode
+	destination mode
+}
+
+func (te TransitionError) Error() string {
+	if te.destination == mode(0) {
+		return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current)
+	}
+	if te.parent == mode(0) {
+		return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination)
+	}
+	return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent)
+}
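+
+// For example, a TransitionError with current mTopLevel and destination mDocument
+// (and no parent) renders as
+// "invalid state transition: TopLevel -> DocumentMode".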
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/pointer_codec.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/pointer_codec.go
new file mode 100644
index 0000000..b78ec0a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/pointer_codec.go
@@ -0,0 +1,110 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+var defaultPointerCodec = &PointerCodec{
+	ecache: make(map[reflect.Type]ValueEncoder),
+	dcache: make(map[reflect.Type]ValueDecoder),
+}
+
+var _ ValueEncoder = &PointerCodec{}
+var _ ValueDecoder = &PointerCodec{}
+
+// PointerCodec is the Codec used for pointers.
+type PointerCodec struct {
+	ecache map[reflect.Type]ValueEncoder
+	dcache map[reflect.Type]ValueDecoder
+	l      sync.RWMutex
+}
+
+// NewPointerCodec returns a PointerCodec that has been initialized.
+func NewPointerCodec() *PointerCodec {
+	return &PointerCodec{
+		ecache: make(map[reflect.Type]ValueEncoder),
+		dcache: make(map[reflect.Type]ValueDecoder),
+	}
+}
+
+// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil
+// or looking up an encoder for the type of value the pointer points to.
+func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.Kind() != reflect.Ptr {
+		if !val.IsValid() {
+			return vw.WriteNull()
+		}
+		return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	pc.l.RLock()
+	enc, ok := pc.ecache[val.Type()]
+	pc.l.RUnlock()
+	if ok {
+		if enc == nil {
+			return ErrNoEncoder{Type: val.Type()}
+		}
+		return enc.EncodeValue(ec, vw, val.Elem())
+	}
+
+	enc, err := ec.LookupEncoder(val.Type().Elem())
+	pc.l.Lock()
+	pc.ecache[val.Type()] = enc
+	pc.l.Unlock()
+	if err != nil {
+		return err
+	}
+
+	return enc.EncodeValue(ec, vw, val.Elem())
+}
+
+// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and
+// using that to decode. If the BSON value is Null, this method will set the pointer to nil.
+func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Ptr {
+		return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+	}
+
+	if vr.Type() == bsontype.Null {
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.New(val.Type().Elem()))
+	}
+
+	pc.l.RLock()
+	dec, ok := pc.dcache[val.Type()]
+	pc.l.RUnlock()
+	if ok {
+		if dec == nil {
+			return ErrNoDecoder{Type: val.Type()}
+		}
+		return dec.DecodeValue(dc, vr, val.Elem())
+	}
+
+	dec, err := dc.LookupDecoder(val.Type().Elem())
+	pc.l.Lock()
+	pc.dcache[val.Type()] = dec
+	pc.l.Unlock()
+	if err != nil {
+		return err
+	}
+
+	return dec.DecodeValue(dc, vr, val.Elem())
+}
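+
+// A minimal usage sketch (illustrative): sharing one PointerCodec for both
+// encoding and decoding of all pointer kinds. RegisterDefaultDecoder is assumed
+// here to be the decoder-side counterpart of RegisterDefaultEncoder.
+//
+//  rb := NewRegistryBuilder()
+//  pc := NewPointerCodec()
+//  rb.RegisterDefaultEncoder(reflect.Ptr, pc)
+//  rb.RegisterDefaultDecoder(reflect.Ptr, pc)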
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/proxy.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/proxy.go
new file mode 100644
index 0000000..4cf2b01
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/proxy.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types
+// that implement this interface will have ProxyBSON called during the encoding process, and that
+// value will be encoded in place for the implementer.
+type Proxy interface {
+	ProxyBSON() (interface{}, error)
+}
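+
+// A sketch of a hypothetical implementer: a type that cannot be encoded directly
+// and instead proxies encoding to a plain string.
+//
+//  type temperature float64
+//
+//  func (t temperature) ProxyBSON() (interface{}, error) {
+//      return fmt.Sprintf("%.1fC", float64(t)), nil
+//  }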
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/registry.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/registry.go
new file mode 100644
index 0000000..741deb8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/registry.go
@@ -0,0 +1,384 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"reflect"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+// ErrNilType is returned when nil is passed to LookupDecoder.
+var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
+
+// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
+var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
+
+// ErrNoEncoder is returned when there wasn't an encoder available for a type.
+type ErrNoEncoder struct {
+	Type reflect.Type
+}
+
+func (ene ErrNoEncoder) Error() string {
+	if ene.Type == nil {
+		return "no encoder found for <nil>"
+	}
+	return "no encoder found for " + ene.Type.String()
+}
+
+// ErrNoDecoder is returned when there wasn't a decoder available for a type.
+type ErrNoDecoder struct {
+	Type reflect.Type
+}
+
+func (end ErrNoDecoder) Error() string {
+	return "no decoder found for " + end.Type.String()
+}
+
+// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
+type ErrNoTypeMapEntry struct {
+	Type bsontype.Type
+}
+
+func (entme ErrNoTypeMapEntry) Error() string {
+	return "no type map entry found for " + entme.Type.String()
+}
+
+// ErrNotInterface is returned when the provided type is not an interface.
+var ErrNotInterface = errors.New("the provided type is not an interface")
+
+var defaultRegistry *Registry
+
+func init() {
+	defaultRegistry = buildDefaultRegistry()
+}
+
+// A RegistryBuilder is used to build a Registry. This type is not goroutine
+// safe.
+type RegistryBuilder struct {
+	typeEncoders      map[reflect.Type]ValueEncoder
+	interfaceEncoders []interfaceValueEncoder
+	kindEncoders      map[reflect.Kind]ValueEncoder
+
+	typeDecoders      map[reflect.Type]ValueDecoder
+	interfaceDecoders []interfaceValueDecoder
+	kindDecoders      map[reflect.Kind]ValueDecoder
+
+	typeMap map[bsontype.Type]reflect.Type
+}
+
+// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
+// type passed around, and Encoders and Decoders are constructed from it.
+type Registry struct {
+	typeEncoders map[reflect.Type]ValueEncoder
+	typeDecoders map[reflect.Type]ValueDecoder
+
+	interfaceEncoders []interfaceValueEncoder
+	interfaceDecoders []interfaceValueDecoder
+
+	kindEncoders map[reflect.Kind]ValueEncoder
+	kindDecoders map[reflect.Kind]ValueDecoder
+
+	typeMap map[bsontype.Type]reflect.Type
+
+	mu sync.RWMutex
+}
+
+// NewRegistryBuilder creates a new empty RegistryBuilder.
+func NewRegistryBuilder() *RegistryBuilder {
+	return &RegistryBuilder{
+		typeEncoders: make(map[reflect.Type]ValueEncoder),
+		typeDecoders: make(map[reflect.Type]ValueDecoder),
+
+		interfaceEncoders: make([]interfaceValueEncoder, 0),
+		interfaceDecoders: make([]interfaceValueDecoder, 0),
+
+		kindEncoders: make(map[reflect.Kind]ValueEncoder),
+		kindDecoders: make(map[reflect.Kind]ValueDecoder),
+
+		typeMap: make(map[bsontype.Type]reflect.Type),
+	}
+}
+
+func buildDefaultRegistry() *Registry {
+	rb := NewRegistryBuilder()
+	defaultValueEncoders.RegisterDefaultEncoders(rb)
+	defaultValueDecoders.RegisterDefaultDecoders(rb)
+	return rb.Build()
+}
+
+// RegisterCodec will register the provided ValueCodec for the provided type.
+func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder {
+	rb.RegisterEncoder(t, codec)
+	rb.RegisterDecoder(t, codec)
+	return rb
+}
+
+// RegisterEncoder will register the provided ValueEncoder to the provided type.
+//
+// The type registered will be used directly, so an encoder can be registered for a type and a
+// different encoder can be registered for a pointer to that type.
+func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	if t == tEmpty {
+		rb.typeEncoders[t] = enc
+		return rb
+	}
+	switch t.Kind() {
+	case reflect.Interface:
+		for idx, ir := range rb.interfaceEncoders {
+			if ir.i == t {
+				rb.interfaceEncoders[idx].ve = enc
+				return rb
+			}
+		}
+
+		rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc})
+	default:
+		rb.typeEncoders[t] = enc
+	}
+	return rb
+}
+
+// RegisterDecoder will register the provided ValueDecoder to the provided type.
+//
+// The type registered will be used directly, so a decoder can be registered for a type and a
+// different decoder can be registered for a pointer to that type.
+func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	if t == nil {
+		rb.typeDecoders[nil] = dec
+		return rb
+	}
+	if t == tEmpty {
+		rb.typeDecoders[t] = dec
+		return rb
+	}
+	switch t.Kind() {
+	case reflect.Interface:
+		for idx, ir := range rb.interfaceDecoders {
+			if ir.i == t {
+				rb.interfaceDecoders[idx].vd = dec
+				return rb
+			}
+		}
+
+		rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec})
+	default:
+		rb.typeDecoders[t] = dec
+	}
+	return rb
+}
+
+// RegisterDefaultEncoder will register the provided ValueEncoder to the provided
+// kind.
+func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder {
+	rb.kindEncoders[kind] = enc
+	return rb
+}
+
+// RegisterDefaultDecoder will register the provided ValueDecoder to the
+// provided kind.
+func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder {
+	rb.kindDecoders[kind] = dec
+	return rb
+}
+
+// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
+// mapping is decoding situations where an empty interface is used and a default type needs to be
+// created and decoded into.
+//
+// NOTE: It is unlikely that registering a type for BSON Embedded Document is actually desired. By
+// registering a type map entry for BSON Embedded Document the type registered will be used in any
+// case where a BSON Embedded Document will be decoded into an empty interface. For example, if you
+// register primitive.M, the EmptyInterface decoder will always use primitive.M, even if an ancestor
+// was a primitive.D.
+func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder {
+	rb.typeMap[bt] = rt
+	return rb
+}
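+
+// Illustrative sketch: mapping BSON embedded documents to primitive.D, so that
+// documents decoded into an empty interface become primitive.D values. This
+// mirrors what the default registry does.
+//
+//	rb.RegisterTypeMapEntry(bsontype.EmbeddedDocument, reflect.TypeOf(primitive.D{}))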
+
+// Build creates a Registry from the current state of this RegistryBuilder.
+func (rb *RegistryBuilder) Build() *Registry {
+	registry := new(Registry)
+
+	registry.typeEncoders = make(map[reflect.Type]ValueEncoder)
+	for t, enc := range rb.typeEncoders {
+		registry.typeEncoders[t] = enc
+	}
+
+	registry.typeDecoders = make(map[reflect.Type]ValueDecoder)
+	for t, dec := range rb.typeDecoders {
+		registry.typeDecoders[t] = dec
+	}
+
+	registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders))
+	copy(registry.interfaceEncoders, rb.interfaceEncoders)
+
+	registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders))
+	copy(registry.interfaceDecoders, rb.interfaceDecoders)
+
+	registry.kindEncoders = make(map[reflect.Kind]ValueEncoder)
+	for kind, enc := range rb.kindEncoders {
+		registry.kindEncoders[kind] = enc
+	}
+
+	registry.kindDecoders = make(map[reflect.Kind]ValueDecoder)
+	for kind, dec := range rb.kindDecoders {
+		registry.kindDecoders[kind] = dec
+	}
+
+	registry.typeMap = make(map[bsontype.Type]reflect.Type)
+	for bt, rt := range rb.typeMap {
+		registry.typeMap[bt] = rt
+	}
+
+	return registry
+}
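+
+// Illustrative sketch of the builder lifecycle (MyType and MyCodec are
+// hypothetical): register codecs on the builder, then Build an immutable
+// Registry that is safe to share between goroutines.
+//
+//	rb := NewRegistryBuilder()
+//	defaultValueEncoders.RegisterDefaultEncoders(rb)
+//	defaultValueDecoders.RegisterDefaultDecoders(rb)
+//	rb.RegisterCodec(reflect.TypeOf(MyType{}), &MyCodec{})
+//	reg := rb.Build()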
+
+// LookupEncoder will inspect the registry for an encoder that satisfies the
+// type provided. An encoder registered for a specific type will take
+// precedence over an encoder registered for an interface the type satisfies,
+// which takes precedence over an encoder for the reflect.Kind of the value. If
+// no encoder can be found, an error is returned.
+func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) {
+	encodererr := ErrNoEncoder{Type: t}
+	r.mu.RLock()
+	enc, found := r.lookupTypeEncoder(t)
+	r.mu.RUnlock()
+	if found {
+		if enc == nil {
+			return nil, ErrNoEncoder{Type: t}
+		}
+		return enc, nil
+	}
+
+	enc, found = r.lookupInterfaceEncoder(t)
+	if found {
+		r.mu.Lock()
+		r.typeEncoders[t] = enc
+		r.mu.Unlock()
+		return enc, nil
+	}
+
+	if t == nil {
+		r.mu.Lock()
+		r.typeEncoders[t] = nil
+		r.mu.Unlock()
+		return nil, encodererr
+	}
+
+	enc, found = r.kindEncoders[t.Kind()]
+	if !found {
+		r.mu.Lock()
+		r.typeEncoders[t] = nil
+		r.mu.Unlock()
+		return nil, encodererr
+	}
+
+	r.mu.Lock()
+	r.typeEncoders[t] = enc
+	r.mu.Unlock()
+	return enc, nil
+}
+
+func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) {
+	enc, found := r.typeEncoders[t]
+	return enc, found
+}
+
+func (r *Registry) lookupInterfaceEncoder(t reflect.Type) (ValueEncoder, bool) {
+	if t == nil {
+		return nil, false
+	}
+	for _, ienc := range r.interfaceEncoders {
+		if !t.Implements(ienc.i) {
+			continue
+		}
+
+		return ienc.ve, true
+	}
+	return nil, false
+}
+
+// LookupDecoder will inspect the registry for a decoder that satisfies the
+// type provided. A decoder registered for a specific type will take
+// precedence over a decoder registered for an interface the type satisfies,
+// which takes precedence over a decoder for the reflect.Kind of the value. If
+// no decoder can be found, an error is returned.
+func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) {
+	if t == nil {
+		return nil, ErrNilType
+	}
+	decodererr := ErrNoDecoder{Type: t}
+	r.mu.RLock()
+	dec, found := r.lookupTypeDecoder(t)
+	r.mu.RUnlock()
+	if found {
+		if dec == nil {
+			return nil, ErrNoDecoder{Type: t}
+		}
+		return dec, nil
+	}
+
+	dec, found = r.lookupInterfaceDecoder(t)
+	if found {
+		r.mu.Lock()
+		r.typeDecoders[t] = dec
+		r.mu.Unlock()
+		return dec, nil
+	}
+
+	dec, found = r.kindDecoders[t.Kind()]
+	if !found {
+		r.mu.Lock()
+		r.typeDecoders[t] = nil
+		r.mu.Unlock()
+		return nil, decodererr
+	}
+
+	r.mu.Lock()
+	r.typeDecoders[t] = dec
+	r.mu.Unlock()
+	return dec, nil
+}
+
+func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) {
+	dec, found := r.typeDecoders[t]
+	return dec, found
+}
+
+func (r *Registry) lookupInterfaceDecoder(t reflect.Type) (ValueDecoder, bool) {
+	for _, idec := range r.interfaceDecoders {
+		if !t.Implements(idec.i) && !reflect.PtrTo(t).Implements(idec.i) {
+			continue
+		}
+
+		return idec.vd, true
+	}
+	return nil, false
+}
+
+// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON
+// type. If no type is found, ErrNoTypeMapEntry is returned.
+func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) {
+	t, ok := r.typeMap[bt]
+	if !ok || t == nil {
+		return nil, ErrNoTypeMapEntry{Type: bt}
+	}
+	return t, nil
+}
+
+type interfaceValueEncoder struct {
+	i  reflect.Type
+	ve ValueEncoder
+}
+
+type interfaceValueDecoder struct {
+	i  reflect.Type
+	vd ValueDecoder
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/struct_codec.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/struct_codec.go
new file mode 100644
index 0000000..34f3b4c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/struct_codec.go
@@ -0,0 +1,357 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+var defaultStructCodec = &StructCodec{
+	cache:  make(map[reflect.Type]*structDescription),
+	parser: DefaultStructTagParser,
+}
+
+// Zeroer allows custom struct types to report whether they are in their zero
+// state. All struct types that don't implement Zeroer, or whose IsZero method
+// returns false, are considered to be non-zero.
+type Zeroer interface {
+	IsZero() bool
+}
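+
+// Illustrative sketch (the OptionalID type is hypothetical): a struct can
+// control omitempty semantics by reporting its own zero state.
+//
+//	type OptionalID struct {
+//		Set bool
+//		ID  primitive.ObjectID
+//	}
+//
+//	// IsZero reports the value as zero whenever it has not been set.
+//	func (o OptionalID) IsZero() bool { return !o.Set }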
+
+// StructCodec is the Codec used for struct values.
+type StructCodec struct {
+	cache  map[reflect.Type]*structDescription
+	l      sync.RWMutex
+	parser StructTagParser
+}
+
+var _ ValueEncoder = &StructCodec{}
+var _ ValueDecoder = &StructCodec{}
+
+// NewStructCodec returns a StructCodec that uses p for struct tag parsing.
+func NewStructCodec(p StructTagParser) (*StructCodec, error) {
+	if p == nil {
+		return nil, errors.New("a StructTagParser must be provided to NewStructCodec")
+	}
+
+	return &StructCodec{
+		cache:  make(map[reflect.Type]*structDescription),
+		parser: p,
+	}, nil
+}
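+
+// Illustrative usage sketch (error handling elided): build a StructCodec with
+// the default tag parser and register it as the fallback codec for structs.
+//
+//	sc, _ := NewStructCodec(DefaultStructTagParser)
+//	rb.RegisterDefaultEncoder(reflect.Struct, sc)
+//	rb.RegisterDefaultDecoder(reflect.Struct, sc)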
+
+// EncodeValue handles encoding generic struct types.
+func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Struct {
+		return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
+	}
+
+	sd, err := sc.describeStruct(r.Registry, val.Type())
+	if err != nil {
+		return err
+	}
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+	var rv reflect.Value
+	for _, desc := range sd.fl {
+		if desc.inline == nil {
+			rv = val.Field(desc.idx)
+		} else {
+			rv = val.FieldByIndex(desc.inline)
+		}
+
+		if desc.encoder == nil {
+			return ErrNoEncoder{Type: rv.Type()}
+		}
+
+		encoder := desc.encoder
+
+		iszero := sc.isZero
+		if iz, ok := encoder.(CodecZeroer); ok {
+			iszero = iz.IsTypeZero
+		}
+
+		if desc.omitEmpty && iszero(rv.Interface()) {
+			continue
+		}
+
+		vw2, err := dw.WriteDocumentElement(desc.name)
+		if err != nil {
+			return err
+		}
+
+		ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize}
+		err = encoder.EncodeValue(ectx, vw2, rv)
+		if err != nil {
+			return err
+		}
+	}
+
+	if sd.inlineMap >= 0 {
+		rv := val.Field(sd.inlineMap)
+		collisionFn := func(key string) bool {
+			_, exists := sd.fm[key]
+			return exists
+		}
+
+		return defaultValueEncoders.mapEncodeValue(r, dw, rv, collisionFn)
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// DecodeValue implements the ValueDecoder interface.
+func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Struct {
+		return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
+	}
+
+	switch vr.Type() {
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+	default:
+		return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type())
+	}
+
+	sd, err := sc.describeStruct(r.Registry, val.Type())
+	if err != nil {
+		return err
+	}
+
+	var decoder ValueDecoder
+	var inlineMap reflect.Value
+	if sd.inlineMap >= 0 {
+		inlineMap = val.Field(sd.inlineMap)
+		if inlineMap.IsNil() {
+			inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+		}
+		decoder, err = r.LookupDecoder(inlineMap.Type().Elem())
+		if err != nil {
+			return err
+		}
+	}
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	for {
+		name, vr, err := dr.ReadElement()
+		if err == bsonrw.ErrEOD {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		fd, exists := sd.fm[name]
+		if !exists {
+			if sd.inlineMap < 0 {
+				// Unknown fields are skipped here. The encoding/json package requires a flag
+				// to return an error for unknown fields instead; similar functionality may be
+				// appropriate for the struct codec.
+				err = vr.Skip()
+				if err != nil {
+					return err
+				}
+				continue
+			}
+
+			elem := reflect.New(inlineMap.Type().Elem()).Elem()
+			err = decoder.DecodeValue(r, vr, elem)
+			if err != nil {
+				return err
+			}
+			inlineMap.SetMapIndex(reflect.ValueOf(name), elem)
+			continue
+		}
+
+		var field reflect.Value
+		if fd.inline == nil {
+			field = val.Field(fd.idx)
+		} else {
+			field = val.FieldByIndex(fd.inline)
+		}
+
+		if !field.CanSet() { // Being settable implies being addressable, so this check covers both.
+			return fmt.Errorf("cannot decode element '%s' into field %v; it is not settable", name, field)
+		}
+		if field.Kind() == reflect.Ptr && field.IsNil() {
+			field.Set(reflect.New(field.Type().Elem()))
+		}
+		field = field.Addr()
+
+		dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate}
+		if fd.decoder == nil {
+			return ErrNoDecoder{Type: field.Elem().Type()}
+		}
+
+		if decoder, ok := fd.decoder.(ValueDecoder); ok {
+			err = decoder.DecodeValue(dctx, vr, field.Elem())
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		err = fd.decoder.DecodeValue(dctx, vr, field)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (sc *StructCodec) isZero(i interface{}) bool {
+	v := reflect.ValueOf(i)
+
+	// check the value validity
+	if !v.IsValid() {
+		return true
+	}
+
+	if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
+		return z.IsZero()
+	}
+
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+
+	return false
+}
+
+type structDescription struct {
+	fm        map[string]fieldDescription
+	fl        []fieldDescription
+	inlineMap int
+}
+
+type fieldDescription struct {
+	name      string
+	idx       int
+	omitEmpty bool
+	minSize   bool
+	truncate  bool
+	inline    []int
+	encoder   ValueEncoder
+	decoder   ValueDecoder
+}
+
+func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) {
+	// We need to analyze the struct, including getting the tags, collecting
+	// information about inlining, and creating a map from field name to the field.
+	sc.l.RLock()
+	ds, exists := sc.cache[t]
+	sc.l.RUnlock()
+	if exists {
+		return ds, nil
+	}
+
+	numFields := t.NumField()
+	sd := &structDescription{
+		fm:        make(map[string]fieldDescription, numFields),
+		fl:        make([]fieldDescription, 0, numFields),
+		inlineMap: -1,
+	}
+
+	for i := 0; i < numFields; i++ {
+		sf := t.Field(i)
+		if sf.PkgPath != "" {
+			// unexported, ignore
+			continue
+		}
+
+		encoder, err := r.LookupEncoder(sf.Type)
+		if err != nil {
+			encoder = nil
+		}
+		decoder, err := r.LookupDecoder(sf.Type)
+		if err != nil {
+			decoder = nil
+		}
+
+		description := fieldDescription{idx: i, encoder: encoder, decoder: decoder}
+
+		stags, err := sc.parser.ParseStructTags(sf)
+		if err != nil {
+			return nil, err
+		}
+		if stags.Skip {
+			continue
+		}
+		description.name = stags.Name
+		description.omitEmpty = stags.OmitEmpty
+		description.minSize = stags.MinSize
+		description.truncate = stags.Truncate
+
+		if stags.Inline {
+			switch sf.Type.Kind() {
+			case reflect.Map:
+				if sd.inlineMap >= 0 {
+					return nil, errors.New("(struct " + t.String() + ") multiple inline maps")
+				}
+				if sf.Type.Key() != tString {
+					return nil, errors.New("(struct " + t.String() + ") inline map must have a string keys")
+				}
+				sd.inlineMap = description.idx
+			case reflect.Struct:
+				inlinesf, err := sc.describeStruct(r, sf.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, fd := range inlinesf.fl {
+					if _, exists := sd.fm[fd.name]; exists {
+						return nil, fmt.Errorf("(struct %s) duplicated key %s", t.String(), fd.name)
+					}
+					if fd.inline == nil {
+						fd.inline = []int{i, fd.idx}
+					} else {
+						fd.inline = append([]int{i}, fd.inline...)
+					}
+					sd.fm[fd.name] = fd
+					sd.fl = append(sd.fl, fd)
+				}
+			default:
+				return nil, fmt.Errorf("(struct %s) inline fields must be either a struct or a map", t.String())
+			}
+			continue
+		}
+
+		if _, exists := sd.fm[description.name]; exists {
+			return nil, fmt.Errorf("struct %s) duplicated key %s", t.String(), description.name)
+		}
+
+		sd.fm[description.name] = description
+		sd.fl = append(sd.fl, description)
+	}
+
+	sc.l.Lock()
+	sc.cache[t] = sd
+	sc.l.Unlock()
+
+	return sd, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/struct_tag_parser.go
new file mode 100644
index 0000000..69d0ae4
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/struct_tag_parser.go
@@ -0,0 +1,119 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+	"strings"
+)
+
+// StructTagParser returns the struct tags for a given struct field.
+type StructTagParser interface {
+	ParseStructTags(reflect.StructField) (StructTags, error)
+}
+
+// StructTagParserFunc is an adapter that allows a generic function to be used
+// as a StructTagParser.
+type StructTagParserFunc func(reflect.StructField) (StructTags, error)
+
+// ParseStructTags implements the StructTagParser interface.
+func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) {
+	return stpf(sf)
+}
+
+// StructTags represents the struct tag fields that the StructCodec uses during
+// the encoding and decoding process.
+//
+// In the case of a struct, the lowercased field name is used as the key for each exported
+// field, but this behavior may be changed using a struct tag. The tag may also contain flags to
+// adjust the marshalling behavior for the field.
+//
+// The properties are defined below:
+//
+//     OmitEmpty  Only include the field if it's not set to the zero value for the type or to
+//                empty slices or maps.
+//
+//     MinSize    Marshal an integer type larger than 32 bits as an int32 if that's
+//                feasible while preserving the numeric value.
+//
+//     Truncate   When unmarshaling a BSON double, it is permitted to lose precision to fit within
+//                a float32.
+//
+//     Inline     Inline the field, which must be a struct or a map, causing all of its fields
+//                or keys to be processed as if they were part of the outer struct. For maps,
+//                keys must not conflict with the bson keys of other struct fields.
+//
+//     Skip       This struct field should be skipped. This is usually denoted by parsing a "-"
+//                for the name.
+//
+// TODO(skriptble): Add tags for undefined as nil and for null as nil.
+type StructTags struct {
+	Name      string
+	OmitEmpty bool
+	MinSize   bool
+	Truncate  bool
+	Inline    bool
+	Skip      bool
+}
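+
+// Illustrative sketch (the User type is hypothetical) of the properties above
+// expressed as bson struct tags:
+//
+//	type User struct {
+//		ID    primitive.ObjectID `bson:"_id,omitempty"`
+//		Name  string             `bson:"name"`
+//		Score float64            `bson:"score,truncate"`
+//		Extra map[string]string  `bson:",inline"`
+//	}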
+
+// DefaultStructTagParser is the StructTagParser used by the StructCodec by default.
+// It will handle the bson struct tag. See the documentation for StructTags to see
+// what each of the returned fields means.
+//
+// If there is no name in the struct tag fields, the struct field name is lowercased.
+// The tag formats accepted are:
+//
+//     "[<key>][,<flag1>[,<flag2>]]"
+//
+//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// An example:
+//
+//     type T struct {
+//         A bool
+//         B int    "myb"
+//         C string "myc,omitempty"
+//         D string `bson:",omitempty" json:"jsonkey"`
+//         E int64  ",minsize"
+//         F int64  "myf,omitempty,minsize"
+//     }
+//
+// A struct tag either consisting entirely of '-' or with a bson key whose
+// value consists entirely of '-' will return a StructTags with Skip true; the
+// remaining fields will have their default values.
+var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
+	key := strings.ToLower(sf.Name)
+	tag, ok := sf.Tag.Lookup("bson")
+	if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 {
+		tag = string(sf.Tag)
+	}
+	var st StructTags
+	if tag == "-" {
+		st.Skip = true
+		return st, nil
+	}
+
+	for idx, str := range strings.Split(tag, ",") {
+		if idx == 0 && str != "" {
+			key = str
+		}
+		switch str {
+		case "omitempty":
+			st.OmitEmpty = true
+		case "minsize":
+			st.MinSize = true
+		case "truncate":
+			st.Truncate = true
+		case "inline":
+			st.Inline = true
+		}
+	}
+
+	st.Name = key
+
+	return st, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/types.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/types.go
new file mode 100644
index 0000000..c349a94
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/types.go
@@ -0,0 +1,80 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"encoding/json"
+	"net/url"
+	"reflect"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+var ptBool = reflect.TypeOf((*bool)(nil))
+var ptInt8 = reflect.TypeOf((*int8)(nil))
+var ptInt16 = reflect.TypeOf((*int16)(nil))
+var ptInt32 = reflect.TypeOf((*int32)(nil))
+var ptInt64 = reflect.TypeOf((*int64)(nil))
+var ptInt = reflect.TypeOf((*int)(nil))
+var ptUint8 = reflect.TypeOf((*uint8)(nil))
+var ptUint16 = reflect.TypeOf((*uint16)(nil))
+var ptUint32 = reflect.TypeOf((*uint32)(nil))
+var ptUint64 = reflect.TypeOf((*uint64)(nil))
+var ptUint = reflect.TypeOf((*uint)(nil))
+var ptFloat32 = reflect.TypeOf((*float32)(nil))
+var ptFloat64 = reflect.TypeOf((*float64)(nil))
+var ptString = reflect.TypeOf((*string)(nil))
+
+var tBool = reflect.TypeOf(false)
+var tFloat32 = reflect.TypeOf(float32(0))
+var tFloat64 = reflect.TypeOf(float64(0))
+var tInt = reflect.TypeOf(int(0))
+var tInt8 = reflect.TypeOf(int8(0))
+var tInt16 = reflect.TypeOf(int16(0))
+var tInt32 = reflect.TypeOf(int32(0))
+var tInt64 = reflect.TypeOf(int64(0))
+var tString = reflect.TypeOf("")
+var tTime = reflect.TypeOf(time.Time{})
+var tUint = reflect.TypeOf(uint(0))
+var tUint8 = reflect.TypeOf(uint8(0))
+var tUint16 = reflect.TypeOf(uint16(0))
+var tUint32 = reflect.TypeOf(uint32(0))
+var tUint64 = reflect.TypeOf(uint64(0))
+
+var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem()
+var tByteSlice = reflect.TypeOf([]byte(nil))
+var tByte = reflect.TypeOf(byte(0x00))
+var tURL = reflect.TypeOf(url.URL{})
+var tJSONNumber = reflect.TypeOf(json.Number(""))
+
+var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem()
+var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem()
+var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem()
+var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem()
+
+var tBinary = reflect.TypeOf(primitive.Binary{})
+var tUndefined = reflect.TypeOf(primitive.Undefined{})
+var tOID = reflect.TypeOf(primitive.ObjectID{})
+var tDateTime = reflect.TypeOf(primitive.DateTime(0))
+var tNull = reflect.TypeOf(primitive.Null{})
+var tRegex = reflect.TypeOf(primitive.Regex{})
+var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{})
+var tDBPointer = reflect.TypeOf(primitive.DBPointer{})
+var tJavaScript = reflect.TypeOf(primitive.JavaScript(""))
+var tSymbol = reflect.TypeOf(primitive.Symbol(""))
+var tTimestamp = reflect.TypeOf(primitive.Timestamp{})
+var tDecimal = reflect.TypeOf(primitive.Decimal128{})
+var tMinKey = reflect.TypeOf(primitive.MinKey{})
+var tMaxKey = reflect.TypeOf(primitive.MaxKey{})
+var tD = reflect.TypeOf(primitive.D{})
+var tA = reflect.TypeOf(primitive.A{})
+var tE = reflect.TypeOf(primitive.E{})
+
+var tCoreDocument = reflect.TypeOf(bsoncore.Document{})
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/copier.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/copier.go
new file mode 100644
index 0000000..e01a650
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/copier.go
@@ -0,0 +1,389 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// Copier is a type that allows copying between ValueReaders, ValueWriters, and
+// []byte values.
+type Copier struct{}
+
+// NewCopier creates a new Copier.
+func NewCopier() Copier {
+	return Copier{}
+}
+
+// CopyDocument handles copying a document from src to dst.
+func CopyDocument(dst ValueWriter, src ValueReader) error {
+	return Copier{}.CopyDocument(dst, src)
+}
+
+// CopyDocument handles copying one document from the src to the dst.
+func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error {
+	dr, err := src.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	dw, err := dst.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return c.copyDocumentCore(dw, dr)
+}
+
+// CopyDocumentFromBytes copies the values from a BSON document represented as a
+// []byte to a ValueWriter.
+func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error {
+	dw, err := dst.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	err = c.CopyBytesToDocumentWriter(dw, src)
+	if err != nil {
+		return err
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a
+// DocumentWriter.
+func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error {
+	// TODO(skriptble): Create error types here. Anything that's a tag should be a property.
+	length, rem, ok := bsoncore.ReadLength(src)
+	if !ok {
+		return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src))
+	}
+	if len(src) < int(length) {
+		return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", len(src), length)
+	}
+	rem = rem[:length-4]
+
+	var t bsontype.Type
+	var key string
+	var val bsoncore.Value
+	for {
+		t, rem, ok = bsoncore.ReadType(rem)
+		if !ok {
+			return io.EOF
+		}
+		if t == bsontype.Type(0) {
+			if len(rem) != 0 {
+				return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem)
+			}
+			break
+		}
+
+		key, rem, ok = bsoncore.ReadKey(rem)
+		if !ok {
+			return fmt.Errorf("invalid key found. remaining bytes=%v", rem)
+		}
+		dvw, err := dst.WriteDocumentElement(key)
+		if err != nil {
+			return err
+		}
+		val, rem, ok = bsoncore.ReadValue(rem, t)
+		if !ok {
+			return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t)
+		}
+		err = c.CopyValueFromBytes(dvw, t, val.Data)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CopyDocumentToBytes copies an entire document from the ValueReader and
+// returns it as bytes.
+func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) {
+	return c.AppendDocumentBytes(nil, src)
+}
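+
+// Illustrative usage sketch (vr is assumed to be a ValueReader positioned on a
+// document): copy the document into raw BSON bytes.
+//
+//	c := NewCopier()
+//	raw, err := c.CopyDocumentToBytes(vr)
+//	if err != nil {
+//		// handle error
+//	}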
+
+// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will
+// append the result to dst.
+func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) {
+	if br, ok := src.(BytesReader); ok {
+		_, dst, err := br.ReadValueBytes(dst)
+		return dst, err
+	}
+
+	vw := vwPool.Get().(*valueWriter)
+	defer vwPool.Put(vw)
+
+	vw.reset(dst)
+
+	err := c.CopyDocument(vw, src)
+	dst = vw.buf
+	return dst, err
+}
+
+// CopyValueFromBytes will write the value represented by t and src to dst.
+func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error {
+	if wvb, ok := dst.(BytesWriter); ok {
+		return wvb.WriteValueBytes(t, src)
+	}
+
+	vr := vrPool.Get().(*valueReader)
+	defer vrPool.Put(vr)
+
+	vr.reset(src)
+	vr.pushElement(t)
+
+	return c.CopyValue(dst, vr)
+}
+
+// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a
+// []byte.
+func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) {
+	return c.AppendValueBytes(nil, src)
+}
+
+// AppendValueBytes functions the same as CopyValueToBytes, but will append the
+// result to dst.
+func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) {
+	if br, ok := src.(BytesReader); ok {
+		return br.ReadValueBytes(dst)
+	}
+
+	vw := vwPool.Get().(*valueWriter)
+	defer vwPool.Put(vw)
+
+	start := len(dst)
+
+	vw.reset(dst)
+	vw.push(mElement)
+
+	err := c.CopyValue(vw, src)
+	if err != nil {
+		return 0, dst, err
+	}
+
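+	// The value was written as an element with an empty key, so the buffer
+	// layout from start is "type byte, 0x00 key terminator, value bytes":
+	// the type lives at buf[start] and the value begins at buf[start+2].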
+	return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil
+}
+
+// CopyValue will copy a single value from src to dst.
+func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error {
+	var err error
+	switch src.Type() {
+	case bsontype.Double:
+		var f64 float64
+		f64, err = src.ReadDouble()
+		if err != nil {
+			break
+		}
+		err = dst.WriteDouble(f64)
+	case bsontype.String:
+		var str string
+		str, err = src.ReadString()
+		if err != nil {
+			return err
+		}
+		err = dst.WriteString(str)
+	case bsontype.EmbeddedDocument:
+		err = c.CopyDocument(dst, src)
+	case bsontype.Array:
+		err = c.copyArray(dst, src)
+	case bsontype.Binary:
+		var data []byte
+		var subtype byte
+		data, subtype, err = src.ReadBinary()
+		if err != nil {
+			break
+		}
+		err = dst.WriteBinaryWithSubtype(data, subtype)
+	case bsontype.Undefined:
+		err = src.ReadUndefined()
+		if err != nil {
+			break
+		}
+		err = dst.WriteUndefined()
+	case bsontype.ObjectID:
+		var oid primitive.ObjectID
+		oid, err = src.ReadObjectID()
+		if err != nil {
+			break
+		}
+		err = dst.WriteObjectID(oid)
+	case bsontype.Boolean:
+		var b bool
+		b, err = src.ReadBoolean()
+		if err != nil {
+			break
+		}
+		err = dst.WriteBoolean(b)
+	case bsontype.DateTime:
+		var dt int64
+		dt, err = src.ReadDateTime()
+		if err != nil {
+			break
+		}
+		err = dst.WriteDateTime(dt)
+	case bsontype.Null:
+		err = src.ReadNull()
+		if err != nil {
+			break
+		}
+		err = dst.WriteNull()
+	case bsontype.Regex:
+		var pattern, options string
+		pattern, options, err = src.ReadRegex()
+		if err != nil {
+			break
+		}
+		err = dst.WriteRegex(pattern, options)
+	case bsontype.DBPointer:
+		var ns string
+		var pointer primitive.ObjectID
+		ns, pointer, err = src.ReadDBPointer()
+		if err != nil {
+			break
+		}
+		err = dst.WriteDBPointer(ns, pointer)
+	case bsontype.JavaScript:
+		var js string
+		js, err = src.ReadJavascript()
+		if err != nil {
+			break
+		}
+		err = dst.WriteJavascript(js)
+	case bsontype.Symbol:
+		var symbol string
+		symbol, err = src.ReadSymbol()
+		if err != nil {
+			break
+		}
+		err = dst.WriteSymbol(symbol)
+	case bsontype.CodeWithScope:
+		var code string
+		var srcScope DocumentReader
+		code, srcScope, err = src.ReadCodeWithScope()
+		if err != nil {
+			break
+		}
+
+		var dstScope DocumentWriter
+		dstScope, err = dst.WriteCodeWithScope(code)
+		if err != nil {
+			break
+		}
+		err = c.copyDocumentCore(dstScope, srcScope)
+	case bsontype.Int32:
+		var i32 int32
+		i32, err = src.ReadInt32()
+		if err != nil {
+			break
+		}
+		err = dst.WriteInt32(i32)
+	case bsontype.Timestamp:
+		var t, i uint32
+		t, i, err = src.ReadTimestamp()
+		if err != nil {
+			break
+		}
+		err = dst.WriteTimestamp(t, i)
+	case bsontype.Int64:
+		var i64 int64
+		i64, err = src.ReadInt64()
+		if err != nil {
+			break
+		}
+		err = dst.WriteInt64(i64)
+	case bsontype.Decimal128:
+		var d128 primitive.Decimal128
+		d128, err = src.ReadDecimal128()
+		if err != nil {
+			break
+		}
+		err = dst.WriteDecimal128(d128)
+	case bsontype.MinKey:
+		err = src.ReadMinKey()
+		if err != nil {
+			break
+		}
+		err = dst.WriteMinKey()
+	case bsontype.MaxKey:
+		err = src.ReadMaxKey()
+		if err != nil {
+			break
+		}
+		err = dst.WriteMaxKey()
+	default:
+		err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type())
+	}
+
+	return err
+}
+
+func (c Copier) copyArray(dst ValueWriter, src ValueReader) error {
+	ar, err := src.ReadArray()
+	if err != nil {
+		return err
+	}
+
+	aw, err := dst.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	for {
+		vr, err := ar.ReadValue()
+		if err == ErrEOA {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		err = c.CopyValue(vw, vr)
+		if err != nil {
+			return err
+		}
+	}
+
+	return aw.WriteArrayEnd()
+}
+
+func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error {
+	for {
+		key, vr, err := dr.ReadElement()
+		if err == ErrEOD {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		vw, err := dw.WriteDocumentElement(key)
+		if err != nil {
+			return err
+		}
+
+		err = c.CopyValue(vw, vr)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/doc.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/doc.go
new file mode 100644
index 0000000..21f24b0
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/doc.go
@@ -0,0 +1,9 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsonrw contains abstractions for reading and writing
+// BSON and BSON-like types from sources.
+package bsonrw
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_parser.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_parser.go
new file mode 100644
index 0000000..689982c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_parser.go
@@ -0,0 +1,687 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+const maxNestingDepth = 200
+
+// ErrInvalidJSON indicates the JSON input is invalid
+var ErrInvalidJSON = errors.New("invalid JSON input")
+
+type jsonParseState byte
+
+const (
+	jpsStartState jsonParseState = iota
+	jpsSawBeginObject
+	jpsSawEndObject
+	jpsSawBeginArray
+	jpsSawEndArray
+	jpsSawColon
+	jpsSawComma
+	jpsSawKey
+	jpsSawValue
+	jpsDoneState
+	jpsInvalidState
+)
+
+type jsonParseMode byte
+
+const (
+	jpmInvalidMode jsonParseMode = iota
+	jpmObjectMode
+	jpmArrayMode
+)
+
+type extJSONValue struct {
+	t bsontype.Type
+	v interface{}
+}
+
+type extJSONObject struct {
+	keys   []string
+	values []*extJSONValue
+}
+
+type extJSONParser struct {
+	js *jsonScanner
+	s  jsonParseState
+	m  []jsonParseMode
+	k  string
+	v  *extJSONValue
+
+	err       error
+	canonical bool
+	depth     int
+	maxDepth  int
+
+	emptyObject bool
+}
+
+// newExtJSONParser returns a new extended JSON parser, ready to begin
+// parsing from the first character of the given JSON input. It will not
+// perform any read-ahead and will therefore not report any errors about
+// malformed JSON at this point.
+func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser {
+	return &extJSONParser{
+		js:        &jsonScanner{r: r},
+		s:         jpsStartState,
+		m:         []jsonParseMode{},
+		canonical: canonical,
+		maxDepth:  maxNestingDepth,
+	}
+}
+
+// peekType examines the next value and returns its BSON Type
+func (ejp *extJSONParser) peekType() (bsontype.Type, error) {
+	var t bsontype.Type
+	var err error
+
+	ejp.advanceState()
+	switch ejp.s {
+	case jpsSawValue:
+		t = ejp.v.t
+	case jpsSawBeginArray:
+		t = bsontype.Array
+	case jpsInvalidState:
+		err = ejp.err
+	case jpsSawComma:
+		// in array mode, seeing a comma means we need to progress again to actually observe a type
+		if ejp.peekMode() == jpmArrayMode {
+			return ejp.peekType()
+		}
+	case jpsSawEndArray:
+		// this would only be a valid state if we were in array mode, so return end-of-array error
+		err = ErrEOA
+	case jpsSawBeginObject:
+		// peek key to determine type
+		ejp.advanceState()
+		switch ejp.s {
+		case jpsSawEndObject: // empty embedded document
+			t = bsontype.EmbeddedDocument
+			ejp.emptyObject = true
+		case jpsInvalidState:
+			err = ejp.err
+		case jpsSawKey:
+			t = wrapperKeyBSONType(ejp.k)
+
+			if t == bsontype.JavaScript {
+				// just saw $code, need to check for $scope at same level
+				_, err := ejp.readValue(bsontype.JavaScript)
+
+				if err != nil {
+					break
+				}
+
+				switch ejp.s {
+				case jpsSawEndObject: // type is TypeJavaScript
+				case jpsSawComma:
+					ejp.advanceState()
+					if ejp.s == jpsSawKey && ejp.k == "$scope" {
+						t = bsontype.CodeWithScope
+					} else {
+						err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k)
+					}
+				case jpsInvalidState:
+					err = ejp.err
+				default:
+					err = ErrInvalidJSON
+				}
+			}
+		}
+	}
+
+	return t, err
+}
+
+// readKey parses the next key and its type and returns them
+func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) {
+	if ejp.emptyObject {
+		ejp.emptyObject = false
+		return "", 0, ErrEOD
+	}
+
+	// advance to key (or return with error)
+	switch ejp.s {
+	case jpsStartState:
+		ejp.advanceState()
+		if ejp.s == jpsSawBeginObject {
+			ejp.advanceState()
+		}
+	case jpsSawBeginObject:
+		ejp.advanceState()
+	case jpsSawValue, jpsSawEndObject, jpsSawEndArray:
+		ejp.advanceState()
+		switch ejp.s {
+		case jpsSawBeginObject, jpsSawComma:
+			ejp.advanceState()
+		case jpsSawEndObject:
+			return "", 0, ErrEOD
+		case jpsDoneState:
+			return "", 0, io.EOF
+		case jpsInvalidState:
+			return "", 0, ejp.err
+		default:
+			return "", 0, ErrInvalidJSON
+		}
+	case jpsSawKey: // do nothing (key was peeked before)
+	default:
+		return "", 0, invalidRequestError("key")
+	}
+
+	// read key
+	var key string
+
+	switch ejp.s {
+	case jpsSawKey:
+		key = ejp.k
+	case jpsSawEndObject:
+		return "", 0, ErrEOD
+	case jpsInvalidState:
+		return "", 0, ejp.err
+	default:
+		return "", 0, invalidRequestError("key")
+	}
+
+	// check for colon
+	ejp.advanceState()
+	if err := ensureColon(ejp.s, key); err != nil {
+		return "", 0, err
+	}
+
+	// peek at the value to determine type
+	t, err := ejp.peekType()
+	if err != nil {
+		return "", 0, err
+	}
+
+	return key, t, nil
+}
+
+// readValue returns the value corresponding to the Type returned by peekType
+func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) {
+	if ejp.s == jpsInvalidState {
+		return nil, ejp.err
+	}
+
+	var v *extJSONValue
+
+	switch t {
+	case bsontype.Null, bsontype.Boolean, bsontype.String:
+		if ejp.s != jpsSawValue {
+			return nil, invalidRequestError(t.String())
+		}
+		v = ejp.v
+	case bsontype.Int32, bsontype.Int64, bsontype.Double:
+		// relaxed version allows these to be literal number values
+		if ejp.s == jpsSawValue {
+			v = ejp.v
+			break
+		}
+		fallthrough
+	case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined:
+		switch ejp.s {
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read value
+			ejp.advanceState()
+			if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) {
+				return nil, invalidJSONErrorForType("value", t)
+			}
+
+			v = ejp.v
+
+			// read end object
+			ejp.advanceState()
+			if ejp.s != jpsSawEndObject {
+				return nil, invalidJSONErrorForType("} after value", t)
+			}
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer:
+		if ejp.s != jpsSawKey {
+			return nil, invalidRequestError(t.String())
+		}
+		// read colon
+		ejp.advanceState()
+		if err := ensureColon(ejp.s, ejp.k); err != nil {
+			return nil, err
+		}
+
+		// read KV pairs
+		keys, vals, err := ejp.readObject(2, false)
+		if err != nil {
+			return nil, err
+		}
+
+		ejp.advanceState()
+		if ejp.s != jpsSawEndObject {
+			return nil, invalidJSONErrorForType("2 key-value pairs and then }", t)
+		}
+
+		v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
+	case bsontype.DateTime:
+		switch ejp.s {
+		case jpsSawValue:
+			v = ejp.v
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			ejp.advanceState()
+			switch ejp.s {
+			case jpsSawBeginObject:
+				keys, vals, err := ejp.readObject(1, true)
+				if err != nil {
+					return nil, err
+				}
+				v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
+			case jpsSawValue:
+				if ejp.canonical {
+					return nil, invalidJSONError("{")
+				}
+				v = ejp.v
+			default:
+				if ejp.canonical {
+					return nil, invalidJSONErrorForType("object", t)
+				}
+				return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as decribed in RFC-3339", t)
+			}
+
+			ejp.advanceState()
+			if ejp.s != jpsSawEndObject {
+				return nil, invalidJSONErrorForType("value and then }", t)
+			}
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.JavaScript:
+		switch ejp.s {
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read value
+			ejp.advanceState()
+			if ejp.s != jpsSawValue {
+				return nil, invalidJSONErrorForType("value", t)
+			}
+			v = ejp.v
+
+			// read end object or comma and just return
+			ejp.advanceState()
+		case jpsSawEndObject:
+			v = ejp.v
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.CodeWithScope:
+		if ejp.s == jpsSawKey && ejp.k == "$scope" {
+			v = ejp.v // this is the $code string from earlier
+
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read {
+			ejp.advanceState()
+			if ejp.s != jpsSawBeginObject {
+				return nil, invalidJSONError("$scope to be embedded document")
+			}
+		} else {
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.EmbeddedDocument, bsontype.Array:
+		return nil, invalidRequestError(t.String())
+	}
+
+	return v, nil
+}
+
+// readObject is a utility method for reading full objects of known (or expected) size
+// it is useful for extended JSON types such as binary, datetime, regex, and timestamp
+func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) {
+	keys := make([]string, numKeys)
+	vals := make([]*extJSONValue, numKeys)
+
+	if !started {
+		ejp.advanceState()
+		if ejp.s != jpsSawBeginObject {
+			return nil, nil, invalidJSONError("{")
+		}
+	}
+
+	for i := 0; i < numKeys; i++ {
+		key, t, err := ejp.readKey()
+		if err != nil {
+			return nil, nil, err
+		}
+
+		switch ejp.s {
+		case jpsSawKey:
+			v, err := ejp.readValue(t)
+			if err != nil {
+				return nil, nil, err
+			}
+
+			keys[i] = key
+			vals[i] = v
+		case jpsSawValue:
+			keys[i] = key
+			vals[i] = ejp.v
+		default:
+			return nil, nil, invalidJSONError("value")
+		}
+	}
+
+	ejp.advanceState()
+	if ejp.s != jpsSawEndObject {
+		return nil, nil, invalidJSONError("}")
+	}
+
+	return keys, vals, nil
+}
+
+// advanceState reads the next JSON token from the scanner and transitions
+// from the current state based on that token's type
+func (ejp *extJSONParser) advanceState() {
+	if ejp.s == jpsDoneState || ejp.s == jpsInvalidState {
+		return
+	}
+
+	jt, err := ejp.js.nextToken()
+
+	if err != nil {
+		ejp.err = err
+		ejp.s = jpsInvalidState
+		return
+	}
+
+	valid := ejp.validateToken(jt.t)
+	if !valid {
+		ejp.err = unexpectedTokenError(jt)
+		ejp.s = jpsInvalidState
+		return
+	}
+
+	switch jt.t {
+	case jttBeginObject:
+		ejp.s = jpsSawBeginObject
+		ejp.pushMode(jpmObjectMode)
+		ejp.depth++
+
+		if ejp.depth > ejp.maxDepth {
+			ejp.err = nestingDepthError(jt.p, ejp.depth)
+			ejp.s = jpsInvalidState
+		}
+	case jttEndObject:
+		ejp.s = jpsSawEndObject
+		ejp.depth--
+
+		if ejp.popMode() != jpmObjectMode {
+			ejp.err = unexpectedTokenError(jt)
+			ejp.s = jpsInvalidState
+		}
+	case jttBeginArray:
+		ejp.s = jpsSawBeginArray
+		ejp.pushMode(jpmArrayMode)
+	case jttEndArray:
+		ejp.s = jpsSawEndArray
+
+		if ejp.popMode() != jpmArrayMode {
+			ejp.err = unexpectedTokenError(jt)
+			ejp.s = jpsInvalidState
+		}
+	case jttColon:
+		ejp.s = jpsSawColon
+	case jttComma:
+		ejp.s = jpsSawComma
+	case jttEOF:
+		ejp.s = jpsDoneState
+		if len(ejp.m) != 0 {
+			ejp.err = unexpectedTokenError(jt)
+			ejp.s = jpsInvalidState
+		}
+	case jttString:
+		switch ejp.s {
+		case jpsSawComma:
+			if ejp.peekMode() == jpmArrayMode {
+				ejp.s = jpsSawValue
+				ejp.v = extendJSONToken(jt)
+				return
+			}
+			fallthrough
+		case jpsSawBeginObject:
+			ejp.s = jpsSawKey
+			ejp.k = jt.v.(string)
+			return
+		}
+		fallthrough
+	default:
+		ejp.s = jpsSawValue
+		ejp.v = extendJSONToken(jt)
+	}
+}
+
+var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{
+	jpsStartState: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+		jttEOF:         true,
+	},
+	jpsSawBeginObject: {
+		jttEndObject: true,
+		jttString:    true,
+	},
+	jpsSawEndObject: {
+		jttEndObject: true,
+		jttEndArray:  true,
+		jttComma:     true,
+		jttEOF:       true,
+	},
+	jpsSawBeginArray: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttEndArray:    true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+	},
+	jpsSawEndArray: {
+		jttEndObject: true,
+		jttEndArray:  true,
+		jttComma:     true,
+		jttEOF:       true,
+	},
+	jpsSawColon: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+	},
+	jpsSawComma: {
+		jttBeginObject: true,
+		jttBeginArray:  true,
+		jttInt32:       true,
+		jttInt64:       true,
+		jttDouble:      true,
+		jttString:      true,
+		jttBool:        true,
+		jttNull:        true,
+	},
+	jpsSawKey: {
+		jttColon: true,
+	},
+	jpsSawValue: {
+		jttEndObject: true,
+		jttEndArray:  true,
+		jttComma:     true,
+		jttEOF:       true,
+	},
+	jpsDoneState:    {},
+	jpsInvalidState: {},
+}
+
+func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool {
+	switch ejp.s {
+	case jpsSawEndObject:
+		// if we are at depth zero and the next token is a '{',
+		// we can consider it valid only if we are not in array mode.
+		if jtt == jttBeginObject && ejp.depth == 0 {
+			return ejp.peekMode() != jpmArrayMode
+		}
+	case jpsSawComma:
+		switch ejp.peekMode() {
+		// the only valid next token after a comma inside a document is a string (a key)
+		case jpmObjectMode:
+			return jtt == jttString
+		case jpmInvalidMode:
+			return false
+		}
+	}
+
+	_, ok := jpsValidTransitionTokens[ejp.s][jtt]
+	return ok
+}
+
+// ensureExtValueType returns true if the current value has the expected
+// value type for single-key extended JSON types. For example, in
+// {"$numberInt": v}, v must be a string.
+func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool {
+	switch t {
+	case bsontype.MinKey, bsontype.MaxKey:
+		return ejp.v.t == bsontype.Int32
+	case bsontype.Undefined:
+		return ejp.v.t == bsontype.Boolean
+	case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID:
+		return ejp.v.t == bsontype.String
+	default:
+		return false
+	}
+}
+
+func (ejp *extJSONParser) pushMode(m jsonParseMode) {
+	ejp.m = append(ejp.m, m)
+}
+
+func (ejp *extJSONParser) popMode() jsonParseMode {
+	l := len(ejp.m)
+	if l == 0 {
+		return jpmInvalidMode
+	}
+
+	m := ejp.m[l-1]
+	ejp.m = ejp.m[:l-1]
+
+	return m
+}
+
+func (ejp *extJSONParser) peekMode() jsonParseMode {
+	l := len(ejp.m)
+	if l == 0 {
+		return jpmInvalidMode
+	}
+
+	return ejp.m[l-1]
+}
+
+func extendJSONToken(jt *jsonToken) *extJSONValue {
+	var t bsontype.Type
+
+	switch jt.t {
+	case jttInt32:
+		t = bsontype.Int32
+	case jttInt64:
+		t = bsontype.Int64
+	case jttDouble:
+		t = bsontype.Double
+	case jttString:
+		t = bsontype.String
+	case jttBool:
+		t = bsontype.Boolean
+	case jttNull:
+		t = bsontype.Null
+	default:
+		return nil
+	}
+
+	return &extJSONValue{t: t, v: jt.v}
+}
+
+func ensureColon(s jsonParseState, key string) error {
+	if s != jpsSawColon {
+		return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key)
+	}
+
+	return nil
+}
+
+func invalidRequestError(s string) error {
+	return fmt.Errorf("invalid request to read %s", s)
+}
+
+func invalidJSONError(expected string) error {
+	return fmt.Errorf("invalid JSON input; expected %s", expected)
+}
+
+func invalidJSONErrorForType(expected string, t bsontype.Type) error {
+	return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t)
+}
+
+func unexpectedTokenError(jt *jsonToken) error {
+	switch jt.t {
+	case jttInt32, jttInt64, jttDouble:
+		return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p)
+	case jttString:
+		return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p)
+	case jttBool:
+		return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p)
+	case jttNull:
+		return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p)
+	case jttEOF:
+		return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p)
+	default:
+		return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p)
+	}
+}
+
+func nestingDepthError(p, depth int) error {
+	return fmt.Errorf("invalid JSON input; nesting too deep (%d levels) at position %d", depth, p)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_reader.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_reader.go
new file mode 100644
index 0000000..eebd56e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_reader.go
@@ -0,0 +1,659 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON.
+type ExtJSONValueReaderPool struct {
+	pool sync.Pool
+}
+
+// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool.
+func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool {
+	return &ExtJSONValueReaderPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(extJSONValueReader)
+			},
+		},
+	}
+}
+
+// Get retrieves a ValueReader from the pool and uses r as the underlying extended JSON source.
+func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) {
+	vr := bvrp.pool.Get().(*extJSONValueReader)
+	return vr.reset(r, canonical)
+}
+
+// Put inserts a ValueReader into the pool. If the ValueReader is not an ExtJSON ValueReader,
+// nothing is inserted into the pool and ok will be false.
+func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) {
+	bvr, ok := vr.(*extJSONValueReader)
+	if !ok {
+		return false
+	}
+
+	bvr, _ = bvr.reset(nil, false)
+	bvrp.pool.Put(bvr)
+	return true
+}
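+
+// Illustrative usage sketch (the JSON document is hypothetical): readers are
+// recycled through the pool between parses.
+//
+//	pool := NewExtJSONValueReaderPool()
+//	vr, err := pool.Get(strings.NewReader(`{"x": 1}`), false)
+//	if err != nil {
+//		// handle error
+//	}
+//	// ... read values from vr ...
+//	pool.Put(vr)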
+
+type ejvrState struct {
+	mode  mode
+	vType bsontype.Type
+	depth int
+}
+
+// extJSONValueReader is for reading extended JSON.
+type extJSONValueReader struct {
+	p *extJSONParser
+
+	stack []ejvrState
+	frame int
+}
+
+// NewExtJSONValueReader creates a new ValueReader from a given io.Reader.
+// It will interpret the JSON of r as canonical or relaxed according to the
+// given canonical flag.
+func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) {
+	return newExtJSONValueReader(r, canonical)
+}
+
+func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) {
+	ejvr := new(extJSONValueReader)
+	return ejvr.reset(r, canonical)
+}
+
+func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) {
+	p := newExtJSONParser(r, canonical)
+	typ, err := p.peekType()
+
+	if err != nil {
+		return nil, ErrInvalidJSON
+	}
+
+	var m mode
+	switch typ {
+	case bsontype.EmbeddedDocument:
+		m = mTopLevel
+	case bsontype.Array:
+		m = mArray
+	default:
+		m = mValue
+	}
+
+	stack := make([]ejvrState, 1, 5)
+	stack[0] = ejvrState{
+		mode:  m,
+		vType: typ,
+	}
+	return &extJSONValueReader{
+		p:     p,
+		stack: stack,
+	}, nil
+}
+
+func (ejvr *extJSONValueReader) advanceFrame() {
+	if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack
+		length := len(ejvr.stack)
+		if length+1 >= cap(ejvr.stack) {
+			// double it
+			buf := make([]ejvrState, 2*cap(ejvr.stack)+1)
+			copy(buf, ejvr.stack)
+			ejvr.stack = buf
+		}
+		ejvr.stack = ejvr.stack[:length+1]
+	}
+	ejvr.frame++
+
+	// Clean the stack
+	ejvr.stack[ejvr.frame].mode = 0
+	ejvr.stack[ejvr.frame].vType = 0
+	ejvr.stack[ejvr.frame].depth = 0
+}
+
+func (ejvr *extJSONValueReader) pushDocument() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mDocument
+	ejvr.stack[ejvr.frame].depth = ejvr.p.depth
+}
+
+func (ejvr *extJSONValueReader) pushCodeWithScope() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mCodeWithScope
+}
+
+func (ejvr *extJSONValueReader) pushArray() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mArray
+}
+
+func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = m
+	ejvr.stack[ejvr.frame].vType = t
+}
+
+func (ejvr *extJSONValueReader) pop() {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+		ejvr.frame--
+	case mDocument, mArray, mCodeWithScope:
+		ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc...
+	}
+}
+
+func (ejvr *extJSONValueReader) skipDocument() error {
+	// read entire document until ErrEOD (using readKey and readValue)
+	_, typ, err := ejvr.p.readKey()
+	for err == nil {
+		_, err = ejvr.p.readValue(typ)
+		if err != nil {
+			break
+		}
+
+		_, typ, err = ejvr.p.readKey()
+	}
+
+	return err
+}
+
+func (ejvr *extJSONValueReader) skipArray() error {
+	// read entire array until ErrEOA (using peekType)
+	_, err := ejvr.p.peekType()
+	for err == nil {
+		_, err = ejvr.p.peekType()
+	}
+
+	return err
+}
+
+func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     ejvr.stack[ejvr.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "read",
+	}
+	if ejvr.frame != 0 {
+		te.parent = ejvr.stack[ejvr.frame-1].mode
+	}
+	return te
+}
+
+func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error {
+	return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t)
+}
+
+func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+		if ejvr.stack[ejvr.frame].vType != t {
+			return ejvr.typeError(t)
+		}
+	default:
+		modes := []mode{mElement, mValue}
+		if addModes != nil {
+			modes = append(modes, addModes...)
+		}
+		return ejvr.invalidTransitionErr(destination, callerName, modes)
+	}
+
+	return nil
+}
+
+func (ejvr *extJSONValueReader) Type() bsontype.Type {
+	return ejvr.stack[ejvr.frame].vType
+}
+
+func (ejvr *extJSONValueReader) Skip() error {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+	default:
+		return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue})
+	}
+
+	defer ejvr.pop()
+
+	t := ejvr.stack[ejvr.frame].vType
+	switch t {
+	case bsontype.Array:
+		// read entire array until ErrEOA
+		err := ejvr.skipArray()
+		if err != ErrEOA {
+			return err
+		}
+	case bsontype.EmbeddedDocument:
+		// read entire doc until ErrEOD
+		err := ejvr.skipDocument()
+		if err != ErrEOD {
+			return err
+		}
+	case bsontype.CodeWithScope:
+		// read the code portion and set up parser in document mode
+		_, err := ejvr.p.readValue(t)
+		if err != nil {
+			return err
+		}
+
+		// read until ErrEOD
+		err = ejvr.skipDocument()
+		if err != ErrEOD {
+			return err
+		}
+	default:
+		_, err := ejvr.p.readValue(t)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mTopLevel: // allow reading array from top level
+	case mArray:
+		return ejvr, nil
+	default:
+		if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil {
+			return nil, err
+		}
+	}
+
+	ejvr.pushArray()
+
+	return ejvr, nil
+}
+
+func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) {
+	if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil {
+		return nil, 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Binary)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	b, btype, err = v.parseBinary()
+
+	ejvr.pop()
+	return b, btype, err
+}
+
+func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) {
+	if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil {
+		return false, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Boolean)
+	if err != nil {
+		return false, err
+	}
+
+	if v.t != bsontype.Boolean {
+		return false, fmt.Errorf("expected type bool, but got type %s", v.t)
+	}
+
+	ejvr.pop()
+	return v.v.(bool), nil
+}
+
+func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mTopLevel:
+		return ejvr, nil
+	case mElement, mValue:
+		if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument {
+			return nil, ejvr.typeError(bsontype.EmbeddedDocument)
+		}
+
+		ejvr.pushDocument()
+		return ejvr, nil
+	default:
+		return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue})
+	}
+}
+
+func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) {
+	if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil {
+		return "", nil, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.CodeWithScope)
+	if err != nil {
+		return "", nil, err
+	}
+
+	code, err = v.parseJavascript()
+
+	ejvr.pushCodeWithScope()
+	return code, ejvr, err
+}
+
+func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) {
+	if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil {
+		return "", primitive.NilObjectID, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.DBPointer)
+	if err != nil {
+		return "", primitive.NilObjectID, err
+	}
+
+	ns, oid, err = v.parseDBPointer()
+
+	ejvr.pop()
+	return ns, oid, err
+}
+
+func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) {
+	if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.DateTime)
+	if err != nil {
+		return 0, err
+	}
+
+	d, err := v.parseDateTime()
+
+	ejvr.pop()
+	return d, err
+}
+
+func (ejvr *extJSONValueReader) ReadDecimal128() (primitive.Decimal128, error) {
+	if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Decimal128)
+	if err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	d, err := v.parseDecimal128()
+
+	ejvr.pop()
+	return d, err
+}
+
+func (ejvr *extJSONValueReader) ReadDouble() (float64, error) {
+	if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Double)
+	if err != nil {
+		return 0, err
+	}
+
+	d, err := v.parseDouble()
+
+	ejvr.pop()
+	return d, err
+}
+
+func (ejvr *extJSONValueReader) ReadInt32() (int32, error) {
+	if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Int32)
+	if err != nil {
+		return 0, err
+	}
+
+	i, err := v.parseInt32()
+
+	ejvr.pop()
+	return i, err
+}
+
+func (ejvr *extJSONValueReader) ReadInt64() (int64, error) {
+	if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Int64)
+	if err != nil {
+		return 0, err
+	}
+
+	i, err := v.parseInt64()
+
+	ejvr.pop()
+	return i, err
+}
+
+func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) {
+	if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil {
+		return "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.JavaScript)
+	if err != nil {
+		return "", err
+	}
+
+	code, err = v.parseJavascript()
+
+	ejvr.pop()
+	return code, err
+}
+
+func (ejvr *extJSONValueReader) ReadMaxKey() error {
+	if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.MaxKey)
+	if err != nil {
+		return err
+	}
+
+	err = v.parseMinMaxKey("max")
+
+	ejvr.pop()
+	return err
+}
+
+func (ejvr *extJSONValueReader) ReadMinKey() error {
+	if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.MinKey)
+	if err != nil {
+		return err
+	}
+
+	err = v.parseMinMaxKey("min")
+
+	ejvr.pop()
+	return err
+}
+
+func (ejvr *extJSONValueReader) ReadNull() error {
+	if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Null)
+	if err != nil {
+		return err
+	}
+
+	if v.t != bsontype.Null {
+		return fmt.Errorf("expected type null but got type %s", v.t)
+	}
+
+	ejvr.pop()
+	return nil
+}
+
+func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) {
+	if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.ObjectID)
+	if err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	oid, err := v.parseObjectID()
+
+	ejvr.pop()
+	return oid, err
+}
+
+func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) {
+	if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil {
+		return "", "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Regex)
+	if err != nil {
+		return "", "", err
+	}
+
+	pattern, options, err = v.parseRegex()
+
+	ejvr.pop()
+	return pattern, options, err
+}
+
+func (ejvr *extJSONValueReader) ReadString() (string, error) {
+	if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil {
+		return "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.String)
+	if err != nil {
+		return "", err
+	}
+
+	if v.t != bsontype.String {
+		return "", fmt.Errorf("expected type string but got type %s", v.t)
+	}
+
+	ejvr.pop()
+	return v.v.(string), nil
+}
+
+func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) {
+	if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil {
+		return "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Symbol)
+	if err != nil {
+		return "", err
+	}
+
+	symbol, err = v.parseSymbol()
+
+	ejvr.pop()
+	return symbol, err
+}
+
+func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) {
+	if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil {
+		return 0, 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Timestamp)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	t, i, err = v.parseTimestamp()
+
+	ejvr.pop()
+	return t, i, err
+}
+
+func (ejvr *extJSONValueReader) ReadUndefined() error {
+	if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Undefined)
+	if err != nil {
+		return err
+	}
+
+	err = v.parseUndefined()
+
+	ejvr.pop()
+	return err
+}
+
+func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mTopLevel, mDocument, mCodeWithScope:
+	default:
+		return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope})
+	}
+
+	name, t, err := ejvr.p.readKey()
+
+	if err != nil {
+		if err == ErrEOD {
+			if ejvr.stack[ejvr.frame].mode == mCodeWithScope {
+				_, err := ejvr.p.peekType()
+				if err != nil {
+					return "", nil, err
+				}
+			}
+
+			ejvr.pop()
+		}
+
+		return "", nil, err
+	}
+
+	ejvr.push(mElement, t)
+	return name, ejvr, nil
+}
+
+func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mArray:
+	default:
+		return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray})
+	}
+
+	t, err := ejvr.p.peekType()
+	if err != nil {
+		if err == ErrEOA {
+			ejvr.pop()
+		}
+
+		return nil, err
+	}
+
+	ejvr.push(mValue, t)
+	return ejvr, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_tables.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_tables.go
new file mode 100644
index 0000000..ba39c96
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_tables.go
@@ -0,0 +1,223 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/golang/go by The Go Authors
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package bsonrw
+
+import "unicode/utf8"
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML <script> tags, without any additional escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), the backslash character ("\"), HTML opening and closing
+// tags ("<" and ">"), and the ampersand ("&").
+var htmlSafeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      false,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      false,
+	'=':      true,
+	'>':      false,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_wrappers.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_wrappers.go
new file mode 100644
index 0000000..196662c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_wrappers.go
@@ -0,0 +1,481 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
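+// wrapperKeyBSONType maps an extended JSON wrapper key to the BSON type it
+// encodes; for example, "$numberLong" maps to bsontype.Int64 and "$oid" to
+// bsontype.ObjectID. Keys that are not wrappers, including the dbref keys
+// $ref, $id, and $db, map to bsontype.EmbeddedDocument.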
+func wrapperKeyBSONType(key string) bsontype.Type {
+	switch key {
+	case "$numberInt":
+		return bsontype.Int32
+	case "$numberLong":
+		return bsontype.Int64
+	case "$oid":
+		return bsontype.ObjectID
+	case "$symbol":
+		return bsontype.Symbol
+	case "$numberDouble":
+		return bsontype.Double
+	case "$numberDecimal":
+		return bsontype.Decimal128
+	case "$binary":
+		return bsontype.Binary
+	case "$code":
+		return bsontype.JavaScript
+	case "$scope":
+		return bsontype.CodeWithScope
+	case "$timestamp":
+		return bsontype.Timestamp
+	case "$regularExpression":
+		return bsontype.Regex
+	case "$dbPointer":
+		return bsontype.DBPointer
+	case "$date":
+		return bsontype.DateTime
+	case "$ref":
+		fallthrough
+	case "$id":
+		fallthrough
+	case "$db":
+		return bsontype.EmbeddedDocument // dbrefs aren't bson types
+	case "$minKey":
+		return bsontype.MinKey
+	case "$maxKey":
+		return bsontype.MaxKey
+	case "$undefined":
+		return bsontype.Undefined
+	}
+
+	return bsontype.EmbeddedDocument
+}
+
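+// parseBinary extracts the payload and subtype from a $binary wrapper object
+// such as {"$binary": {"base64": "AQID", "subType": "00"}} (illustrative
+// input; the keys may appear in either order).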
+func (ejv *extJSONValue) parseBinary() (b []byte, subType byte, err error) {
+	if ejv.t != bsontype.EmbeddedDocument {
+		return nil, 0, fmt.Errorf("$binary value should be object, but instead is %s", ejv.t)
+	}
+
+	binObj := ejv.v.(*extJSONObject)
+	bFound := false
+	stFound := false
+
+	for i, key := range binObj.keys {
+		val := binObj.values[i]
+
+		switch key {
+		case "base64":
+			if bFound {
+				return nil, 0, errors.New("duplicate base64 key in $binary")
+			}
+
+			if val.t != bsontype.String {
+				return nil, 0, fmt.Errorf("$binary base64 value should be string, but instead is %s", val.t)
+			}
+
+			base64Bytes, err := base64.StdEncoding.DecodeString(val.v.(string))
+			if err != nil {
+				return nil, 0, fmt.Errorf("invalid $binary base64 string: %s", val.v.(string))
+			}
+
+			b = base64Bytes
+			bFound = true
+		case "subType":
+			if stFound {
+				return nil, 0, errors.New("duplicate subType key in $binary")
+			}
+
+			if val.t != bsontype.String {
+				return nil, 0, fmt.Errorf("$binary subType value should be string, but instead is %s", val.t)
+			}
+
+			i, err := strconv.ParseInt(val.v.(string), 16, 64)
+			if err != nil {
+				return nil, 0, fmt.Errorf("invalid $binary subType string: %s", val.v.(string))
+			}
+
+			subType = byte(i)
+			stFound = true
+		default:
+			return nil, 0, fmt.Errorf("invalid key in $binary object: %s", key)
+		}
+	}
+
+	if !bFound {
+		return nil, 0, errors.New("missing base64 field in $binary object")
+	}
+
+	if !stFound {
+		return nil, 0, errors.New("missing subType field in $binary object")
+
+	}
+
+	return b, subType, nil
+}
+
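+// parseDBPointer extracts the namespace and ObjectID from a $dbPointer
+// wrapper. By the time this runs, the parser has reduced the inner
+// {"$oid": ...} wrapper to its hex string, so $id is expected to be a plain
+// string.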
+func (ejv *extJSONValue) parseDBPointer() (ns string, oid primitive.ObjectID, err error) {
+	if ejv.t != bsontype.EmbeddedDocument {
+		return "", primitive.NilObjectID, fmt.Errorf("$dbPointer value should be object, but instead is %s", ejv.t)
+	}
+
+	dbpObj := ejv.v.(*extJSONObject)
+	oidFound := false
+	nsFound := false
+
+	for i, key := range dbpObj.keys {
+		val := dbpObj.values[i]
+
+		switch key {
+		case "$ref":
+			if nsFound {
+				return "", primitive.NilObjectID, errors.New("duplicate $ref key in $dbPointer")
+			}
+
+			if val.t != bsontype.String {
+				return "", primitive.NilObjectID, fmt.Errorf("$dbPointer $ref value should be string, but instead is %s", val.t)
+			}
+
+			ns = val.v.(string)
+			nsFound = true
+		case "$id":
+			if oidFound {
+				return "", primitive.NilObjectID, errors.New("duplicate $id key in $dbPointer")
+			}
+
+			if val.t != bsontype.String {
+				return "", primitive.NilObjectID, fmt.Errorf("$dbPointer $id value should be string, but instead is %s", val.t)
+			}
+
+			oid, err = primitive.ObjectIDFromHex(val.v.(string))
+			if err != nil {
+				return "", primitive.NilObjectID, err
+			}
+
+			oidFound = true
+		default:
+			return "", primitive.NilObjectID, fmt.Errorf("invalid key in $dbPointer object: %s", key)
+		}
+	}
+
+	if !nsFound {
+		return "", oid, errors.New("missing $ref field in $dbPointer object")
+	}
+
+	if !oidFound {
+		return "", oid, errors.New("missing $id field in $dbPointer object")
+	}
+
+	return ns, oid, nil
+}
+
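+// rfc3339Milli is the RFC 3339 layout with millisecond precision used to
+// parse and format relaxed extended JSON $date strings.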
+const rfc3339Milli = "2006-01-02T15:04:05.999Z07:00"
+
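+// parseDateTime returns milliseconds since the Unix epoch. It accepts the
+// relaxed string form, e.g. {"$date": "2006-01-02T15:04:05.999Z"}, the
+// canonical form, e.g. {"$date": {"$numberLong": "1136214245999"}}, and bare
+// int32/int64 millisecond values.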
+func (ejv *extJSONValue) parseDateTime() (int64, error) {
+	switch ejv.t {
+	case bsontype.Int32:
+		return int64(ejv.v.(int32)), nil
+	case bsontype.Int64:
+		return ejv.v.(int64), nil
+	case bsontype.String:
+		return parseDatetimeString(ejv.v.(string))
+	case bsontype.EmbeddedDocument:
+		return parseDatetimeObject(ejv.v.(*extJSONObject))
+	default:
+		return 0, fmt.Errorf("$date value should be string or object, but instead is %s", ejv.t)
+	}
+}
+
+func parseDatetimeString(data string) (int64, error) {
+	t, err := time.Parse(rfc3339Milli, data)
+	if err != nil {
+		return 0, fmt.Errorf("invalid $date value string: %s", data)
+	}
+
+	return t.UnixNano() / 1e6, nil
+}
+
+func parseDatetimeObject(data *extJSONObject) (d int64, err error) {
+	dFound := false
+
+	for i, key := range data.keys {
+		val := data.values[i]
+
+		switch key {
+		case "$numberLong":
+			if dFound {
+				return 0, errors.New("duplicate $numberLong key in $date")
+			}
+
+			if val.t != bsontype.String {
+				return 0, fmt.Errorf("$date $numberLong field should be string, but instead is %s", val.t)
+			}
+
+			d, err = val.parseInt64()
+			if err != nil {
+				return 0, err
+			}
+			dFound = true
+		default:
+			return 0, fmt.Errorf("invalid key in $date object: %s", key)
+		}
+	}
+
+	if !dFound {
+		return 0, errors.New("missing $numberLong field in $date object")
+	}
+
+	return d, nil
+}
+
+func (ejv *extJSONValue) parseDecimal128() (primitive.Decimal128, error) {
+	if ejv.t != bsontype.String {
+		return primitive.Decimal128{}, fmt.Errorf("$numberDecimal value should be string, but instead is %s", ejv.t)
+	}
+
+	d, err := primitive.ParseDecimal128(ejv.v.(string))
+	if err != nil {
+		return primitive.Decimal128{}, fmt.Errorf("$invalid $numberDecimal string: %s", ejv.v.(string))
+	}
+
+	return d, nil
+}
+
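+// parseDouble accepts either a native JSON number or a canonical
+// {"$numberDouble": "..."} string payload, where the string may also be one
+// of the sentinels "Infinity", "-Infinity", or "NaN".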
+func (ejv *extJSONValue) parseDouble() (float64, error) {
+	if ejv.t == bsontype.Double {
+		return ejv.v.(float64), nil
+	}
+
+	if ejv.t != bsontype.String {
+		return 0, fmt.Errorf("$numberDouble value should be string, but instead is %s", ejv.t)
+	}
+
+	switch ejv.v.(string) {
+	case "Infinity":
+		return math.Inf(1), nil
+	case "-Infinity":
+		return math.Inf(-1), nil
+	case "NaN":
+		return math.NaN(), nil
+	}
+
+	f, err := strconv.ParseFloat(ejv.v.(string), 64)
+	if err != nil {
+		return 0, err
+	}
+
+	return f, nil
+}
+
+func (ejv *extJSONValue) parseInt32() (int32, error) {
+	if ejv.t == bsontype.Int32 {
+		return ejv.v.(int32), nil
+	}
+
+	if ejv.t != bsontype.String {
+		return 0, fmt.Errorf("$numberInt value should be string, but instead is %s", ejv.t)
+	}
+
+	i, err := strconv.ParseInt(ejv.v.(string), 10, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	if i < math.MinInt32 || i > math.MaxInt32 {
+		return 0, fmt.Errorf("$numberInt value should be int32 but instead is int64: %d", i)
+	}
+
+	return int32(i), nil
+}
+
+func (ejv *extJSONValue) parseInt64() (int64, error) {
+	if ejv.t == bsontype.Int64 {
+		return ejv.v.(int64), nil
+	}
+
+	if ejv.t != bsontype.String {
+		return 0, fmt.Errorf("$numberLong value should be string, but instead is %s", ejv.t)
+	}
+
+	i, err := strconv.ParseInt(ejv.v.(string), 10, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	return i, nil
+}
+
+func (ejv *extJSONValue) parseJavascript() (code string, err error) {
+	if ejv.t != bsontype.String {
+		return "", fmt.Errorf("$code value should be string, but instead is %s", ejv.t)
+	}
+
+	return ejv.v.(string), nil
+}
+
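+// parseMinMaxKey validates a {"$minKey": 1} or {"$maxKey": 1} wrapper, whose
+// value must be the int32 1.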
+func (ejv *extJSONValue) parseMinMaxKey(minmax string) error {
+	if ejv.t != bsontype.Int32 {
+		return fmt.Errorf("$%sKey value should be int32, but instead is %s", minmax, ejv.t)
+	}
+
+	if ejv.v.(int32) != 1 {
+		return fmt.Errorf("$%sKey value must be 1, but instead is %d", minmax, ejv.v.(int32))
+	}
+
+	return nil
+}
+
+func (ejv *extJSONValue) parseObjectID() (primitive.ObjectID, error) {
+	if ejv.t != bsontype.String {
+		return primitive.NilObjectID, fmt.Errorf("$oid value should be string, but instead is %s", ejv.t)
+	}
+
+	return primitive.ObjectIDFromHex(ejv.v.(string))
+}
+
+func (ejv *extJSONValue) parseRegex() (pattern, options string, err error) {
+	if ejv.t != bsontype.EmbeddedDocument {
+		return "", "", fmt.Errorf("$regularExpression value should be object, but instead is %s", ejv.t)
+	}
+
+	regexObj := ejv.v.(*extJSONObject)
+	patFound := false
+	optFound := false
+
+	for i, key := range regexObj.keys {
+		val := regexObj.values[i]
+
+		switch key {
+		case "pattern":
+			if patFound {
+				return "", "", errors.New("duplicate pattern key in $regularExpression")
+			}
+
+			if val.t != bsontype.String {
+				return "", "", fmt.Errorf("$regularExpression pattern value should be string, but instead is %s", val.t)
+			}
+
+			pattern = val.v.(string)
+			patFound = true
+		case "options":
+			if optFound {
+				return "", "", errors.New("duplicate options key in $regularExpression")
+			}
+
+			if val.t != bsontype.String {
+				return "", "", fmt.Errorf("$regularExpression options value should be string, but instead is %s", val.t)
+			}
+
+			options = val.v.(string)
+			optFound = true
+		default:
+			return "", "", fmt.Errorf("invalid key in $regularExpression object: %s", key)
+		}
+	}
+
+	if !patFound {
+		return "", "", errors.New("missing pattern field in $regularExpression object")
+	}
+
+	if !optFound {
+		return "", "", errors.New("missing options field in $regularExpression object")
+
+	}
+
+	return pattern, options, nil
+}
+
+func (ejv *extJSONValue) parseSymbol() (string, error) {
+	if ejv.t != bsontype.String {
+		return "", fmt.Errorf("$symbol value should be string, but instead is %s", ejv.t)
+	}
+
+	return ejv.v.(string), nil
+}
+
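+// parseTimestamp extracts the seconds (t) and increment (i) components from a
+// wrapper such as {"$timestamp": {"t": 42, "i": 1}}; both must fit in uint32.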
+func (ejv *extJSONValue) parseTimestamp() (t, i uint32, err error) {
+	if ejv.t != bsontype.EmbeddedDocument {
+		return 0, 0, fmt.Errorf("$timestamp value should be object, but instead is %s", ejv.t)
+	}
+
+	handleKey := func(key string, val *extJSONValue, flag bool) (uint32, error) {
+		if flag {
+			return 0, fmt.Errorf("duplicate %s key in $timestamp", key)
+		}
+
+		switch val.t {
+		case bsontype.Int32:
+			if val.v.(int32) < 0 {
+				return 0, fmt.Errorf("$timestamp %s number should be uint32: %s", key, string(val.v.(int32)))
+			}
+
+			return uint32(val.v.(int32)), nil
+		case bsontype.Int64:
+			if val.v.(int64) < 0 || uint32(val.v.(int64)) > math.MaxUint32 {
+				return 0, fmt.Errorf("$timestamp %s number should be uint32: %s", key, string(val.v.(int32)))
+			}
+
+			return uint32(val.v.(int64)), nil
+		default:
+			return 0, fmt.Errorf("$timestamp %s value should be uint32, but instead is %s", key, val.t)
+		}
+	}
+
+	tsObj := ejv.v.(*extJSONObject)
+	tFound := false
+	iFound := false
+
+	for j, key := range tsObj.keys {
+		val := tsObj.values[j]
+
+		switch key {
+		case "t":
+			if t, err = handleKey(key, val, tFound); err != nil {
+				return 0, 0, err
+			}
+
+			tFound = true
+		case "i":
+			if i, err = handleKey(key, val, iFound); err != nil {
+				return 0, 0, err
+			}
+
+			iFound = true
+		default:
+			return 0, 0, fmt.Errorf("invalid key in $timestamp object: %s", key)
+		}
+	}
+
+	if !tFound {
+		return 0, 0, errors.New("missing t field in $timestamp object")
+	}
+
+	if !iFound {
+		return 0, 0, errors.New("missing i field in $timestamp object")
+	}
+
+	return t, i, nil
+}
+
+func (ejv *extJSONValue) parseUndefined() error {
+	if ejv.t != bsontype.Boolean {
+		return fmt.Errorf("undefined value should be boolean, but instead is %s", ejv.t)
+	}
+
+	if !ejv.v.(bool) {
+		return fmt.Errorf("$undefined balue boolean should be true, but instead is %v", ejv.v.(bool))
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_writer.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_writer.go
new file mode 100644
index 0000000..04fbb56
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_writer.go
@@ -0,0 +1,734 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"io"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+)
+
+var ejvwPool = sync.Pool{
+	New: func() interface{} {
+		return new(extJSONValueWriter)
+	},
+}
+
+// ExtJSONValueWriterPool is a pool for ExtJSON ValueWriters.
+type ExtJSONValueWriterPool struct {
+	pool sync.Pool
+}
+
+// NewExtJSONValueWriterPool creates a new pool for ValueWriter instances that write to ExtJSON.
+func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool {
+	return &ExtJSONValueWriterPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(extJSONValueWriter)
+			},
+		},
+	}
+}
+
+// Get retrieves an ExtJSON ValueWriter from the pool and resets it to use w as the destination.
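+// A typical round-trip looks like the following (an illustrative sketch only):
+//
+//	pool := NewExtJSONValueWriterPool()
+//	var buf bytes.Buffer
+//	vw := pool.Get(&buf, false, false)
+//	// ... write a document through vw ...
+//	pool.Put(vw)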
+func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool) ValueWriter {
+	vw := bvwp.pool.Get().(*extJSONValueWriter)
+	if writer, ok := w.(*SliceWriter); ok {
+		vw.reset(*writer, canonical, escapeHTML)
+		vw.w = writer
+		return vw
+	}
+	// Reset the pooled writer's state so stale canonical/escapeHTML settings
+	// and stack frames do not leak between uses.
+	vw.reset(vw.buf[:0], canonical, escapeHTML)
+	vw.w = w
+	return vw
+}
+
+// Put inserts a ValueWriter into the pool. If the ValueWriter is not an ExtJSON ValueWriter, nothing
+// happens and ok will be false.
+func (bvwp *ExtJSONValueWriterPool) Put(vw ValueWriter) (ok bool) {
+	bvw, ok := vw.(*extJSONValueWriter)
+	if !ok {
+		return false
+	}
+
+	if _, ok := bvw.w.(*SliceWriter); ok {
+		bvw.buf = nil
+	}
+	bvw.w = nil
+
+	bvwp.pool.Put(bvw)
+	return true
+}
+
+type ejvwState struct {
+	mode mode
+}
+
+type extJSONValueWriter struct {
+	w   io.Writer
+	buf []byte
+
+	stack      []ejvwState
+	frame      int64
+	canonical  bool
+	escapeHTML bool
+}
+
+// NewExtJSONValueWriter creates a ValueWriter that writes Extended JSON to w.
+func NewExtJSONValueWriter(w io.Writer, canonical, escapeHTML bool) (ValueWriter, error) {
+	if w == nil {
+		return nil, errNilWriter
+	}
+
+	return newExtJSONWriter(w, canonical, escapeHTML), nil
+}
+
+func newExtJSONWriter(w io.Writer, canonical, escapeHTML bool) *extJSONValueWriter {
+	stack := make([]ejvwState, 1, 5)
+	stack[0] = ejvwState{mode: mTopLevel}
+
+	return &extJSONValueWriter{
+		w:          w,
+		buf:        []byte{},
+		stack:      stack,
+		canonical:  canonical,
+		escapeHTML: escapeHTML,
+	}
+}
+
+func newExtJSONWriterFromSlice(buf []byte, canonical, escapeHTML bool) *extJSONValueWriter {
+	stack := make([]ejvwState, 1, 5)
+	stack[0] = ejvwState{mode: mTopLevel}
+
+	return &extJSONValueWriter{
+		buf:        buf,
+		stack:      stack,
+		canonical:  canonical,
+		escapeHTML: escapeHTML,
+	}
+}
+
+func (ejvw *extJSONValueWriter) reset(buf []byte, canonical, escapeHTML bool) {
+	if ejvw.stack == nil {
+		ejvw.stack = make([]ejvwState, 1, 5)
+	}
+
+	ejvw.stack = ejvw.stack[:1]
+	ejvw.stack[0] = ejvwState{mode: mTopLevel}
+	ejvw.canonical = canonical
+	ejvw.escapeHTML = escapeHTML
+	ejvw.frame = 0
+	ejvw.buf = buf
+	ejvw.w = nil
+}
+
+func (ejvw *extJSONValueWriter) advanceFrame() {
+	if ejvw.frame+1 >= int64(len(ejvw.stack)) { // We need to grow the stack
+		length := len(ejvw.stack)
+		if length+1 >= cap(ejvw.stack) {
+			// double it
+			buf := make([]ejvwState, 2*cap(ejvw.stack)+1)
+			copy(buf, ejvw.stack)
+			ejvw.stack = buf
+		}
+		ejvw.stack = ejvw.stack[:length+1]
+	}
+	ejvw.frame++
+}
+
+func (ejvw *extJSONValueWriter) push(m mode) {
+	ejvw.advanceFrame()
+
+	ejvw.stack[ejvw.frame].mode = m
+}
+
+func (ejvw *extJSONValueWriter) pop() {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mElement, mValue:
+		ejvw.frame--
+	case mDocument, mArray, mCodeWithScope:
+		ejvw.frame -= 2 // we pop twice to jump over the mElement: mDocument -> mElement -> mDocument/mTopLevel/etc...
+	}
+}
+
+func (ejvw *extJSONValueWriter) invalidTransitionErr(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     ejvw.stack[ejvw.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "write",
+	}
+	if ejvw.frame != 0 {
+		te.parent = ejvw.stack[ejvw.frame-1].mode
+	}
+	return te
+}
+
+func (ejvw *extJSONValueWriter) ensureElementValue(destination mode, callerName string, addmodes ...mode) error {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mElement, mValue:
+	default:
+		modes := []mode{mElement, mValue}
+		if addmodes != nil {
+			modes = append(modes, addmodes...)
+		}
+		return ejvw.invalidTransitionErr(destination, callerName, modes)
+	}
+
+	return nil
+}
+
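+// writeExtendedSingleValue appends a single-key wrapper document to the
+// buffer; for example, ("minKey", "1", false) yields {"$minKey":1} and
+// ("oid", "abc", true) yields {"$oid":"abc"}.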
+func (ejvw *extJSONValueWriter) writeExtendedSingleValue(key string, value string, quotes bool) {
+	var s string
+	if quotes {
+		s = fmt.Sprintf(`{"$%s":"%s"}`, key, value)
+	} else {
+		s = fmt.Sprintf(`{"$%s":%s}`, key, value)
+	}
+
+	ejvw.buf = append(ejvw.buf, []byte(s)...)
+}
+
+func (ejvw *extJSONValueWriter) WriteArray() (ArrayWriter, error) {
+	if err := ejvw.ensureElementValue(mArray, "WriteArray"); err != nil {
+		return nil, err
+	}
+
+	ejvw.buf = append(ejvw.buf, '[')
+
+	ejvw.push(mArray)
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteBinary(b []byte) error {
+	return ejvw.WriteBinaryWithSubtype(b, 0x00)
+}
+
+func (ejvw *extJSONValueWriter) WriteBinaryWithSubtype(b []byte, btype byte) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteBinaryWithSubtype"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$binary":{"base64":"`)
+	buf.WriteString(base64.StdEncoding.EncodeToString(b))
+	buf.WriteString(fmt.Sprintf(`","subType":"%02x"}},`, btype))
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteBoolean(b bool) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteBoolean"); err != nil {
+		return err
+	}
+
+	ejvw.buf = append(ejvw.buf, []byte(strconv.FormatBool(b))...)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteCodeWithScope(code string) (DocumentWriter, error) {
+	if err := ejvw.ensureElementValue(mCodeWithScope, "WriteCodeWithScope"); err != nil {
+		return nil, err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$code":`)
+	writeStringWithEscapes(code, &buf, ejvw.escapeHTML)
+	buf.WriteString(`,"$scope":{`)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.push(mCodeWithScope)
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDBPointer(ns string, oid primitive.ObjectID) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteDBPointer"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$dbPointer":{"$ref":"`)
+	buf.WriteString(ns)
+	buf.WriteString(`","$id":{"$oid":"`)
+	buf.WriteString(oid.Hex())
+	buf.WriteString(`"}}},`)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDateTime(dt int64) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteDateTime"); err != nil {
+		return err
+	}
+
+	t := time.Unix(dt/1e3, dt%1e3*1e6).UTC()
+
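+	// Relaxed output uses an RFC 3339 string only for dates representable in
+	// that format (years 1970 through 9999); canonical output and
+	// out-of-range dates fall back to the {"$date":{"$numberLong":...}} form.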
+	if ejvw.canonical || t.Year() < 1970 || t.Year() > 9999 {
+		s := fmt.Sprintf(`{"$numberLong":"%d"}`, dt)
+		ejvw.writeExtendedSingleValue("date", s, false)
+	} else {
+		ejvw.writeExtendedSingleValue("date", t.Format(rfc3339Milli), true)
+	}
+
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDecimal128(d primitive.Decimal128) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteDecimal128"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("numberDecimal", d.String(), true)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDocument() (DocumentWriter, error) {
+	if ejvw.stack[ejvw.frame].mode == mTopLevel {
+		ejvw.buf = append(ejvw.buf, '{')
+		return ejvw, nil
+	}
+
+	if err := ejvw.ensureElementValue(mDocument, "WriteDocument", mTopLevel); err != nil {
+		return nil, err
+	}
+
+	ejvw.buf = append(ejvw.buf, '{')
+	ejvw.push(mDocument)
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDouble(f float64) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteDouble"); err != nil {
+		return err
+	}
+
+	s := formatDouble(f)
+
+	if ejvw.canonical {
+		ejvw.writeExtendedSingleValue("numberDouble", s, true)
+	} else {
+		switch s {
+		case "Infinity":
+			fallthrough
+		case "-Infinity":
+			fallthrough
+		case "NaN":
+			s = fmt.Sprintf(`{"$numberDouble":"%s"}`, s)
+		}
+		ejvw.buf = append(ejvw.buf, []byte(s)...)
+	}
+
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteInt32(i int32) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteInt32"); err != nil {
+		return err
+	}
+
+	s := strconv.FormatInt(int64(i), 10)
+
+	if ejvw.canonical {
+		ejvw.writeExtendedSingleValue("numberInt", s, true)
+	} else {
+		ejvw.buf = append(ejvw.buf, []byte(s)...)
+	}
+
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteInt64(i int64) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteInt64"); err != nil {
+		return err
+	}
+
+	s := strconv.FormatInt(i, 10)
+
+	if ejvw.canonical {
+		ejvw.writeExtendedSingleValue("numberLong", s, true)
+	} else {
+		ejvw.buf = append(ejvw.buf, []byte(s)...)
+	}
+
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteJavascript(code string) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteJavascript"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	writeStringWithEscapes(code, &buf, ejvw.escapeHTML)
+
+	ejvw.writeExtendedSingleValue("code", buf.String(), false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteMaxKey() error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteMaxKey"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("maxKey", "1", false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteMinKey() error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteMinKey"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("minKey", "1", false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteNull() error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteNull"); err != nil {
+		return err
+	}
+
+	ejvw.buf = append(ejvw.buf, []byte("null")...)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteObjectID(oid primitive.ObjectID) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteObjectID"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("oid", oid.Hex(), true)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteRegex(pattern string, options string) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteRegex"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$regularExpression":{"pattern":`)
+	writeStringWithEscapes(pattern, &buf, ejvw.escapeHTML)
+	buf.WriteString(`,"options":"`)
+	buf.WriteString(sortStringAlphebeticAscending(options))
+	buf.WriteString(`"}},`)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteString(s string) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteString"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	writeStringWithEscapes(s, &buf, ejvw.escapeHTML)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteSymbol(symbol string) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteSymbol"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	writeStringWithEscapes(symbol, &buf, ejvw.escapeHTML)
+
+	ejvw.writeExtendedSingleValue("symbol", buf.String(), false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteTimestamp(t uint32, i uint32) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteTimestamp"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$timestamp":{"t":`)
+	buf.WriteString(strconv.FormatUint(uint64(t), 10))
+	buf.WriteString(`,"i":`)
+	buf.WriteString(strconv.FormatUint(uint64(i), 10))
+	buf.WriteString(`}},`)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteUndefined() error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteUndefined"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("undefined", "true", false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDocumentElement(key string) (ValueWriter, error) {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mDocument, mTopLevel, mCodeWithScope:
+		ejvw.buf = append(ejvw.buf, []byte(fmt.Sprintf(`"%s":`, key))...)
+		ejvw.push(mElement)
+	default:
+		return nil, ejvw.invalidTransitionErr(mElement, "WriteDocumentElement", []mode{mDocument, mTopLevel, mCodeWithScope})
+	}
+
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDocumentEnd() error {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mDocument, mTopLevel, mCodeWithScope:
+	default:
+		return fmt.Errorf("incorrect mode to end document: %s", ejvw.stack[ejvw.frame].mode)
+	}
+
+	// close the document
+	if ejvw.buf[len(ejvw.buf)-1] == ',' {
+		ejvw.buf[len(ejvw.buf)-1] = '}'
+	} else {
+		ejvw.buf = append(ejvw.buf, '}')
+	}
+
+	switch ejvw.stack[ejvw.frame].mode {
+	case mCodeWithScope:
+		ejvw.buf = append(ejvw.buf, '}')
+		fallthrough
+	case mDocument:
+		ejvw.buf = append(ejvw.buf, ',')
+	case mTopLevel:
+		if ejvw.w != nil {
+			if _, err := ejvw.w.Write(ejvw.buf); err != nil {
+				return err
+			}
+			ejvw.buf = ejvw.buf[:0]
+		}
+	}
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteArrayElement() (ValueWriter, error) {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mArray:
+		ejvw.push(mValue)
+	default:
+		return nil, ejvw.invalidTransitionErr(mValue, "WriteArrayElement", []mode{mArray})
+	}
+
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteArrayEnd() error {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mArray:
+		// close the array
+		if ejvw.buf[len(ejvw.buf)-1] == ',' {
+			ejvw.buf[len(ejvw.buf)-1] = ']'
+		} else {
+			ejvw.buf = append(ejvw.buf, ']')
+		}
+
+		ejvw.buf = append(ejvw.buf, ',')
+
+		ejvw.pop()
+	default:
+		return fmt.Errorf("incorrect mode to end array: %s", ejvw.stack[ejvw.frame].mode)
+	}
+
+	return nil
+}
+
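+// formatDouble renders f the way extended JSON expects; for example,
+// formatDouble(3) yields "3.0" and formatDouble(math.Inf(1)) yields
+// "Infinity".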
+func formatDouble(f float64) string {
+	var s string
+	if math.IsInf(f, 1) {
+		s = "Infinity"
+	} else if math.IsInf(f, -1) {
+		s = "-Infinity"
+	} else if math.IsNaN(f) {
+		s = "NaN"
+	} else {
+		// Print exactly one decimal place for integers; otherwise, print as
+		// many digits as necessary to represent the value exactly.
+		s = strconv.FormatFloat(f, 'G', -1, 64)
+		if !strings.ContainsRune(s, 'E') && !strings.ContainsRune(s, '.') {
+			s += ".0"
+		}
+	}
+
+	return s
+}
+
+var hexChars = "0123456789abcdef"
+
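+// writeStringWithEscapes writes s to buf as a double-quoted JSON string,
+// escaping control characters, quotes, and backslashes, plus <, >, and &
+// when escapeHTML is true. The logic mirrors encoding/json's string encoder.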
+func writeStringWithEscapes(s string, buf *bytes.Buffer, escapeHTML bool) {
+	buf.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
+				i++
+				continue
+			}
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				buf.WriteByte('\\')
+				buf.WriteByte(b)
+			case '\n':
+				buf.WriteByte('\\')
+				buf.WriteByte('n')
+			case '\r':
+				buf.WriteByte('\\')
+				buf.WriteByte('r')
+			case '\t':
+				buf.WriteByte('\\')
+				buf.WriteByte('t')
+			case '\b':
+				buf.WriteByte('\\')
+				buf.WriteByte('b')
+			case '\f':
+				buf.WriteByte('\\')
+				buf.WriteByte('f')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				buf.WriteString(`\u00`)
+				buf.WriteByte(hexChars[b>>4])
+				buf.WriteByte(hexChars[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\u202`)
+			buf.WriteByte(hexChars[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		buf.WriteString(s[start:])
+	}
+	buf.WriteByte('"')
+}
+
+type sortableString []rune
+
+func (ss sortableString) Len() int {
+	return len(ss)
+}
+
+func (ss sortableString) Less(i, j int) bool {
+	return ss[i] < ss[j]
+}
+
+func (ss sortableString) Swap(i, j int) {
+	ss[i], ss[j] = ss[j], ss[i]
+}
+
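+// sortStringAlphebeticAscending sorts the runes of s in ascending order; for
+// example, "xsim" becomes "imsx". It is used to normalize regex option
+// strings before they are written.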
+func sortStringAlphebeticAscending(s string) string {
+	ss := sortableString([]rune(s))
+	sort.Sort(ss)
+	return string([]rune(ss))
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/json_scanner.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/json_scanner.go
new file mode 100644
index 0000000..03aabf5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/json_scanner.go
@@ -0,0 +1,439 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+type jsonTokenType byte
+
+const (
+	jttBeginObject jsonTokenType = iota
+	jttEndObject
+	jttBeginArray
+	jttEndArray
+	jttColon
+	jttComma
+	jttInt32
+	jttInt64
+	jttDouble
+	jttString
+	jttBool
+	jttNull
+	jttEOF
+)
+
+type jsonToken struct {
+	t jsonTokenType
+	v interface{}
+	p int
+}
+
+type jsonScanner struct {
+	r           io.Reader
+	buf         []byte
+	pos         int
+	lastReadErr error
+}
+
+// nextToken returns the next JSON token if one exists. A token is a character
+// of the JSON grammar, a number, a string, or a literal.
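+// For example, the input {"a": 1} produces the token stream jttBeginObject,
+// jttString, jttColon, jttInt32, jttEndObject, jttEOF.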
+func (js *jsonScanner) nextToken() (*jsonToken, error) {
+	c, err := js.readNextByte()
+
+	// keep reading until a non-space is encountered (break on read error or EOF)
+	for isWhiteSpace(c) && err == nil {
+		c, err = js.readNextByte()
+	}
+
+	if err == io.EOF {
+		return &jsonToken{t: jttEOF}, nil
+	} else if err != nil {
+		return nil, err
+	}
+
+	// switch on the character
+	switch c {
+	case '{':
+		return &jsonToken{t: jttBeginObject, v: byte('{'), p: js.pos - 1}, nil
+	case '}':
+		return &jsonToken{t: jttEndObject, v: byte('}'), p: js.pos - 1}, nil
+	case '[':
+		return &jsonToken{t: jttBeginArray, v: byte('['), p: js.pos - 1}, nil
+	case ']':
+		return &jsonToken{t: jttEndArray, v: byte(']'), p: js.pos - 1}, nil
+	case ':':
+		return &jsonToken{t: jttColon, v: byte(':'), p: js.pos - 1}, nil
+	case ',':
+		return &jsonToken{t: jttComma, v: byte(','), p: js.pos - 1}, nil
+	case '"': // RFC-8259 only allows for double quotes (") not single (')
+		return js.scanString()
+	default:
+		// check if it's a number
+		if c == '-' || isDigit(c) {
+			return js.scanNumber(c)
+		} else if c == 't' || c == 'f' || c == 'n' {
+			// maybe a literal
+			return js.scanLiteral(c)
+		} else {
+			return nil, fmt.Errorf("invalid JSON input. Position: %d. Character: %c", js.pos-1, c)
+		}
+	}
+}
+
+// readNextByte attempts to read the next byte from the buffer. If the buffer
+// has been exhausted, this function calls readIntoBuf, thus refilling the
+// buffer and resetting the read position to 0.
+func (js *jsonScanner) readNextByte() (byte, error) {
+	if js.pos >= len(js.buf) {
+		err := js.readIntoBuf()
+
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	b := js.buf[js.pos]
+	js.pos++
+
+	return b, nil
+}
+
+// readNNextBytes reads n bytes into dst, starting at offset
+func (js *jsonScanner) readNNextBytes(dst []byte, n, offset int) error {
+	var err error
+
+	for i := 0; i < n; i++ {
+		dst[i+offset], err = js.readNextByte()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// readIntoBuf reads up to 512 bytes from the scanner's io.Reader into the buffer
+func (js *jsonScanner) readIntoBuf() error {
+	if js.lastReadErr != nil {
+		js.buf = js.buf[:0]
+		js.pos = 0
+		return js.lastReadErr
+	}
+
+	if cap(js.buf) == 0 {
+		js.buf = make([]byte, 0, 512)
+	}
+
+	n, err := js.r.Read(js.buf[:cap(js.buf)])
+	if err != nil {
+		js.lastReadErr = err
+		if n > 0 {
+			err = nil
+		}
+	}
+	js.buf = js.buf[:n]
+	js.pos = 0
+
+	return err
+}
+
+func isWhiteSpace(c byte) bool {
+	return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+func isDigit(c byte) bool {
+	return unicode.IsDigit(rune(c))
+}
+
+func isValueTerminator(c byte) bool {
+	return c == ',' || c == '}' || c == ']' || isWhiteSpace(c)
+}
+
+// scanString reads from an opening '"' to a closing '"' and handles escaped characters
+func (js *jsonScanner) scanString() (*jsonToken, error) {
+	var b bytes.Buffer
+	var c byte
+	var err error
+
+	p := js.pos - 1
+
+	for {
+		c, err = js.readNextByte()
+		if err != nil {
+			if err == io.EOF {
+				return nil, errors.New("end of input in JSON string")
+			}
+			return nil, err
+		}
+
+		switch c {
+		case '\\':
+			c, err = js.readNextByte()
+			switch c {
+			case '"', '\\', '/', '\'':
+				b.WriteByte(c)
+			case 'b':
+				b.WriteByte('\b')
+			case 'f':
+				b.WriteByte('\f')
+			case 'n':
+				b.WriteByte('\n')
+			case 'r':
+				b.WriteByte('\r')
+			case 't':
+				b.WriteByte('\t')
+			case 'u':
+				us := make([]byte, 4)
+				err = js.readNNextBytes(us, 4, 0)
+				if err != nil {
+					return nil, fmt.Errorf("invalid unicode sequence in JSON string: %s", us)
+				}
+
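+				// Decode the \uXXXX escape by round-tripping it through
+				// strconv: quote the raw escape (doubling the backslash),
+				// restore the single backslash, and unquote the result.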
+				s := fmt.Sprintf(`\u%s`, us)
+				s, err = strconv.Unquote(strings.Replace(strconv.Quote(s), `\\u`, `\u`, 1))
+				if err != nil {
+					return nil, err
+				}
+
+				b.WriteString(s)
+			default:
+				return nil, fmt.Errorf("invalid escape sequence in JSON string '\\%c'", c)
+			}
+		case '"':
+			return &jsonToken{t: jttString, v: b.String(), p: p}, nil
+		default:
+			b.WriteByte(c)
+		}
+	}
+}
+
+// scanLiteral reads an unquoted sequence of characters and determines if it is one of
+// three valid JSON literals (true, false, null); if so, it returns the appropriate
+// jsonToken; otherwise, it returns an error
+func (js *jsonScanner) scanLiteral(first byte) (*jsonToken, error) {
+	p := js.pos - 1
+
+	lit := make([]byte, 4)
+	lit[0] = first
+
+	err := js.readNNextBytes(lit, 3, 1)
+	if err != nil {
+		return nil, err
+	}
+
+	c5, err := js.readNextByte()
+
+	if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || err == io.EOF) {
+		js.pos = int(math.Max(0, float64(js.pos-1)))
+		return &jsonToken{t: jttBool, v: true, p: p}, nil
+	} else if bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || err == io.EOF) {
+		js.pos = int(math.Max(0, float64(js.pos-1)))
+		return &jsonToken{t: jttNull, v: nil, p: p}, nil
+	} else if bytes.Equal([]byte("fals"), lit) {
+		if c5 == 'e' {
+			c5, err = js.readNextByte()
+
+			if isValueTerminator(c5) || err == io.EOF {
+				js.pos = int(math.Max(0, float64(js.pos-1)))
+				return &jsonToken{t: jttBool, v: false, p: p}, nil
+			}
+		}
+	}
+
+	return nil, fmt.Errorf("invalid JSON literal. Position: %d, literal: %s", p, lit)
+}
+
+type numberScanState byte
+
+const (
+	nssSawLeadingMinus numberScanState = iota
+	nssSawLeadingZero
+	nssSawIntegerDigits
+	nssSawDecimalPoint
+	nssSawFractionDigits
+	nssSawExponentLetter
+	nssSawExponentSign
+	nssSawExponentDigits
+	nssDone
+	nssInvalid
+)
+
+// scanNumber reads a JSON number (according to RFC-8259)
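+// For example, "0", "-12", and "4096" produce jttInt32 tokens, large integers
+// produce jttInt64 tokens, and "0.5", "1e3", and "-2.5E-4" produce jttDouble
+// tokens.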
+func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) {
+	var b bytes.Buffer
+	var s numberScanState
+	var c byte
+	var err error
+
+	t := jttInt64 // assume it's an int64 until the type can be determined
+	start := js.pos - 1
+
+	b.WriteByte(first)
+
+	switch first {
+	case '-':
+		s = nssSawLeadingMinus
+	case '0':
+		s = nssSawLeadingZero
+	default:
+		s = nssSawIntegerDigits
+	}
+
+	for {
+		c, err = js.readNextByte()
+
+		if err != nil && err != io.EOF {
+			return nil, err
+		}
+
+		switch s {
+		case nssSawLeadingMinus:
+			switch c {
+			case '0':
+				s = nssSawLeadingZero
+				b.WriteByte(c)
+			default:
+				if isDigit(c) {
+					s = nssSawIntegerDigits
+					b.WriteByte(c)
+				} else {
+					s = nssInvalid
+				}
+			}
+		case nssSawLeadingZero:
+			switch c {
+			case '.':
+				s = nssSawDecimalPoint
+				b.WriteByte(c)
+			case 'e', 'E':
+				s = nssSawExponentLetter
+				b.WriteByte(c)
+			case '}', ']', ',':
+				s = nssDone
+			default:
+				if isWhiteSpace(c) || err == io.EOF {
+					s = nssDone
+				} else {
+					s = nssInvalid
+				}
+			}
+		case nssSawIntegerDigits:
+			switch c {
+			case '.':
+				s = nssSawDecimalPoint
+				b.WriteByte(c)
+			case 'e', 'E':
+				s = nssSawExponentLetter
+				b.WriteByte(c)
+			case '}', ']', ',':
+				s = nssDone
+			default:
+				if isWhiteSpace(c) || err == io.EOF {
+					s = nssDone
+				} else if isDigit(c) {
+					s = nssSawIntegerDigits
+					b.WriteByte(c)
+				} else {
+					s = nssInvalid
+				}
+			}
+		case nssSawDecimalPoint:
+			t = jttDouble
+			if isDigit(c) {
+				s = nssSawFractionDigits
+				b.WriteByte(c)
+			} else {
+				s = nssInvalid
+			}
+		case nssSawFractionDigits:
+			switch c {
+			case 'e', 'E':
+				s = nssSawExponentLetter
+				b.WriteByte(c)
+			case '}', ']', ',':
+				s = nssDone
+			default:
+				if isWhiteSpace(c) || err == io.EOF {
+					s = nssDone
+				} else if isDigit(c) {
+					s = nssSawFractionDigits
+					b.WriteByte(c)
+				} else {
+					s = nssInvalid
+				}
+			}
+		case nssSawExponentLetter:
+			t = jttDouble
+			switch c {
+			case '+', '-':
+				s = nssSawExponentSign
+				b.WriteByte(c)
+			default:
+				if isDigit(c) {
+					s = nssSawExponentDigits
+					b.WriteByte(c)
+				} else {
+					s = nssInvalid
+				}
+			}
+		case nssSawExponentSign:
+			if isDigit(c) {
+				s = nssSawExponentDigits
+				b.WriteByte(c)
+			} else {
+				s = nssInvalid
+			}
+		case nssSawExponentDigits:
+			switch c {
+			case '}', ']', ',':
+				s = nssDone
+			default:
+				if isWhiteSpace(c) || err == io.EOF {
+					s = nssDone
+				} else if isDigit(c) {
+					s = nssSawExponentDigits
+					b.WriteByte(c)
+				} else {
+					s = nssInvalid
+				}
+			}
+		}
+
+		switch s {
+		case nssInvalid:
+			return nil, fmt.Errorf("invalid JSON number. Position: %d", start)
+		case nssDone:
+			js.pos = int(math.Max(0, float64(js.pos-1)))
+			if t != jttDouble {
+				v, err := strconv.ParseInt(b.String(), 10, 64)
+				if err == nil {
+					if v < math.MinInt32 || v > math.MaxInt32 {
+						return &jsonToken{t: jttInt64, v: v, p: start}, nil
+					}
+
+					return &jsonToken{t: jttInt32, v: int32(v), p: start}, nil
+				}
+			}
+
+			v, err := strconv.ParseFloat(b.String(), 64)
+			if err != nil {
+				return nil, err
+			}
+
+			return &jsonToken{t: jttDouble, v: v, p: start}, nil
+		}
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/mode.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/mode.go
new file mode 100644
index 0000000..617b5e2
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/mode.go
@@ -0,0 +1,108 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"fmt"
+)
+
+type mode int
+
+const (
+	_ mode = iota
+	mTopLevel
+	mDocument
+	mArray
+	mValue
+	mElement
+	mCodeWithScope
+	mSpacer
+)
+
+func (m mode) String() string {
+	var str string
+
+	switch m {
+	case mTopLevel:
+		str = "TopLevel"
+	case mDocument:
+		str = "DocumentMode"
+	case mArray:
+		str = "ArrayMode"
+	case mValue:
+		str = "ValueMode"
+	case mElement:
+		str = "ElementMode"
+	case mCodeWithScope:
+		str = "CodeWithScopeMode"
+	case mSpacer:
+		str = "CodeWithScopeSpacerFrame"
+	default:
+		str = "UnknownMode"
+	}
+
+	return str
+}
+
+func (m mode) TypeString() string {
+	var str string
+
+	switch m {
+	case mTopLevel:
+		str = "TopLevel"
+	case mDocument:
+		str = "Document"
+	case mArray:
+		str = "Array"
+	case mValue:
+		str = "Value"
+	case mElement:
+		str = "Element"
+	case mCodeWithScope:
+		str = "CodeWithScope"
+	case mSpacer:
+		str = "CodeWithScopeSpacer"
+	default:
+		str = "Unknown"
+	}
+
+	return str
+}
+
+// TransitionError is an error returned when an invalid transition of a
+// ValueReader or ValueWriter state machine occurs. The action field records
+// whether the failed operation was a read or a write.
+type TransitionError struct {
+	name        string
+	parent      mode
+	current     mode
+	destination mode
+	modes       []mode
+	action      string
+}
+
+func (te TransitionError) Error() string {
+	errString := fmt.Sprintf("%s can only %s", te.name, te.action)
+	if te.destination != mode(0) {
+		errString = fmt.Sprintf("%s a %s", errString, te.destination.TypeString())
+	}
+	errString = fmt.Sprintf("%s while positioned on a", errString)
+	for ind, m := range te.modes {
+		if ind != 0 && len(te.modes) > 2 {
+			errString = fmt.Sprintf("%s,", errString)
+		}
+		if ind == len(te.modes)-1 && len(te.modes) > 1 {
+			errString = fmt.Sprintf("%s or", errString)
+		}
+		errString = fmt.Sprintf("%s %s", errString, m.TypeString())
+	}
+	errString = fmt.Sprintf("%s but is positioned on a %s", errString, te.current.TypeString())
+	if te.parent != mode(0) {
+		errString = fmt.Sprintf("%s with parent %s", errString, te.parent.TypeString())
+	}
+	return errString
+}
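+
+// Illustration (editor's note): a typical message produced by Error. For a
+// ReadArray call made while positioned on a top-level document, the result is:
+//
+//	ReadArray can only read a Array while positioned on a Element or Value
+//	but is positioned on a TopLevel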
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/reader.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/reader.go
new file mode 100644
index 0000000..ecae739
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/reader.go
@@ -0,0 +1,63 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// ArrayReader is implemented by types that allow reading values from a BSON
+// array.
+type ArrayReader interface {
+	ReadValue() (ValueReader, error)
+}
+
+// DocumentReader is implemented by types that allow reading elements from a
+// BSON document.
+type DocumentReader interface {
+	ReadElement() (string, ValueReader, error)
+}
+
+// ValueReader is a generic interface used to read values from BSON. This type
+// is implemented by several types with different underlying representations of
+// BSON, such as a bson.Document, raw BSON bytes, or extended JSON.
+type ValueReader interface {
+	Type() bsontype.Type
+	Skip() error
+
+	ReadArray() (ArrayReader, error)
+	ReadBinary() (b []byte, btype byte, err error)
+	ReadBoolean() (bool, error)
+	ReadDocument() (DocumentReader, error)
+	ReadCodeWithScope() (code string, dr DocumentReader, err error)
+	ReadDBPointer() (ns string, oid primitive.ObjectID, err error)
+	ReadDateTime() (int64, error)
+	ReadDecimal128() (primitive.Decimal128, error)
+	ReadDouble() (float64, error)
+	ReadInt32() (int32, error)
+	ReadInt64() (int64, error)
+	ReadJavascript() (code string, err error)
+	ReadMaxKey() error
+	ReadMinKey() error
+	ReadNull() error
+	ReadObjectID() (primitive.ObjectID, error)
+	ReadRegex() (pattern, options string, err error)
+	ReadString() (string, error)
+	ReadSymbol() (symbol string, err error)
+	ReadTimestamp() (t, i uint32, err error)
+	ReadUndefined() error
+}
+
+// BytesReader is a generic interface used to read BSON bytes from a
+// ValueReader. This interface is meant to be a superset of ValueReader, so that
+// types that implement ValueReader may also implement this interface.
+//
+// The bytes of the value will be appended to dst.
+type BytesReader interface {
+	ReadValueBytes(dst []byte) (bsontype.Type, []byte, error)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/value_reader.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/value_reader.go
new file mode 100644
index 0000000..5fc0d8a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/value_reader.go
@@ -0,0 +1,882 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"sync"
+	"unicode"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+var _ ValueReader = (*valueReader)(nil)
+
+var vrPool = sync.Pool{
+	New: func() interface{} {
+		return new(valueReader)
+	},
+}
+
+// BSONValueReaderPool is a pool for ValueReaders that read BSON.
+type BSONValueReaderPool struct {
+	pool sync.Pool
+}
+
+// NewBSONValueReaderPool instantiates a new BSONValueReaderPool.
+func NewBSONValueReaderPool() *BSONValueReaderPool {
+	return &BSONValueReaderPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(valueReader)
+			},
+		},
+	}
+}
+
+// Get retrieves a ValueReader from the pool and uses src as the underlying BSON.
+func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader {
+	vr := bvrp.pool.Get().(*valueReader)
+	vr.reset(src)
+	return vr
+}
+
+// Put inserts a ValueReader into the pool. If the ValueReader is not a BSON ValueReader nothing
+// is inserted into the pool and ok will be false.
+func (bvrp *BSONValueReaderPool) Put(vr ValueReader) (ok bool) {
+	bvr, ok := vr.(*valueReader)
+	if !ok {
+		return false
+	}
+
+	bvr.reset(nil)
+	bvrp.pool.Put(bvr)
+	return true
+}
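+
+// Example (editor's sketch; raw is assumed to hold a complete BSON document):
+//
+//	pool := NewBSONValueReaderPool()
+//	vr := pool.Get(raw) // vr now reads from raw
+//	// ... consume vr ...
+//	pool.Put(vr)        // resets vr and returns it to the pool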
+
+// ErrEOA is the error returned when the end of a BSON array has been reached.
+var ErrEOA = errors.New("end of array")
+
+// ErrEOD is the error returned when the end of a BSON document has been reached.
+var ErrEOD = errors.New("end of document")
+
+type vrState struct {
+	mode  mode
+	vType bsontype.Type
+	end   int64
+}
+
+// valueReader is for reading BSON values.
+type valueReader struct {
+	offset int64
+	d      []byte
+
+	stack []vrState
+	frame int64
+}
+
+// NewBSONDocumentReader returns a ValueReader using b for the underlying BSON
+// representation. Parameter b must be a BSON Document.
+//
+// TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes
+// a []byte while the writer takes an io.Writer. We should have two versions of each, one that takes
+// a []byte and one that takes an io.Reader or io.Writer. The []byte version will need to return a
+// thing that can return the finished []byte since it might be reallocated when appended to.
+func NewBSONDocumentReader(b []byte) ValueReader {
+	return newValueReader(b)
+}
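+
+// Example (editor's sketch): reading the single element of the document
+// {"pi": 3.14159}, assuming doc holds its valid BSON encoding:
+//
+//	vr := NewBSONDocumentReader(doc)
+//	dr, err := vr.ReadDocument()
+//	if err != nil { /* handle */ }
+//	name, evr, err := dr.ReadElement() // name == "pi"
+//	if err != nil { /* handle */ }
+//	f, err := evr.ReadDouble()         // f == 3.14159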
+
+// NewBSONValueReader returns a ValueReader that starts in the Value mode instead of in top
+// level document mode. This enables the creation of a ValueReader for a single BSON value.
+func NewBSONValueReader(t bsontype.Type, val []byte) ValueReader {
+	stack := make([]vrState, 1, 5)
+	stack[0] = vrState{
+		mode:  mValue,
+		vType: t,
+	}
+	return &valueReader{
+		d:     val,
+		stack: stack,
+	}
+}
+
+func newValueReader(b []byte) *valueReader {
+	stack := make([]vrState, 1, 5)
+	stack[0] = vrState{
+		mode: mTopLevel,
+	}
+	return &valueReader{
+		d:     b,
+		stack: stack,
+	}
+}
+
+func (vr *valueReader) reset(b []byte) {
+	if vr.stack == nil {
+		vr.stack = make([]vrState, 1, 5)
+	}
+	vr.stack = vr.stack[:1]
+	vr.stack[0] = vrState{mode: mTopLevel}
+	vr.d = b
+	vr.offset = 0
+	vr.frame = 0
+}
+
+func (vr *valueReader) advanceFrame() {
+	if vr.frame+1 >= int64(len(vr.stack)) { // We need to grow the stack
+		length := len(vr.stack)
+		if length+1 >= cap(vr.stack) {
+			// double it
+			buf := make([]vrState, 2*cap(vr.stack)+1)
+			copy(buf, vr.stack)
+			vr.stack = buf
+		}
+		vr.stack = vr.stack[:length+1]
+	}
+	vr.frame++
+
+	// Clean the stack
+	vr.stack[vr.frame].mode = 0
+	vr.stack[vr.frame].vType = 0
+	vr.stack[vr.frame].end = 0
+}
+
+func (vr *valueReader) pushDocument() error {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mDocument
+
+	size, err := vr.readLength()
+	if err != nil {
+		return err
+	}
+	vr.stack[vr.frame].end = int64(size) + vr.offset - 4
+
+	return nil
+}
+
+func (vr *valueReader) pushArray() error {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mArray
+
+	size, err := vr.readLength()
+	if err != nil {
+		return err
+	}
+	vr.stack[vr.frame].end = int64(size) + vr.offset - 4
+
+	return nil
+}
+
+func (vr *valueReader) pushElement(t bsontype.Type) {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mElement
+	vr.stack[vr.frame].vType = t
+}
+
+func (vr *valueReader) pushValue(t bsontype.Type) {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mValue
+	vr.stack[vr.frame].vType = t
+}
+
+func (vr *valueReader) pushCodeWithScope() (int64, error) {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mCodeWithScope
+
+	size, err := vr.readLength()
+	if err != nil {
+		return 0, err
+	}
+	vr.stack[vr.frame].end = int64(size) + vr.offset - 4
+
+	return int64(size), nil
+}
+
+func (vr *valueReader) pop() {
+	switch vr.stack[vr.frame].mode {
+	case mElement, mValue:
+		vr.frame--
+	case mDocument, mArray, mCodeWithScope:
+		vr.frame -= 2 // we pop twice to jump over the mElement: mDocument -> mElement -> mDocument/mTopLevel/etc...
+	}
+}
+
+func (vr *valueReader) invalidTransitionErr(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     vr.stack[vr.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "read",
+	}
+	if vr.frame != 0 {
+		te.parent = vr.stack[vr.frame-1].mode
+	}
+	return te
+}
+
+func (vr *valueReader) typeError(t bsontype.Type) error {
+	return fmt.Errorf("positioned on %s, but attempted to read %s", vr.stack[vr.frame].vType, t)
+}
+
+func (vr *valueReader) invalidDocumentLengthError() error {
+	return fmt.Errorf("document is invalid, end byte is at %d, but null byte found at %d", vr.stack[vr.frame].end, vr.offset)
+}
+
+func (vr *valueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string) error {
+	switch vr.stack[vr.frame].mode {
+	case mElement, mValue:
+		if vr.stack[vr.frame].vType != t {
+			return vr.typeError(t)
+		}
+	default:
+		return vr.invalidTransitionErr(destination, callerName, []mode{mElement, mValue})
+	}
+
+	return nil
+}
+
+func (vr *valueReader) Type() bsontype.Type {
+	return vr.stack[vr.frame].vType
+}
+
+func (vr *valueReader) nextElementLength() (int32, error) {
+	var length int32
+	var err error
+	switch vr.stack[vr.frame].vType {
+	case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope:
+		length, err = vr.peekLength()
+	case bsontype.Binary:
+		length, err = vr.peekLength()
+		length += 4 + 1 // binary length + subtype byte
+	case bsontype.Boolean:
+		length = 1
+	case bsontype.DBPointer:
+		length, err = vr.peekLength()
+		length += 4 + 12 // string length + ObjectID length
+	case bsontype.DateTime, bsontype.Double, bsontype.Int64, bsontype.Timestamp:
+		length = 8
+	case bsontype.Decimal128:
+		length = 16
+	case bsontype.Int32:
+		length = 4
+	case bsontype.JavaScript, bsontype.String, bsontype.Symbol:
+		length, err = vr.peekLength()
+		length += 4
+	case bsontype.MaxKey, bsontype.MinKey, bsontype.Null, bsontype.Undefined:
+		length = 0
+	case bsontype.ObjectID:
+		length = 12
+	case bsontype.Regex:
+		// Both IndexByte results are relative to the start of the slice that
+		// was searched, so the second search must begin after the first null
+		// byte, at the absolute position vr.offset+regex+1.
+		regex := bytes.IndexByte(vr.d[vr.offset:], 0x00)
+		if regex < 0 {
+			err = io.EOF
+			break
+		}
+		pattern := bytes.IndexByte(vr.d[vr.offset+int64(regex)+1:], 0x00)
+		if pattern < 0 {
+			err = io.EOF
+			break
+		}
+		length = int32(int64(regex) + 1 + int64(pattern) + 1)
+	default:
+		return 0, fmt.Errorf("attempted to read bytes of unknown BSON type %v", vr.stack[vr.frame].vType)
+	}
+
+	return length, err
+}
+
+func (vr *valueReader) ReadValueBytes(dst []byte) (bsontype.Type, []byte, error) {
+	switch vr.stack[vr.frame].mode {
+	case mTopLevel:
+		length, err := vr.peekLength()
+		if err != nil {
+			return bsontype.Type(0), nil, err
+		}
+		dst, err = vr.appendBytes(dst, length)
+		if err != nil {
+			return bsontype.Type(0), nil, err
+		}
+		return bsontype.Type(0), dst, nil
+	case mElement, mValue:
+		length, err := vr.nextElementLength()
+		if err != nil {
+			return bsontype.Type(0), dst, err
+		}
+
+		dst, err = vr.appendBytes(dst, length)
+		t := vr.stack[vr.frame].vType
+		vr.pop()
+		return t, dst, err
+	default:
+		return bsontype.Type(0), nil, vr.invalidTransitionErr(0, "ReadValueBytes", []mode{mElement, mValue})
+	}
+}
+
+func (vr *valueReader) Skip() error {
+	switch vr.stack[vr.frame].mode {
+	case mElement, mValue:
+	default:
+		return vr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue})
+	}
+
+	length, err := vr.nextElementLength()
+	if err != nil {
+		return err
+	}
+
+	err = vr.skipBytes(length)
+	vr.pop()
+	return err
+}
+
+func (vr *valueReader) ReadArray() (ArrayReader, error) {
+	if err := vr.ensureElementValue(bsontype.Array, mArray, "ReadArray"); err != nil {
+		return nil, err
+	}
+
+	err := vr.pushArray()
+	if err != nil {
+		return nil, err
+	}
+
+	return vr, nil
+}
+
+func (vr *valueReader) ReadBinary() (b []byte, btype byte, err error) {
+	if err := vr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil {
+		return nil, 0, err
+	}
+
+	length, err := vr.readLength()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	btype, err = vr.readByte()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	if btype == 0x02 {
+		length, err = vr.readLength()
+		if err != nil {
+			return nil, 0, err
+		}
+	}
+
+	b, err = vr.readBytes(length)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	vr.pop()
+	return b, btype, nil
+}
+
+func (vr *valueReader) ReadBoolean() (bool, error) {
+	if err := vr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil {
+		return false, err
+	}
+
+	b, err := vr.readByte()
+	if err != nil {
+		return false, err
+	}
+
+	if b > 1 {
+		return false, fmt.Errorf("invalid byte for boolean, %b", b)
+	}
+
+	vr.pop()
+	return b == 1, nil
+}
+
+func (vr *valueReader) ReadDocument() (DocumentReader, error) {
+	switch vr.stack[vr.frame].mode {
+	case mTopLevel:
+		// read size
+		size, err := vr.readLength()
+		if err != nil {
+			return nil, err
+		}
+		if int(size) != len(vr.d) {
+			return nil, fmt.Errorf("invalid document length")
+		}
+		vr.stack[vr.frame].end = int64(size) + vr.offset - 4
+		return vr, nil
+	case mElement, mValue:
+		if vr.stack[vr.frame].vType != bsontype.EmbeddedDocument {
+			return nil, vr.typeError(bsontype.EmbeddedDocument)
+		}
+	default:
+		return nil, vr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue})
+	}
+
+	err := vr.pushDocument()
+	if err != nil {
+		return nil, err
+	}
+
+	return vr, nil
+}
+
+func (vr *valueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) {
+	if err := vr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil {
+		return "", nil, err
+	}
+
+	totalLength, err := vr.readLength()
+	if err != nil {
+		return "", nil, err
+	}
+	strLength, err := vr.readLength()
+	if err != nil {
+		return "", nil, err
+	}
+	strBytes, err := vr.readBytes(strLength)
+	if err != nil {
+		return "", nil, err
+	}
+	code = string(strBytes[:len(strBytes)-1])
+
+	size, err := vr.pushCodeWithScope()
+	if err != nil {
+		return "", nil, err
+	}
+
+	// The total length should equal:
+	// 4 (total length) + strLength + 4 (the length of str itself) + (document length)
+	componentsLength := int64(4+strLength+4) + size
+	if int64(totalLength) != componentsLength {
+		return "", nil, fmt.Errorf(
+			"length of CodeWithScope does not match lengths of components; total: %d; components: %d",
+			totalLength, componentsLength,
+		)
+	}
+	return code, vr, nil
+}
+
+func (vr *valueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) {
+	if err := vr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil {
+		return "", oid, err
+	}
+
+	ns, err = vr.readString()
+	if err != nil {
+		return "", oid, err
+	}
+
+	oidbytes, err := vr.readBytes(12)
+	if err != nil {
+		return "", oid, err
+	}
+
+	copy(oid[:], oidbytes)
+
+	vr.pop()
+	return ns, oid, nil
+}
+
+func (vr *valueReader) ReadDateTime() (int64, error) {
+	if err := vr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil {
+		return 0, err
+	}
+
+	i, err := vr.readi64()
+	if err != nil {
+		return 0, err
+	}
+
+	vr.pop()
+	return i, nil
+}
+
+func (vr *valueReader) ReadDecimal128() (primitive.Decimal128, error) {
+	if err := vr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	b, err := vr.readBytes(16)
+	if err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	l := binary.LittleEndian.Uint64(b[0:8])
+	h := binary.LittleEndian.Uint64(b[8:16])
+
+	vr.pop()
+	return primitive.NewDecimal128(h, l), nil
+}
+
+func (vr *valueReader) ReadDouble() (float64, error) {
+	if err := vr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil {
+		return 0, err
+	}
+
+	u, err := vr.readu64()
+	if err != nil {
+		return 0, err
+	}
+
+	vr.pop()
+	return math.Float64frombits(u), nil
+}
+
+func (vr *valueReader) ReadInt32() (int32, error) {
+	if err := vr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil {
+		return 0, err
+	}
+
+	vr.pop()
+	return vr.readi32()
+}
+
+func (vr *valueReader) ReadInt64() (int64, error) {
+	if err := vr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil {
+		return 0, err
+	}
+
+	vr.pop()
+	return vr.readi64()
+}
+
+func (vr *valueReader) ReadJavascript() (code string, err error) {
+	if err := vr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil {
+		return "", err
+	}
+
+	vr.pop()
+	return vr.readString()
+}
+
+func (vr *valueReader) ReadMaxKey() error {
+	if err := vr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil {
+		return err
+	}
+
+	vr.pop()
+	return nil
+}
+
+func (vr *valueReader) ReadMinKey() error {
+	if err := vr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil {
+		return err
+	}
+
+	vr.pop()
+	return nil
+}
+
+func (vr *valueReader) ReadNull() error {
+	if err := vr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil {
+		return err
+	}
+
+	vr.pop()
+	return nil
+}
+
+func (vr *valueReader) ReadObjectID() (primitive.ObjectID, error) {
+	if err := vr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	oidbytes, err := vr.readBytes(12)
+	if err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	var oid primitive.ObjectID
+	copy(oid[:], oidbytes)
+
+	vr.pop()
+	return oid, nil
+}
+
+func (vr *valueReader) ReadRegex() (string, string, error) {
+	if err := vr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil {
+		return "", "", err
+	}
+
+	pattern, err := vr.readCString()
+	if err != nil {
+		return "", "", err
+	}
+
+	options, err := vr.readCString()
+	if err != nil {
+		return "", "", err
+	}
+
+	vr.pop()
+	return pattern, options, nil
+}
+
+func (vr *valueReader) ReadString() (string, error) {
+	if err := vr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil {
+		return "", err
+	}
+
+	vr.pop()
+	return vr.readString()
+}
+
+func (vr *valueReader) ReadSymbol() (symbol string, err error) {
+	if err := vr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil {
+		return "", err
+	}
+
+	vr.pop()
+	return vr.readString()
+}
+
+func (vr *valueReader) ReadTimestamp() (t uint32, i uint32, err error) {
+	if err := vr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil {
+		return 0, 0, err
+	}
+
+	i, err = vr.readu32()
+	if err != nil {
+		return 0, 0, err
+	}
+
+	t, err = vr.readu32()
+	if err != nil {
+		return 0, 0, err
+	}
+
+	vr.pop()
+	return t, i, nil
+}
+
+func (vr *valueReader) ReadUndefined() error {
+	if err := vr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil {
+		return err
+	}
+
+	vr.pop()
+	return nil
+}
+
+func (vr *valueReader) ReadElement() (string, ValueReader, error) {
+	switch vr.stack[vr.frame].mode {
+	case mTopLevel, mDocument, mCodeWithScope:
+	default:
+		return "", nil, vr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope})
+	}
+
+	t, err := vr.readByte()
+	if err != nil {
+		return "", nil, err
+	}
+
+	if t == 0 {
+		if vr.offset != vr.stack[vr.frame].end {
+			return "", nil, vr.invalidDocumentLengthError()
+		}
+
+		vr.pop()
+		return "", nil, ErrEOD
+	}
+
+	name, err := vr.readCString()
+	if err != nil {
+		return "", nil, err
+	}
+
+	vr.pushElement(bsontype.Type(t))
+	return name, vr, nil
+}
+
+func (vr *valueReader) ReadValue() (ValueReader, error) {
+	switch vr.stack[vr.frame].mode {
+	case mArray:
+	default:
+		return nil, vr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray})
+	}
+
+	t, err := vr.readByte()
+	if err != nil {
+		return nil, err
+	}
+
+	if t == 0 {
+		if vr.offset != vr.stack[vr.frame].end {
+			return nil, vr.invalidDocumentLengthError()
+		}
+
+		vr.pop()
+		return nil, ErrEOA
+	}
+
+	_, err = vr.readCString()
+	if err != nil {
+		return nil, err
+	}
+
+	vr.pushValue(bsontype.Type(t))
+	return vr, nil
+}
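+
+// Example (editor's sketch): draining an array with ReadValue, assuming vr is
+// positioned on an array value. ErrEOA marks the end of the array rather than
+// a failure:
+//
+//	ar, err := vr.ReadArray()
+//	if err != nil { /* handle */ }
+//	for {
+//		avr, err := ar.ReadValue()
+//		if err == ErrEOA {
+//			break
+//		}
+//		if err != nil { /* handle */ }
+//		if err := avr.Skip(); err != nil { /* handle */ }
+//	}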
+
+func (vr *valueReader) readBytes(length int32) ([]byte, error) {
+	if length < 0 {
+		return nil, fmt.Errorf("invalid length: %d", length)
+	}
+
+	if vr.offset+int64(length) > int64(len(vr.d)) {
+		return nil, io.EOF
+	}
+
+	start := vr.offset
+	vr.offset += int64(length)
+	return vr.d[start : start+int64(length)], nil
+}
+
+func (vr *valueReader) appendBytes(dst []byte, length int32) ([]byte, error) {
+	if vr.offset+int64(length) > int64(len(vr.d)) {
+		return nil, io.EOF
+	}
+
+	start := vr.offset
+	vr.offset += int64(length)
+	return append(dst, vr.d[start:start+int64(length)]...), nil
+}
+
+func (vr *valueReader) skipBytes(length int32) error {
+	if vr.offset+int64(length) > int64(len(vr.d)) {
+		return io.EOF
+	}
+
+	vr.offset += int64(length)
+	return nil
+}
+
+func (vr *valueReader) readByte() (byte, error) {
+	if vr.offset+1 > int64(len(vr.d)) {
+		return 0x0, io.EOF
+	}
+
+	vr.offset++
+	return vr.d[vr.offset-1], nil
+}
+
+func (vr *valueReader) readCString() (string, error) {
+	idx := bytes.IndexByte(vr.d[vr.offset:], 0x00)
+	if idx < 0 {
+		return "", io.EOF
+	}
+	start := vr.offset
+	// idx does not include the null byte
+	vr.offset += int64(idx) + 1
+	return string(vr.d[start : start+int64(idx)]), nil
+}
+
+func (vr *valueReader) skipCString() error {
+	idx := bytes.IndexByte(vr.d[vr.offset:], 0x00)
+	if idx < 0 {
+		return io.EOF
+	}
+	// idx does not include the null byte
+	vr.offset += int64(idx) + 1
+	return nil
+}
+
+func (vr *valueReader) readString() (string, error) {
+	length, err := vr.readLength()
+	if err != nil {
+		return "", err
+	}
+
+	if int64(length)+vr.offset > int64(len(vr.d)) {
+		return "", io.EOF
+	}
+
+	if length <= 0 {
+		return "", fmt.Errorf("invalid string length: %d", length)
+	}
+
+	if vr.d[vr.offset+int64(length)-1] != 0x00 {
+		return "", fmt.Errorf("string does not end with null byte, but with %v", vr.d[vr.offset+int64(length)-1])
+	}
+
+	start := vr.offset
+	vr.offset += int64(length)
+
+	if length == 2 {
+		asciiByte := vr.d[start]
+		if asciiByte > unicode.MaxASCII {
+			return "", fmt.Errorf("invalid ascii byte")
+		}
+	}
+
+	return string(vr.d[start : start+int64(length)-1]), nil
+}
+
+func (vr *valueReader) peekLength() (int32, error) {
+	if vr.offset+4 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil
+}
+
+func (vr *valueReader) readLength() (int32, error) { return vr.readi32() }
+
+func (vr *valueReader) readi32() (int32, error) {
+	if vr.offset+4 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	vr.offset += 4
+	return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil
+}
+
+func (vr *valueReader) readu32() (uint32, error) {
+	if vr.offset+4 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	vr.offset += 4
+	return (uint32(vr.d[idx]) | uint32(vr.d[idx+1])<<8 | uint32(vr.d[idx+2])<<16 | uint32(vr.d[idx+3])<<24), nil
+}
+
+func (vr *valueReader) readi64() (int64, error) {
+	if vr.offset+8 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	vr.offset += 8
+	return int64(vr.d[idx]) | int64(vr.d[idx+1])<<8 | int64(vr.d[idx+2])<<16 | int64(vr.d[idx+3])<<24 |
+		int64(vr.d[idx+4])<<32 | int64(vr.d[idx+5])<<40 | int64(vr.d[idx+6])<<48 | int64(vr.d[idx+7])<<56, nil
+}
+
+func (vr *valueReader) readu64() (uint64, error) {
+	if vr.offset+8 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	vr.offset += 8
+	return uint64(vr.d[idx]) | uint64(vr.d[idx+1])<<8 | uint64(vr.d[idx+2])<<16 | uint64(vr.d[idx+3])<<24 |
+		uint64(vr.d[idx+4])<<32 | uint64(vr.d[idx+5])<<40 | uint64(vr.d[idx+6])<<48 | uint64(vr.d[idx+7])<<56, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/value_writer.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/value_writer.go
new file mode 100644
index 0000000..2400bac
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/value_writer.go
@@ -0,0 +1,589 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+var _ ValueWriter = (*valueWriter)(nil)
+
+var vwPool = sync.Pool{
+	New: func() interface{} {
+		return new(valueWriter)
+	},
+}
+
+// BSONValueWriterPool is a pool for BSON ValueWriters.
+type BSONValueWriterPool struct {
+	pool sync.Pool
+}
+
+// NewBSONValueWriterPool creates a new pool for ValueWriter instances that write to BSON.
+func NewBSONValueWriterPool() *BSONValueWriterPool {
+	return &BSONValueWriterPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(valueWriter)
+			},
+		},
+	}
+}
+
+// Get retrieves a BSON ValueWriter from the pool and resets it to use w as the destination.
+func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter {
+	vw := bvwp.pool.Get().(*valueWriter)
+	if writer, ok := w.(*SliceWriter); ok {
+		vw.reset(*writer)
+		vw.w = writer
+		return vw
+	}
+	vw.buf = vw.buf[:0]
+	vw.w = w
+	return vw
+}
+
+// Put inserts a ValueWriter into the pool. If the ValueWriter is not a BSON ValueWriter, nothing
+// happens and ok will be false.
+func (bvwp *BSONValueWriterPool) Put(vw ValueWriter) (ok bool) {
+	bvw, ok := vw.(*valueWriter)
+	if !ok {
+		return false
+	}
+
+	if _, ok := bvw.w.(*SliceWriter); ok {
+		bvw.buf = nil
+	}
+	bvw.w = nil
+
+	bvwp.pool.Put(bvw)
+	return true
+}
+
+// This is here so that during testing we can change it and not require
+// allocating a 4GB slice.
+var maxSize = math.MaxInt32
+
+var errNilWriter = errors.New("cannot create a ValueWriter from a nil io.Writer")
+
+type errMaxDocumentSizeExceeded struct {
+	size int64
+}
+
+func (mdse errMaxDocumentSizeExceeded) Error() string {
+	return fmt.Sprintf("document size (%d) is larger than the max int32", mdse.size)
+}
+
+type vwMode int
+
+const (
+	_ vwMode = iota
+	vwTopLevel
+	vwDocument
+	vwArray
+	vwValue
+	vwElement
+	vwCodeWithScope
+)
+
+func (vm vwMode) String() string {
+	var str string
+
+	switch vm {
+	case vwTopLevel:
+		str = "TopLevel"
+	case vwDocument:
+		str = "DocumentMode"
+	case vwArray:
+		str = "ArrayMode"
+	case vwValue:
+		str = "ValueMode"
+	case vwElement:
+		str = "ElementMode"
+	case vwCodeWithScope:
+		str = "CodeWithScopeMode"
+	default:
+		str = "UnknownMode"
+	}
+
+	return str
+}
+
+type vwState struct {
+	mode   mode
+	key    string
+	arrkey int
+	start  int32
+}
+
+type valueWriter struct {
+	w   io.Writer
+	buf []byte
+
+	stack []vwState
+	frame int64
+}
+
+func (vw *valueWriter) advanceFrame() {
+	if vw.frame+1 >= int64(len(vw.stack)) { // We need to grow the stack
+		length := len(vw.stack)
+		if length+1 >= cap(vw.stack) {
+			// double it
+			buf := make([]vwState, 2*cap(vw.stack)+1)
+			copy(buf, vw.stack)
+			vw.stack = buf
+		}
+		vw.stack = vw.stack[:length+1]
+	}
+	vw.frame++
+}
+
+func (vw *valueWriter) push(m mode) {
+	vw.advanceFrame()
+
+	// Clean the stack
+	vw.stack[vw.frame].mode = m
+	vw.stack[vw.frame].key = ""
+	vw.stack[vw.frame].arrkey = 0
+	vw.stack[vw.frame].start = 0
+
+	switch m {
+	case mDocument, mArray, mCodeWithScope:
+		vw.reserveLength()
+	}
+}
+
+func (vw *valueWriter) reserveLength() {
+	vw.stack[vw.frame].start = int32(len(vw.buf))
+	vw.buf = append(vw.buf, 0x00, 0x00, 0x00, 0x00)
+}
+
+func (vw *valueWriter) pop() {
+	switch vw.stack[vw.frame].mode {
+	case mElement, mValue:
+		vw.frame--
+	case mDocument, mArray, mCodeWithScope:
+		vw.frame -= 2 // we pop twice to jump over the mElement: mDocument -> mElement -> mDocument/mTopLevel/etc...
+	}
+}
+
+// NewBSONValueWriter creates a ValueWriter that writes BSON to w.
+//
+// This ValueWriter will only write entire documents to the io.Writer and it
+// will buffer the document as it is built.
+func NewBSONValueWriter(w io.Writer) (ValueWriter, error) {
+	if w == nil {
+		return nil, errNilWriter
+	}
+	return newValueWriter(w), nil
+}
+
+func newValueWriter(w io.Writer) *valueWriter {
+	vw := new(valueWriter)
+	stack := make([]vwState, 1, 5)
+	stack[0] = vwState{mode: mTopLevel}
+	vw.w = w
+	vw.stack = stack
+
+	return vw
+}
+
+func newValueWriterFromSlice(buf []byte) *valueWriter {
+	vw := new(valueWriter)
+	stack := make([]vwState, 1, 5)
+	stack[0] = vwState{mode: mTopLevel}
+	vw.stack = stack
+	vw.buf = buf
+
+	return vw
+}
+
+func (vw *valueWriter) reset(buf []byte) {
+	if vw.stack == nil {
+		vw.stack = make([]vwState, 1, 5)
+	}
+	vw.stack = vw.stack[:1]
+	vw.stack[0] = vwState{mode: mTopLevel}
+	vw.buf = buf
+	vw.frame = 0
+	vw.w = nil
+}
+
+func (vw *valueWriter) invalidTransitionError(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     vw.stack[vw.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "write",
+	}
+	if vw.frame != 0 {
+		te.parent = vw.stack[vw.frame-1].mode
+	}
+	return te
+}
+
+func (vw *valueWriter) writeElementHeader(t bsontype.Type, destination mode, callerName string, addmodes ...mode) error {
+	switch vw.stack[vw.frame].mode {
+	case mElement:
+		vw.buf = bsoncore.AppendHeader(vw.buf, t, vw.stack[vw.frame].key)
+	case mValue:
+		// TODO: Do this with a cache of the first 1000 or so array keys.
+		vw.buf = bsoncore.AppendHeader(vw.buf, t, strconv.Itoa(vw.stack[vw.frame].arrkey))
+	default:
+		modes := []mode{mElement, mValue}
+		if addmodes != nil {
+			modes = append(modes, addmodes...)
+		}
+		return vw.invalidTransitionError(destination, callerName, modes)
+	}
+
+	return nil
+}
+
+func (vw *valueWriter) WriteValueBytes(t bsontype.Type, b []byte) error {
+	if err := vw.writeElementHeader(t, mode(0), "WriteValueBytes"); err != nil {
+		return err
+	}
+	vw.buf = append(vw.buf, b...)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteArray() (ArrayWriter, error) {
+	if err := vw.writeElementHeader(bsontype.Array, mArray, "WriteArray"); err != nil {
+		return nil, err
+	}
+
+	vw.push(mArray)
+
+	return vw, nil
+}
+
+func (vw *valueWriter) WriteBinary(b []byte) error {
+	return vw.WriteBinaryWithSubtype(b, 0x00)
+}
+
+func (vw *valueWriter) WriteBinaryWithSubtype(b []byte, btype byte) error {
+	if err := vw.writeElementHeader(bsontype.Binary, mode(0), "WriteBinaryWithSubtype"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendBinary(vw.buf, btype, b)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteBoolean(b bool) error {
+	if err := vw.writeElementHeader(bsontype.Boolean, mode(0), "WriteBoolean"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendBoolean(vw.buf, b)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteCodeWithScope(code string) (DocumentWriter, error) {
+	if err := vw.writeElementHeader(bsontype.CodeWithScope, mCodeWithScope, "WriteCodeWithScope"); err != nil {
+		return nil, err
+	}
+
+	// CodeWithScope is different from other types because we need an extra
+	// frame on the stack. In the EndDocument code, we write the document
+	// length, pop, write the code with scope length, and pop. To simplify the
+	// pop code, we push a spacer frame that we'll always jump over.
+	vw.push(mCodeWithScope)
+	vw.buf = bsoncore.AppendString(vw.buf, code)
+	vw.push(mSpacer)
+	vw.push(mDocument)
+
+	return vw, nil
+}
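+
+// Illustration (editor's note): after WriteCodeWithScope returns, the stack is
+//
+//	... -> mCodeWithScope -> mSpacer -> mDocument
+//
+// WriteDocumentEnd on the scope document pops two frames (jumping the spacer)
+// back to mCodeWithScope, writes the code-with-scope length, and pops again,
+// landing on the enclosing frame.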
+
+func (vw *valueWriter) WriteDBPointer(ns string, oid primitive.ObjectID) error {
+	if err := vw.writeElementHeader(bsontype.DBPointer, mode(0), "WriteDBPointer"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendDBPointer(vw.buf, ns, oid)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteDateTime(dt int64) error {
+	if err := vw.writeElementHeader(bsontype.DateTime, mode(0), "WriteDateTime"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendDateTime(vw.buf, dt)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteDecimal128(d128 primitive.Decimal128) error {
+	if err := vw.writeElementHeader(bsontype.Decimal128, mode(0), "WriteDecimal128"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendDecimal128(vw.buf, d128)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteDouble(f float64) error {
+	if err := vw.writeElementHeader(bsontype.Double, mode(0), "WriteDouble"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendDouble(vw.buf, f)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteInt32(i32 int32) error {
+	if err := vw.writeElementHeader(bsontype.Int32, mode(0), "WriteInt32"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendInt32(vw.buf, i32)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteInt64(i64 int64) error {
+	if err := vw.writeElementHeader(bsontype.Int64, mode(0), "WriteInt64"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendInt64(vw.buf, i64)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteJavascript(code string) error {
+	if err := vw.writeElementHeader(bsontype.JavaScript, mode(0), "WriteJavascript"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendJavaScript(vw.buf, code)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteMaxKey() error {
+	if err := vw.writeElementHeader(bsontype.MaxKey, mode(0), "WriteMaxKey"); err != nil {
+		return err
+	}
+
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteMinKey() error {
+	if err := vw.writeElementHeader(bsontype.MinKey, mode(0), "WriteMinKey"); err != nil {
+		return err
+	}
+
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteNull() error {
+	if err := vw.writeElementHeader(bsontype.Null, mode(0), "WriteNull"); err != nil {
+		return err
+	}
+
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteObjectID(oid primitive.ObjectID) error {
+	if err := vw.writeElementHeader(bsontype.ObjectID, mode(0), "WriteObjectID"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendObjectID(vw.buf, oid)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteRegex(pattern string, options string) error {
+	if err := vw.writeElementHeader(bsontype.Regex, mode(0), "WriteRegex"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendRegex(vw.buf, pattern, sortStringAlphebeticAscending(options))
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteString(s string) error {
+	if err := vw.writeElementHeader(bsontype.String, mode(0), "WriteString"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendString(vw.buf, s)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteDocument() (DocumentWriter, error) {
+	if vw.stack[vw.frame].mode == mTopLevel {
+		vw.reserveLength()
+		return vw, nil
+	}
+	if err := vw.writeElementHeader(bsontype.EmbeddedDocument, mDocument, "WriteDocument", mTopLevel); err != nil {
+		return nil, err
+	}
+
+	vw.push(mDocument)
+	return vw, nil
+}
+
+func (vw *valueWriter) WriteSymbol(symbol string) error {
+	if err := vw.writeElementHeader(bsontype.Symbol, mode(0), "WriteSymbol"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendSymbol(vw.buf, symbol)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteTimestamp(t uint32, i uint32) error {
+	if err := vw.writeElementHeader(bsontype.Timestamp, mode(0), "WriteTimestamp"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendTimestamp(vw.buf, t, i)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteUndefined() error {
+	if err := vw.writeElementHeader(bsontype.Undefined, mode(0), "WriteUndefined"); err != nil {
+		return err
+	}
+
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteDocumentElement(key string) (ValueWriter, error) {
+	switch vw.stack[vw.frame].mode {
+	case mTopLevel, mDocument:
+	default:
+		return nil, vw.invalidTransitionError(mElement, "WriteDocumentElement", []mode{mTopLevel, mDocument})
+	}
+
+	vw.push(mElement)
+	vw.stack[vw.frame].key = key
+
+	return vw, nil
+}
+
+func (vw *valueWriter) WriteDocumentEnd() error {
+	switch vw.stack[vw.frame].mode {
+	case mTopLevel, mDocument:
+	default:
+		return fmt.Errorf("incorrect mode to end document: %s", vw.stack[vw.frame].mode)
+	}
+
+	vw.buf = append(vw.buf, 0x00)
+
+	err := vw.writeLength()
+	if err != nil {
+		return err
+	}
+
+	if vw.stack[vw.frame].mode == mTopLevel {
+		if vw.w != nil {
+			if sw, ok := vw.w.(*SliceWriter); ok {
+				*sw = vw.buf
+			} else {
+				_, err = vw.w.Write(vw.buf)
+				if err != nil {
+					return err
+				}
+				// reset buffer
+				vw.buf = vw.buf[:0]
+			}
+		}
+	}
+
+	vw.pop()
+
+	if vw.stack[vw.frame].mode == mCodeWithScope {
+		// We ignore the error here because of the guarantee of writeLength.
+		// See the docs for writeLength for more info.
+		_ = vw.writeLength()
+		vw.pop()
+	}
+	return nil
+}
+
+func (vw *valueWriter) WriteArrayElement() (ValueWriter, error) {
+	if vw.stack[vw.frame].mode != mArray {
+		return nil, vw.invalidTransitionError(mValue, "WriteArrayElement", []mode{mArray})
+	}
+
+	arrkey := vw.stack[vw.frame].arrkey
+	vw.stack[vw.frame].arrkey++
+
+	vw.push(mValue)
+	vw.stack[vw.frame].arrkey = arrkey
+
+	return vw, nil
+}
+
+func (vw *valueWriter) WriteArrayEnd() error {
+	if vw.stack[vw.frame].mode != mArray {
+		return fmt.Errorf("incorrect mode to end array: %s", vw.stack[vw.frame].mode)
+	}
+
+	vw.buf = append(vw.buf, 0x00)
+
+	err := vw.writeLength()
+	if err != nil {
+		return err
+	}
+
+	vw.pop()
+	return nil
+}
+
+// NOTE: We assume that if writeLength is called more than once within the
+// same function without altering vw.buf, it will not return an error. If this
+// changes, ensure that the following methods are updated:
+//
+// - WriteDocumentEnd
+func (vw *valueWriter) writeLength() error {
+	length := len(vw.buf)
+	if length > maxSize {
+		return errMaxDocumentSizeExceeded{size: int64(len(vw.buf))}
+	}
+	length = length - int(vw.stack[vw.frame].start)
+	start := vw.stack[vw.frame].start
+
+	vw.buf[start+0] = byte(length)
+	vw.buf[start+1] = byte(length >> 8)
+	vw.buf[start+2] = byte(length >> 16)
+	vw.buf[start+3] = byte(length >> 24)
+	return nil
+}
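+
+// Illustration (editor's note): reserveLength and writeLength cooperate to
+// backfill the 4-byte little-endian length placeholder. For a frame whose
+// start is 0 and whose finished buffer is 12 bytes long:
+//
+//	length == 12, so buf[0:4] becomes 0x0C, 0x00, 0x00, 0x00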
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/writer.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/writer.go
new file mode 100644
index 0000000..6ae4322
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/writer.go
@@ -0,0 +1,96 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// ArrayWriter is the interface used to create a BSON or BSON adjacent array.
+// Callers must ensure they call WriteArrayEnd when they have finished creating
+// the array.
+type ArrayWriter interface {
+	WriteArrayElement() (ValueWriter, error)
+	WriteArrayEnd() error
+}
+
+// DocumentWriter is the interface used to create a BSON or BSON adjacent
+// document. Callers must ensure they call WriteDocumentEnd when they have
+// finished creating the document.
+type DocumentWriter interface {
+	WriteDocumentElement(string) (ValueWriter, error)
+	WriteDocumentEnd() error
+}
+
+// ValueWriter is the interface used to write BSON values. Implementations of
+// this interface handle creating BSON or BSON adjacent representations of the
+// values.
+type ValueWriter interface {
+	WriteArray() (ArrayWriter, error)
+	WriteBinary(b []byte) error
+	WriteBinaryWithSubtype(b []byte, btype byte) error
+	WriteBoolean(bool) error
+	WriteCodeWithScope(code string) (DocumentWriter, error)
+	WriteDBPointer(ns string, oid primitive.ObjectID) error
+	WriteDateTime(dt int64) error
+	WriteDecimal128(primitive.Decimal128) error
+	WriteDouble(float64) error
+	WriteInt32(int32) error
+	WriteInt64(int64) error
+	WriteJavascript(code string) error
+	WriteMaxKey() error
+	WriteMinKey() error
+	WriteNull() error
+	WriteObjectID(primitive.ObjectID) error
+	WriteRegex(pattern, options string) error
+	WriteString(string) error
+	WriteDocument() (DocumentWriter, error)
+	WriteSymbol(symbol string) error
+	WriteTimestamp(t, i uint32) error
+	WriteUndefined() error
+}
+
+// BytesWriter is the interface used to write BSON bytes to a ValueWriter.
+// This interface is meant to be a superset of ValueWriter, so that types that
+// implement ValueWriter may also implement this interface.
+type BytesWriter interface {
+	WriteValueBytes(t bsontype.Type, b []byte) error
+}
+
+// SliceWriter allows a pointer to a slice of bytes to be used as an io.Writer.
+type SliceWriter []byte
+
+func (sw *SliceWriter) Write(p []byte) (int, error) {
+	written := len(p)
+	*sw = append(*sw, p...)
+	return written, nil
+}
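+
+// Example (editor's sketch): using a SliceWriter as the destination of a BSON
+// ValueWriter. When the top-level document ends, the finished bytes are stored
+// back into sw:
+//
+//	sw := make(SliceWriter, 0, 256)
+//	vw, err := NewBSONValueWriter(&sw)
+//	if err != nil { /* handle */ }
+//	// ... build a document with vw; after WriteDocumentEnd, sw holds it ...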
+
+type writer []byte
+
+func (w *writer) Write(p []byte) (int, error) {
+	index := len(*w)
+	return w.WriteAt(p, int64(index))
+}
+
+func (w *writer) WriteAt(p []byte, off int64) (int, error) {
+	newend := off + int64(len(p))
+	if newend < int64(len(*w)) {
+		newend = int64(len(*w))
+	}
+
+	if newend > int64(cap(*w)) {
+		buf := make([]byte, int64(2*cap(*w))+newend)
+		copy(buf, *w)
+		*w = buf
+	}
+
+	*w = []byte(*w)[:newend]
+	copy([]byte(*w)[off:], p)
+	return len(p), nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsontype/bsontype.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsontype/bsontype.go
new file mode 100644
index 0000000..a17e5f5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsontype/bsontype.go
@@ -0,0 +1,87 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsontype is a utility package that contains types for each BSON
+// type and a stringifier for Type to enable easier debugging when working
+// with BSON.
+package bsontype
+
+// These constants uniquely refer to each BSON type.
+const (
+	Double           Type = 0x01
+	String           Type = 0x02
+	EmbeddedDocument Type = 0x03
+	Array            Type = 0x04
+	Binary           Type = 0x05
+	Undefined        Type = 0x06
+	ObjectID         Type = 0x07
+	Boolean          Type = 0x08
+	DateTime         Type = 0x09
+	Null             Type = 0x0A
+	Regex            Type = 0x0B
+	DBPointer        Type = 0x0C
+	JavaScript       Type = 0x0D
+	Symbol           Type = 0x0E
+	CodeWithScope    Type = 0x0F
+	Int32            Type = 0x10
+	Timestamp        Type = 0x11
+	Int64            Type = 0x12
+	Decimal128       Type = 0x13
+	MinKey           Type = 0xFF
+	MaxKey           Type = 0x7F
+)
+
+// Type represents a BSON type.
+type Type byte
+
+// String returns the string representation of the BSON type's name.
+func (bt Type) String() string {
+	switch bt {
+	case '\x01':
+		return "double"
+	case '\x02':
+		return "string"
+	case '\x03':
+		return "embedded document"
+	case '\x04':
+		return "array"
+	case '\x05':
+		return "binary"
+	case '\x06':
+		return "undefined"
+	case '\x07':
+		return "objectID"
+	case '\x08':
+		return "boolean"
+	case '\x09':
+		return "UTC datetime"
+	case '\x0A':
+		return "null"
+	case '\x0B':
+		return "regex"
+	case '\x0C':
+		return "dbPointer"
+	case '\x0D':
+		return "javascript"
+	case '\x0E':
+		return "symbol"
+	case '\x0F':
+		return "code with scope"
+	case '\x10':
+		return "32-bit integer"
+	case '\x11':
+		return "timestamp"
+	case '\x12':
+		return "64-bit integer"
+	case '\x13':
+		return "128-bit decimal"
+	case '\xFF':
+		return "min key"
+	case '\x7F':
+		return "max key"
+	default:
+		return "invalid"
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/decoder.go b/vendor/github.com/mongodb/mongo-go-driver/bson/decoder.go
new file mode 100644
index 0000000..03c86a1
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/decoder.go
@@ -0,0 +1,106 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+)
+
+// This pool is used to keep the allocations of Decoders down. This is only used for the Unmarshal*
+// methods and is not consumable from outside of this package. The Decoders retrieved from this pool
+// must have both Reset and SetRegistry called on them.
+var decPool = sync.Pool{
+	New: func() interface{} {
+		return new(Decoder)
+	},
+}
+
+// A Decoder reads and decodes BSON documents from a stream. It reads from a bsonrw.ValueReader as
+// the source of BSON data.
+type Decoder struct {
+	dc bsoncodec.DecodeContext
+	vr bsonrw.ValueReader
+}
+
+// NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr.
+func NewDecoder(vr bsonrw.ValueReader) (*Decoder, error) {
+	if vr == nil {
+		return nil, errors.New("cannot create a new Decoder with a nil ValueReader")
+	}
+
+	return &Decoder{
+		dc: bsoncodec.DecodeContext{Registry: DefaultRegistry},
+		vr: vr,
+	}, nil
+}
+
+// NewDecoderWithContext returns a new decoder that uses DecodeContext dc to read from vr.
+func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (*Decoder, error) {
+	if dc.Registry == nil {
+		dc.Registry = DefaultRegistry
+	}
+	if vr == nil {
+		return nil, errors.New("cannot create a new Decoder with a nil ValueReader")
+	}
+
+	return &Decoder{
+		dc: dc,
+		vr: vr,
+	}, nil
+}
+
+// Decode reads the next BSON document from the stream and decodes it into the
+// value pointed to by val.
+//
+// The documentation for Unmarshal contains details about the conversion of
+// BSON into a Go value.
+func (d *Decoder) Decode(val interface{}) error {
+	if unmarshaler, ok := val.(Unmarshaler); ok {
+		// TODO(skriptble): Reuse a []byte here and use the AppendDocumentBytes method.
+		buf, err := bsonrw.Copier{}.CopyDocumentToBytes(d.vr)
+		if err != nil {
+			return err
+		}
+		return unmarshaler.UnmarshalBSON(buf)
+	}
+
+	rval := reflect.ValueOf(val)
+	if rval.Kind() != reflect.Ptr {
+		return fmt.Errorf("argument to Decode must be a pointer to a type, but got %v", rval)
+	}
+	rval = rval.Elem()
+	decoder, err := d.dc.LookupDecoder(rval.Type())
+	if err != nil {
+		return err
+	}
+	return decoder.DecodeValue(d.dc, d.vr, rval)
+}
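+
+// Example (editor's sketch; raw is assumed to be a valid BSON document):
+//
+//	vr := bsonrw.NewBSONDocumentReader(raw)
+//	dec, err := bson.NewDecoder(vr)
+//	if err != nil { /* handle */ }
+//	var out struct{ Foo string }
+//	if err := dec.Decode(&out); err != nil { /* handle */ }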
+
+// Reset will reset the state of the decoder, using the same DecodeContext used in
+// the original construction but using vr for reading.
+func (d *Decoder) Reset(vr bsonrw.ValueReader) error {
+	d.vr = vr
+	return nil
+}
+
+// SetRegistry replaces the current registry of the decoder with r.
+func (d *Decoder) SetRegistry(r *bsoncodec.Registry) error {
+	d.dc.Registry = r
+	return nil
+}
+
+// SetContext replaces the current DecodeContext of the decoder with dc.
+func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error {
+	d.dc = dc
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/doc.go b/vendor/github.com/mongodb/mongo-go-driver/bson/doc.go
new file mode 100644
index 0000000..b3f6c52
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/doc.go
@@ -0,0 +1,42 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bson is a library for reading, writing, and manipulating BSON. The
+// library has two families of types for representing BSON.
+//
+// The Raw family of types is used to validate and retrieve elements from a slice of bytes. This
+// type is most useful when you want to do lookups on BSON bytes without unmarshaling it into another
+// type.
+//
+// Example:
+// 		var raw bson.Raw = ... // bytes from somewhere
+// 		err := raw.Validate()
+// 		if err != nil { return err }
+// 		val := raw.Lookup("foo")
+// 		i32, ok := val.Int32OK()
+// 		// do something with i32...
+//
+// The D family of types is used to build concise representations of BSON using native Go types.
+// These types do not support automatic lookup.
+//
+// Example:
+// 		bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
+//
+//
+// Marshaling and Unmarshaling are handled with the Marshal and Unmarshal family of functions. If
+// you need to write or read BSON from a non-slice source, an Encoder or Decoder can be used with a
+// bsonrw.ValueWriter or bsonrw.ValueReader.
+//
+// Example:
+// 		b, err := bson.Marshal(bson.D{{"foo", "bar"}})
+// 		if err != nil { return err }
+// 		var fooer struct {
+// 			Foo string
+// 		}
+// 		err = bson.Unmarshal(b, &fooer)
+// 		if err != nil { return err }
+// 		// do something with fooer...
+package bson
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/encoder.go b/vendor/github.com/mongodb/mongo-go-driver/bson/encoder.go
new file mode 100644
index 0000000..3c40425
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/encoder.go
@@ -0,0 +1,99 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"errors"
+	"reflect"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+)
+
+// This pool is used to keep the allocations of Encoders down. This is only used for the Marshal*
+// methods and is not consumable from outside of this package. The Encoders retrieved from this pool
+// must have both Reset and SetRegistry called on them.
+var encPool = sync.Pool{
+	New: func() interface{} {
+		return new(Encoder)
+	},
+}
+
+// An Encoder writes a serialization format to an output stream. It writes to a bsonrw.ValueWriter
+// as the destination of BSON data.
+type Encoder struct {
+	ec bsoncodec.EncodeContext
+	vw bsonrw.ValueWriter
+}
+
+// NewEncoder returns a new encoder that uses the DefaultRegistry to write to vw.
+func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) {
+	if vw == nil {
+		return nil, errors.New("cannot create a new Encoder with a nil ValueWriter")
+	}
+
+	return &Encoder{
+		ec: bsoncodec.EncodeContext{Registry: DefaultRegistry},
+		vw: vw,
+	}, nil
+}
+
+// NewEncoderWithContext returns a new encoder that uses EncodeContext ec to write to vw.
+func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (*Encoder, error) {
+	if ec.Registry == nil {
+		ec = bsoncodec.EncodeContext{Registry: DefaultRegistry}
+	}
+	if vw == nil {
+		return nil, errors.New("cannot create a new Encoder with a nil ValueWriter")
+	}
+
+	return &Encoder{
+		ec: ec,
+		vw: vw,
+	}, nil
+}
+
+// Encode writes the BSON encoding of val to the stream.
+//
+// The documentation for Marshal contains details about the conversion of Go
+// values to BSON.
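+//
+// A minimal usage sketch (not part of this file; SliceWriter and
+// NewBSONValueWriter come from the bsonrw package):
+//
+// 		sw := new(bsonrw.SliceWriter)
+// 		vw, _ := bsonrw.NewBSONValueWriter(sw)
+// 		enc, _ := bson.NewEncoder(vw)
+// 		err := enc.Encode(bson.D{{"foo", "bar"}})
+// 		// *sw now holds the encoded document bytes.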
+func (e *Encoder) Encode(val interface{}) error {
+	if marshaler, ok := val.(Marshaler); ok {
+		// TODO(skriptble): Should we have a MarshalAppender interface so that we can have []byte reuse?
+		buf, err := marshaler.MarshalBSON()
+		if err != nil {
+			return err
+		}
+		return bsonrw.Copier{}.CopyDocumentFromBytes(e.vw, buf)
+	}
+
+	encoder, err := e.ec.LookupEncoder(reflect.TypeOf(val))
+	if err != nil {
+		return err
+	}
+	return encoder.EncodeValue(e.ec, e.vw, reflect.ValueOf(val))
+}
+
+// Reset will reset the state of the encoder, using the same EncodeContext used in
+// the original construction but writing to vw.
+func (e *Encoder) Reset(vw bsonrw.ValueWriter) error {
+	e.vw = vw
+	return nil
+}
+
+// SetRegistry replaces the current registry of the encoder with r.
+func (e *Encoder) SetRegistry(r *bsoncodec.Registry) error {
+	e.ec.Registry = r
+	return nil
+}
+
+// SetContext replaces the current EncodeContext of the encoder with ec.
+func (e *Encoder) SetContext(ec bsoncodec.EncodeContext) error {
+	e.ec = ec
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/marshal.go b/vendor/github.com/mongodb/mongo-go-driver/bson/marshal.go
new file mode 100644
index 0000000..398fb63
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/marshal.go
@@ -0,0 +1,156 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+const defaultDstCap = 256
+
+var bvwPool = bsonrw.NewBSONValueWriterPool()
+var extjPool = bsonrw.NewExtJSONValueWriterPool()
+
+// Marshaler is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+type Marshaler interface {
+	MarshalBSON() ([]byte, error)
+}
+
+// ValueMarshaler is an interface implemented by types that can marshal
+// themselves into a BSON value as bytes. The returned type must be the valid
+// BSON type for the returned bytes, and the bytes and type together must form
+// a valid BSON value if the error is nil.
+type ValueMarshaler interface {
+	MarshalBSONValue() (bsontype.Type, []byte, error)
+}
+
+// Marshal returns the BSON encoding of val.
+//
+// Marshal will use the default registry created by NewRegistryBuilder to recursively
+// marshal val into a []byte. Marshal will inspect struct tags and alter the
+// marshaling process accordingly.
+func Marshal(val interface{}) ([]byte, error) {
+	return MarshalWithRegistry(DefaultRegistry, val)
+}
+
+// MarshalAppend will append the BSON encoding of val to dst. If dst is not
+// large enough to hold the BSON encoding of val, dst will be grown.
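+//
+// For example (a sketch; the buffer and values are illustrative):
+//
+// 		buf := make([]byte, 0, 256)
+// 		buf, err := bson.MarshalAppend(buf, bson.D{{"n", int32(1)}})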
+func MarshalAppend(dst []byte, val interface{}) ([]byte, error) {
+	return MarshalAppendWithRegistry(DefaultRegistry, dst, val)
+}
+
+// MarshalWithRegistry returns the BSON encoding of val using Registry r.
+func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error) {
+	dst := make([]byte, 0, defaultDstCap)
+	return MarshalAppendWithRegistry(r, dst, val)
+}
+
+// MarshalWithContext returns the BSON encoding of val using EncodeContext ec.
+func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, error) {
+	dst := make([]byte, 0, defaultDstCap)
+	return MarshalAppendWithContext(ec, dst, val)
+}
+
+// MarshalAppendWithRegistry will append the BSON encoding of val to dst using
+// Registry r. If dst is not large enough to hold the BSON encoding of val, dst
+// will be grown.
+func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) ([]byte, error) {
+	return MarshalAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val)
+}
+
+// MarshalAppendWithContext will append the BSON encoding of val to dst using
+// EncodeContext ec. If dst is not large enough to hold the BSON encoding of val, dst
+// will be grown.
+func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) ([]byte, error) {
+	sw := new(bsonrw.SliceWriter)
+	*sw = dst
+	vw := bvwPool.Get(sw)
+	defer bvwPool.Put(vw)
+
+	enc := encPool.Get().(*Encoder)
+	defer encPool.Put(enc)
+
+	err := enc.Reset(vw)
+	if err != nil {
+		return nil, err
+	}
+	err = enc.SetContext(ec)
+	if err != nil {
+		return nil, err
+	}
+
+	err = enc.Encode(val)
+	if err != nil {
+		return nil, err
+	}
+
+	return *sw, nil
+}
+
+// MarshalExtJSON returns the extended JSON encoding of val.
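+//
+// For example (a sketch; canonical=false and escapeHTML=false request relaxed,
+// unescaped output):
+//
+// 		j, err := bson.MarshalExtJSON(bson.M{"hello": "world"}, false, false)
+// 		// string(j) == `{"hello":"world"}`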
+func MarshalExtJSON(val interface{}, canonical, escapeHTML bool) ([]byte, error) {
+	return MarshalExtJSONWithRegistry(DefaultRegistry, val, canonical, escapeHTML)
+}
+
+// MarshalExtJSONAppend will append the extended JSON encoding of val to dst.
+// If dst is not large enough to hold the extended JSON encoding of val, dst
+// will be grown.
+func MarshalExtJSONAppend(dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
+	return MarshalExtJSONAppendWithRegistry(DefaultRegistry, dst, val, canonical, escapeHTML)
+}
+
+// MarshalExtJSONWithRegistry returns the extended JSON encoding of val using Registry r.
+func MarshalExtJSONWithRegistry(r *bsoncodec.Registry, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
+	dst := make([]byte, 0, defaultDstCap)
+	return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML)
+}
+
+// MarshalExtJSONWithContext returns the extended JSON encoding of val using EncodeContext ec.
+func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
+	dst := make([]byte, 0, defaultDstCap)
+	return MarshalExtJSONAppendWithContext(ec, dst, val, canonical, escapeHTML)
+}
+
+// MarshalExtJSONAppendWithRegistry will append the extended JSON encoding of
+// val to dst using Registry r. If dst is not large enough to hold the extended
+// JSON encoding of val, dst will be grown.
+func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
+	return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML)
+}
+
+// MarshalExtJSONAppendWithContext will append the extended JSON encoding of
+// val to dst using EncodeContext ec. If dst is not large enough to hold the
+// extended JSON encoding of val, dst will be grown.
+func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
+	sw := new(bsonrw.SliceWriter)
+	*sw = dst
+	ejvw := extjPool.Get(sw, canonical, escapeHTML)
+	defer extjPool.Put(ejvw)
+
+	enc := encPool.Get().(*Encoder)
+	defer encPool.Put(enc)
+
+	err := enc.Reset(ejvw)
+	if err != nil {
+		return nil, err
+	}
+	err = enc.SetContext(ec)
+	if err != nil {
+		return nil, err
+	}
+
+	err = enc.Encode(val)
+	if err != nil {
+		return nil, err
+	}
+
+	return *sw, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/decimal.go b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/decimal.go
new file mode 100644
index 0000000..d7fdb22
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/decimal.go
@@ -0,0 +1,307 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package primitive
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Decimal128 holds decimal128 BSON values.
+type Decimal128 struct {
+	h, l uint64
+}
+
+// NewDecimal128 creates a Decimal128 using the provided high and low uint64s.
+func NewDecimal128(h, l uint64) Decimal128 {
+	return Decimal128{h: h, l: l}
+}
+
+// GetBytes returns the underlying bytes of the BSON decimal value as two uint64 values. The first
+// contains the most significant 8 bytes of the value and the second contains the least significant.
+func (d Decimal128) GetBytes() (uint64, uint64) {
+	return d.h, d.l
+}
+
+// String returns a string representation of the decimal value.
+func (d Decimal128) String() string {
+	var pos int     // positive sign
+	var e int       // exponent
+	var h, l uint64 // significand high/low
+
+	if d.h>>63&1 == 0 {
+		pos = 1
+	}
+
+	switch d.h >> 58 & (1<<5 - 1) {
+	case 0x1F:
+		return "NaN"
+	case 0x1E:
+		return "-Infinity"[pos:]
+	}
+
+	l = d.l
+	if d.h>>61&3 == 3 {
+		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
+		// Implicit 0b100 prefix in significand.
+		e = int(d.h>>47&(1<<14-1)) - 6176
+		//h = 4<<47 | d.h&(1<<47-1)
+		// Spec says all of these values are out of range.
+		h, l = 0, 0
+	} else {
+		// Bits: 1*sign 14*exponent 113*significand
+		e = int(d.h>>49&(1<<14-1)) - 6176
+		h = d.h & (1<<49 - 1)
+	}
+
+	// Would be handled by the logic below, but that's trivial and common.
+	if h == 0 && l == 0 && e == 0 {
+		return "-0"[pos:]
+	}
+
+	var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
+	var last = len(repr)
+	var i = len(repr)
+	var dot = len(repr) + e
+	var rem uint32
+Loop:
+	for d9 := 0; d9 < 5; d9++ {
+		h, l, rem = divmod(h, l, 1e9)
+		for d1 := 0; d1 < 9; d1++ {
+			// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
+			if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
+				e += len(repr) - i
+				i--
+				repr[i] = '.'
+				last = i - 1
+				dot = len(repr) // Unmark.
+			}
+			c := '0' + byte(rem%10)
+			rem /= 10
+			i--
+			repr[i] = c
+			// Handle "0E+3", "1E+3", etc.
+			if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
+				last = i
+				break Loop
+			}
+			if c != '0' {
+				last = i
+			}
+			// Break early. Works without it, but why.
+			if dot > i && l == 0 && h == 0 && rem == 0 {
+				break Loop
+			}
+		}
+	}
+	repr[last-1] = '-'
+	last--
+
+	if e > 0 {
+		return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
+	}
+	if e < 0 {
+		return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
+	}
+	return string(repr[last+pos:])
+}
+
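+// divmod divides the 128-bit value (h, l) by the 32-bit div, returning the
+// 128-bit quotient as (qh, ql) and the 32-bit remainder.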
+func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
+	div64 := uint64(div)
+	a := h >> 32
+	aq := a / div64
+	ar := a % div64
+	b := ar<<32 + h&(1<<32-1)
+	bq := b / div64
+	br := b % div64
+	c := br<<32 + l>>32
+	cq := c / div64
+	cr := c % div64
+	d := cr<<32 + l&(1<<32-1)
+	dq := d / div64
+	dr := d % div64
+	return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
+}
+
+var dNaN = Decimal128{0x1F << 58, 0}
+var dPosInf = Decimal128{0x1E << 58, 0}
+var dNegInf = Decimal128{0x3E << 58, 0}
+
+func dErr(s string) (Decimal128, error) {
+	return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
+}
+
+// ParseDecimal128 takes the given string and attempts to parse it into a valid
+// Decimal128 value.
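+//
+// For example (a sketch; simple decimal strings round-trip through String):
+//
+// 		d, err := primitive.ParseDecimal128("3.14159")
+// 		if err != nil { return err }
+// 		s := d.String() // "3.14159"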
+func ParseDecimal128(s string) (Decimal128, error) {
+	orig := s
+	if s == "" {
+		return dErr(orig)
+	}
+	neg := s[0] == '-'
+	if neg || s[0] == '+' {
+		s = s[1:]
+	}
+
+	if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
+		if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
+			return dNaN, nil
+		}
+		if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
+			if neg {
+				return dNegInf, nil
+			}
+			return dPosInf, nil
+		}
+		return dErr(orig)
+	}
+
+	var h, l uint64
+	var e int
+
+	var add, ovr uint32
+	var mul uint32 = 1
+	var dot = -1
+	var digits = 0
+	var i = 0
+	for i < len(s) {
+		c := s[i]
+		if mul == 1e9 {
+			h, l, ovr = muladd(h, l, mul, add)
+			mul, add = 1, 0
+			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+				return dErr(orig)
+			}
+		}
+		if c >= '0' && c <= '9' {
+			i++
+			if c > '0' || digits > 0 {
+				digits++
+			}
+			if digits > 34 {
+				if c == '0' {
+					// Exact rounding.
+					e++
+					continue
+				}
+				return dErr(orig)
+			}
+			mul *= 10
+			add *= 10
+			add += uint32(c - '0')
+			continue
+		}
+		if c == '.' {
+			i++
+			if dot >= 0 || i == 1 && len(s) == 1 {
+				return dErr(orig)
+			}
+			if i == len(s) {
+				break
+			}
+			if s[i] < '0' || s[i] > '9' || e > 0 {
+				return dErr(orig)
+			}
+			dot = i
+			continue
+		}
+		break
+	}
+	if i == 0 {
+		return dErr(orig)
+	}
+	if mul > 1 {
+		h, l, ovr = muladd(h, l, mul, add)
+		if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+			return dErr(orig)
+		}
+	}
+	if dot >= 0 {
+		e += dot - i
+	}
+	if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
+		i++
+		eneg := s[i] == '-'
+		if eneg || s[i] == '+' {
+			i++
+			if i == len(s) {
+				return dErr(orig)
+			}
+		}
+		n := 0
+		for i < len(s) && n < 1e4 {
+			c := s[i]
+			i++
+			if c < '0' || c > '9' {
+				return dErr(orig)
+			}
+			n *= 10
+			n += int(c - '0')
+		}
+		if eneg {
+			n = -n
+		}
+		e += n
+		for e < -6176 {
+			// Subnormal.
+			var div uint32 = 1
+			for div < 1e9 && e < -6176 {
+				div *= 10
+				e++
+			}
+			var rem uint32
+			h, l, rem = divmod(h, l, div)
+			if rem > 0 {
+				return dErr(orig)
+			}
+		}
+		for e > 6111 {
+			// Clamped.
+			var mul uint32 = 1
+			for mul < 1e9 && e > 6111 {
+				mul *= 10
+				e--
+			}
+			h, l, ovr = muladd(h, l, mul, 0)
+			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+				return dErr(orig)
+			}
+		}
+		if e < -6176 || e > 6111 {
+			return dErr(orig)
+		}
+	}
+
+	if i < len(s) {
+		return dErr(orig)
+	}
+
+	h |= uint64(e+6176) & uint64(1<<14-1) << 49
+	if neg {
+		h |= 1 << 63
+	}
+	return Decimal128{h, l}, nil
+}
+
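+// muladd multiplies the 128-bit value (h, l) by the 32-bit mul, adds the
+// 32-bit add, and returns the low 128 bits of the result along with any
+// overflow beyond 128 bits.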
+func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
+	mul64 := uint64(mul)
+	a := mul64 * (l & (1<<32 - 1))
+	b := a>>32 + mul64*(l>>32)
+	c := b>>32 + mul64*(h&(1<<32-1))
+	d := c>>32 + mul64*(h>>32)
+
+	a = a&(1<<32-1) + uint64(add)
+	b = b&(1<<32-1) + a>>32
+	c = c&(1<<32-1) + b>>32
+	d = d&(1<<32-1) + c>>32
+
+	return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/objectid.go b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/objectid.go
new file mode 100644
index 0000000..9eaaa06
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/objectid.go
@@ -0,0 +1,154 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package primitive
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"sync/atomic"
+	"time"
+)
+
+// ErrInvalidHex indicates that a hex string cannot be converted to an ObjectID.
+var ErrInvalidHex = errors.New("the provided hex string is not a valid ObjectID")
+
+// ObjectID is the BSON ObjectID type.
+type ObjectID [12]byte
+
+// NilObjectID is the zero value for ObjectID.
+var NilObjectID ObjectID
+
+var objectIDCounter = readRandomUint32()
+var processUnique = processUniqueBytes()
+
+// NewObjectID generates a new ObjectID.
+func NewObjectID() ObjectID {
+	var b [12]byte
+
+	binary.BigEndian.PutUint32(b[0:4], uint32(time.Now().Unix()))
+	copy(b[4:9], processUnique[:])
+	putUint24(b[9:12], atomic.AddUint32(&objectIDCounter, 1))
+
+	return b
+}
+
+// Hex returns the hex encoding of the ObjectID as a string.
+func (id ObjectID) Hex() string {
+	return hex.EncodeToString(id[:])
+}
+
+func (id ObjectID) String() string {
+	return fmt.Sprintf("ObjectID(%q)", id.Hex())
+}
+
+// IsZero returns true if id is the empty ObjectID.
+func (id ObjectID) IsZero() bool {
+	return bytes.Equal(id[:], NilObjectID[:])
+}
+
+// ObjectIDFromHex creates a new ObjectID from a hex string. It returns an error if the hex string is not a
+// valid ObjectID.
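+//
+// For example (a sketch):
+//
+// 		id := primitive.NewObjectID()
+// 		parsed, err := primitive.ObjectIDFromHex(id.Hex())
+// 		// err == nil and parsed == id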
+func ObjectIDFromHex(s string) (ObjectID, error) {
+	b, err := hex.DecodeString(s)
+	if err != nil {
+		return NilObjectID, err
+	}
+
+	if len(b) != 12 {
+		return NilObjectID, ErrInvalidHex
+	}
+
+	var oid [12]byte
+	copy(oid[:], b[:])
+
+	return oid, nil
+}
+
+// MarshalJSON returns the ObjectID as a string.
+func (id ObjectID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(id.Hex())
+}
+
+// UnmarshalJSON populates the ObjectID from the given JSON bytes. If the input is twelve bytes
+// long, it is copied directly as the raw BSON representation of the ObjectID. Otherwise, the
+// input is parsed as extended JSON: either a quoted 24-character hex string or a document of the
+// form {"$oid": "<hex>"}. Any other input results in an error.
+func (id *ObjectID) UnmarshalJSON(b []byte) error {
+	var err error
+	switch len(b) {
+	case 12:
+		copy(id[:], b)
+	default:
+		// Extended JSON
+		var res interface{}
+		err := json.Unmarshal(b, &res)
+		if err != nil {
+			return err
+		}
+		str, ok := res.(string)
+		if !ok {
+			m, ok := res.(map[string]interface{})
+			if !ok {
+				return errors.New("not an extended JSON ObjectID")
+			}
+			oid, ok := m["$oid"]
+			if !ok {
+				return errors.New("not an extended JSON ObjectID")
+			}
+			str, ok = oid.(string)
+			if !ok {
+				return errors.New("not an extended JSON ObjectID")
+			}
+		}
+
+		if len(str) != 24 {
+			return fmt.Errorf("cannot unmarshal into an ObjectID, the length must be 24 but it is %d", len(str))
+		}
+
+		_, err = hex.Decode(id[:], []byte(str))
+		if err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
+func processUniqueBytes() [5]byte {
+	var b [5]byte
+	_, err := io.ReadFull(rand.Reader, b[:])
+	if err != nil {
+		panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err))
+	}
+
+	return b
+}
+
+func readRandomUint32() uint32 {
+	var b [4]byte
+	_, err := io.ReadFull(rand.Reader, b[:])
+	if err != nil {
+		panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err))
+	}
+
+	return (uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
+}
+
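+// putUint24 writes the low 24 bits of v into the first three bytes of b in
+// big-endian order.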
+func putUint24(b []byte, v uint32) {
+	b[0] = byte(v >> 16)
+	b[1] = byte(v >> 8)
+	b[2] = byte(v)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/primitive.go b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/primitive.go
new file mode 100644
index 0000000..b9b43f4
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/primitive.go
@@ -0,0 +1,149 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package primitive contains types similar to Go primitives for BSON types that do not have direct
+// Go primitive representations.
+package primitive
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Binary represents a BSON binary value.
+type Binary struct {
+	Subtype byte
+	Data    []byte
+}
+
+// Equal compares bp to bp2 and returns true if they are equal.
+func (bp Binary) Equal(bp2 Binary) bool {
+	if bp.Subtype != bp2.Subtype {
+		return false
+	}
+	return bytes.Equal(bp.Data, bp2.Data)
+}
+
+// Undefined represents the BSON undefined value type.
+type Undefined struct{}
+
+// DateTime represents the BSON datetime value.
+type DateTime int64
+
+// Null represents the BSON null value.
+type Null struct{}
+
+// Regex represents a BSON regex value.
+type Regex struct {
+	Pattern string
+	Options string
+}
+
+func (rp Regex) String() string {
+	return fmt.Sprintf(`{"pattern": "%s", "options": "%s"}`, rp.Pattern, rp.Options)
+}
+
+// Equal compares rp to rp2 and returns true if they are equal.
+func (rp Regex) Equal(rp2 Regex) bool {
+	return rp.Pattern == rp2.Pattern && rp.Options == rp2.Options
+}
+
+// DBPointer represents a BSON dbpointer value.
+type DBPointer struct {
+	DB      string
+	Pointer ObjectID
+}
+
+func (d DBPointer) String() string {
+	return fmt.Sprintf(`{"db": "%s", "pointer": "%s"}`, d.DB, d.Pointer)
+}
+
+// Equal compares d to d2 and returns true if they are equal.
+func (d DBPointer) Equal(d2 DBPointer) bool {
+	return d.DB == d2.DB && bytes.Equal(d.Pointer[:], d2.Pointer[:])
+}
+
+// JavaScript represents a BSON JavaScript code value.
+type JavaScript string
+
+// Symbol represents a BSON symbol value.
+type Symbol string
+
+// CodeWithScope represents a BSON JavaScript code with scope value.
+type CodeWithScope struct {
+	Code  JavaScript
+	Scope interface{}
+}
+
+func (cws CodeWithScope) String() string {
+	return fmt.Sprintf(`{"code": "%s", "scope": %v}`, cws.Code, cws.Scope)
+}
+
+// Timestamp represents a BSON timestamp value.
+type Timestamp struct {
+	T uint32
+	I uint32
+}
+
+// Equal compares tp to tp2 and returns true if they are equal.
+func (tp Timestamp) Equal(tp2 Timestamp) bool {
+	return tp.T == tp2.T && tp.I == tp2.I
+}
+
+// MinKey represents the BSON minkey value.
+type MinKey struct{}
+
+// MaxKey represents the BSON maxkey value.
+type MaxKey struct{}
+
+// D represents a BSON Document. This type can be used to represent BSON in a concise and readable
+// manner. It should generally be used when serializing to BSON. For deserializing, the Raw type
+// should be used.
+//
+// Example usage:
+//
+// 		primitive.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
+//
+// This type should be used in situations where order matters, such as MongoDB commands. If the
+// order is not important, a map (such as M) is more convenient and concise.
+type D []E
+
+// Map creates a map from the elements of the D.
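+//
+// For example (a sketch):
+//
+// 		primitive.D{{"foo", "bar"}}.Map() // primitive.M{"foo": "bar"}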
+func (d D) Map() M {
+	m := make(M, len(d))
+	for _, e := range d {
+		m[e.Key] = e.Value
+	}
+	return m
+}
+
+// E represents a BSON element for a D. It is usually used inside a D.
+type E struct {
+	Key   string
+	Value interface{}
+}
+
+// M is an unordered, concise representation of a BSON Document. It should generally be used to
+// serialize BSON when the order of the elements of a BSON document does not matter. If the element
+// order matters, use a D instead.
+//
+// Example usage:
+//
+// 		primitive.M{"foo": "bar", "hello": "world", "pi": 3.14159}
+//
+// This type is handled in the encoders as a regular map[string]interface{}. The elements will be
+// serialized in an undefined, random order, and the order will be different each time.
+type M map[string]interface{}
+
+// An A represents a BSON array. This type can be used to represent a BSON array in a concise and
+// readable manner. It should generally be used when serializing to BSON. For deserializing, the
+// Raw type should be used.
+//
+// Example usage:
+//
+// 		primitive.A{"bar", "world", 3.14159, primitive.D{{"qux", 12345}}}
+//
+type A []interface{}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/primitive_codecs.go b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive_codecs.go
new file mode 100644
index 0000000..6536b2e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive_codecs.go
@@ -0,0 +1,111 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"errors"
+	"reflect"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+)
+
+var primitiveCodecs PrimitiveCodecs
+
+// PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types
+// defined in this package.
+type PrimitiveCodecs struct{}
+
+// RegisterPrimitiveCodecs will register the encode and decode methods attached to PrimitiveCodecs
+// with the provided RegistryBuilder. It panics if rb is nil.
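+//
+// For example (a sketch):
+//
+// 		rb := bsoncodec.NewRegistryBuilder()
+// 		bson.PrimitiveCodecs{}.RegisterPrimitiveCodecs(rb)
+// 		reg := rb.Build()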
+func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) {
+	if rb == nil {
+		panic(errors.New("argument to RegisterPrimitiveCodecs must not be nil"))
+	}
+
+	rb.
+		RegisterEncoder(tRawValue, bsoncodec.ValueEncoderFunc(pc.RawValueEncodeValue)).
+		RegisterEncoder(tRaw, bsoncodec.ValueEncoderFunc(pc.RawEncodeValue)).
+		RegisterDecoder(tRawValue, bsoncodec.ValueDecoderFunc(pc.RawValueDecodeValue)).
+		RegisterDecoder(tRaw, bsoncodec.ValueDecoderFunc(pc.RawDecodeValue))
+}
+
+// RawValueEncodeValue is the ValueEncoderFunc for RawValue.
+func (PrimitiveCodecs) RawValueEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tRawValue {
+		return bsoncodec.ValueEncoderError{Name: "RawValueEncodeValue", Types: []reflect.Type{tRawValue}, Received: val}
+	}
+
+	rawvalue := val.Interface().(RawValue)
+
+	return bsonrw.Copier{}.CopyValueFromBytes(vw, rawvalue.Type, rawvalue.Value)
+}
+
+// RawValueDecodeValue is the ValueDecoderFunc for RawValue.
+func (PrimitiveCodecs) RawValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tRawValue {
+		return bsoncodec.ValueDecoderError{Name: "RawValueDecodeValue", Types: []reflect.Type{tRawValue}, Received: val}
+	}
+
+	t, value, err := bsonrw.Copier{}.CopyValueToBytes(vr)
+	if err != nil {
+		return err
+	}
+
+	val.Set(reflect.ValueOf(RawValue{Type: t, Value: value}))
+	return nil
+}
+
+// RawEncodeValue is the ValueEncoderFunc for Raw.
+func (PrimitiveCodecs) RawEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tRaw {
+		return bsoncodec.ValueEncoderError{Name: "RawEncodeValue", Types: []reflect.Type{tRaw}, Received: val}
+	}
+
+	rdr := val.Interface().(Raw)
+
+	return bsonrw.Copier{}.CopyDocumentFromBytes(vw, rdr)
+}
+
+// RawDecodeValue is the ValueDecoderFunc for Raw.
+func (PrimitiveCodecs) RawDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tRaw {
+		return bsoncodec.ValueDecoderError{Name: "RawDecodeValue", Types: []reflect.Type{tRaw}, Received: val}
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(val.Type(), 0, 0))
+	}
+
+	val.SetLen(0)
+
+	rdr, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(Raw), vr)
+	val.Set(reflect.ValueOf(rdr))
+	return err
+}
+
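+// encodeRaw writes each element of raw to dw and then closes the document.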
+func (pc PrimitiveCodecs) encodeRaw(ec bsoncodec.EncodeContext, dw bsonrw.DocumentWriter, raw Raw) error {
+	var copier bsonrw.Copier
+	elems, err := raw.Elements()
+	if err != nil {
+		return err
+	}
+	for _, elem := range elems {
+		dvw, err := dw.WriteDocumentElement(elem.Key())
+		if err != nil {
+			return err
+		}
+
+		val := elem.Value()
+		err = copier.CopyValueFromBytes(dvw, val.Type, val.Value)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/raw.go b/vendor/github.com/mongodb/mongo-go-driver/bson/raw.go
new file mode 100644
index 0000000..abd6e38
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/raw.go
@@ -0,0 +1,92 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"errors"
+	"io"
+
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// ErrNilReader indicates that an operation was attempted on a nil bson.Reader.
+var ErrNilReader = errors.New("nil reader")
+var errValidateDone = errors.New("validation loop complete")
+
+// Raw is a wrapper around a byte slice. It will interpret the slice as a
+// BSON document. This type is a wrapper around a bsoncore.Document. Errors returned from the
+// methods on this type and associated types come from the bsoncore package.
+type Raw []byte
+
+// NewFromIOReader reads in a document from the given io.Reader and constructs a Raw from
+// it.
+func NewFromIOReader(r io.Reader) (Raw, error) {
+	doc, err := bsoncore.NewDocumentFromReader(r)
+	return Raw(doc), err
+}
+
+// Validate validates the document. This method only validates the first document in
+// the slice; to validate other documents, the slice must be resliced.
+func (r Raw) Validate() (err error) { return bsoncore.Document(r).Validate() }
+
+// Lookup searches the document, potentially recursively, for the given key. If
+// there are multiple keys provided, this method will recurse down, as long as
+// the top and intermediate nodes are either documents or arrays. If an error
+// occurs or if the value doesn't exist, an empty RawValue is returned.
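+//
+// For example (a sketch; assumes r holds {"a": {"b": 1}}):
+//
+// 		val := r.Lookup("a", "b")
+// 		i32, ok := val.Int32OK()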
+func (r Raw) Lookup(key ...string) RawValue {
+	return convertFromCoreValue(bsoncore.Document(r).Lookup(key...))
+}
+
+// LookupErr searches the document and potentially subdocuments or arrays for the
+// provided key. Each key provided to this method represents a layer of depth.
+func (r Raw) LookupErr(key ...string) (RawValue, error) {
+	val, err := bsoncore.Document(r).LookupErr(key...)
+	return convertFromCoreValue(val), err
+}
+
+// Elements returns this document as a slice of elements. The returned slice will contain valid
+// elements. If the document is not valid, the elements up to the invalid point will be returned
+// along with an error.
+func (r Raw) Elements() ([]RawElement, error) {
+	elems, err := bsoncore.Document(r).Elements()
+	relems := make([]RawElement, 0, len(elems))
+	for _, elem := range elems {
+		relems = append(relems, RawElement(elem))
+	}
+	return relems, err
+}
+
+// Values returns this document as a slice of values. The returned slice will contain valid values.
+// If the document is not valid, the values up to the invalid point will be returned along with an
+// error.
+func (r Raw) Values() ([]RawValue, error) {
+	vals, err := bsoncore.Document(r).Values()
+	rvals := make([]RawValue, 0, len(vals))
+	for _, val := range vals {
+		rvals = append(rvals, convertFromCoreValue(val))
+	}
+	return rvals, err
+}
+
+// Index searches for and retrieves the element at the given index. This method will panic if
+// the document is invalid or if the index is out of bounds.
+func (r Raw) Index(index uint) RawElement { return RawElement(bsoncore.Document(r).Index(index)) }
+
+// IndexErr searches for and retrieves the element at the given index.
+func (r Raw) IndexErr(index uint) (RawElement, error) {
+	elem, err := bsoncore.Document(r).IndexErr(index)
+	return RawElement(elem), err
+}
+
+// String implements the fmt.Stringer interface.
+func (r Raw) String() string { return bsoncore.Document(r).String() }
+
+// readi32 is a helper function for reading an int32 from a slice of bytes.
+func readi32(b []byte) int32 {
+	_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
+	return int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/raw_element.go b/vendor/github.com/mongodb/mongo-go-driver/bson/raw_element.go
new file mode 100644
index 0000000..2a01390
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/raw_element.go
@@ -0,0 +1,51 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// RawElement represents a BSON element in byte form. This type provides a simple way to
+// transform a slice of bytes into a BSON element and extract information from it.
+//
+// RawElement is a thin wrapper around a bsoncore.Element.
+type RawElement []byte
+
+// Key returns the key for this element. If the element is not valid, this method returns an empty
+// string. If knowing if the element is valid is important, use KeyErr.
+func (re RawElement) Key() string { return bsoncore.Element(re).Key() }
+
+// KeyErr returns the key for this element, returning an error if the element is not valid.
+func (re RawElement) KeyErr() (string, error) { return bsoncore.Element(re).KeyErr() }
+
+// Value returns the value of this element. If the element is not valid, this method returns an
+// empty Value. If knowing if the element is valid is important, use ValueErr.
+func (re RawElement) Value() RawValue { return convertFromCoreValue(bsoncore.Element(re).Value()) }
+
+// ValueErr returns the value for this element, returning an error if the element is not valid.
+func (re RawElement) ValueErr() (RawValue, error) {
+	val, err := bsoncore.Element(re).ValueErr()
+	return convertFromCoreValue(val), err
+}
+
+// Validate ensures re is a valid BSON element.
+func (re RawElement) Validate() error { return bsoncore.Element(re).Validate() }
+
+// String implements the fmt.Stringer interface. The output will be in extended JSON format.
+func (re RawElement) String() string {
+	doc := bsoncore.BuildDocument(nil, re)
+	j, err := MarshalExtJSON(Raw(doc), true, false)
+	if err != nil {
+		return "<malformed>"
+	}
+	return string(j)
+}
+
+// DebugString outputs a human readable version of RawElement. It will attempt to stringify the
+// valid components of the element even if the entire element is not valid.
+func (re RawElement) DebugString() string { return bsoncore.Element(re).DebugString() }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/raw_value.go b/vendor/github.com/mongodb/mongo-go-driver/bson/raw_value.go
new file mode 100644
index 0000000..2803b80
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/raw_value.go
@@ -0,0 +1,287 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// ErrNilContext is returned when the provided DecodeContext is nil.
+var ErrNilContext = errors.New("DecodeContext cannot be nil")
+
+// ErrNilRegistry is returned when the provided registry is nil.
+var ErrNilRegistry = errors.New("Registry cannot be nil")
+
+// RawValue represents a BSON value in byte form. It can be used to hold unprocessed BSON or to
+// defer processing of BSON. Type is the BSON type of the value and Value is the raw bytes that
+// represent the element.
+//
+// This type wraps bsoncore.Value for most of its functionality.
+type RawValue struct {
+	Type  bsontype.Type
+	Value []byte
+
+	r *bsoncodec.Registry
+}
+
+// Unmarshal deserializes BSON into the provided val. If RawValue cannot be unmarshaled into val, an
+// error is returned. This method will use the registry attached to the RawValue (set when the
+// RawValue was created from partial BSON processing), or the default registry if none is attached.
+// Users wishing to specify the registry to use should use UnmarshalWithRegistry.
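+//
+// For example (a sketch; assumes doc is a Raw with a string field "foo"):
+//
+// 		var s string
+// 		err := doc.Lookup("foo").Unmarshal(&s)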
+func (rv RawValue) Unmarshal(val interface{}) error {
+	reg := rv.r
+	if reg == nil {
+		reg = DefaultRegistry
+	}
+	return rv.UnmarshalWithRegistry(reg, val)
+}
+
+// Equal compares rv and rv2 and returns true if they are equal.
+func (rv RawValue) Equal(rv2 RawValue) bool {
+	if rv.Type != rv2.Type {
+		return false
+	}
+
+	if !bytes.Equal(rv.Value, rv2.Value) {
+		return false
+	}
+
+	return true
+}
+
+// UnmarshalWithRegistry performs the same unmarshalling as Unmarshal but uses the provided registry
+// instead of the one attached or the default registry.
+func (rv RawValue) UnmarshalWithRegistry(r *bsoncodec.Registry, val interface{}) error {
+	if r == nil {
+		return ErrNilRegistry
+	}
+
+	vr := bsonrw.NewBSONValueReader(rv.Type, rv.Value)
+	rval := reflect.ValueOf(val)
+	if rval.Kind() != reflect.Ptr {
+		return fmt.Errorf("argument to Unmarshal* must be a pointer to a type, but got %v", rval)
+	}
+	rval = rval.Elem()
+	dec, err := r.LookupDecoder(rval.Type())
+	if err != nil {
+		return err
+	}
+	return dec.DecodeValue(bsoncodec.DecodeContext{Registry: r}, vr, rval)
+}
+
+// UnmarshalWithContext performs the same unmarshalling as Unmarshal but uses the provided DecodeContext
+// instead of the one attached or the default registry.
+func (rv RawValue) UnmarshalWithContext(dc *bsoncodec.DecodeContext, val interface{}) error {
+	if dc == nil {
+		return ErrNilContext
+	}
+
+	vr := bsonrw.NewBSONValueReader(rv.Type, rv.Value)
+	rval := reflect.ValueOf(val)
+	if rval.Kind() != reflect.Ptr {
+		return fmt.Errorf("argument to Unmarshal* must be a pointer to a type, but got %v", rval)
+	}
+	rval = rval.Elem()
+	dec, err := dc.LookupDecoder(rval.Type())
+	if err != nil {
+		return err
+	}
+	return dec.DecodeValue(*dc, vr, rval)
+}
+
+func convertFromCoreValue(v bsoncore.Value) RawValue { return RawValue{Type: v.Type, Value: v.Data} }
+func convertToCoreValue(v RawValue) bsoncore.Value   { return bsoncore.Value{Type: v.Type, Data: v.Value} }
+
+// Validate ensures the value is a valid BSON value.
+func (rv RawValue) Validate() error { return convertToCoreValue(rv).Validate() }
+
+// IsNumber returns true if the type of rv is a numeric BSON type.
+func (rv RawValue) IsNumber() bool { return convertToCoreValue(rv).IsNumber() }
+
+// String implements the fmt.Stringer interface. This method will return values in extended JSON
+// format. If the value is not valid, this returns an empty string.
+func (rv RawValue) String() string { return convertToCoreValue(rv).String() }
+
+// DebugString outputs a human readable version of the RawValue. It will attempt to stringify the
+// valid components of the value even if the entire value is not valid.
+func (rv RawValue) DebugString() string { return convertToCoreValue(rv).DebugString() }
+
+// Double returns the float64 value for this element.
+// It panics if rv's BSON type is not bsontype.Double.
+func (rv RawValue) Double() float64 { return convertToCoreValue(rv).Double() }
+
+// DoubleOK is the same as Double, but returns a boolean instead of panicking.
+func (rv RawValue) DoubleOK() (float64, bool) { return convertToCoreValue(rv).DoubleOK() }
+
+// StringValue returns the string value for this element.
+// It panics if rv's BSON type is not bsontype.String.
+//
+// NOTE: This method is called StringValue to avoid a collision with the String method which
+// implements the fmt.Stringer interface.
+func (rv RawValue) StringValue() string { return convertToCoreValue(rv).StringValue() }
+
+// StringValueOK is the same as StringValue, but returns a boolean instead of
+// panicking.
+func (rv RawValue) StringValueOK() (string, bool) { return convertToCoreValue(rv).StringValueOK() }
+
+// Document returns the BSON document the Value represents as a Document. It panics if the
+// value is a BSON type other than document.
+func (rv RawValue) Document() Raw { return Raw(convertToCoreValue(rv).Document()) }
+
+// DocumentOK is the same as Document, except it returns a boolean
+// instead of panicking.
+func (rv RawValue) DocumentOK() (Raw, bool) {
+	doc, ok := convertToCoreValue(rv).DocumentOK()
+	return Raw(doc), ok
+}
+
+// Array returns the BSON array the Value represents as an Array. It panics if the
+// value is a BSON type other than array.
+func (rv RawValue) Array() Raw { return Raw(convertToCoreValue(rv).Array()) }
+
+// ArrayOK is the same as Array, except it returns a boolean instead
+// of panicking.
+func (rv RawValue) ArrayOK() (Raw, bool) {
+	doc, ok := convertToCoreValue(rv).ArrayOK()
+	return Raw(doc), ok
+}
+
+// Binary returns the BSON binary value the Value represents. It panics if the value is a BSON type
+// other than binary.
+func (rv RawValue) Binary() (subtype byte, data []byte) { return convertToCoreValue(rv).Binary() }
+
+// BinaryOK is the same as Binary, except it returns a boolean instead of
+// panicking.
+func (rv RawValue) BinaryOK() (subtype byte, data []byte, ok bool) {
+	return convertToCoreValue(rv).BinaryOK()
+}
+
+// ObjectID returns the BSON objectid value the Value represents. It panics if the value is a BSON
+// type other than objectid.
+func (rv RawValue) ObjectID() primitive.ObjectID { return convertToCoreValue(rv).ObjectID() }
+
+// ObjectIDOK is the same as ObjectID, except it returns a boolean instead of
+// panicking.
+func (rv RawValue) ObjectIDOK() (primitive.ObjectID, bool) { return convertToCoreValue(rv).ObjectIDOK() }
+
+// Boolean returns the boolean value the Value represents. It panics if the
+// value is a BSON type other than boolean.
+func (rv RawValue) Boolean() bool { return convertToCoreValue(rv).Boolean() }
+
+// BooleanOK is the same as Boolean, except it returns a boolean instead of
+// panicking.
+func (rv RawValue) BooleanOK() (bool, bool) { return convertToCoreValue(rv).BooleanOK() }
+
+// DateTime returns the BSON datetime value the Value represents as a
+// Unix timestamp in milliseconds. It panics if the value is a BSON type other than datetime.
+func (rv RawValue) DateTime() int64 { return convertToCoreValue(rv).DateTime() }
+
+// DateTimeOK is the same as DateTime, except it returns a boolean instead of
+// panicking.
+func (rv RawValue) DateTimeOK() (int64, bool) { return convertToCoreValue(rv).DateTimeOK() }
+
+// Time returns the BSON datetime value the Value represents. It panics if the value is a BSON
+// type other than datetime.
+func (rv RawValue) Time() time.Time { return convertToCoreValue(rv).Time() }
+
+// TimeOK is the same as Time, except it returns a boolean instead of
+// panicking.
+func (rv RawValue) TimeOK() (time.Time, bool) { return convertToCoreValue(rv).TimeOK() }
+
+// Regex returns the BSON regex value the Value represents. It panics if the value is a BSON
+// type other than regex.
+func (rv RawValue) Regex() (pattern, options string) { return convertToCoreValue(rv).Regex() }
+
+// RegexOK is the same as Regex, except it returns a boolean instead of
+// panicking.
+func (rv RawValue) RegexOK() (pattern, options string, ok bool) {
+	return convertToCoreValue(rv).RegexOK()
+}
+
+// DBPointer returns the BSON dbpointer value the Value represents. It panics if the value is a BSON
+// type other than DBPointer.
+func (rv RawValue) DBPointer() (string, primitive.ObjectID) { return convertToCoreValue(rv).DBPointer() }
+
+// DBPointerOK is the same as DBPointer, except that it returns a boolean
+// instead of panicking.
+func (rv RawValue) DBPointerOK() (string, primitive.ObjectID, bool) {
+	return convertToCoreValue(rv).DBPointerOK()
+}
+
+// JavaScript returns the BSON JavaScript code value the Value represents. It panics if the value is
+// a BSON type other than JavaScript code.
+func (rv RawValue) JavaScript() string { return convertToCoreValue(rv).JavaScript() }
+
+// JavaScriptOK is the same as JavaScript, except that it returns a boolean
+// instead of panicking.
+func (rv RawValue) JavaScriptOK() (string, bool) { return convertToCoreValue(rv).JavaScriptOK() }
+
+// Symbol returns the BSON symbol value the Value represents. It panics if the value is a BSON
+// type other than symbol.
+func (rv RawValue) Symbol() string { return convertToCoreValue(rv).Symbol() }
+
+// SymbolOK is the same as Symbol, except that it returns a boolean
+// instead of panicking.
+func (rv RawValue) SymbolOK() (string, bool) { return convertToCoreValue(rv).SymbolOK() }
+
+// CodeWithScope returns the BSON JavaScript code with scope the Value represents.
+// It panics if the value is a BSON type other than JavaScript code with scope.
+func (rv RawValue) CodeWithScope() (string, Raw) {
+	code, scope := convertToCoreValue(rv).CodeWithScope()
+	return code, Raw(scope)
+}
+
+// CodeWithScopeOK is the same as CodeWithScope, except that it returns a boolean instead of
+// panicking.
+func (rv RawValue) CodeWithScopeOK() (string, Raw, bool) {
+	code, scope, ok := convertToCoreValue(rv).CodeWithScopeOK()
+	return code, Raw(scope), ok
+}
+
+// Int32 returns the int32 the Value represents. It panics if the value is a BSON type other than
+// int32.
+func (rv RawValue) Int32() int32 { return convertToCoreValue(rv).Int32() }
+
+// Int32OK is the same as Int32, except that it returns a boolean instead of
+// panicking.
+func (rv RawValue) Int32OK() (int32, bool) { return convertToCoreValue(rv).Int32OK() }
+
+// Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a
+// BSON type other than timestamp.
+func (rv RawValue) Timestamp() (t, i uint32) { return convertToCoreValue(rv).Timestamp() }
+
+// TimestampOK is the same as Timestamp, except that it returns a boolean
+// instead of panicking.
+func (rv RawValue) TimestampOK() (t, i uint32, ok bool) { return convertToCoreValue(rv).TimestampOK() }
+
+// Int64 returns the int64 the Value represents. It panics if the value is a BSON type other than
+// int64.
+func (rv RawValue) Int64() int64 { return convertToCoreValue(rv).Int64() }
+
+// Int64OK is the same as Int64, except that it returns a boolean instead of
+// panicking.
+func (rv RawValue) Int64OK() (int64, bool) { return convertToCoreValue(rv).Int64OK() }
+
+// Decimal128 returns the decimal the Value represents. It panics if the value is a BSON type other than
+// decimal.
+func (rv RawValue) Decimal128() primitive.Decimal128 { return convertToCoreValue(rv).Decimal128() }
+
+// Decimal128OK is the same as Decimal128, except that it returns a boolean
+// instead of panicking.
+func (rv RawValue) Decimal128OK() (primitive.Decimal128, bool) {
+	return convertToCoreValue(rv).Decimal128OK()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/registry.go b/vendor/github.com/mongodb/mongo-go-driver/bson/registry.go
new file mode 100644
index 0000000..c9ef029
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/registry.go
@@ -0,0 +1,24 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import "github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+
+// DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the
+// primitive codecs.
+var DefaultRegistry = NewRegistryBuilder().Build()
+
+// NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and
+// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the
+// PrimitiveCodecs type in this package.
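+//
+// For example (a sketch):
+//
+// 		reg := bson.NewRegistryBuilder().Build()
+// 		b, err := bson.MarshalWithRegistry(reg, bson.D{{"x", int32(1)}})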
+func NewRegistryBuilder() *bsoncodec.RegistryBuilder {
+	rb := bsoncodec.NewRegistryBuilder()
+	bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
+	bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
+	primitiveCodecs.RegisterPrimitiveCodecs(rb)
+	return rb
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/types.go b/vendor/github.com/mongodb/mongo-go-driver/bson/types.go
new file mode 100644
index 0000000..7109254
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/types.go
@@ -0,0 +1,85 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"reflect"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// These constants uniquely refer to each BSON type.
+const (
+	TypeDouble           = bsontype.Double
+	TypeString           = bsontype.String
+	TypeEmbeddedDocument = bsontype.EmbeddedDocument
+	TypeArray            = bsontype.Array
+	TypeBinary           = bsontype.Binary
+	TypeUndefined        = bsontype.Undefined
+	TypeObjectID         = bsontype.ObjectID
+	TypeBoolean          = bsontype.Boolean
+	TypeDateTime         = bsontype.DateTime
+	TypeNull             = bsontype.Null
+	TypeRegex            = bsontype.Regex
+	TypeDBPointer        = bsontype.DBPointer
+	TypeJavaScript       = bsontype.JavaScript
+	TypeSymbol           = bsontype.Symbol
+	TypeCodeWithScope    = bsontype.CodeWithScope
+	TypeInt32            = bsontype.Int32
+	TypeTimestamp        = bsontype.Timestamp
+	TypeInt64            = bsontype.Int64
+	TypeDecimal128       = bsontype.Decimal128
+	TypeMinKey           = bsontype.MinKey
+	TypeMaxKey           = bsontype.MaxKey
+)
+
+var tBinary = reflect.TypeOf(primitive.Binary{})
+var tBool = reflect.TypeOf(false)
+var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{})
+var tDBPointer = reflect.TypeOf(primitive.DBPointer{})
+var tDecimal = reflect.TypeOf(primitive.Decimal128{})
+var tD = reflect.TypeOf(D{})
+var tA = reflect.TypeOf(A{})
+var tDateTime = reflect.TypeOf(primitive.DateTime(0))
+var tUndefined = reflect.TypeOf(primitive.Undefined{})
+var tNull = reflect.TypeOf(primitive.Null{})
+var tRawValue = reflect.TypeOf(RawValue{})
+var tFloat32 = reflect.TypeOf(float32(0))
+var tFloat64 = reflect.TypeOf(float64(0))
+var tInt = reflect.TypeOf(int(0))
+var tInt8 = reflect.TypeOf(int8(0))
+var tInt16 = reflect.TypeOf(int16(0))
+var tInt32 = reflect.TypeOf(int32(0))
+var tInt64 = reflect.TypeOf(int64(0))
+var tJavaScript = reflect.TypeOf(primitive.JavaScript(""))
+var tOID = reflect.TypeOf(primitive.ObjectID{})
+var tRaw = reflect.TypeOf(Raw(nil))
+var tRegex = reflect.TypeOf(primitive.Regex{})
+var tString = reflect.TypeOf("")
+var tSymbol = reflect.TypeOf(primitive.Symbol(""))
+var tTime = reflect.TypeOf(time.Time{})
+var tTimestamp = reflect.TypeOf(primitive.Timestamp{})
+var tUint = reflect.TypeOf(uint(0))
+var tUint8 = reflect.TypeOf(uint8(0))
+var tUint16 = reflect.TypeOf(uint16(0))
+var tUint32 = reflect.TypeOf(uint32(0))
+var tUint64 = reflect.TypeOf(uint64(0))
+var tMinKey = reflect.TypeOf(primitive.MinKey{})
+var tMaxKey = reflect.TypeOf(primitive.MaxKey{})
+
+var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem()
+var tEmptySlice = reflect.TypeOf([]interface{}(nil))
+
+var zeroVal reflect.Value
+
+// zeroEpochMs is the number of milliseconds between the zero time and the
+// Unix epoch. It is useful for making sure that we convert time.Time objects
+// correctly to match the legacy bson library's handling of time.Time values.
+const zeroEpochMs = int64(62135596800000)
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/unmarshal.go b/vendor/github.com/mongodb/mongo-go-driver/bson/unmarshal.go
new file mode 100644
index 0000000..2b3cca8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/unmarshal.go
@@ -0,0 +1,101 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"bytes"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+// Unmarshaler is an interface implemented by types that can unmarshal a BSON
+// document representation of themselves. The BSON bytes can be assumed to be
+// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data
+// after returning.
+type Unmarshaler interface {
+	UnmarshalBSON([]byte) error
+}
+
+// ValueUnmarshaler is an interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+type ValueUnmarshaler interface {
+	UnmarshalBSONValue(bsontype.Type, []byte) error
+}
+
+// Unmarshal parses the BSON-encoded data and stores the result in the value
+// pointed to by val. If val is nil or not a pointer, Unmarshal returns
+// InvalidUnmarshalError.
+func Unmarshal(data []byte, val interface{}) error {
+	return UnmarshalWithRegistry(DefaultRegistry, data, val)
+}
+
+// UnmarshalWithRegistry parses the BSON-encoded data using Registry r and
+// stores the result in the value pointed to by val. If val is nil or not
+// a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError.
+func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{}) error {
+	vr := bsonrw.NewBSONDocumentReader(data)
+	return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val)
+}
+
+// UnmarshalWithContext parses the BSON-encoded data using DecodeContext dc and
+// stores the result in the value pointed to by val. If val is nil or not
+// a pointer, UnmarshalWithContext returns InvalidUnmarshalError.
+func UnmarshalWithContext(dc bsoncodec.DecodeContext, data []byte, val interface{}) error {
+	vr := bsonrw.NewBSONDocumentReader(data)
+	return unmarshalFromReader(dc, vr, val)
+}
+
+// UnmarshalExtJSON parses the extended JSON-encoded data and stores the result
+// in the value pointed to by val. If val is nil or not a pointer,
+// UnmarshalExtJSON returns InvalidUnmarshalError.
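+//
+// For example (a sketch):
+//
+// 		var res bson.M
+// 		err := bson.UnmarshalExtJSON([]byte(`{"hello":"world"}`), false, &res)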
+func UnmarshalExtJSON(data []byte, canonical bool, val interface{}) error {
+	return UnmarshalExtJSONWithRegistry(DefaultRegistry, data, canonical, val)
+}
+
+// UnmarshalExtJSONWithRegistry parses the extended JSON-encoded data using
+// Registry r and stores the result in the value pointed to by val. If val is
+// nil or not a pointer, UnmarshalExtJSONWithRegistry returns InvalidUnmarshalError.
+func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical bool, val interface{}) error {
+	ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical)
+	if err != nil {
+		return err
+	}
+
+	return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, ejvr, val)
+}
+
+// UnmarshalExtJSONWithContext parses the extended JSON-encoded data using
+// DecodeContext dc and stores the result in the value pointed to by val. If val is
+// nil or not a pointer, UnmarshalExtJSONWithContext returns InvalidUnmarshalError.
+func UnmarshalExtJSONWithContext(dc bsoncodec.DecodeContext, data []byte, canonical bool, val interface{}) error {
+	ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical)
+	if err != nil {
+		return err
+	}
+
+	return unmarshalFromReader(dc, ejvr, val)
+}
+
+func unmarshalFromReader(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val interface{}) error {
+	dec := decPool.Get().(*Decoder)
+	defer decPool.Put(dec)
+
+	err := dec.Reset(vr)
+	if err != nil {
+		return err
+	}
+	err = dec.SetContext(dc)
+	if err != nil {
+		return err
+	}
+
+	return dec.Decode(val)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/etc/generate-notices.pl b/vendor/github.com/mongodb/mongo-go-driver/etc/generate-notices.pl
new file mode 100755
index 0000000..bebc7db
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/etc/generate-notices.pl
@@ -0,0 +1,99 @@
+#!/usr/bin/env perl
+use v5.10;
+use strict;
+use warnings;
+use utf8;
+use open qw/:std :utf8/;
+use File::Find qw/find/;
+
+my @license_files;
+
+find(
+    sub {
+        return unless lc($_) eq 'license';
+        push @license_files, [ $File::Find::dir, $File::Find::name ];
+    },
+    'vendor'
+);
+
+print forked_licenses();
+
+for my $entry (sort { $a->[0] cmp $b->[0] } @license_files) {
+    ( my $package_name = $entry->[0] ) =~ s{vendor/}{};
+    my $license_text = do { local ( @ARGV, $/ ) = $entry->[1]; <> };
+    $license_text =~ s/ +$//mg;
+    say "";
+    say "-" x 70;
+    say "License notice for $package_name";
+    say "-" x 70;
+    say "";
+    print $license_text;
+}
+
+# These licenses are the originals for forked code; they must
+# be included along with the licenses from the vendor directory
+sub forked_licenses {
+    return <<'HERE';
+---------------------------------------------------------------------
+License notice for gopkg.in/mgo.v2/bson
+---------------------------------------------------------------------
+
+BSON library for Go
+
+Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+---------------------------------------------------------------------
+License notice for JSON and CSV code from github.com/golang/go
+---------------------------------------------------------------------
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+HERE
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/event/monitoring.go b/vendor/github.com/mongodb/mongo-go-driver/event/monitoring.go
new file mode 100644
index 0000000..293fd40
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/event/monitoring.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package event
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+)
+
+// CommandStartedEvent represents an event generated when a command is sent to a server.
+type CommandStartedEvent struct {
+	Command      bson.Raw
+	DatabaseName string
+	CommandName  string
+	RequestID    int64
+	ConnectionID string
+}
+
+// CommandFinishedEvent represents a generic command finishing.
+type CommandFinishedEvent struct {
+	DurationNanos int64
+	CommandName   string
+	RequestID     int64
+	ConnectionID  string
+}
+
+// CommandSucceededEvent represents an event generated when a command's execution succeeds.
+type CommandSucceededEvent struct {
+	CommandFinishedEvent
+	Reply bson.Raw
+}
+
+// CommandFailedEvent represents an event generated when a command's execution fails.
+type CommandFailedEvent struct {
+	CommandFinishedEvent
+	Failure string
+}
+
+// CommandMonitor represents a monitor that is triggered for different events.
+type CommandMonitor struct {
+	Started   func(context.Context, *CommandStartedEvent)
+	Succeeded func(context.Context, *CommandSucceededEvent)
+	Failed    func(context.Context, *CommandFailedEvent)
+}
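+
+// newRecordingMonitor is an illustrative sketch, not part of the upstream
+// file: it shows how the three CommandMonitor callbacks are intended to be
+// populated, here recording command names into caller-supplied slices.
+func newRecordingMonitor(started, finished *[]string) *CommandMonitor {
+	return &CommandMonitor{
+		Started: func(_ context.Context, evt *CommandStartedEvent) {
+			*started = append(*started, evt.CommandName)
+		},
+		Succeeded: func(_ context.Context, evt *CommandSucceededEvent) {
+			// CommandName is promoted from the embedded CommandFinishedEvent.
+			*finished = append(*finished, evt.CommandName)
+		},
+		Failed: func(_ context.Context, evt *CommandFailedEvent) {
+			*finished = append(*finished, evt.CommandName)
+		},
+	}
+}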
diff --git a/vendor/github.com/mongodb/mongo-go-driver/internal/channel_connection.go b/vendor/github.com/mongodb/mongo-go-driver/internal/channel_connection.go
new file mode 100644
index 0000000..770cb33
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/internal/channel_connection.go
@@ -0,0 +1,74 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package internal
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// ChannelConn implements the connection.Connection interface by reading and
+// writing wire messages to a channel.
+type ChannelConn struct {
+	WriteErr error
+	Written  chan wiremessage.WireMessage
+	ReadResp chan wiremessage.WireMessage
+	ReadErr  chan error
+}
+
+// WriteWireMessage sends wm to the Written channel, recording an error if the
+// channel is full.
+func (c *ChannelConn) WriteWireMessage(ctx context.Context, wm wiremessage.WireMessage) error {
+	select {
+	case c.Written <- wm:
+	default:
+		c.WriteErr = errors.New("could not write wiremessage to written channel")
+	}
+	return c.WriteErr
+}
+
+// ReadWireMessage returns the next queued wire message or error, or returns
+// early if ctx is done.
+func (c *ChannelConn) ReadWireMessage(ctx context.Context) (wiremessage.WireMessage, error) {
+	var wm wiremessage.WireMessage
+	var err error
+	select {
+	case wm = <-c.ReadResp:
+	case err = <-c.ReadErr:
+	case <-ctx.Done():
+	}
+	return wm, err
+}
+
+// Close is a no-op.
+func (c *ChannelConn) Close() error {
+	return nil
+}
+
+// Expired always reports false.
+func (c *ChannelConn) Expired() bool {
+	return false
+}
+
+// Alive always reports true.
+func (c *ChannelConn) Alive() bool {
+	return true
+}
+
+// ID returns a fixed fake connection ID.
+func (c *ChannelConn) ID() string {
+	return "faked"
+}
+
+// MakeReply creates an OP_REPLY wire message from a BSON document.
+func MakeReply(doc bsonx.Doc) (wiremessage.WireMessage, error) {
+	rdr, err := doc.MarshalBSON()
+	if err != nil {
+		return nil, fmt.Errorf("could not create document: %v", err)
+	}
+	return wiremessage.Reply{
+		NumberReturned: 1,
+		Documents:      []bson.Raw{rdr},
+	}, nil
+}
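+
+// newTestChannelConn is an illustrative sketch, not part of the upstream
+// file: it builds a ChannelConn with buffered channels so a test can enqueue
+// one reply and observe one written message. The buffer size of 1 is an
+// assumption.
+func newTestChannelConn() *ChannelConn {
+	return &ChannelConn{
+		Written:  make(chan wiremessage.WireMessage, 1),
+		ReadResp: make(chan wiremessage.WireMessage, 1),
+		ReadErr:  make(chan error, 1),
+	}
+}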
diff --git a/vendor/github.com/mongodb/mongo-go-driver/internal/const.go b/vendor/github.com/mongodb/mongo-go-driver/internal/const.go
new file mode 100644
index 0000000..7100e31
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/internal/const.go
@@ -0,0 +1,10 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package internal
+
+// Version is the current version of the driver.
+var Version = "local build"
diff --git a/vendor/github.com/mongodb/mongo-go-driver/internal/error.go b/vendor/github.com/mongodb/mongo-go-driver/internal/error.go
new file mode 100644
index 0000000..6a105af
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/internal/error.go
@@ -0,0 +1,119 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package internal
+
+import (
+	"fmt"
+)
+
+// WrappedError represents an error that contains another error.
+type WrappedError interface {
+	// Message gets the basic message of the error.
+	Message() string
+	// Inner gets the inner error if one exists.
+	Inner() error
+}
+
+// RolledUpErrorMessage gets a flattened error message.
+func RolledUpErrorMessage(err error) string {
+	if wrappedErr, ok := err.(WrappedError); ok {
+		inner := wrappedErr.Inner()
+		if inner != nil {
+			return fmt.Sprintf("%s: %s", wrappedErr.Message(), RolledUpErrorMessage(inner))
+		}
+
+		return wrappedErr.Message()
+	}
+
+	return err.Error()
+}
+
+// UnwrapError attempts to unwrap the error down to its root cause.
+func UnwrapError(err error) error {
+	switch tErr := err.(type) {
+	case WrappedError:
+		return UnwrapError(tErr.Inner())
+	case *multiError:
+		return UnwrapError(tErr.errors[0])
+	}
+
+	return err
+}
+
+// WrapError wraps an error with a message.
+func WrapError(inner error, message string) error {
+	return &wrappedError{message, inner}
+}
+
+// WrapErrorf wraps an error with a message.
+func WrapErrorf(inner error, format string, args ...interface{}) error {
+	return &wrappedError{fmt.Sprintf(format, args...), inner}
+}
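+
+// exampleWrap is an illustrative sketch, not part of the upstream file: it
+// wraps an error twice and shows how the helpers relate. For an inner error
+// "boom", the rolled-up message is "open config: read file: boom" and
+// UnwrapError recovers the original inner error.
+func exampleWrap(inner error) (string, error) {
+	err := WrapError(WrapError(inner, "read file"), "open config")
+	return RolledUpErrorMessage(err), UnwrapError(err)
+}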
+
+// MultiError combines multiple errors into a single error. If there are no errors,
+// nil is returned. If there is 1 error, it is returned. Otherwise, they are combined.
+func MultiError(errors ...error) error {
+	// remove nils from the error list
+	var nonNils []error
+	for _, e := range errors {
+		if e != nil {
+			nonNils = append(nonNils, e)
+		}
+	}
+
+	switch len(nonNils) {
+	case 0:
+		return nil
+	case 1:
+		return nonNils[0]
+	default:
+		return &multiError{
+			message: "multiple errors encountered",
+			errors:  nonNils,
+		}
+	}
+}
+
+type multiError struct {
+	message string
+	errors  []error
+}
+
+func (e *multiError) Message() string {
+	return e.message
+}
+
+func (e *multiError) Error() string {
+	result := e.message
+	for _, e := range e.errors {
+		result += fmt.Sprintf("\n  %s", e)
+	}
+	return result
+}
+
+func (e *multiError) Errors() []error {
+	return e.errors
+}
+
+type wrappedError struct {
+	message string
+	inner   error
+}
+
+func (e *wrappedError) Message() string {
+	return e.message
+}
+
+func (e *wrappedError) Error() string {
+	return RolledUpErrorMessage(e)
+}
+
+func (e *wrappedError) Inner() error {
+	return e.inner
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/internal/results.go b/vendor/github.com/mongodb/mongo-go-driver/internal/results.go
new file mode 100644
index 0000000..7879ebe
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/internal/results.go
@@ -0,0 +1,54 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package internal
+
+import (
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// IsMasterResult is the result of executing the
+// ismaster command.
+type IsMasterResult struct {
+	Arbiters            []string           `bson:"arbiters,omitempty"`
+	ArbiterOnly         bool               `bson:"arbiterOnly,omitempty"`
+	ElectionID          primitive.ObjectID `bson:"electionId,omitempty"`
+	Hidden              bool               `bson:"hidden,omitempty"`
+	Hosts               []string           `bson:"hosts,omitempty"`
+	IsMaster            bool               `bson:"ismaster,omitempty"`
+	IsReplicaSet        bool               `bson:"isreplicaset,omitempty"`
+	LastWriteTimestamp  time.Time          `bson:"lastWriteDate,omitempty"`
+	MaxBSONObjectSize   uint32             `bson:"maxBsonObjectSize,omitempty"`
+	MaxMessageSizeBytes uint32             `bson:"maxMessageSizeBytes,omitempty"`
+	MaxWriteBatchSize   uint16             `bson:"maxWriteBatchSize,omitempty"`
+	Me                  string             `bson:"me,omitempty"`
+	MaxWireVersion      int32              `bson:"maxWireVersion,omitempty"`
+	MinWireVersion      int32              `bson:"minWireVersion,omitempty"`
+	Msg                 string             `bson:"msg,omitempty"`
+	OK                  int32              `bson:"ok"`
+	Passives            []string           `bson:"passives,omitempty"`
+	ReadOnly            bool               `bson:"readOnly,omitempty"`
+	Secondary           bool               `bson:"secondary,omitempty"`
+	SetName             string             `bson:"setName,omitempty"`
+	SetVersion          uint32             `bson:"setVersion,omitempty"`
+	Tags                map[string]string  `bson:"tags,omitempty"`
+}
+
+// BuildInfoResult is the result of executing the
+// buildInfo command.
+type BuildInfoResult struct {
+	OK           bool    `bson:"ok"`
+	GitVersion   string  `bson:"gitVersion,omitempty"`
+	Version      string  `bson:"version,omitempty"`
+	VersionArray []uint8 `bson:"versionArray,omitempty"`
+}
+
+// GetLastErrorResult is the result of executing the
+// getLastError command.
+type GetLastErrorResult struct {
+	ConnectionID uint32 `bson:"connectionId"`
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/internal/semaphore.go b/vendor/github.com/mongodb/mongo-go-driver/internal/semaphore.go
new file mode 100644
index 0000000..792e531
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/internal/semaphore.go
@@ -0,0 +1,57 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package internal
+
+import (
+	"context"
+	"errors"
+)
+
+// NewSemaphore creates a new semaphore.
+func NewSemaphore(slots uint64) *Semaphore {
+	ch := make(chan struct{}, slots)
+	for i := uint64(0); i < slots; i++ {
+		ch <- struct{}{}
+	}
+
+	return &Semaphore{
+		permits: ch,
+	}
+}
+
+// Semaphore is a synchronization primitive that controls access
+// to a common resource.
+type Semaphore struct {
+	permits chan struct{}
+}
+
+// Len gets the number of permits available.
+func (s *Semaphore) Len() uint64 {
+	return uint64(len(s.permits))
+}
+
+// Wait waits until a resource is available or until the context
+// is done.
+func (s *Semaphore) Wait(ctx context.Context) error {
+	select {
+	case <-s.permits:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// Release releases a resource back into the pool.
+func (s *Semaphore) Release() error {
+	select {
+	case s.permits <- struct{}{}:
+	default:
+		return errors.New("internal.Semaphore.Release: attempt to release more resources than are available")
+	}
+
+	return nil
+}
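+
+// withPermit is an illustrative sketch, not part of the upstream file: it
+// shows the intended Wait/Release pairing around a critical section.
+func withPermit(ctx context.Context, s *Semaphore, fn func()) error {
+	if err := s.Wait(ctx); err != nil {
+		return err // context was cancelled or timed out before a permit freed up
+	}
+	// Release can only fail when called more times than Wait, which the
+	// defer pairing here rules out.
+	defer func() { _ = s.Release() }()
+	fn()
+	return nil
+}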
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/batch_cursor.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/batch_cursor.go
new file mode 100644
index 0000000..a7ce0b5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/batch_cursor.go
@@ -0,0 +1,30 @@
+package mongo
+
+import (
+	"context"
+)
+
+// batchCursor is the interface implemented by types that can provide batches of document results.
+// The Cursor type is built on top of this type.
+type batchCursor interface {
+	// ID returns the ID of the cursor.
+	ID() int64
+
+	// Next returns true if there is a batch available.
+	Next(context.Context) bool
+
+	// Batch appends the current batch of documents to dst. RequiredBytes can be used to determine
+	// the length of the current batch of documents.
+	//
+	// If there is no batch available, this method should do nothing.
+	Batch(dst []byte) []byte
+
+	// RequiredBytes returns the number of bytes required for the current batch.
+	RequiredBytes() int
+
+	// Err returns the last error encountered.
+	Err() error
+
+	// Close closes the cursor.
+	Close(context.Context) error
+}
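+
+// drainBatches is an illustrative sketch, not part of the upstream file: it
+// shows the intended calling pattern for a batchCursor, appending every
+// batch into a single buffer until the cursor is exhausted.
+func drainBatches(ctx context.Context, bc batchCursor) ([]byte, error) {
+	var buf []byte
+	for bc.Next(ctx) {
+		buf = bc.Batch(buf)
+	}
+	if err := bc.Err(); err != nil {
+		return nil, err
+	}
+	return buf, bc.Close(ctx)
+}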
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/bulk_write.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/bulk_write.go
new file mode 100644
index 0000000..f086189
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/bulk_write.go
@@ -0,0 +1,341 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+)
+
+// WriteModel is the interface satisfied by all models for bulk writes.
+type WriteModel interface {
+	convertModel() driver.WriteModel
+}
+
+// InsertOneModel is the write model for insert operations.
+type InsertOneModel struct {
+	Document interface{}
+}
+
+// NewInsertOneModel creates a new InsertOneModel.
+func NewInsertOneModel() *InsertOneModel {
+	return &InsertOneModel{}
+}
+
+// SetDocument sets the BSON document for the InsertOneModel.
+func (iom *InsertOneModel) SetDocument(doc interface{}) *InsertOneModel {
+	iom.Document = doc
+	return iom
+}
+
+func (iom *InsertOneModel) convertModel() driver.WriteModel {
+	return driver.InsertOneModel{
+		Document: iom.Document,
+	}
+}
+
+// DeleteOneModel is the write model for delete operations.
+type DeleteOneModel struct {
+	Filter    interface{}
+	Collation *options.Collation
+}
+
+// NewDeleteOneModel creates a new DeleteOneModel.
+func NewDeleteOneModel() *DeleteOneModel {
+	return &DeleteOneModel{}
+}
+
+// SetFilter sets the filter for the DeleteOneModel.
+func (dom *DeleteOneModel) SetFilter(filter interface{}) *DeleteOneModel {
+	dom.Filter = filter
+	return dom
+}
+
+// SetCollation sets the collation for the DeleteOneModel.
+func (dom *DeleteOneModel) SetCollation(collation *options.Collation) *DeleteOneModel {
+	dom.Collation = collation
+	return dom
+}
+
+func (dom *DeleteOneModel) convertModel() driver.WriteModel {
+	return driver.DeleteOneModel{
+		Collation: dom.Collation,
+		Filter:    dom.Filter,
+	}
+}
+
+// DeleteManyModel is the write model for deleteMany operations.
+type DeleteManyModel struct {
+	Filter    interface{}
+	Collation *options.Collation
+}
+
+// NewDeleteManyModel creates a new DeleteManyModel.
+func NewDeleteManyModel() *DeleteManyModel {
+	return &DeleteManyModel{}
+}
+
+// SetFilter sets the filter for the DeleteManyModel.
+func (dmm *DeleteManyModel) SetFilter(filter interface{}) *DeleteManyModel {
+	dmm.Filter = filter
+	return dmm
+}
+
+// SetCollation sets the collation for the DeleteManyModel.
+func (dmm *DeleteManyModel) SetCollation(collation *options.Collation) *DeleteManyModel {
+	dmm.Collation = collation
+	return dmm
+}
+
+func (dmm *DeleteManyModel) convertModel() driver.WriteModel {
+	return driver.DeleteManyModel{
+		Collation: dmm.Collation,
+		Filter:    dmm.Filter,
+	}
+}
+
+// ReplaceOneModel is the write model for replace operations.
+type ReplaceOneModel struct {
+	Collation   *options.Collation
+	Upsert      *bool
+	Filter      interface{}
+	Replacement interface{}
+}
+
+// NewReplaceOneModel creates a new ReplaceOneModel.
+func NewReplaceOneModel() *ReplaceOneModel {
+	return &ReplaceOneModel{}
+}
+
+// SetFilter sets the filter for the ReplaceOneModel.
+func (rom *ReplaceOneModel) SetFilter(filter interface{}) *ReplaceOneModel {
+	rom.Filter = filter
+	return rom
+}
+
+// SetReplacement sets the replacement document for the ReplaceOneModel.
+func (rom *ReplaceOneModel) SetReplacement(rep interface{}) *ReplaceOneModel {
+	rom.Replacement = rep
+	return rom
+}
+
+// SetCollation sets the collation for the ReplaceOneModel.
+func (rom *ReplaceOneModel) SetCollation(collation *options.Collation) *ReplaceOneModel {
+	rom.Collation = collation
+	return rom
+}
+
+// SetUpsert specifies if a new document should be created if no document matches the query.
+func (rom *ReplaceOneModel) SetUpsert(upsert bool) *ReplaceOneModel {
+	rom.Upsert = &upsert
+	return rom
+}
+
+func (rom *ReplaceOneModel) convertModel() driver.WriteModel {
+	um := driver.UpdateModel{
+		Collation: rom.Collation,
+	}
+	if rom.Upsert != nil {
+		um.Upsert = *rom.Upsert
+		um.UpsertSet = true
+	}
+
+	return driver.ReplaceOneModel{
+		UpdateModel: um,
+		Filter:      rom.Filter,
+		Replacement: rom.Replacement,
+	}
+}
+
+// UpdateOneModel is the write model for update operations.
+type UpdateOneModel struct {
+	Collation    *options.Collation
+	Upsert       *bool
+	Filter       interface{}
+	Update       interface{}
+	ArrayFilters *options.ArrayFilters
+}
+
+// NewUpdateOneModel creates a new UpdateOneModel.
+func NewUpdateOneModel() *UpdateOneModel {
+	return &UpdateOneModel{}
+}
+
+// SetFilter sets the filter for the UpdateOneModel.
+func (uom *UpdateOneModel) SetFilter(filter interface{}) *UpdateOneModel {
+	uom.Filter = filter
+	return uom
+}
+
+// SetUpdate sets the update document for the UpdateOneModel.
+func (uom *UpdateOneModel) SetUpdate(update interface{}) *UpdateOneModel {
+	uom.Update = update
+	return uom
+}
+
+// SetArrayFilters specifies a set of filters specifying to which array elements an update should apply.
+func (uom *UpdateOneModel) SetArrayFilters(filters options.ArrayFilters) *UpdateOneModel {
+	uom.ArrayFilters = &filters
+	return uom
+}
+
+// SetCollation sets the collation for the UpdateOneModel.
+func (uom *UpdateOneModel) SetCollation(collation *options.Collation) *UpdateOneModel {
+	uom.Collation = collation
+	return uom
+}
+
+// SetUpsert specifies if a new document should be created if no document matches the query.
+func (uom *UpdateOneModel) SetUpsert(upsert bool) *UpdateOneModel {
+	uom.Upsert = &upsert
+	return uom
+}
+
+func (uom *UpdateOneModel) convertModel() driver.WriteModel {
+	um := driver.UpdateModel{
+		Collation: uom.Collation,
+	}
+	if uom.Upsert != nil {
+		um.Upsert = *uom.Upsert
+		um.UpsertSet = true
+	}
+
+	converted := driver.UpdateOneModel{
+		UpdateModel: um,
+		Filter:      uom.Filter,
+		Update:      uom.Update,
+	}
+	if uom.ArrayFilters != nil {
+		converted.ArrayFilters = *uom.ArrayFilters
+		converted.ArrayFiltersSet = true
+	}
+
+	return converted
+}
+
+// UpdateManyModel is the write model for updateMany operations.
+type UpdateManyModel struct {
+	Collation    *options.Collation
+	Upsert       *bool
+	Filter       interface{}
+	Update       interface{}
+	ArrayFilters *options.ArrayFilters
+}
+
+// NewUpdateManyModel creates a new UpdateManyModel.
+func NewUpdateManyModel() *UpdateManyModel {
+	return &UpdateManyModel{}
+}
+
+// SetFilter sets the filter for the UpdateManyModel.
+func (umm *UpdateManyModel) SetFilter(filter interface{}) *UpdateManyModel {
+	umm.Filter = filter
+	return umm
+}
+
+// SetUpdate sets the update document for the UpdateManyModel.
+func (umm *UpdateManyModel) SetUpdate(update interface{}) *UpdateManyModel {
+	umm.Update = update
+	return umm
+}
+
+// SetArrayFilters specifies a set of filters specifying to which array elements an update should apply.
+func (umm *UpdateManyModel) SetArrayFilters(filters options.ArrayFilters) *UpdateManyModel {
+	umm.ArrayFilters = &filters
+	return umm
+}
+
+// SetCollation sets the collation for the UpdateManyModel.
+func (umm *UpdateManyModel) SetCollation(collation *options.Collation) *UpdateManyModel {
+	umm.Collation = collation
+	return umm
+}
+
+// SetUpsert specifies if a new document should be created if no document matches the query.
+func (umm *UpdateManyModel) SetUpsert(upsert bool) *UpdateManyModel {
+	umm.Upsert = &upsert
+	return umm
+}
+
+func (umm *UpdateManyModel) convertModel() driver.WriteModel {
+	um := driver.UpdateModel{
+		Collation: umm.Collation,
+	}
+	if umm.Upsert != nil {
+		um.Upsert = *umm.Upsert
+		um.UpsertSet = true
+	}
+
+	converted := driver.UpdateManyModel{
+		UpdateModel: um,
+		Filter:      umm.Filter,
+		Update:      umm.Update,
+	}
+	if umm.ArrayFilters != nil {
+		converted.ArrayFilters = *umm.ArrayFilters
+		converted.ArrayFiltersSet = true
+	}
+
+	return converted
+}
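+
+// exampleBulkModels is an illustrative sketch, not part of the upstream
+// file: it builds a mixed batch of write models with the fluent setters, in
+// the shape Collection.BulkWrite expects. The doc, filter, and update values
+// are assumptions supplied by the caller.
+func exampleBulkModels(doc, filter, update interface{}) []WriteModel {
+	return []WriteModel{
+		NewInsertOneModel().SetDocument(doc),
+		NewUpdateOneModel().SetFilter(filter).SetUpdate(update).SetUpsert(true),
+		NewDeleteManyModel().SetFilter(filter),
+	}
+}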
+
+func dispatchToMongoModel(model driver.WriteModel) WriteModel {
+	switch conv := model.(type) {
+	case driver.InsertOneModel:
+		return &InsertOneModel{
+			Document: conv.Document,
+		}
+	case driver.DeleteOneModel:
+		return &DeleteOneModel{
+			Filter:    conv.Filter,
+			Collation: conv.Collation,
+		}
+	case driver.DeleteManyModel:
+		return &DeleteManyModel{
+			Filter:    conv.Filter,
+			Collation: conv.Collation,
+		}
+	case driver.ReplaceOneModel:
+		rom := &ReplaceOneModel{
+			Filter:      conv.Filter,
+			Replacement: conv.Replacement,
+			Collation:   conv.Collation,
+		}
+		if conv.UpsertSet {
+			rom.Upsert = &conv.Upsert
+		}
+		return rom
+	case driver.UpdateOneModel:
+		uom := &UpdateOneModel{
+			Filter:    conv.Filter,
+			Update:    conv.Update,
+			Collation: conv.Collation,
+		}
+		if conv.UpsertSet {
+			uom.Upsert = &conv.Upsert
+		}
+		if conv.ArrayFiltersSet {
+			uom.ArrayFilters = &conv.ArrayFilters
+		}
+		return uom
+	case driver.UpdateManyModel:
+		umm := &UpdateManyModel{
+			Filter:    conv.Filter,
+			Update:    conv.Update,
+			Collation: conv.Collation,
+		}
+		if conv.UpsertSet {
+			umm.Upsert = &conv.Upsert
+		}
+		if conv.ArrayFiltersSet {
+			umm.ArrayFilters = &conv.ArrayFilters
+		}
+		return umm
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/change_stream.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/change_stream.go
new file mode 100644
index 0000000..5330117
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/change_stream.go
@@ -0,0 +1,508 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+const errorInterrupted int32 = 11601
+const errorCappedPositionLost int32 = 136
+const errorCursorKilled int32 = 237
+
+// ErrMissingResumeToken indicates that a change stream notification from the server did not
+// contain a resume token.
+var ErrMissingResumeToken = errors.New("cannot provide resume functionality when the resume token is missing")
+
+// ErrNilCursor indicates that the cursor for the change stream is nil.
+var ErrNilCursor = errors.New("cursor is nil")
+
+// ChangeStream instances iterate a stream of change documents. Each document can be decoded via the
+// Decode method. Resume tokens should be retrieved via the ResumeToken method and can be stored to
+// resume the change stream at a specific point in time.
+//
+// A typical usage of the ChangeStream type would be:
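+//
+// (The sketch below is illustrative rather than upstream documentation; it
+// assumes coll is a *Collection, ctx a context.Context, and pipeline an
+// aggregation pipeline.)
+//
+//	cs, err := coll.Watch(ctx, pipeline)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer cs.Close(ctx)
+//	for cs.Next(ctx) {
+//		var evt bson.Raw
+//		if err := cs.Decode(&evt); err != nil {
+//			// handle error
+//		}
+//	}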
+type ChangeStream struct {
+	// Current is the BSON bytes of the current change document. This property is only valid until
+	// the next call to Next or Close. If continued access is required to the bson.Raw, you must
+	// make a copy of it.
+	Current bson.Raw
+
+	cmd        bsonx.Doc // aggregate command to run to create stream and rebuild cursor
+	pipeline   bsonx.Arr
+	options    *options.ChangeStreamOptions
+	coll       *Collection
+	db         *Database
+	ns         command.Namespace
+	cursor     *Cursor
+	cursorOpts bsonx.Doc
+
+	resumeToken bsonx.Doc
+	err         error
+	streamType  StreamType
+	client      *Client
+	sess        Session
+	readPref    *readpref.ReadPref
+	readConcern *readconcern.ReadConcern
+	registry    *bsoncodec.Registry
+}
+
+func (cs *ChangeStream) replaceOptions(desc description.SelectedServer) {
+	// if cs has not received any changes and resumeAfter not specified and max wire version >= 7, run known agg cmd
+	// with startAtOperationTime set to startAtOperationTime provided by user or saved from initial agg
+	// must not send resumeAfter key
+
+	// else: run known agg cmd with resumeAfter set to last known resumeToken
+	// must not set startAtOperationTime (remove if originally in cmd)
+
+	if cs.options.ResumeAfter == nil && desc.WireVersion.Max >= 7 && cs.resumeToken == nil {
+		cs.options.SetStartAtOperationTime(cs.sess.OperationTime())
+	} else {
+		if cs.resumeToken == nil {
+			return // restart stream without the resume token
+		}
+
+		cs.options.SetResumeAfter(cs.resumeToken)
+		// remove startAtOperationTime
+		cs.options.SetStartAtOperationTime(nil)
+	}
+}
+
+// createCmdDocs creates the options documents for the pipeline, the cursor, and the aggregate command.
+func createCmdDocs(csType StreamType, opts *options.ChangeStreamOptions, registry *bsoncodec.Registry) (bsonx.Doc,
+	bsonx.Doc, bsonx.Doc, error) {
+
+	pipelineDoc := bsonx.Doc{}
+	cursorDoc := bsonx.Doc{}
+	optsDoc := bsonx.Doc{}
+
+	if csType == ClientStream {
+		pipelineDoc = pipelineDoc.Append("allChangesForCluster", bsonx.Boolean(true))
+	}
+
+	if opts.BatchSize != nil {
+		cursorDoc = cursorDoc.Append("batchSize", bsonx.Int32(*opts.BatchSize))
+	}
+	if opts.Collation != nil {
+		optsDoc = optsDoc.Append("collation", bsonx.Document(opts.Collation.ToDocument()))
+	}
+	if opts.FullDocument != nil {
+		pipelineDoc = pipelineDoc.Append("fullDocument", bsonx.String(string(*opts.FullDocument)))
+	}
+	if opts.MaxAwaitTime != nil {
+		ms := int64(time.Duration(*opts.MaxAwaitTime) / time.Millisecond)
+		pipelineDoc = pipelineDoc.Append("maxAwaitTimeMS", bsonx.Int64(ms))
+	}
+	if opts.ResumeAfter != nil {
+		rt, err := transformDocument(registry, opts.ResumeAfter)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+
+		pipelineDoc = pipelineDoc.Append("resumeAfter", bsonx.Document(rt))
+	}
+	if opts.StartAtOperationTime != nil {
+		pipelineDoc = pipelineDoc.Append("startAtOperationTime",
+			bsonx.Timestamp(opts.StartAtOperationTime.T, opts.StartAtOperationTime.I))
+	}
+
+	return pipelineDoc, cursorDoc, optsDoc, nil
+}
+
+func getSession(ctx context.Context, client *Client) (Session, error) {
+	sess := sessionFromContext(ctx)
+	if err := client.ValidSession(sess); err != nil {
+		return nil, err
+	}
+
+	var mongoSess Session
+	if sess != nil {
+		mongoSess = &sessionImpl{
+			Client: sess,
+		}
+	} else {
+		// create implicit session because it will be needed
+		newSess, err := session.NewClientSession(client.topology.SessionPool, client.id, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+
+		mongoSess = &sessionImpl{
+			Client: newSess,
+		}
+	}
+
+	return mongoSess, nil
+}
+
+func parseOptions(csType StreamType, opts *options.ChangeStreamOptions, registry *bsoncodec.Registry) (bsonx.Doc,
+	bsonx.Doc, bsonx.Doc, error) {
+
+	if opts.FullDocument == nil {
+		opts = opts.SetFullDocument(options.Default)
+	}
+
+	pipelineDoc, cursorDoc, optsDoc, err := createCmdDocs(csType, opts, registry)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	return pipelineDoc, cursorDoc, optsDoc, nil
+}
+
+func (cs *ChangeStream) runCommand(ctx context.Context, replaceOptions bool) error {
+	ss, err := cs.client.topology.SelectServer(ctx, cs.db.writeSelector)
+	if err != nil {
+		return err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+
+	if replaceOptions {
+		cs.replaceOptions(desc)
+		optionsDoc, _, _, err := createCmdDocs(cs.streamType, cs.options, cs.registry)
+		if err != nil {
+			return err
+		}
+
+		changeStreamDoc := bsonx.Doc{
+			{"$changeStream", bsonx.Document(optionsDoc)},
+		}
+		cs.pipeline[0] = bsonx.Document(changeStreamDoc)
+		cs.cmd.Set("pipeline", bsonx.Array(cs.pipeline))
+	}
+
+	readCmd := command.Read{
+		DB:          cs.db.name,
+		Command:     cs.cmd,
+		Session:     cs.sess.(*sessionImpl).Client,
+		Clock:       cs.client.clock,
+		ReadPref:    cs.readPref,
+		ReadConcern: cs.readConcern,
+	}
+
+	rdr, err := readCmd.RoundTrip(ctx, desc, conn)
+	if err != nil {
+		cs.sess.EndSession(ctx)
+		return err
+	}
+
+	batchCursor, err := driver.NewBatchCursor(bsoncore.Document(rdr), readCmd.Session, readCmd.Clock, ss.Server)
+	if err != nil {
+		cs.sess.EndSession(ctx)
+		return err
+	}
+	cursor, err := newCursor(batchCursor, cs.registry)
+	if err != nil {
+		cs.sess.EndSession(ctx)
+		return err
+	}
+	cs.cursor = cursor
+
+	cursorValue, err := rdr.LookupErr("cursor")
+	if err != nil {
+		return err
+	}
+	cursorDoc := cursorValue.Document()
+	cs.ns = command.ParseNamespace(cursorDoc.Lookup("ns").StringValue())
+
+	return nil
+}
+
+func newChangeStream(ctx context.Context, coll *Collection, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+
+	pipelineArr, err := transformAggregatePipeline(coll.registry, pipeline)
+	if err != nil {
+		return nil, err
+	}
+
+	csOpts := options.MergeChangeStreamOptions(opts...)
+	pipelineDoc, cursorDoc, optsDoc, err := parseOptions(CollectionStream, csOpts, coll.registry)
+	if err != nil {
+		return nil, err
+	}
+	sess, err := getSession(ctx, coll.client)
+	if err != nil {
+		return nil, err
+	}
+
+	csDoc := bsonx.Document(bsonx.Doc{
+		{"$changeStream", bsonx.Document(pipelineDoc)},
+	})
+	pipelineArr = append(bsonx.Arr{csDoc}, pipelineArr...)
+
+	cmd := bsonx.Doc{
+		{"aggregate", bsonx.String(coll.name)},
+		{"pipeline", bsonx.Array(pipelineArr)},
+		{"cursor", bsonx.Document(cursorDoc)},
+	}
+	cmd = append(cmd, optsDoc...)
+
+	cs := &ChangeStream{
+		client:      coll.client,
+		sess:        sess,
+		cmd:         cmd,
+		pipeline:    pipelineArr,
+		coll:        coll,
+		db:          coll.db,
+		streamType:  CollectionStream,
+		readPref:    coll.readPreference,
+		readConcern: coll.readConcern,
+		options:     csOpts,
+		registry:    coll.registry,
+		cursorOpts:  cursorDoc,
+	}
+
+	err = cs.runCommand(ctx, false)
+	if err != nil {
+		return nil, err
+	}
+
+	return cs, nil
+}
+
+func newDbChangeStream(ctx context.Context, db *Database, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+
+	pipelineArr, err := transformAggregatePipeline(db.registry, pipeline)
+	if err != nil {
+		return nil, err
+	}
+
+	csOpts := options.MergeChangeStreamOptions(opts...)
+	pipelineDoc, cursorDoc, optsDoc, err := parseOptions(DatabaseStream, csOpts, db.registry)
+	if err != nil {
+		return nil, err
+	}
+	sess, err := getSession(ctx, db.client)
+	if err != nil {
+		return nil, err
+	}
+
+	csDoc := bsonx.Document(bsonx.Doc{
+		{"$changeStream", bsonx.Document(pipelineDoc)},
+	})
+	pipelineArr = append(bsonx.Arr{csDoc}, pipelineArr...)
+
+	cmd := bsonx.Doc{
+		{"aggregate", bsonx.Int32(1)},
+		{"pipeline", bsonx.Array(pipelineArr)},
+		{"cursor", bsonx.Document(cursorDoc)},
+	}
+	cmd = append(cmd, optsDoc...)
+
+	cs := &ChangeStream{
+		client:      db.client,
+		db:          db,
+		sess:        sess,
+		cmd:         cmd,
+		pipeline:    pipelineArr,
+		streamType:  DatabaseStream,
+		readPref:    db.readPreference,
+		readConcern: db.readConcern,
+		options:     csOpts,
+		registry:    db.registry,
+		cursorOpts:  cursorDoc,
+	}
+
+	err = cs.runCommand(ctx, false)
+	if err != nil {
+		return nil, err
+	}
+
+	return cs, nil
+}
+
+func newClientChangeStream(ctx context.Context, client *Client, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+
+	pipelineArr, err := transformAggregatePipeline(client.registry, pipeline)
+	if err != nil {
+		return nil, err
+	}
+
+	csOpts := options.MergeChangeStreamOptions(opts...)
+	pipelineDoc, cursorDoc, optsDoc, err := parseOptions(ClientStream, csOpts, client.registry)
+	if err != nil {
+		return nil, err
+	}
+	sess, err := getSession(ctx, client)
+	if err != nil {
+		return nil, err
+	}
+
+	csDoc := bsonx.Document(bsonx.Doc{
+		{"$changeStream", bsonx.Document(pipelineDoc)},
+	})
+	pipelineArr = append(bsonx.Arr{csDoc}, pipelineArr...)
+
+	cmd := bsonx.Doc{
+		{"aggregate", bsonx.Int32(1)},
+		{"pipeline", bsonx.Array(pipelineArr)},
+		{"cursor", bsonx.Document(cursorDoc)},
+	}
+	cmd = append(cmd, optsDoc...)
+
+	cs := &ChangeStream{
+		client:      client,
+		db:          client.Database("admin"),
+		sess:        sess,
+		cmd:         cmd,
+		pipeline:    pipelineArr,
+		streamType:  ClientStream,
+		readPref:    client.readPreference,
+		readConcern: client.readConcern,
+		options:     csOpts,
+		registry:    client.registry,
+		cursorOpts:  cursorDoc,
+	}
+
+	err = cs.runCommand(ctx, false)
+	if err != nil {
+		return nil, err
+	}
+
+	return cs, nil
+}
+
+func (cs *ChangeStream) storeResumeToken() error {
+	idVal, err := cs.cursor.Current.LookupErr("_id")
+	if err != nil {
+		_ = cs.Close(context.Background())
+		return ErrMissingResumeToken
+	}
+
+	idDoc, ok := idVal.DocumentOK()
+	if !ok {
+		_ = cs.Close(context.Background())
+		return ErrMissingResumeToken
+	}
+	tokenDoc, err := bsonx.ReadDoc(idDoc)
+	if err != nil {
+		_ = cs.Close(context.Background())
+		return ErrMissingResumeToken
+	}
+
+	cs.resumeToken = tokenDoc
+	return nil
+}
+
+// ID returns the cursor ID for this change stream.
+func (cs *ChangeStream) ID() int64 {
+	if cs.cursor == nil {
+		return 0
+	}
+
+	return cs.cursor.ID()
+}
+
+// Next gets the next result from this change stream. Returns true if there were no errors and the next
+// result is available for decoding.
+func (cs *ChangeStream) Next(ctx context.Context) bool {
+	// execute in a loop to retry resume-able errors and advance the underlying cursor
+	for {
+		if cs.cursor == nil {
+			return false
+		}
+
+		if cs.cursor.Next(ctx) {
+			err := cs.storeResumeToken()
+			if err != nil {
+				cs.err = err
+				return false
+			}
+
+			cs.Current = cs.cursor.Current
+			return true
+		}
+
+		err := cs.cursor.Err()
+		if err == nil {
+			return false
+		}
+
+		switch t := err.(type) {
+		case command.Error:
+			if t.Code == errorInterrupted || t.Code == errorCappedPositionLost || t.Code == errorCursorKilled {
+				return false
+			}
+		}
+
+		killCursors := command.KillCursors{
+			NS:  cs.ns,
+			IDs: []int64{cs.ID()},
+		}
+
+		_, _ = driver.KillCursors(ctx, killCursors, cs.client.topology, cs.db.writeSelector)
+		cs.err = cs.runCommand(ctx, true)
+		if cs.err != nil {
+			return false
+		}
+	}
+}
+
+// Decode will decode the current document into out.
+func (cs *ChangeStream) Decode(out interface{}) error {
+	if cs.cursor == nil {
+		return ErrNilCursor
+	}
+
+	return bson.UnmarshalWithRegistry(cs.registry, cs.Current, out)
+}
+
+// Err returns the current error.
+func (cs *ChangeStream) Err() error {
+	if cs.err != nil {
+		return cs.err
+	}
+	if cs.cursor == nil {
+		return nil
+	}
+
+	return cs.cursor.Err()
+}
+
+// Close closes this change stream.
+func (cs *ChangeStream) Close(ctx context.Context) error {
+	if cs.cursor == nil {
+		return nil // cursor is already closed
+	}
+
+	return cs.cursor.Close(ctx)
+}
+
+// StreamType represents the type of a change stream.
+type StreamType uint8
+
+// These constants represent valid change stream types. A change stream can be initialized over a collection, all
+// collections in a database, or over a whole client.
+const (
+	CollectionStream StreamType = iota
+	DatabaseStream
+	ClientStream
+)
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/client.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/client.go
new file mode 100644
index 0000000..7984bc0
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/client.go
@@ -0,0 +1,454 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/tag"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connstring"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+const defaultLocalThreshold = 15 * time.Millisecond
+
+// Client performs operations on a given topology.
+type Client struct {
+	id              uuid.UUID
+	topologyOptions []topology.Option
+	topology        *topology.Topology
+	connString      connstring.ConnString
+	localThreshold  time.Duration
+	retryWrites     bool
+	clock           *session.ClusterClock
+	readPreference  *readpref.ReadPref
+	readConcern     *readconcern.ReadConcern
+	writeConcern    *writeconcern.WriteConcern
+	registry        *bsoncodec.Registry
+	marshaller      BSONAppender
+}
+
+// Connect creates a new Client and then initializes it using the Connect method.
+func Connect(ctx context.Context, uri string, opts ...*options.ClientOptions) (*Client, error) {
+	c, err := NewClientWithOptions(uri, opts...)
+	if err != nil {
+		return nil, err
+	}
+	err = c.Connect(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return c, nil
+}
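+
+// exampleConnect is an illustrative sketch, not part of the upstream file:
+// it shows the typical connect-then-ping startup sequence. The URI is an
+// assumption; passing a nil read preference to Ping uses the client default.
+func exampleConnect(ctx context.Context) (*Client, error) {
+	c, err := Connect(ctx, "mongodb://localhost:27017")
+	if err != nil {
+		return nil, err
+	}
+	if err := c.Ping(ctx, nil); err != nil {
+		return nil, err
+	}
+	return c, nil
+}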
+
+// NewClient creates a new client to connect to a cluster specified by the uri.
+func NewClient(uri string) (*Client, error) {
+	cs, err := connstring.Parse(uri)
+	if err != nil {
+		return nil, err
+	}
+
+	return newClient(cs)
+}
+
+// NewClientWithOptions creates a new client to connect to a cluster specified
+// by the connection string and the options manually passed in. If the same
+// option is configured in both the connection string and the manual options,
+// the manual option will be ignored.
+func NewClientWithOptions(uri string, opts ...*options.ClientOptions) (*Client, error) {
+	cs, err := connstring.Parse(uri)
+	if err != nil {
+		return nil, err
+	}
+
+	return newClient(cs, opts...)
+}
+
+// Connect initializes the Client by starting background monitoring goroutines.
+// This method must be called before a Client can be used.
+func (c *Client) Connect(ctx context.Context) error {
+	err := c.topology.Connect(ctx)
+	if err != nil {
+		return replaceTopologyErr(err)
+	}
+
+	return nil
+}
+
+// Disconnect closes sockets to the topology referenced by this Client. It will
+// shut down any monitoring goroutines, close the idle connection pool, and will
+// wait until all the in use connections have been returned to the connection
+// pool and closed before returning. If the context expires via cancellation,
+// deadline, or timeout before the in use connections have returned, the in use
+// connections will be closed, resulting in the failure of any in flight read
+// or write operations. If this method returns with no errors, all connections
+// associated with this Client have been closed.
+func (c *Client) Disconnect(ctx context.Context) error {
+	c.endSessions(ctx)
+	return replaceTopologyErr(c.topology.Disconnect(ctx))
+}
+
+// Ping verifies that the client can connect to the topology. If rp is nil,
+// the client's default read preference is used.
+func (c *Client) Ping(ctx context.Context, rp *readpref.ReadPref) error {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	if rp == nil {
+		rp = c.readPreference
+	}
+
+	_, err := c.topology.SelectServer(ctx, description.ReadPrefSelector(rp))
+	return replaceTopologyErr(err)
+}
+
+// StartSession starts a new session.
+func (c *Client) StartSession(opts ...*options.SessionOptions) (Session, error) {
+	if c.topology.SessionPool == nil {
+		return nil, ErrClientDisconnected
+	}
+
+	sopts := options.MergeSessionOptions(opts...)
+	coreOpts := &session.ClientOptions{
+		DefaultReadConcern:    c.readConcern,
+		DefaultReadPreference: c.readPreference,
+		DefaultWriteConcern:   c.writeConcern,
+	}
+	if sopts.CausalConsistency != nil {
+		coreOpts.CausalConsistency = sopts.CausalConsistency
+	}
+	if sopts.DefaultReadConcern != nil {
+		coreOpts.DefaultReadConcern = sopts.DefaultReadConcern
+	}
+	if sopts.DefaultWriteConcern != nil {
+		coreOpts.DefaultWriteConcern = sopts.DefaultWriteConcern
+	}
+	if sopts.DefaultReadPreference != nil {
+		coreOpts.DefaultReadPreference = sopts.DefaultReadPreference
+	}
+
+	sess, err := session.NewClientSession(c.topology.SessionPool, c.id, session.Explicit, coreOpts)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+
+	sess.RetryWrite = c.retryWrites
+
+	return &sessionImpl{
+		Client: sess,
+		topo:   c.topology,
+	}, nil
+}
+
+func (c *Client) endSessions(ctx context.Context) {
+	if c.topology.SessionPool == nil {
+		return
+	}
+	cmd := command.EndSessions{
+		Clock:      c.clock,
+		SessionIDs: c.topology.SessionPool.IDSlice(),
+	}
+
+	_, _ = driver.EndSessions(ctx, cmd, c.topology, description.ReadPrefSelector(readpref.PrimaryPreferred()))
+}
+
+func newClient(cs connstring.ConnString, opts ...*options.ClientOptions) (*Client, error) {
+	clientOpt := options.MergeClientOptions(cs, opts...)
+
+	client := &Client{
+		topologyOptions: clientOpt.TopologyOptions,
+		connString:      clientOpt.ConnString,
+		localThreshold:  defaultLocalThreshold,
+		readPreference:  clientOpt.ReadPreference,
+		readConcern:     clientOpt.ReadConcern,
+		writeConcern:    clientOpt.WriteConcern,
+		registry:        clientOpt.Registry,
+	}
+
+	if client.connString.RetryWritesSet {
+		client.retryWrites = client.connString.RetryWrites
+	}
+	if clientOpt.RetryWrites != nil {
+		client.retryWrites = *clientOpt.RetryWrites
+	}
+
+	clientID, err := uuid.New()
+	if err != nil {
+		return nil, err
+	}
+	client.id = clientID
+
+	topts := append(
+		client.topologyOptions,
+		topology.WithConnString(func(connstring.ConnString) connstring.ConnString { return client.connString }),
+		topology.WithServerOptions(func(opts ...topology.ServerOption) []topology.ServerOption {
+			return append(opts, topology.WithClock(func(clock *session.ClusterClock) *session.ClusterClock {
+				return client.clock
+			}), topology.WithRegistry(func(registry *bsoncodec.Registry) *bsoncodec.Registry {
+				return client.registry
+			}))
+		}),
+	)
+	topo, err := topology.New(topts...)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+	client.topology = topo
+	client.clock = &session.ClusterClock{}
+
+	if client.readConcern == nil {
+		client.readConcern = readConcernFromConnString(&client.connString)
+
+		if client.readConcern == nil {
+			// no read concern in conn string
+			client.readConcern = readconcern.New()
+		}
+	}
+
+	if client.writeConcern == nil {
+		client.writeConcern = writeConcernFromConnString(&client.connString)
+	}
+	if client.readPreference == nil {
+		rp, err := readPreferenceFromConnString(&client.connString)
+		if err != nil {
+			return nil, err
+		}
+		if rp != nil {
+			client.readPreference = rp
+		} else {
+			client.readPreference = readpref.Primary()
+		}
+	}
+
+	if client.registry == nil {
+		client.registry = bson.DefaultRegistry
+	}
+	return client, nil
+}
+
+func readConcernFromConnString(cs *connstring.ConnString) *readconcern.ReadConcern {
+	if len(cs.ReadConcernLevel) == 0 {
+		return nil
+	}
+
+	rc := &readconcern.ReadConcern{}
+	readconcern.Level(cs.ReadConcernLevel)(rc)
+
+	return rc
+}
+
+func writeConcernFromConnString(cs *connstring.ConnString) *writeconcern.WriteConcern {
+	var wc *writeconcern.WriteConcern
+
+	if len(cs.WString) > 0 {
+		if wc == nil {
+			wc = writeconcern.New()
+		}
+
+		writeconcern.WTagSet(cs.WString)(wc)
+	} else if cs.WNumberSet {
+		if wc == nil {
+			wc = writeconcern.New()
+		}
+
+		writeconcern.W(cs.WNumber)(wc)
+	}
+
+	if cs.JSet {
+		if wc == nil {
+			wc = writeconcern.New()
+		}
+
+		writeconcern.J(cs.J)(wc)
+	}
+
+	if cs.WTimeoutSet {
+		if wc == nil {
+			wc = writeconcern.New()
+		}
+
+		writeconcern.WTimeout(cs.WTimeout)(wc)
+	}
+
+	return wc
+}
+
+func readPreferenceFromConnString(cs *connstring.ConnString) (*readpref.ReadPref, error) {
+	var rp *readpref.ReadPref
+	var err error
+	options := make([]readpref.Option, 0, 1)
+
+	tagSets := tag.NewTagSetsFromMaps(cs.ReadPreferenceTagSets)
+	if len(tagSets) > 0 {
+		options = append(options, readpref.WithTagSets(tagSets...))
+	}
+
+	if cs.MaxStaleness != 0 {
+		options = append(options, readpref.WithMaxStaleness(cs.MaxStaleness))
+	}
+
+	if len(cs.ReadPreference) > 0 {
+		mode, _ := readpref.ModeFromString(cs.ReadPreference)
+		rp, err = readpref.New(mode, options...)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return rp, nil
+}
+
+// ValidSession returns an error if the session does not belong to this client.
+func (c *Client) ValidSession(sess *session.Client) error {
+	if sess != nil && !uuid.Equal(sess.ClientID, c.id) {
+		return ErrWrongClient
+	}
+	return nil
+}
+
+// Database returns a handle for a given database.
+func (c *Client) Database(name string, opts ...*options.DatabaseOptions) *Database {
+	return newDatabase(c, name, opts...)
+}
+
+// ConnectionString returns the connection string of the cluster the client is connected to.
+func (c *Client) ConnectionString() string {
+	return c.connString.Original
+}
+
+// ListDatabases lists the databases matching filter and returns the result as a ListDatabasesResult.
+func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) (ListDatabasesResult, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := c.ValidSession(sess)
+	if err != nil {
+		return ListDatabasesResult{}, err
+	}
+
+	f, err := transformDocument(c.registry, filter)
+	if err != nil {
+		return ListDatabasesResult{}, err
+	}
+
+	cmd := command.ListDatabases{
+		Filter:  f,
+		Session: sess,
+		Clock:   c.clock,
+	}
+
+	readSelector := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(readpref.Primary()),
+		description.LatencySelector(c.localThreshold),
+	})
+	res, err := driver.ListDatabases(
+		ctx, cmd,
+		c.topology,
+		readSelector,
+		c.id,
+		c.topology.SessionPool,
+		opts...,
+	)
+	if err != nil {
+		return ListDatabasesResult{}, replaceTopologyErr(err)
+	}
+
+	return (ListDatabasesResult{}).fromResult(res), nil
+}
+
+// ListDatabaseNames returns a slice containing the names of all of the databases on the server.
+func (c *Client) ListDatabaseNames(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) ([]string, error) {
+	opts = append(opts, options.ListDatabases().SetNameOnly(true))
+
+	res, err := c.ListDatabases(ctx, filter, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	names := make([]string, 0)
+	for _, spec := range res.Databases {
+		names = append(names, spec.Name)
+	}
+
+	return names, nil
+}
+
+// WithSession allows a user to start a session themselves and manage
+// its lifetime. The only way to provide a session to a CRUD method is
+// to invoke that CRUD method with the mongo.SessionContext within the
+// closure. The mongo.SessionContext can be used as a regular context,
+// so methods like context.WithDeadline and context.WithTimeout are
+// supported.
+//
+// If the context.Context already has a mongo.Session attached, that
+// mongo.Session will be replaced with the one provided.
+//
+// Errors returned from the closure are transparently returned from
+// this function.
+func WithSession(ctx context.Context, sess Session, fn func(SessionContext) error) error {
+	return fn(contextWithSession(ctx, sess))
+}
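+
+// insertWithSession is an illustrative sketch, not part of the upstream
+// file: it runs an InsertOne under a caller-managed session via WithSession.
+// It assumes coll.InsertOne with the driver's usual signature.
+func insertWithSession(ctx context.Context, c *Client, coll *Collection, doc interface{}) error {
+	sess, err := c.StartSession()
+	if err != nil {
+		return err
+	}
+	defer sess.EndSession(ctx)
+	// SessionContext is a context.Context, so it is passed where a plain
+	// context would normally go.
+	return WithSession(ctx, sess, func(sc SessionContext) error {
+		_, err := coll.InsertOne(sc, doc)
+		return err
+	})
+}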
+
+// UseSession creates a default session that is only valid for the
+// lifetime of the closure. No cleanup outside of closing the session
+// is done upon exiting the closure. This means that an outstanding
+// transaction will be aborted, even if the closure returns an error.
+//
+// If ctx already contains a mongo.Session, that mongo.Session will be
+// replaced with the newly created mongo.Session.
+//
+// Errors returned from the closure are transparently returned from
+// this method.
+func (c *Client) UseSession(ctx context.Context, fn func(SessionContext) error) error {
+	return c.UseSessionWithOptions(ctx, options.Session(), fn)
+}
+
+// UseSessionWithOptions works like UseSession but allows the caller
+// to specify the options used to create the session.
+func (c *Client) UseSessionWithOptions(ctx context.Context, opts *options.SessionOptions, fn func(SessionContext) error) error {
+	defaultSess, err := c.StartSession(opts)
+	if err != nil {
+		return err
+	}
+
+	defer defaultSess.EndSession(ctx)
+
+	sessCtx := sessionContext{
+		Context: context.WithValue(ctx, sessionKey{}, defaultSess),
+		Session: defaultSess,
+	}
+
+	return fn(sessCtx)
+}
+
+// Watch returns a change stream cursor used to receive information about changes to the entire deployment the client
+// is connected to. This method is preferred to running a raw aggregation with a $changeStream stage because it
+// supports resumability in the case of some errors. The client must have read concern majority or no read concern
+// for a change stream to be created successfully.
+func (c *Client) Watch(ctx context.Context, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+
+	return newClientChangeStream(ctx, c, pipeline, opts...)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/collection.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/collection.go
new file mode 100644
index 0000000..fb16775
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/collection.go
@@ -0,0 +1,1298 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Collection performs operations on a given collection.
+type Collection struct {
+	client         *Client
+	db             *Database
+	name           string
+	readConcern    *readconcern.ReadConcern
+	writeConcern   *writeconcern.WriteConcern
+	readPreference *readpref.ReadPref
+	readSelector   description.ServerSelector
+	writeSelector  description.ServerSelector
+	registry       *bsoncodec.Registry
+}
+
+func newCollection(db *Database, name string, opts ...*options.CollectionOptions) *Collection {
+	collOpt := options.MergeCollectionOptions(opts...)
+
+	rc := db.readConcern
+	if collOpt.ReadConcern != nil {
+		rc = collOpt.ReadConcern
+	}
+
+	wc := db.writeConcern
+	if collOpt.WriteConcern != nil {
+		wc = collOpt.WriteConcern
+	}
+
+	rp := db.readPreference
+	if collOpt.ReadPreference != nil {
+		rp = collOpt.ReadPreference
+	}
+
+	reg := db.registry
+	if collOpt.Registry != nil {
+		reg = collOpt.Registry
+	}
+
+	readSelector := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(rp),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	writeSelector := description.CompositeSelector([]description.ServerSelector{
+		description.WriteSelector(),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	coll := &Collection{
+		client:         db.client,
+		db:             db,
+		name:           name,
+		readPreference: rp,
+		readConcern:    rc,
+		writeConcern:   wc,
+		readSelector:   readSelector,
+		writeSelector:  writeSelector,
+		registry:       reg,
+	}
+
+	return coll
+}
+
+func (coll *Collection) copy() *Collection {
+	return &Collection{
+		client:         coll.client,
+		db:             coll.db,
+		name:           coll.name,
+		readConcern:    coll.readConcern,
+		writeConcern:   coll.writeConcern,
+		readPreference: coll.readPreference,
+		readSelector:   coll.readSelector,
+		writeSelector:  coll.writeSelector,
+		registry:       coll.registry,
+	}
+}
+
+// Clone creates a copy of this collection with updated options, if any are given.
+func (coll *Collection) Clone(opts ...*options.CollectionOptions) (*Collection, error) {
+	copyColl := coll.copy()
+	optsColl := options.MergeCollectionOptions(opts...)
+
+	if optsColl.ReadConcern != nil {
+		copyColl.readConcern = optsColl.ReadConcern
+	}
+
+	if optsColl.WriteConcern != nil {
+		copyColl.writeConcern = optsColl.WriteConcern
+	}
+
+	if optsColl.ReadPreference != nil {
+		copyColl.readPreference = optsColl.ReadPreference
+	}
+
+	if optsColl.Registry != nil {
+		copyColl.registry = optsColl.Registry
+	}
+
+	copyColl.readSelector = description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(copyColl.readPreference),
+		description.LatencySelector(copyColl.client.localThreshold),
+	})
+
+	return copyColl, nil
+}
+
+// Name provides access to the name of the collection.
+func (coll *Collection) Name() string {
+	return coll.name
+}
+
+// namespace returns the namespace of the collection.
+func (coll *Collection) namespace() command.Namespace {
+	return command.NewNamespace(coll.db.name, coll.name)
+}
+
+// Database provides access to the database that contains the collection.
+func (coll *Collection) Database() *Database {
+	return coll.db
+}
+
+// BulkWrite performs a bulk write operation.
+//
+// See https://docs.mongodb.com/manual/core/bulk-write-operations/.
+func (coll *Collection) BulkWrite(ctx context.Context, models []WriteModel,
+	opts ...*options.BulkWriteOptions) (*BulkWriteResult, error) {
+
+	if len(models) == 0 {
+		return nil, ErrEmptySlice
+	}
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	dispatchModels := make([]driver.WriteModel, len(models))
+	for i, model := range models {
+		if model == nil {
+			return nil, ErrNilDocument
+		}
+		dispatchModels[i] = model.convertModel()
+	}
+
+	res, err := driver.BulkWrite(
+		ctx,
+		coll.namespace(),
+		dispatchModels,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		sess,
+		coll.writeConcern,
+		coll.client.clock,
+		coll.registry,
+		opts...,
+	)
+
+	if err != nil {
+		if conv, ok := err.(driver.BulkWriteException); ok {
+			return &BulkWriteResult{}, BulkWriteException{
+				WriteConcernError: convertWriteConcernError(conv.WriteConcernError),
+				WriteErrors:       convertBulkWriteErrors(conv.WriteErrors),
+			}
+		}
+
+		return &BulkWriteResult{}, replaceTopologyErr(err)
+	}
+
+	return &BulkWriteResult{
+		InsertedCount: res.InsertedCount,
+		MatchedCount:  res.MatchedCount,
+		ModifiedCount: res.ModifiedCount,
+		DeletedCount:  res.DeletedCount,
+		UpsertedCount: res.UpsertedCount,
+		UpsertedIDs:   res.UpsertedIDs,
+	}, nil
+}
+
+// InsertOne inserts a single document into the collection.
+func (coll *Collection) InsertOne(ctx context.Context, document interface{},
+	opts ...*options.InsertOneOptions) (*InsertOneResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	doc, insertedID, err := transformAndEnsureID(coll.registry, document)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+	oldns := coll.namespace()
+	cmd := command.Insert{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Docs:         []bsonx.Doc{doc},
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	// convert to InsertManyOptions so these can be passed to driver.Insert
+	insertOpts := make([]*options.InsertManyOptions, len(opts))
+	for i, opt := range opts {
+		insertOpts[i] = options.InsertMany()
+		insertOpts[i].BypassDocumentValidation = opt.BypassDocumentValidation
+	}
+
+	res, err := driver.Insert(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		insertOpts...,
+	)
+
+	rr, err := processWriteError(res.WriteConcernError, res.WriteErrors, err)
+	if rr&rrOne == 0 {
+		return nil, err
+	}
+
+	return &InsertOneResult{InsertedID: insertedID}, err
+}
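
Editor's note: a sketch of typical InsertOne usage; `coll` is an assumed *mongo.Collection handle.

```go
func insertOneExample(ctx context.Context, coll *mongo.Collection) error {
	res, err := coll.InsertOne(ctx, bson.M{"name": "alice", "age": 30})
	if err != nil {
		return err
	}
	// InsertedID is the _id sent to the server; transformAndEnsureID
	// generates one client-side when the document lacks an _id field.
	fmt.Println("inserted _id:", res.InsertedID)
	return nil
}
```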
+
+// InsertMany inserts the provided documents.
+func (coll *Collection) InsertMany(ctx context.Context, documents []interface{},
+	opts ...*options.InsertManyOptions) (*InsertManyResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	if len(documents) == 0 {
+		return nil, ErrEmptySlice
+	}
+
+	result := make([]interface{}, len(documents))
+	docs := make([]bsonx.Doc, len(documents))
+
+	for i, doc := range documents {
+		if doc == nil {
+			return nil, ErrNilDocument
+		}
+		bdoc, insertedID, err := transformAndEnsureID(coll.registry, doc)
+		if err != nil {
+			return nil, err
+		}
+
+		docs[i] = bdoc
+		result[i] = insertedID
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Insert{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Docs:         docs,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	res, err := driver.Insert(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		opts...,
+	)
+
+	switch err {
+	case nil:
+	case command.ErrUnacknowledgedWrite:
+		return &InsertManyResult{InsertedIDs: result}, ErrUnacknowledgedWrite
+	default:
+		return nil, replaceTopologyErr(err)
+	}
+	if len(res.WriteErrors) > 0 || res.WriteConcernError != nil {
+		bwErrors := make([]BulkWriteError, 0, len(res.WriteErrors))
+		for _, we := range res.WriteErrors {
+			bwErrors = append(bwErrors, BulkWriteError{
+				WriteError{
+					Index:   we.Index,
+					Code:    we.Code,
+					Message: we.ErrMsg,
+				},
+				nil,
+			})
+		}
+
+		err = BulkWriteException{
+			WriteErrors:       bwErrors,
+			WriteConcernError: convertWriteConcernError(res.WriteConcernError),
+		}
+	}
+
+	return &InsertManyResult{InsertedIDs: result}, err
+}
+
+// DeleteOne deletes a single document from the collection.
+func (coll *Collection) DeleteOne(ctx context.Context, filter interface{},
+	opts ...*options.DeleteOptions) (*DeleteResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+	deleteDocs := []bsonx.Doc{
+		{
+			{"q", bsonx.Document(f)},
+			{"limit", bsonx.Int32(1)},
+		},
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Delete{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Deletes:      deleteDocs,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	res, err := driver.Delete(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		opts...,
+	)
+
+	rr, err := processWriteError(res.WriteConcernError, res.WriteErrors, err)
+	if rr&rrOne == 0 {
+		return nil, err
+	}
+	return &DeleteResult{DeletedCount: int64(res.N)}, err
+}
+
+// DeleteMany deletes multiple documents from the collection.
+func (coll *Collection) DeleteMany(ctx context.Context, filter interface{},
+	opts ...*options.DeleteOptions) (*DeleteResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+	deleteDocs := []bsonx.Doc{{{"q", bsonx.Document(f)}, {"limit", bsonx.Int32(0)}}}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Delete{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Deletes:      deleteDocs,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	res, err := driver.Delete(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		false,
+		opts...,
+	)
+
+	rr, err := processWriteError(res.WriteConcernError, res.WriteErrors, err)
+	if rr&rrMany == 0 {
+		return nil, err
+	}
+	return &DeleteResult{DeletedCount: int64(res.N)}, err
+}
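
Editor's note: the two delete methods differ only in the `limit` field of the delete statement built above (1 for DeleteOne, 0 for DeleteMany). A sketch, with `coll` assumed:

```go
func deleteStaleExample(ctx context.Context, coll *mongo.Collection) error {
	res, err := coll.DeleteMany(ctx, bson.M{"status": "stale"})
	if err != nil {
		return err
	}
	// DeletedCount comes from the "n" field of the server reply.
	fmt.Println("deleted", res.DeletedCount, "documents")
	return nil
}
```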
+
+func (coll *Collection) updateOrReplaceOne(ctx context.Context, filter,
+	update bsonx.Doc, sess *session.Client, opts ...*options.UpdateOptions) (*UpdateResult, error) {
+
+	// TODO: should session be taken from ctx or left as argument?
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	updateDocs := []bsonx.Doc{
+		{
+			{"q", bsonx.Document(filter)},
+			{"u", bsonx.Document(update)},
+			{"multi", bsonx.Boolean(false)},
+		},
+	}
+
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Update{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Docs:         updateDocs,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	r, err := driver.Update(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		opts...,
+	)
+	if err != nil && err != command.ErrUnacknowledgedWrite {
+		return nil, replaceTopologyErr(err)
+	}
+
+	res := &UpdateResult{
+		MatchedCount:  r.MatchedCount,
+		ModifiedCount: r.ModifiedCount,
+		UpsertedCount: int64(len(r.Upserted)),
+	}
+	if len(r.Upserted) > 0 {
+		res.UpsertedID = r.Upserted[0].ID
+		res.MatchedCount--
+	}
+
+	rr, err := processWriteError(r.WriteConcernError, r.WriteErrors, err)
+	if rr&rrOne == 0 {
+		return nil, err
+	}
+	return res, err
+}
+
+// UpdateOne updates a single document in the collection.
+func (coll *Collection) UpdateOne(ctx context.Context, filter interface{}, update interface{},
+	opts ...*options.UpdateOptions) (*UpdateResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	u, err := transformDocument(coll.registry, update)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := ensureDollarKey(u); err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	return coll.updateOrReplaceOne(ctx, f, u, sess, opts...)
+}
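
Editor's note: because ensureDollarKey rejects plain replacement documents, the update argument must be built from $-operators. A sketch:

```go
func updateOneExample(ctx context.Context, coll *mongo.Collection) error {
	res, err := coll.UpdateOne(ctx,
		bson.M{"name": "alice"},           // filter
		bson.M{"$set": bson.M{"age": 31}}, // update operators only
	)
	if err != nil {
		return err
	}
	fmt.Println("matched:", res.MatchedCount, "modified:", res.ModifiedCount)
	return nil
}
```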
+
+// UpdateMany updates multiple documents in the collection.
+func (coll *Collection) UpdateMany(ctx context.Context, filter interface{}, update interface{},
+	opts ...*options.UpdateOptions) (*UpdateResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	u, err := transformDocument(coll.registry, update)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = ensureDollarKey(u); err != nil {
+		return nil, err
+	}
+
+	updateDocs := []bsonx.Doc{
+		{
+			{"q", bsonx.Document(f)},
+			{"u", bsonx.Document(u)},
+			{"multi", bsonx.Boolean(true)},
+		},
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Update{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Docs:         updateDocs,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	r, err := driver.Update(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		false,
+		opts...,
+	)
+	if err != nil && err != command.ErrUnacknowledgedWrite {
+		return nil, replaceTopologyErr(err)
+	}
+	res := &UpdateResult{
+		MatchedCount:  r.MatchedCount,
+		ModifiedCount: r.ModifiedCount,
+		UpsertedCount: int64(len(r.Upserted)),
+	}
+	// TODO(skriptble): Is this correct? Do we only return the first upserted ID for an UpdateMany?
+	if len(r.Upserted) > 0 {
+		res.UpsertedID = r.Upserted[0].ID
+		res.MatchedCount--
+	}
+
+	rr, err := processWriteError(r.WriteConcernError, r.WriteErrors, err)
+	if rr&rrMany == 0 {
+		return nil, err
+	}
+	return res, err
+}
+
+// ReplaceOne replaces a single document in the collection.
+func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{},
+	replacement interface{}, opts ...*options.ReplaceOptions) (*UpdateResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	r, err := transformDocument(coll.registry, replacement)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(r) > 0 && strings.HasPrefix(r[0].Key, "$") {
+		return nil, errors.New("replacement document cannot contain keys beginning with '$'")
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	updateOptions := make([]*options.UpdateOptions, 0, len(opts))
+	for _, opt := range opts {
+		uOpts := options.Update()
+		uOpts.BypassDocumentValidation = opt.BypassDocumentValidation
+		uOpts.Collation = opt.Collation
+		uOpts.Upsert = opt.Upsert
+		updateOptions = append(updateOptions, uOpts)
+	}
+
+	return coll.updateOrReplaceOne(ctx, f, r, sess, updateOptions...)
+}
+
+// Aggregate runs an aggregation framework pipeline.
+//
+// See https://docs.mongodb.com/manual/aggregation/.
+func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{},
+	opts ...*options.AggregateOptions) (*Cursor, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	pipelineArr, err := transformAggregatePipeline(coll.registry, pipeline)
+	if err != nil {
+		return nil, err
+	}
+
+	aggOpts := options.MergeAggregateOptions(opts...)
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Aggregate{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Pipeline:     pipelineArr,
+		ReadPref:     coll.readPreference,
+		WriteConcern: wc,
+		ReadConcern:  rc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	batchCursor, err := driver.Aggregate(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.registry,
+		aggOpts,
+	)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+
+	cursor, err := newCursor(batchCursor, coll.registry)
+	return cursor, replaceTopologyErr(err)
+}
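
Editor's note: a sketch of a two-stage pipeline. Any ordered document slice accepted by transformAggregatePipeline works; a bson.A of bson.D stages is used here, and the field names are illustrative.

```go
func aggregateExample(ctx context.Context, coll *mongo.Collection) error {
	pipeline := bson.A{
		bson.D{{"$match", bson.D{{"status", "active"}}}},
		bson.D{{"$group", bson.D{{"_id", "$team"}, {"n", bson.D{{"$sum", 1}}}}}},
	}
	cur, err := coll.Aggregate(ctx, pipeline)
	if err != nil {
		return err
	}
	defer cur.Close(ctx)
	for cur.Next(ctx) {
		var row bson.M
		if err := cur.Decode(&row); err != nil {
			return err
		}
		fmt.Println(row)
	}
	return cur.Err()
}
```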
+
+// Count gets the number of documents matching the filter.
+func (coll *Collection) Count(ctx context.Context, filter interface{},
+	opts ...*options.CountOptions) (int64, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return 0, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return 0, err
+	}
+
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Count{
+		NS:          command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Query:       f,
+		ReadPref:    coll.readPreference,
+		ReadConcern: rc,
+		Session:     sess,
+		Clock:       coll.client.clock,
+	}
+
+	count, err := driver.Count(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.registry,
+		opts...,
+	)
+
+	return count, replaceTopologyErr(err)
+}
+
+// CountDocuments gets the number of documents matching the filter.
+func (coll *Collection) CountDocuments(ctx context.Context, filter interface{},
+	opts ...*options.CountOptions) (int64, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	countOpts := options.MergeCountOptions(opts...)
+
+	pipelineArr, err := countDocumentsAggregatePipeline(coll.registry, filter, countOpts)
+	if err != nil {
+		return 0, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return 0, err
+	}
+
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.CountDocuments{
+		NS:          command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Pipeline:    pipelineArr,
+		ReadPref:    coll.readPreference,
+		ReadConcern: rc,
+		Session:     sess,
+		Clock:       coll.client.clock,
+	}
+
+	count, err := driver.CountDocuments(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.registry,
+		countOpts,
+	)
+
+	return count, replaceTopologyErr(err)
+}
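
Editor's note: unlike Count, CountDocuments runs the aggregation built by countDocumentsAggregatePipeline and so returns an exact, filter-aware count. A sketch:

```go
func countAdultsExample(ctx context.Context, coll *mongo.Collection) (int64, error) {
	// Exact count of documents matching the filter.
	return coll.CountDocuments(ctx, bson.M{"age": bson.M{"$gte": 18}})
}
```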
+
+// EstimatedDocumentCount gets an estimate of the count of documents in a collection using collection metadata.
+func (coll *Collection) EstimatedDocumentCount(ctx context.Context,
+	opts ...*options.EstimatedDocumentCountOptions) (int64, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := coll.client.ValidSession(sess)
+	if err != nil {
+		return 0, err
+	}
+
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Count{
+		NS:          command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Query:       bsonx.Doc{},
+		ReadPref:    coll.readPreference,
+		ReadConcern: rc,
+		Session:     sess,
+		Clock:       coll.client.clock,
+	}
+
+	countOpts := options.Count()
+	// only dereference MaxTime when the caller actually set it
+	if len(opts) >= 1 && opts[len(opts)-1].MaxTime != nil {
+		countOpts = countOpts.SetMaxTime(*opts[len(opts)-1].MaxTime)
+	}
+
+	count, err := driver.Count(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.registry,
+		countOpts,
+	)
+
+	return count, replaceTopologyErr(err)
+}
+
+// Distinct finds the distinct values for a specified field across a single
+// collection.
+func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter interface{},
+	opts ...*options.DistinctOptions) ([]interface{}, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Distinct{
+		NS:          command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Field:       fieldName,
+		Query:       f,
+		ReadPref:    coll.readPreference,
+		ReadConcern: rc,
+		Session:     sess,
+		Clock:       coll.client.clock,
+	}
+
+	res, err := driver.Distinct(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		opts...,
+	)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+
+	return res.Values, nil
+}
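
Editor's note: a sketch; the result slice holds one entry per distinct value of the named field.

```go
func distinctTeamsExample(ctx context.Context, coll *mongo.Collection) error {
	values, err := coll.Distinct(ctx, "team", bson.M{"status": "active"})
	if err != nil {
		return err
	}
	// Each element's dynamic type depends on the BSON type stored in the field.
	for _, v := range values {
		fmt.Println(v)
	}
	return nil
}
```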
+
+// Find returns a cursor over the documents matching the filter.
+func (coll *Collection) Find(ctx context.Context, filter interface{},
+	opts ...*options.FindOptions) (*Cursor, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Find{
+		NS:          command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Filter:      f,
+		ReadPref:    coll.readPreference,
+		ReadConcern: rc,
+		Session:     sess,
+		Clock:       coll.client.clock,
+	}
+
+	batchCursor, err := driver.Find(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.registry,
+		opts...,
+	)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+
+	cursor, err := newCursor(batchCursor, coll.registry)
+	return cursor, replaceTopologyErr(err)
+}
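
Editor's note: a sketch showing cursor iteration with struct decoding through the collection's registry; the user type and its bson tags are illustrative.

```go
func findAdultsExample(ctx context.Context, coll *mongo.Collection) error {
	type user struct {
		Name string `bson:"name"`
		Age  int32  `bson:"age"`
	}
	cur, err := coll.Find(ctx, bson.M{"age": bson.M{"$gte": 18}})
	if err != nil {
		return err
	}
	defer cur.Close(ctx)
	for cur.Next(ctx) {
		var u user
		if err := cur.Decode(&u); err != nil {
			return err
		}
		fmt.Println(u.Name, u.Age)
	}
	return cur.Err()
}
```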
+
+// FindOne returns up to one document that matches the filter.
+func (coll *Collection) FindOne(ctx context.Context, filter interface{},
+	opts ...*options.FindOneOptions) *SingleResult {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Find{
+		NS:          command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Filter:      f,
+		ReadPref:    coll.readPreference,
+		ReadConcern: rc,
+		Session:     sess,
+		Clock:       coll.client.clock,
+	}
+
+	findOpts := make([]*options.FindOptions, len(opts))
+	for i, opt := range opts {
+		findOpts[i] = &options.FindOptions{
+			AllowPartialResults: opt.AllowPartialResults,
+			BatchSize:           opt.BatchSize,
+			Collation:           opt.Collation,
+			Comment:             opt.Comment,
+			CursorType:          opt.CursorType,
+			Hint:                opt.Hint,
+			Max:                 opt.Max,
+			MaxAwaitTime:        opt.MaxAwaitTime,
+			Min:                 opt.Min,
+			NoCursorTimeout:     opt.NoCursorTimeout,
+			OplogReplay:         opt.OplogReplay,
+			Projection:          opt.Projection,
+			ReturnKey:           opt.ReturnKey,
+			ShowRecordID:        opt.ShowRecordID,
+			Skip:                opt.Skip,
+			Snapshot:            opt.Snapshot,
+			Sort:                opt.Sort,
+		}
+	}
+
+	batchCursor, err := driver.Find(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.registry,
+		findOpts...,
+	)
+	if err != nil {
+		return &SingleResult{err: replaceTopologyErr(err)}
+	}
+
+	cursor, err := newCursor(batchCursor, coll.registry)
+	return &SingleResult{cur: cursor, reg: coll.registry, err: replaceTopologyErr(err)}
+}
+
+// FindOneAndDelete finds a single document and deletes it, returning the
+// original document in the result.
+func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{},
+	opts ...*options.FindOneAndDeleteOptions) *SingleResult {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	oldns := coll.namespace()
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	cmd := command.FindOneAndDelete{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Query:        f,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	res, err := driver.FindOneAndDelete(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		coll.registry,
+		opts...,
+	)
+	if err != nil {
+		return &SingleResult{err: replaceTopologyErr(err)}
+	}
+
+	return &SingleResult{rdr: res.Value, reg: coll.registry}
+}
+
+// FindOneAndReplace finds a single document and replaces it, returning either
+// the original or the replaced document.
+func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{},
+	replacement interface{}, opts ...*options.FindOneAndReplaceOptions) *SingleResult {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	r, err := transformDocument(coll.registry, replacement)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	if len(r) > 0 && strings.HasPrefix(r[0].Key, "$") {
+		return &SingleResult{err: errors.New("replacement document cannot contain keys beginning with '$'")}
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.FindOneAndReplace{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Query:        f,
+		Replacement:  r,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	res, err := driver.FindOneAndReplace(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		coll.registry,
+		opts...,
+	)
+	if err != nil {
+		return &SingleResult{err: replaceTopologyErr(err)}
+	}
+
+	return &SingleResult{rdr: res.Value, reg: coll.registry}
+}
+
+// FindOneAndUpdate finds a single document and updates it, returning either
+// the original or the updated document.
+func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{},
+	update interface{}, opts ...*options.FindOneAndUpdateOptions) *SingleResult {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	u, err := transformDocument(coll.registry, update)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	err = ensureDollarKey(u)
+	if err != nil {
+		return &SingleResult{
+			err: err,
+		}
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.FindOneAndUpdate{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Query:        f,
+		Update:       u,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	res, err := driver.FindOneAndUpdate(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		coll.registry,
+		opts...,
+	)
+	if err != nil {
+		return &SingleResult{err: replaceTopologyErr(err)}
+	}
+
+	return &SingleResult{rdr: res.Value, reg: coll.registry}
+}
+
+// Watch returns a change stream cursor used to receive notifications of changes to the collection.
+//
+// This method is preferred to running a raw aggregation with a $changeStream stage because it
+// supports resumability in the case of some errors. The collection must have read concern majority or no read concern
+// for a change stream to be created successfully.
+func (coll *Collection) Watch(ctx context.Context, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+	return newChangeStream(ctx, coll, pipeline, opts...)
+}
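
Editor's note: a sketch of consuming the change stream, assuming ChangeStream exposes the same Next/Decode/Err/Close iteration surface as Cursor (true for this driver generation, but verify against the vendored changestream source).

```go
func watchExample(ctx context.Context, coll *mongo.Collection) error {
	cs, err := coll.Watch(ctx, bson.A{}) // empty pipeline: receive all events
	if err != nil {
		return err
	}
	defer cs.Close(ctx)
	for cs.Next(ctx) {
		var event bson.M
		if err := cs.Decode(&event); err != nil {
			return err
		}
		fmt.Println("operationType:", event["operationType"])
	}
	return cs.Err()
}
```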
+
+// Indexes returns the index view for this collection.
+func (coll *Collection) Indexes() IndexView {
+	return IndexView{coll: coll}
+}
+
+// Drop drops this collection from the database.
+func (coll *Collection) Drop(ctx context.Context) error {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := coll.client.ValidSession(sess)
+	if err != nil {
+		return err
+	}
+
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	cmd := command.DropCollection{
+		DB:           coll.db.name,
+		Collection:   coll.name,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+	_, err = driver.DropCollection(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+	)
+	if err != nil && !command.IsNotFound(err) {
+		return replaceTopologyErr(err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/cursor.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/cursor.go
new file mode 100644
index 0000000..a9dc13d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/cursor.go
@@ -0,0 +1,137 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+)
+
+// Cursor is used to iterate a stream of documents. Each document is decoded into the result
+// according to the rules of the bson package.
+//
+// A typical usage of the Cursor type would be:
+//
+//		var cur *Cursor
+//		ctx := context.Background()
+//		defer cur.Close(ctx)
+//
+// 		for cur.Next(ctx) {
+//			elem := &bson.D{}
+//			if err := cur.Decode(elem); err != nil {
+// 				log.Fatal(err)
+// 			}
+//
+// 			// do something with elem....
+//		}
+//
+// 		if err := cur.Err(); err != nil {
+//			log.Fatal(err)
+//		}
+//
+type Cursor struct {
+	// Current is the BSON bytes of the current document. This property is only valid until the next
+	// call to Next or Close. If continued access is required to the bson.Raw, you must make a copy
+	// of it.
+	Current bson.Raw
+
+	bc       batchCursor
+	pos      int
+	batch    []byte
+	registry *bsoncodec.Registry
+
+	err error
+}
+
+func newCursor(bc batchCursor, registry *bsoncodec.Registry) (*Cursor, error) {
+	if registry == nil {
+		registry = bson.DefaultRegistry
+	}
+	if bc == nil {
+		return nil, errors.New("batch cursor must not be nil")
+	}
+	return &Cursor{bc: bc, pos: 0, batch: make([]byte, 0, 256), registry: registry}, nil
+}
+
+func newEmptyCursor() *Cursor {
+	return &Cursor{bc: driver.NewEmptyBatchCursor()}
+}
+
+// ID returns the ID of this cursor.
+func (c *Cursor) ID() int64 { return c.bc.ID() }
+
+func (c *Cursor) advanceCurrentDocument() bool {
+	if len(c.batch[c.pos:]) < 4 {
+		c.err = errors.New("could not read next document: insufficient bytes")
+		return false
+	}
+	length := (int(c.batch[c.pos]) | int(c.batch[c.pos+1])<<8 | int(c.batch[c.pos+2])<<16 | int(c.batch[c.pos+3])<<24)
+	if len(c.batch[c.pos:]) < length {
+		c.err = errors.New("could not read next document: insufficient bytes")
+		return false
+	}
+	if len(c.Current) > 4 {
+		c.Current[0], c.Current[1], c.Current[2], c.Current[3] = 0x00, 0x00, 0x00, 0x00 // Invalidate the current document
+	}
+	c.Current = c.batch[c.pos : c.pos+length]
+	c.pos += length
+	return true
+}
+
+// Next gets the next result from this cursor. Returns true if there were no errors and the next
+// result is available for decoding.
+func (c *Cursor) Next(ctx context.Context) bool {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	if c.pos < len(c.batch) {
+		return c.advanceCurrentDocument()
+	}
+
+	// clear the batch
+	c.batch = c.batch[:0]
+	c.pos = 0
+	c.Current = c.Current[:0]
+
+	// call the Next method in a loop until at least one document is returned in the next batch or
+	// the context times out.
+	for len(c.batch) == 0 {
+		// If we don't have a next batch
+		if !c.bc.Next(ctx) {
+			// Do we have an error? If so we return false.
+			c.err = c.bc.Err()
+			if c.err != nil {
+				return false
+			}
+			// Is the cursor ID zero?
+			if c.bc.ID() == 0 {
+				return false
+			}
+			// empty batch, but cursor is still valid, so continue.
+			continue
+		}
+
+		c.batch = c.bc.Batch(c.batch[:0])
+	}
+
+	return c.advanceCurrentDocument()
+}
+
+// Decode will decode the current document into val.
+func (c *Cursor) Decode(val interface{}) error {
+	return bson.UnmarshalWithRegistry(c.registry, c.Current, val)
+}
+
+// Err returns the current error.
+func (c *Cursor) Err() error { return c.err }
+
+// Close closes this cursor.
+func (c *Cursor) Close(ctx context.Context) error { return c.bc.Close(ctx) }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/database.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/database.go
new file mode 100644
index 0000000..9575e06
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/database.go
@@ -0,0 +1,282 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Database performs operations on a given database.
+type Database struct {
+	client         *Client
+	name           string
+	readConcern    *readconcern.ReadConcern
+	writeConcern   *writeconcern.WriteConcern
+	readPreference *readpref.ReadPref
+	readSelector   description.ServerSelector
+	writeSelector  description.ServerSelector
+	registry       *bsoncodec.Registry
+}
+
+func newDatabase(client *Client, name string, opts ...*options.DatabaseOptions) *Database {
+	dbOpt := options.MergeDatabaseOptions(opts...)
+
+	rc := client.readConcern
+	if dbOpt.ReadConcern != nil {
+		rc = dbOpt.ReadConcern
+	}
+
+	rp := client.readPreference
+	if dbOpt.ReadPreference != nil {
+		rp = dbOpt.ReadPreference
+	}
+
+	wc := client.writeConcern
+	if dbOpt.WriteConcern != nil {
+		wc = dbOpt.WriteConcern
+	}
+
+	db := &Database{
+		client:         client,
+		name:           name,
+		readPreference: rp,
+		readConcern:    rc,
+		writeConcern:   wc,
+		registry:       client.registry,
+	}
+
+	db.readSelector = description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(db.readPreference),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	db.writeSelector = description.CompositeSelector([]description.ServerSelector{
+		description.WriteSelector(),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	return db
+}
+
+// Client returns the Client the database was created from.
+func (db *Database) Client() *Client {
+	return db.client
+}
+
+// Name returns the name of the database.
+func (db *Database) Name() string {
+	return db.name
+}
+
+// Collection gets a handle for a given collection in the database.
+func (db *Database) Collection(name string, opts ...*options.CollectionOptions) *Collection {
+	return newCollection(db, name, opts...)
+}
+
+func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, opts ...*options.RunCmdOptions) (command.Read,
+	description.ServerSelector, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+	runCmd := options.MergeRunCmdOptions(opts...)
+
+	if err := db.client.ValidSession(sess); err != nil {
+		return command.Read{}, nil, err
+	}
+
+	rp := runCmd.ReadPreference
+	if rp == nil {
+		if sess != nil && sess.TransactionRunning() {
+			rp = sess.CurrentRp // override with transaction read pref if specified
+		}
+		if rp == nil {
+			rp = readpref.Primary() // set to primary if nothing specified in options
+		}
+	}
+
+	runCmdDoc, err := transformDocument(db.registry, cmd)
+	if err != nil {
+		return command.Read{}, nil, err
+	}
+
+	readSelect := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(rp),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	return command.Read{
+		DB:       db.Name(),
+		Command:  runCmdDoc,
+		ReadPref: rp,
+		Session:  sess,
+		Clock:    db.client.clock,
+	}, readSelect, nil
+}
+
+// RunCommand runs a command on the database. A user can supply a custom
+// context to this method, or nil to default to context.Background().
+func (db *Database) RunCommand(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) *SingleResult {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	readCmd, readSelect, err := db.processRunCommand(ctx, runCommand, opts...)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	doc, err := driver.Read(ctx,
+		readCmd,
+		db.client.topology,
+		readSelect,
+		db.client.id,
+		db.client.topology.SessionPool,
+	)
+
+	return &SingleResult{err: replaceTopologyErr(err), rdr: doc, reg: db.registry}
+}
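
Editor's note: a sketch of running an ad-hoc command; bson.D is used because command documents are order-sensitive and the command name must be the first key.

```go
func pingExample(ctx context.Context, db *mongo.Database) error {
	var reply bson.M
	if err := db.RunCommand(ctx, bson.D{{"ping", 1}}).Decode(&reply); err != nil {
		return err
	}
	fmt.Println("ok:", reply["ok"])
	return nil
}
```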
+
+// RunCommandCursor runs a command on the database and returns a cursor over the resulting reader. A user can supply
+// a custom context to this method, or nil to default to context.Background().
+func (db *Database) RunCommandCursor(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) (*Cursor, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	readCmd, readSelect, err := db.processRunCommand(ctx, runCommand, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	batchCursor, err := driver.ReadCursor(
+		ctx,
+		readCmd,
+		db.client.topology,
+		readSelect,
+		db.client.id,
+		db.client.topology.SessionPool,
+	)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+
+	cursor, err := newCursor(batchCursor, db.registry)
+	return cursor, replaceTopologyErr(err)
+}
+
+// Drop drops this database from MongoDB.
+func (db *Database) Drop(ctx context.Context) error {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := db.client.ValidSession(sess)
+	if err != nil {
+		return err
+	}
+
+	cmd := command.DropDatabase{
+		DB:      db.name,
+		Session: sess,
+		Clock:   db.client.clock,
+	}
+	_, err = driver.DropDatabase(
+		ctx, cmd,
+		db.client.topology,
+		db.writeSelector,
+		db.client.id,
+		db.client.topology.SessionPool,
+	)
+	if err != nil && !command.IsNotFound(err) {
+		return replaceTopologyErr(err)
+	}
+	return nil
+}
+
+// ListCollections lists the collections in this database.
+func (db *Database) ListCollections(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) (*Cursor, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := db.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	filterDoc, err := transformDocument(db.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd := command.ListCollections{
+		DB:       db.name,
+		Filter:   filterDoc,
+		ReadPref: readpref.Primary(), // list collections must be run on a primary by default
+		Session:  sess,
+		Clock:    db.client.clock,
+	}
+
+	readSelector := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(readpref.Primary()),
+		description.LatencySelector(db.client.localThreshold),
+	})
+	batchCursor, err := driver.ListCollections(
+		ctx, cmd,
+		db.client.topology,
+		readSelector,
+		db.client.id,
+		db.client.topology.SessionPool,
+		opts...,
+	)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+
+	cursor, err := newCursor(batchCursor, db.registry)
+	return cursor, replaceTopologyErr(err)
+}
+
+// ReadConcern returns the read concern of this database.
+func (db *Database) ReadConcern() *readconcern.ReadConcern {
+	return db.readConcern
+}
+
+// ReadPreference returns the read preference of this database.
+func (db *Database) ReadPreference() *readpref.ReadPref {
+	return db.readPreference
+}
+
+// WriteConcern returns the write concern of this database.
+func (db *Database) WriteConcern() *writeconcern.WriteConcern {
+	return db.writeConcern
+}
+
+// Watch returns a change stream cursor used to receive notifications of changes to the database. This method is preferred
+// to running a raw aggregation with a $changeStream stage because it supports resumability in the case of some errors.
+// The database must have read concern majority or no read concern for a change stream to be created successfully.
+func (db *Database) Watch(ctx context.Context, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+
+	return newDbChangeStream(ctx, db, pipeline, opts...)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/doc.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/doc.go
new file mode 100644
index 0000000..4f7f819
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/doc.go
@@ -0,0 +1,60 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// NOTE: This documentation should be kept in line with the Example* test functions.
+
+// Package mongo provides a MongoDB Driver API for Go.
+//
+// Basic usage of the driver starts with creating a Client from a connection
+// string. To do so, call the NewClient and Connect functions:
+//
+// 		client, err := NewClient("mongodb://foo:bar@localhost:27017")
+// 		if err != nil { return err }
+// 		ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+// 		defer cancel()
+// 		err = client.Connect(ctx)
+// 		if err != nil { return err }
+//
+// This will create a new client and start monitoring the MongoDB server on localhost.
+// The Database and Collection types can be used to access the database:
+//
+//    collection := client.Database("baz").Collection("qux")
+//
+// A Collection can be used to query the database or insert documents:
+//
+//    res, err := collection.InsertOne(context.Background(), bson.M{"hello": "world"})
+//    if err != nil { return err }
+//    id := res.InsertedID
+//
+// Several methods return a cursor, which can be used like this:
+//
+//    cur, err := collection.Find(context.Background(), bson.D{})
+//    if err != nil { log.Fatal(err) }
+//    defer cur.Close(context.Background())
+//    for cur.Next(context.Background()) {
+//       elem := bson.D{}
+//       if err := cur.Decode(&elem); err != nil { log.Fatal(err) }
+//       // do something with elem....
+//    }
+//    if err := cur.Err(); err != nil {
+//    		return err
+//    }
+//
+// Methods that only return a single document will return a *SingleResult, which works
+// like a *sql.Row:
+//
+// 	  result := struct{
+// 	  	Foo string
+// 	  	Bar int32
+// 	  }{}
+//    filter := bson.D{{"hello", "world"}}
+//    err := collection.FindOne(context.Background(), filter).Decode(&result)
+//    if err != nil { return err }
+//    // do something with result...
+//
+// Additional examples can be found under the examples directory in the driver's repository and
+// on the MongoDB website.
+package mongo
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/errors.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/errors.go
new file mode 100644
index 0000000..3505bcc
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/errors.go
@@ -0,0 +1,186 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// ErrUnacknowledgedWrite is returned from functions that have an unacknowledged
+// write concern.
+var ErrUnacknowledgedWrite = errors.New("unacknowledged write")
+
+// ErrClientDisconnected is returned when a user attempts to call a method on a
+// disconnected client
+var ErrClientDisconnected = errors.New("client is disconnected")
+
+// ErrNilDocument is returned when a user attempts to pass a nil document or filter
+// to a function where the field is required.
+var ErrNilDocument = errors.New("document is nil")
+
+// ErrEmptySlice is returned when a user attempts to pass an empty slice as input
+// to a function where the field is required.
+var ErrEmptySlice = errors.New("must provide at least one element in input slice")
+
+func replaceTopologyErr(err error) error {
+	if err == topology.ErrTopologyClosed {
+		return ErrClientDisconnected
+	}
+	return err
+}
+
+// WriteError is a non-write concern failure that occurred as a result of a write
+// operation.
+type WriteError struct {
+	Index   int
+	Code    int
+	Message string
+}
+
+func (we WriteError) Error() string { return we.Message }
+
+// WriteErrors is a group of non-write concern failures that occurred as a result
+// of a write operation.
+type WriteErrors []WriteError
+
+func (we WriteErrors) Error() string {
+	var buf bytes.Buffer
+	fmt.Fprint(&buf, "write errors: [")
+	for idx, err := range we {
+		if idx != 0 {
+			fmt.Fprintf(&buf, ", ")
+		}
+		fmt.Fprintf(&buf, "{%s}", err)
+	}
+	fmt.Fprint(&buf, "]")
+	return buf.String()
+}
+
+func writeErrorsFromResult(rwes []result.WriteError) WriteErrors {
+	wes := make(WriteErrors, 0, len(rwes))
+	for _, err := range rwes {
+		wes = append(wes, WriteError{Index: err.Index, Code: err.Code, Message: err.ErrMsg})
+	}
+	return wes
+}
+
+// WriteConcernError is a write concern failure that occurred as a result of a
+// write operation.
+type WriteConcernError struct {
+	Code    int
+	Message string
+	Details bson.Raw
+}
+
+func (wce WriteConcernError) Error() string { return wce.Message }
+
+// WriteException is an error for a non-bulk write operation.
+type WriteException struct {
+	WriteConcernError *WriteConcernError
+	WriteErrors       WriteErrors
+}
+
+func (mwe WriteException) Error() string {
+	var buf bytes.Buffer
+	fmt.Fprint(&buf, "multiple write errors: [")
+	fmt.Fprintf(&buf, "{%s}, ", mwe.WriteErrors)
+	fmt.Fprintf(&buf, "{%s}]", mwe.WriteConcernError)
+	return buf.String()
+}
+
+func convertBulkWriteErrors(errors []driver.BulkWriteError) []BulkWriteError {
+	bwErrors := make([]BulkWriteError, 0, len(errors))
+	for _, err := range errors {
+		bwErrors = append(bwErrors, BulkWriteError{
+			WriteError{
+				Index:   err.Index,
+				Code:    err.Code,
+				Message: err.ErrMsg,
+			},
+			dispatchToMongoModel(err.Model),
+		})
+	}
+
+	return bwErrors
+}
+
+func convertWriteConcernError(wce *result.WriteConcernError) *WriteConcernError {
+	if wce == nil {
+		return nil
+	}
+
+	return &WriteConcernError{Code: wce.Code, Message: wce.ErrMsg, Details: wce.ErrInfo}
+}
+
+// BulkWriteError is an error for one operation in a bulk write.
+type BulkWriteError struct {
+	WriteError
+	Request WriteModel
+}
+
+func (bwe BulkWriteError) Error() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "{%s}", bwe.WriteError)
+	return buf.String()
+}
+
+// BulkWriteException is an error for a bulk write operation.
+type BulkWriteException struct {
+	WriteConcernError *WriteConcernError
+	WriteErrors       []BulkWriteError
+}
+
+func (bwe BulkWriteException) Error() string {
+	var buf bytes.Buffer
+	fmt.Fprint(&buf, "bulk write error: [")
+	fmt.Fprintf(&buf, "{%s}, ", bwe.WriteErrors)
+	fmt.Fprintf(&buf, "{%s}]", bwe.WriteConcernError)
+	return buf.String()
+}
+
+// returnResult is used to determine if a function calling processWriteError should return
+// the result or return nil. Since the processWriteError function is used by many different
+// methods, both *One and *Many, we need a way to differentiate if the method should return
+// the result and the error.
+type returnResult int
+
+const (
+	rrNone returnResult = 1 << iota // None means do not return the result ever.
+	rrOne                           // One means return the result if this was called by a *One method.
+	rrMany                          // Many means return the result if this was called by a *Many method.
+
+	rrAll returnResult = rrOne | rrMany // All means always return the result.
+)
+
+// processWriteError handles processing the result of a write operation. If the returnResult matches
+// the calling method's type, it should return the result object in addition to the error.
+// This function will wrap the errors from other packages and return them as errors from this package.
+//
+// WriteConcernError will be returned over WriteErrors if both are present.
+func processWriteError(wce *result.WriteConcernError, wes []result.WriteError, err error) (returnResult, error) {
+	switch {
+	case err == command.ErrUnacknowledgedWrite:
+		return rrAll, ErrUnacknowledgedWrite
+	case err != nil:
+		return rrNone, replaceTopologyErr(err)
+	case wce != nil || len(wes) > 0:
+		return rrMany, WriteException{
+			WriteConcernError: convertWriteConcernError(wce),
+			WriteErrors:       writeErrorsFromResult(wes),
+		}
+	default:
+		return rrAll, nil
+	}
+}
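
Editor's note: a sketch of how callers can distinguish the error types defined in this file; the switch arms mirror what processWriteError and the bulk paths return.

```go
func classifyWriteError(err error) {
	switch e := err.(type) {
	case nil:
		// success
	case mongo.WriteException:
		// returned by single-statement writes (InsertOne, UpdateOne, ...)
		for _, we := range e.WriteErrors {
			fmt.Println("write error:", we.Code, we.Message)
		}
		if e.WriteConcernError != nil {
			fmt.Println("write concern error:", e.WriteConcernError.Message)
		}
	case mongo.BulkWriteException:
		// returned by InsertMany and BulkWrite, with per-operation indexes
		for _, we := range e.WriteErrors {
			fmt.Printf("op %d failed: %s\n", we.Index, we.Message)
		}
	default:
		fmt.Println("other error:", e)
	}
}
```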
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/index_options_builder.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/index_options_builder.go
new file mode 100644
index 0000000..abc1514
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/index_options_builder.go
@@ -0,0 +1,134 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson"
+)
+
+// IndexOptionsBuilder constructs a BSON document for index options
+type IndexOptionsBuilder struct {
+	document bson.D
+}
+
+// NewIndexOptionsBuilder creates a new instance of IndexOptionsBuilder
+func NewIndexOptionsBuilder() *IndexOptionsBuilder {
+	return &IndexOptionsBuilder{}
+}
+
+// Background sets the background option
+func (iob *IndexOptionsBuilder) Background(background bool) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"background", background})
+	return iob
+}
+
+// ExpireAfterSeconds sets the expireAfterSeconds option
+func (iob *IndexOptionsBuilder) ExpireAfterSeconds(expireAfterSeconds int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"expireAfterSeconds", expireAfterSeconds})
+	return iob
+}
+
+// Name sets the name option
+func (iob *IndexOptionsBuilder) Name(name string) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"name", name})
+	return iob
+}
+
+// Sparse sets the sparse option
+func (iob *IndexOptionsBuilder) Sparse(sparse bool) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"sparse", sparse})
+	return iob
+}
+
+// StorageEngine sets the storageEngine option
+func (iob *IndexOptionsBuilder) StorageEngine(storageEngine interface{}) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"storageEngine", storageEngine})
+	return iob
+}
+
+// Unique sets the unique option
+func (iob *IndexOptionsBuilder) Unique(unique bool) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"unique", unique})
+	return iob
+}
+
+// Version sets the version option
+func (iob *IndexOptionsBuilder) Version(version int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"v", version})
+	return iob
+}
+
+// DefaultLanguage sets the defaultLanguage option
+func (iob *IndexOptionsBuilder) DefaultLanguage(defaultLanguage string) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"default_language", defaultLanguage})
+	return iob
+}
+
+// LanguageOverride sets the languageOverride option
+func (iob *IndexOptionsBuilder) LanguageOverride(languageOverride string) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"language_override", languageOverride})
+	return iob
+}
+
+// TextVersion sets the textVersion option
+func (iob *IndexOptionsBuilder) TextVersion(textVersion int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"textIndexVersion", textVersion})
+	return iob
+}
+
+// Weights sets the weights option
+func (iob *IndexOptionsBuilder) Weights(weights interface{}) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"weights", weights})
+	return iob
+}
+
+// SphereVersion sets the sphereVersion option
+func (iob *IndexOptionsBuilder) SphereVersion(sphereVersion int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"2dsphereIndexVersion", sphereVersion})
+	return iob
+}
+
+// Bits sets the bits option
+func (iob *IndexOptionsBuilder) Bits(bits int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"bits", bits})
+	return iob
+}
+
+// Max sets the max option
+func (iob *IndexOptionsBuilder) Max(max float64) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"max", max})
+	return iob
+}
+
+// Min sets the min option
+func (iob *IndexOptionsBuilder) Min(min float64) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"min", min})
+	return iob
+}
+
+// BucketSize sets the bucketSize option
+func (iob *IndexOptionsBuilder) BucketSize(bucketSize int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"bucketSize", bucketSize})
+	return iob
+}
+
+// PartialFilterExpression sets the partialFilterExpression option
+func (iob *IndexOptionsBuilder) PartialFilterExpression(partialFilterExpression interface{}) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"partialFilterExpression", partialFilterExpression})
+	return iob
+}
+
+// Collation sets the collation option
+func (iob *IndexOptionsBuilder) Collation(collation interface{}) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"collation", collation})
+	return iob
+}
+
+// Build returns the BSON document from the builder
+func (iob *IndexOptionsBuilder) Build() bson.D {
+	return iob.document
+}
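
Editor's note: a sketch of chaining the builder. Build yields a plain bson.D suitable for embedding in a manually issued createIndexes command; note that IndexModel.Options in this package takes *options.IndexOptions, not this document.

```go
// Ordered options document for a unique, sparse, background-built index.
optsDoc := mongo.NewIndexOptionsBuilder().
	Unique(true).
	Sparse(true).
	Background(true).
	Name("email_1_unique").
	Build()
fmt.Println(optsDoc)
```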
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/index_view.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/index_view.go
new file mode 100644
index 0000000..1ff8d49
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/index_view.go
@@ -0,0 +1,343 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// ErrInvalidIndexValue indicates that the index Keys document has a value that is neither a number nor a string.
+var ErrInvalidIndexValue = errors.New("invalid index value")
+
+// ErrNonStringIndexName indicates that the index name specified in the options is not a string.
+var ErrNonStringIndexName = errors.New("index name must be a string")
+
+// ErrMultipleIndexDrop indicates that multiple indexes would be dropped from a call to IndexView.DropOne.
+var ErrMultipleIndexDrop = errors.New("multiple indexes would be dropped")
+
+// IndexView is used to create, drop, and list indexes on a given collection.
+type IndexView struct {
+	coll *Collection
+}
+
+// IndexModel contains information about an index.
+type IndexModel struct {
+	Keys    interface{}
+	Options *options.IndexOptions
+}
+
+// List returns a cursor iterating over all the indexes in the collection.
+func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOptions) (*Cursor, error) {
+	sess := sessionFromContext(ctx)
+
+	err := iv.coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	listCmd := command.ListIndexes{
+		NS:      iv.coll.namespace(),
+		Session: sess,
+		Clock:   iv.coll.client.clock,
+	}
+
+	readSelector := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(readpref.Primary()),
+		description.LatencySelector(iv.coll.client.localThreshold),
+	})
+	batchCursor, err := driver.ListIndexes(
+		ctx, listCmd,
+		iv.coll.client.topology,
+		readSelector,
+		iv.coll.client.id,
+		iv.coll.client.topology.SessionPool,
+		opts...,
+	)
+	if err != nil {
+		if err == command.ErrEmptyCursor {
+			return newEmptyCursor(), nil
+		}
+		return nil, replaceTopologyErr(err)
+	}
+
+	cursor, err := newCursor(batchCursor, iv.coll.registry)
+	return cursor, replaceTopologyErr(err)
+}
+
+// CreateOne creates a single index in the collection specified by the model.
+func (iv IndexView) CreateOne(ctx context.Context, model IndexModel, opts ...*options.CreateIndexesOptions) (string, error) {
+	names, err := iv.CreateMany(ctx, []IndexModel{model}, opts...)
+	if err != nil {
+		return "", err
+	}
+
+	return names[0], nil
+}
+
+// CreateMany creates multiple indexes in the collection specified by the models. The names of the
+// created indexes are returned.
+func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts ...*options.CreateIndexesOptions) ([]string, error) {
+	names := make([]string, 0, len(models))
+	indexes := bsonx.Arr{}
+
+	for _, model := range models {
+		if model.Keys == nil {
+			return nil, fmt.Errorf("index model keys cannot be nil")
+		}
+
+		name, err := getOrGenerateIndexName(iv.coll.registry, model)
+		if err != nil {
+			return nil, err
+		}
+
+		names = append(names, name)
+
+		keys, err := transformDocument(iv.coll.registry, model.Keys)
+		if err != nil {
+			return nil, err
+		}
+		index := bsonx.Doc{{"key", bsonx.Document(keys)}}
+		if model.Options != nil {
+			optsDoc, err := iv.createOptionsDoc(model.Options)
+			if err != nil {
+				return nil, err
+			}
+
+			index = append(index, optsDoc...)
+		}
+		index = index.Set("name", bsonx.String(name))
+
+		indexes = append(indexes, bsonx.Document(index))
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := iv.coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd := command.CreateIndexes{
+		NS:      iv.coll.namespace(),
+		Indexes: indexes,
+		Session: sess,
+		Clock:   iv.coll.client.clock,
+	}
+
+	_, err = driver.CreateIndexes(
+		ctx, cmd,
+		iv.coll.client.topology,
+		iv.coll.writeSelector,
+		iv.coll.client.id,
+		iv.coll.client.topology.SessionPool,
+		opts...,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return names, nil
+}
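+
+// A minimal usage sketch for CreateOne/CreateMany (the context, collection,
+// and key names are illustrative; options.Index() is assumed to be the
+// IndexOptions builder from the options package):
+//
+//	iv := coll.Indexes()
+//	name, err := iv.CreateOne(ctx, IndexModel{
+//		Keys:    bson.D{{"email", 1}},
+//		Options: options.Index().SetUnique(true),
+//	})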
+
+func (iv IndexView) createOptionsDoc(opts *options.IndexOptions) (bsonx.Doc, error) {
+	optsDoc := bsonx.Doc{}
+	if opts.Background != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"background", bsonx.Boolean(*opts.Background)})
+	}
+	if opts.ExpireAfterSeconds != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"expireAfterSeconds", bsonx.Int32(*opts.ExpireAfterSeconds)})
+	}
+	if opts.Name != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"name", bsonx.String(*opts.Name)})
+	}
+	if opts.Sparse != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"sparse", bsonx.Boolean(*opts.Sparse)})
+	}
+	if opts.StorageEngine != nil {
+		doc, err := transformDocument(iv.coll.registry, opts.StorageEngine)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, bsonx.Elem{"storageEngine", bsonx.Document(doc)})
+	}
+	if opts.Unique != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"unique", bsonx.Boolean(*opts.Unique)})
+	}
+	if opts.Version != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"v", bsonx.Int32(*opts.Version)})
+	}
+	if opts.DefaultLanguage != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"default_language", bsonx.String(*opts.DefaultLanguage)})
+	}
+	if opts.LanguageOverride != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"language_override", bsonx.String(*opts.LanguageOverride)})
+	}
+	if opts.TextVersion != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"textIndexVersion", bsonx.Int32(*opts.TextVersion)})
+	}
+	if opts.Weights != nil {
+		weightsDoc, err := transformDocument(iv.coll.registry, opts.Weights)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, bsonx.Elem{"weights", bsonx.Document(weightsDoc)})
+	}
+	if opts.SphereVersion != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"2dsphereIndexVersion", bsonx.Int32(*opts.SphereVersion)})
+	}
+	if opts.Bits != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"bits", bsonx.Int32(*opts.Bits)})
+	}
+	if opts.Max != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"max", bsonx.Double(*opts.Max)})
+	}
+	if opts.Min != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"min", bsonx.Double(*opts.Min)})
+	}
+	if opts.BucketSize != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"bucketSize", bsonx.Int32(*opts.BucketSize)})
+	}
+	if opts.PartialFilterExpression != nil {
+		doc, err := transformDocument(iv.coll.registry, opts.PartialFilterExpression)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, bsonx.Elem{"partialFilterExpression", bsonx.Document(doc)})
+	}
+	if opts.Collation != nil {
+		doc := opts.Collation.ToDocument()
+		optsDoc = append(optsDoc, bsonx.Elem{"collation", bsonx.Document(doc)})
+	}
+
+	return optsDoc, nil
+}
+
+// DropOne drops the index with the given name from the collection.
+func (iv IndexView) DropOne(ctx context.Context, name string, opts ...*options.DropIndexesOptions) (bson.Raw, error) {
+	if name == "*" {
+		return nil, ErrMultipleIndexDrop
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := iv.coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd := command.DropIndexes{
+		NS:      iv.coll.namespace(),
+		Index:   name,
+		Session: sess,
+		Clock:   iv.coll.client.clock,
+	}
+
+	return driver.DropIndexes(
+		ctx, cmd,
+		iv.coll.client.topology,
+		iv.coll.writeSelector,
+		iv.coll.client.id,
+		iv.coll.client.topology.SessionPool,
+		opts...,
+	)
+}
+
+// DropAll drops all indexes in the collection.
+func (iv IndexView) DropAll(ctx context.Context, opts ...*options.DropIndexesOptions) (bson.Raw, error) {
+	sess := sessionFromContext(ctx)
+
+	err := iv.coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd := command.DropIndexes{
+		NS:      iv.coll.namespace(),
+		Index:   "*",
+		Session: sess,
+		Clock:   iv.coll.client.clock,
+	}
+
+	return driver.DropIndexes(
+		ctx, cmd,
+		iv.coll.client.topology,
+		iv.coll.writeSelector,
+		iv.coll.client.id,
+		iv.coll.client.topology.SessionPool,
+		opts...,
+	)
+}
+
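+// A sketch of the naming convention implemented below: keys of
+// bson.D{{"email", 1}, {"age", -1}} with no explicit name option produce the
+// generated index name "email_1_age_-1".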
+func getOrGenerateIndexName(registry *bsoncodec.Registry, model IndexModel) (string, error) {
+	if model.Options != nil && model.Options.Name != nil {
+		return *model.Options.Name, nil
+	}
+
+	name := bytes.NewBufferString("")
+	first := true
+
+	keys, err := transformDocument(registry, model.Keys)
+	if err != nil {
+		return "", err
+	}
+	for _, elem := range keys {
+		if !first {
+			_, err := name.WriteRune('_')
+			if err != nil {
+				return "", err
+			}
+		}
+
+		_, err := name.WriteString(elem.Key)
+		if err != nil {
+			return "", err
+		}
+
+		_, err = name.WriteRune('_')
+		if err != nil {
+			return "", err
+		}
+
+		var value string
+
+		switch elem.Value.Type() {
+		case bsontype.Int32:
+			value = fmt.Sprintf("%d", elem.Value.Int32())
+		case bsontype.Int64:
+			value = fmt.Sprintf("%d", elem.Value.Int64())
+		case bsontype.String:
+			value = elem.Value.StringValue()
+		default:
+			return "", ErrInvalidIndexValue
+		}
+
+		_, err = name.WriteString(value)
+		if err != nil {
+			return "", err
+		}
+
+		first = false
+	}
+
+	return name.String(), nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/mongo.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/mongo.go
new file mode 100644
index 0000000..9757d9b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/mongo.go
@@ -0,0 +1,242 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"reflect"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// Dialer is used to make network connections.
+type Dialer interface {
+	DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// BSONAppender is an interface implemented by types that can marshal a
+// provided type into BSON bytes and append those bytes to the provided []byte.
+// The AppendBSON method can return a non-nil error together with a non-nil
+// []byte, and it may also write incomplete BSON to the []byte.
+type BSONAppender interface {
+	AppendBSON([]byte, interface{}) ([]byte, error)
+}
+
+// BSONAppenderFunc is an adapter function that allows any function that
+// satisfies the AppendBSON method signature to be used where a BSONAppender is
+// used.
+type BSONAppenderFunc func([]byte, interface{}) ([]byte, error)
+
+// AppendBSON implements the BSONAppender interface
+func (baf BSONAppenderFunc) AppendBSON(dst []byte, val interface{}) ([]byte, error) {
+	return baf(dst, val)
+}
+
+// MarshalError is returned when attempting to transform a value into a document
+// results in an error.
+type MarshalError struct {
+	Value interface{}
+	Err   error
+}
+
+// Error implements the error interface.
+func (me MarshalError) Error() string {
+	return fmt.Sprintf("cannot transform type %s to a *bsonx.Document", reflect.TypeOf(me.Value))
+}
+
+// Pipeline is a type that makes creating aggregation pipelines easier. It is a
+// helper and is intended for serializing to BSON.
+//
+// Example usage:
+//
+//		mongo.Pipeline{
+//			{{"$group", bson.D{{"_id", "$state"}, {"totalPop", bson.D{{"$sum", "$pop"}}}}}},
+//			{{"$match", bson.D{{"totalPop", bson.D{{"$gte", 10*1000*1000}}}}}},
+//		}
+//
+type Pipeline []bson.D
+
+// transformAndEnsureID is a hack that makes it easy to get a RawValue as the _id value. This will
+// be removed when we switch from using bsonx to bsoncore for the driver package.
+func transformAndEnsureID(registry *bsoncodec.Registry, val interface{}) (bsonx.Doc, interface{}, error) {
+	// TODO: performance is going to be pretty bad for bsonx.Doc here since we turn it into a []byte
+	// only to turn it back into a bsonx.Doc. We can fix this post beta1 when we refactor the driver
+	// package to use bsoncore.Document instead of bsonx.Doc.
+	if registry == nil {
+		registry = bson.NewRegistryBuilder().Build()
+	}
+	switch tt := val.(type) {
+	case nil:
+		return nil, nil, ErrNilDocument
+	case bsonx.Doc:
+		val = tt.Copy()
+	case []byte:
+		// Slight optimization so we'll just use MarshalBSON and not go through the codec machinery.
+		val = bson.Raw(tt)
+	}
+
+	// TODO(skriptble): Use a pool of these instead.
+	buf := make([]byte, 0, 256)
+	b, err := bson.MarshalAppendWithRegistry(registry, buf, val)
+	if err != nil {
+		return nil, nil, MarshalError{Value: val, Err: err}
+	}
+
+	d, err := bsonx.ReadDoc(b)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var id interface{}
+
+	idx := d.IndexOf("_id")
+	var idElem bsonx.Elem
+	switch idx {
+	case -1:
+		idElem = bsonx.Elem{"_id", bsonx.ObjectID(primitive.NewObjectID())}
+		d = append(d, bsonx.Elem{})
+		copy(d[1:], d)
+		d[0] = idElem
+	default:
+		idElem = d[idx]
+		copy(d[1:idx+1], d[0:idx])
+		d[0] = idElem
+	}
+
+	t, data, err := idElem.Value.MarshalAppendBSONValue(buf[:0])
+	if err != nil {
+		return nil, nil, err
+	}
+
+	err = bson.RawValue{Type: t, Value: data}.UnmarshalWithRegistry(registry, &id)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return d, id, nil
+}
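+
+// Behavioral sketch: transforming bson.D{{"x", 1}} prepends a freshly
+// generated ObjectID as the "_id" element, while transforming a document that
+// already contains "_id" moves that element to the front and returns its
+// value as the id.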
+
+func transformDocument(registry *bsoncodec.Registry, val interface{}) (bsonx.Doc, error) {
+	if registry == nil {
+		registry = bson.NewRegistryBuilder().Build()
+	}
+	if val == nil {
+		return nil, ErrNilDocument
+	}
+	if doc, ok := val.(bsonx.Doc); ok {
+		return doc.Copy(), nil
+	}
+	if bs, ok := val.([]byte); ok {
+		// Slight optimization so we'll just use MarshalBSON and not go through the codec machinery.
+		val = bson.Raw(bs)
+	}
+
+	// TODO(skriptble): Use a pool of these instead.
+	buf := make([]byte, 0, 256)
+	b, err := bson.MarshalAppendWithRegistry(registry, buf[:0], val)
+	if err != nil {
+		return nil, MarshalError{Value: val, Err: err}
+	}
+	return bsonx.ReadDoc(b)
+}
+
+func ensureID(d bsonx.Doc) (bsonx.Doc, interface{}) {
+	var id interface{}
+
+	elem, err := d.LookupElementErr("_id")
+	switch err.(type) {
+	case nil:
+		id = elem
+	default:
+		oid := primitive.NewObjectID()
+		d = append(d, bsonx.Elem{"_id", bsonx.ObjectID(oid)})
+		id = oid
+	}
+	return d, id
+}
+
+func ensureDollarKey(doc bsonx.Doc) error {
+	if len(doc) == 0 {
+		return errors.New("update document must have at least one element")
+	}
+	if !strings.HasPrefix(doc[0].Key, "$") {
+		return errors.New("update document must contain key beginning with '$'")
+	}
+	return nil
+}
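+
+// For example: an update document of the form {"$set": {"x": 1}} passes this
+// check, while a replacement-style document such as {"x": 1} is rejected
+// because its first key does not begin with '$'.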
+
+func transformAggregatePipeline(registry *bsoncodec.Registry, pipeline interface{}) (bsonx.Arr, error) {
+	pipelineArr := bsonx.Arr{}
+	switch t := pipeline.(type) {
+	case bsoncodec.ValueMarshaler:
+		btype, val, err := t.MarshalBSONValue()
+		if err != nil {
+			return nil, err
+		}
+		if btype != bsontype.Array {
+			return nil, fmt.Errorf("ValueMarshaler returned a %v, but was expecting %v", btype, bsontype.Array)
+		}
+		err = pipelineArr.UnmarshalBSONValue(btype, val)
+		if err != nil {
+			return nil, err
+		}
+	default:
+		val := reflect.ValueOf(t)
+		if !val.IsValid() || (val.Kind() != reflect.Slice && val.Kind() != reflect.Array) {
+			return nil, fmt.Errorf("can only transform slices and arrays into aggregation pipelines, but got %v", val.Kind())
+		}
+		for idx := 0; idx < val.Len(); idx++ {
+			elem, err := transformDocument(registry, val.Index(idx).Interface())
+			if err != nil {
+				return nil, err
+			}
+			pipelineArr = append(pipelineArr, bsonx.Document(elem))
+		}
+	}
+
+	return pipelineArr, nil
+}
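+
+// For example: a mongo.Pipeline, a []bson.D, or any bsoncodec.ValueMarshaler
+// that marshals to a BSON array is accepted as the pipeline argument here; a
+// scalar value such as an int is rejected because it is neither a slice nor
+// an array.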
+
+// countDocumentsAggregatePipeline builds the aggregation pipeline for the CountDocuments operation.
+func countDocumentsAggregatePipeline(registry *bsoncodec.Registry, filter interface{}, opts *options.CountOptions) (bsonx.Arr, error) {
+	pipeline := bsonx.Arr{}
+	filterDoc, err := transformDocument(registry, filter)
+
+	if err != nil {
+		return nil, err
+	}
+	pipeline = append(pipeline, bsonx.Document(bsonx.Doc{{"$match", bsonx.Document(filterDoc)}}))
+
+	if opts != nil {
+		if opts.Skip != nil {
+			pipeline = append(pipeline, bsonx.Document(bsonx.Doc{{"$skip", bsonx.Int64(*opts.Skip)}}))
+		}
+		if opts.Limit != nil {
+			pipeline = append(pipeline, bsonx.Document(bsonx.Doc{{"$limit", bsonx.Int64(*opts.Limit)}}))
+		}
+	}
+
+	pipeline = append(pipeline, bsonx.Document(bsonx.Doc{
+		{"$group", bsonx.Document(bsonx.Doc{
+			{"_id", bsonx.Null()},
+			{"n", bsonx.Document(bsonx.Doc{{"$sum", bsonx.Int32(1)}})},
+		})},
+	},
+	))
+
+	return pipeline, nil
+}
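+
+// For example: a filter of {"status": "A"} with Skip set to 5 and Limit set
+// to 10 yields a pipeline equivalent to:
+//
+//	[{$match: {status: "A"}}, {$skip: 5}, {$limit: 10},
+//	 {$group: {_id: null, n: {$sum: 1}}}]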
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/aggregateoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/aggregateoptions.go
new file mode 100644
index 0000000..3700d84
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/aggregateoptions.go
@@ -0,0 +1,119 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "time"
+
+// AggregateOptions represents all possible options to the aggregate() function
+type AggregateOptions struct {
+	AllowDiskUse             *bool          // Enables writing to temporary files. When set to true, aggregation stages can write data to the _tmp subdirectory in the dbPath directory
+	BatchSize                *int32         // The number of documents to return per batch
+	BypassDocumentValidation *bool          // If true, allows the write to opt out of document-level validation. This only applies when the $out stage is specified
+	Collation                *Collation     // Specifies a collation
+	MaxTime                  *time.Duration // The maximum amount of time to allow the query to run
+	MaxAwaitTime             *time.Duration // The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query
+	Comment                  *string        // Enables users to specify an arbitrary string to help trace the operation through the database profiler, currentOp and logs.
+	Hint                     interface{}    // The index to use for the aggregation. The hint does not apply to $lookup and $graphLookup stages
+}
+
+// Aggregate returns a pointer to a new AggregateOptions
+func Aggregate() *AggregateOptions {
+	return &AggregateOptions{}
+}
+
+// SetAllowDiskUse enables writing to temporary files. When set to true,
+// aggregation stages can write data to the _tmp subdirectory in the
+// dbPath directory
+func (ao *AggregateOptions) SetAllowDiskUse(b bool) *AggregateOptions {
+	ao.AllowDiskUse = &b
+	return ao
+}
+
+// SetBatchSize specifies the number of documents to return per batch
+func (ao *AggregateOptions) SetBatchSize(i int32) *AggregateOptions {
+	ao.BatchSize = &i
+	return ao
+}
+
+// SetBypassDocumentValidation allows the write to opt out of document-level
+// validation. This only applies when the $out stage is specified.
+// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
+func (ao *AggregateOptions) SetBypassDocumentValidation(b bool) *AggregateOptions {
+	ao.BypassDocumentValidation = &b
+	return ao
+}
+
+// SetCollation specifies a collation.
+// Valid for server versions >= 3.4
+func (ao *AggregateOptions) SetCollation(c *Collation) *AggregateOptions {
+	ao.Collation = c
+	return ao
+}
+
+// SetMaxTime specifies the maximum amount of time to allow the query to run
+func (ao *AggregateOptions) SetMaxTime(d time.Duration) *AggregateOptions {
+	ao.MaxTime = &d
+	return ao
+}
+
+// SetMaxAwaitTime specifies the maximum amount of time for the server to
+// wait on new documents to satisfy a tailable cursor query
+// For servers < 3.2, this option is ignored
+func (ao *AggregateOptions) SetMaxAwaitTime(d time.Duration) *AggregateOptions {
+	ao.MaxAwaitTime = &d
+	return ao
+}
+
+// SetComment enables users to specify an arbitrary string to help trace the
+// operation through the database profiler, currentOp and logs.
+func (ao *AggregateOptions) SetComment(s string) *AggregateOptions {
+	ao.Comment = &s
+	return ao
+}
+
+// SetHint specifies the index to use for the aggregation. The hint does not
+// apply to $lookup and $graphLookup stages
+func (ao *AggregateOptions) SetHint(h interface{}) *AggregateOptions {
+	ao.Hint = h
+	return ao
+}
+
+// MergeAggregateOptions combines the given AggregateOptions into a single AggregateOptions in a last-one-wins fashion.
+func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions {
+	aggOpts := Aggregate()
+	for _, ao := range opts {
+		if ao == nil {
+			continue
+		}
+		if ao.AllowDiskUse != nil {
+			aggOpts.AllowDiskUse = ao.AllowDiskUse
+		}
+		if ao.BatchSize != nil {
+			aggOpts.BatchSize = ao.BatchSize
+		}
+		if ao.BypassDocumentValidation != nil {
+			aggOpts.BypassDocumentValidation = ao.BypassDocumentValidation
+		}
+		if ao.Collation != nil {
+			aggOpts.Collation = ao.Collation
+		}
+		if ao.MaxTime != nil {
+			aggOpts.MaxTime = ao.MaxTime
+		}
+		if ao.MaxAwaitTime != nil {
+			aggOpts.MaxAwaitTime = ao.MaxAwaitTime
+		}
+		if ao.Comment != nil {
+			aggOpts.Comment = ao.Comment
+		}
+		if ao.Hint != nil {
+			aggOpts.Hint = ao.Hint
+		}
+	}
+
+	return aggOpts
+}
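+
+// A sketch of the merge semantics above: given
+//
+//	a := Aggregate().SetBatchSize(50).SetAllowDiskUse(true)
+//	b := Aggregate().SetBatchSize(100)
+//
+// MergeAggregateOptions(a, b) yields options with BatchSize 100 (last one
+// wins) and AllowDiskUse true (left untouched by b).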
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/bulkwriteoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/bulkwriteoptions.go
new file mode 100644
index 0000000..86282fa
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/bulkwriteoptions.go
@@ -0,0 +1,55 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// DefaultOrdered is the default value of the Ordered option for a BulkWriteOptions struct created from BulkWrite.
+var DefaultOrdered = true
+
+// BulkWriteOptions represents all possible options for a bulkWrite operation.
+type BulkWriteOptions struct {
+	BypassDocumentValidation *bool // If true, allows the write to opt out of document-level validation.
+	Ordered                  *bool // If true, when a write fails, return without performing remaining writes. Defaults to true.
+}
+
+// BulkWrite creates a new *BulkWriteOptions
+func BulkWrite() *BulkWriteOptions {
+	return &BulkWriteOptions{
+		Ordered: &DefaultOrdered,
+	}
+}
+
+// SetOrdered configures the ordered option. If true, when a write fails, the function will return without attempting
+// remaining writes. Defaults to true.
+func (b *BulkWriteOptions) SetOrdered(ordered bool) *BulkWriteOptions {
+	b.Ordered = &ordered
+	return b
+}
+
+// SetBypassDocumentValidation specifies if the write should opt out of document-level validation.
+// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
+func (b *BulkWriteOptions) SetBypassDocumentValidation(bypass bool) *BulkWriteOptions {
+	b.BypassDocumentValidation = &bypass
+	return b
+}
+
+// MergeBulkWriteOptions combines the given *BulkWriteOptions into a single *BulkWriteOptions in a last one wins fashion.
+func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions {
+	b := BulkWrite()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Ordered != nil {
+			b.Ordered = opt.Ordered
+		}
+		if opt.BypassDocumentValidation != nil {
+			b.BypassDocumentValidation = opt.BypassDocumentValidation
+		}
+	}
+
+	return b
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/changestreamoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/changestreamoptions.go
new file mode 100644
index 0000000..c8776ac
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/changestreamoptions.go
@@ -0,0 +1,97 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"time"
+)
+
+// ChangeStreamOptions represents all possible options to a change stream
+type ChangeStreamOptions struct {
+	BatchSize            *int32               // The number of documents to return per batch
+	Collation            *Collation           // Specifies a collation
+	FullDocument         *FullDocument        // When set to 'updateLookup', the change notification for partial updates will include both a delta describing the changes to the document and a copy of the entire document that was changed from some time after the change occurred.
+	MaxAwaitTime         *time.Duration       // The maximum amount of time for the server to wait on new documents to satisfy a change stream query
+	ResumeAfter          interface{}          // Specifies the logical starting point for the new change stream
+	StartAtOperationTime *primitive.Timestamp // Ensures that a change stream will only provide changes that occurred after a timestamp.
+}
+
+// ChangeStream returns a pointer to a new ChangeStreamOptions
+func ChangeStream() *ChangeStreamOptions {
+	return &ChangeStreamOptions{}
+}
+
+// SetBatchSize specifies the number of documents to return per batch
+func (cso *ChangeStreamOptions) SetBatchSize(i int32) *ChangeStreamOptions {
+	cso.BatchSize = &i
+	return cso
+}
+
+// SetCollation specifies a collation
+func (cso *ChangeStreamOptions) SetCollation(c Collation) *ChangeStreamOptions {
+	cso.Collation = &c
+	return cso
+}
+
+// SetFullDocument specifies the fullDocument option.
+// When set to 'updateLookup', the change notification for partial updates will
+// include both a delta describing the changes to the document and a copy of
+// the entire document that was changed from some time after the change
+// occurred.
+func (cso *ChangeStreamOptions) SetFullDocument(fd FullDocument) *ChangeStreamOptions {
+	cso.FullDocument = &fd
+	return cso
+}
+
+// SetMaxAwaitTime specifies the maximum amount of time for the server to wait on new documents to satisfy a change stream query
+func (cso *ChangeStreamOptions) SetMaxAwaitTime(d time.Duration) *ChangeStreamOptions {
+	cso.MaxAwaitTime = &d
+	return cso
+}
+
+// SetResumeAfter specifies the logical starting point for the new change stream
+func (cso *ChangeStreamOptions) SetResumeAfter(rt interface{}) *ChangeStreamOptions {
+	cso.ResumeAfter = rt
+	return cso
+}
+
+// SetStartAtOperationTime ensures that a change stream will only provide changes that occurred after a specified timestamp.
+func (cso *ChangeStreamOptions) SetStartAtOperationTime(t *primitive.Timestamp) *ChangeStreamOptions {
+	cso.StartAtOperationTime = t
+	return cso
+}
+
+// MergeChangeStreamOptions combines the given ChangeStreamOptions into a single ChangeStreamOptions in a last-one-wins fashion.
+func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions {
+	csOpts := ChangeStream()
+	for _, cso := range opts {
+		if cso == nil {
+			continue
+		}
+		if cso.BatchSize != nil {
+			csOpts.BatchSize = cso.BatchSize
+		}
+		if cso.Collation != nil {
+			csOpts.Collation = cso.Collation
+		}
+		if cso.FullDocument != nil {
+			csOpts.FullDocument = cso.FullDocument
+		}
+		if cso.MaxAwaitTime != nil {
+			csOpts.MaxAwaitTime = cso.MaxAwaitTime
+		}
+		if cso.ResumeAfter != nil {
+			csOpts.ResumeAfter = cso.ResumeAfter
+		}
+		if cso.StartAtOperationTime != nil {
+			csOpts.StartAtOperationTime = cso.StartAtOperationTime
+		}
+	}
+
+	return csOpts
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/clientoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/clientoptions.go
new file mode 100644
index 0000000..e09a9c1
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/clientoptions.go
@@ -0,0 +1,424 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/event"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/connstring"
+)
+
+// ContextDialer makes new network connections
+type ContextDialer interface {
+	DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// SSLOpt holds client SSL options.
+//
+// Enabled indicates whether SSL should be enabled.
+//
+// ClientCertificateKeyFile specifies the file containing the client certificate and private key
+// used for authentication.
+//
+// ClientCertificateKeyPassword provides a callback that returns a password used for decrypting the
+// private key of a PEM file (if one is provided).
+//
+// Insecure indicates whether to skip the verification of the server certificate and hostname.
+//
+// CaFile specifies the file containing the certificate authority used for SSL connections.
+type SSLOpt struct {
+	Enabled                      bool
+	ClientCertificateKeyFile     string
+	ClientCertificateKeyPassword func() string
+	Insecure                     bool
+	CaFile                       string
+}
+
+// Credential holds auth options.
+//
+// AuthMechanism indicates the mechanism to use for authentication.
+// Supported values include "SCRAM-SHA-256", "SCRAM-SHA-1", "MONGODB-CR", "PLAIN", "GSSAPI", and "MONGODB-X509".
+//
+// AuthMechanismProperties specifies additional configuration options which may be used by certain
+// authentication mechanisms.
+//
+// AuthSource specifies the database to authenticate against.
+//
+// Username specifies the username that will be authenticated.
+//
+// Password specifies the password used for authentication.
+type Credential struct {
+	AuthMechanism           string
+	AuthMechanismProperties map[string]string
+	AuthSource              string
+	Username                string
+	Password                string
+}
+
+// ClientOptions represents all possible options to configure a client.
+type ClientOptions struct {
+	TopologyOptions []topology.Option
+	ConnString      connstring.ConnString
+	RetryWrites     *bool
+	ReadPreference  *readpref.ReadPref
+	ReadConcern     *readconcern.ReadConcern
+	WriteConcern    *writeconcern.WriteConcern
+	Registry        *bsoncodec.Registry
+}
+
+// Client creates a new ClientOptions instance.
+func Client() *ClientOptions {
+	return &ClientOptions{
+		TopologyOptions: make([]topology.Option, 0),
+	}
+}
+
+// SetAppName specifies the client application name. This value is used by MongoDB when it logs
+// connection information and profile information, such as slow queries.
+func (c *ClientOptions) SetAppName(s string) *ClientOptions {
+	c.ConnString.AppName = s
+
+	return c
+}
+
+// SetAuth sets the authentication options.
+func (c *ClientOptions) SetAuth(auth Credential) *ClientOptions {
+	c.ConnString.AuthMechanism = auth.AuthMechanism
+	c.ConnString.AuthMechanismProperties = auth.AuthMechanismProperties
+	c.ConnString.AuthSource = auth.AuthSource
+	c.ConnString.Username = auth.Username
+	c.ConnString.Password = auth.Password
+
+	return c
+}
+
+// SetConnectTimeout specifies the timeout for an initial connection to a server.
+// If a custom Dialer is used, this option will not be applied and the user is
+// responsible for setting the connect timeout on the dialer itself.
+func (c *ClientOptions) SetConnectTimeout(d time.Duration) *ClientOptions {
+	c.ConnString.ConnectTimeout = d
+	c.ConnString.ConnectTimeoutSet = true
+
+	return c
+}
+
+// SetDialer specifies a custom dialer used to dial new connections to a server.
+// If a custom dialer is not set, a net.Dialer with a 300 second keepalive time will be used by default.
+func (c *ClientOptions) SetDialer(d ContextDialer) *ClientOptions {
+	c.TopologyOptions = append(
+		c.TopologyOptions,
+		topology.WithServerOptions(func(opts ...topology.ServerOption) []topology.ServerOption {
+			return append(
+				opts,
+				topology.WithConnectionOptions(func(opts ...connection.Option) []connection.Option {
+					return append(
+						opts,
+						connection.WithDialer(func(connection.Dialer) connection.Dialer {
+							return d
+						}),
+					)
+				}),
+			)
+		}),
+	)
+
+	return c
+}
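+
+// For example (a sketch; the timeout value is illustrative): any value with a
+// DialContext method satisfies ContextDialer, including *net.Dialer:
+//
+//	opts := options.Client().SetDialer(&net.Dialer{Timeout: 5 * time.Second})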
+
+// SetMonitor specifies a command monitor used to observe the commands sent by a client.
+func (c *ClientOptions) SetMonitor(m *event.CommandMonitor) *ClientOptions {
+	c.TopologyOptions = append(
+		c.TopologyOptions,
+		topology.WithServerOptions(func(opts ...topology.ServerOption) []topology.ServerOption {
+			return append(
+				opts,
+				topology.WithConnectionOptions(func(opts ...connection.Option) []connection.Option {
+					return append(
+						opts,
+						connection.WithMonitor(func(*event.CommandMonitor) *event.CommandMonitor {
+							return m
+						}),
+					)
+				}),
+			)
+		}),
+	)
+
+	return c
+}
+
+// SetHeartbeatInterval specifies the interval to wait between server monitoring checks.
+func (c *ClientOptions) SetHeartbeatInterval(d time.Duration) *ClientOptions {
+	c.ConnString.HeartbeatInterval = d
+	c.ConnString.HeartbeatIntervalSet = true
+
+	return c
+}
+
+// SetHosts specifies the initial list of addresses from which to discover the rest of the cluster.
+func (c *ClientOptions) SetHosts(s []string) *ClientOptions {
+	c.ConnString.Hosts = s
+
+	return c
+}
+
+// SetLocalThreshold specifies how far to distribute queries beyond the server
+// with the fastest round-trip time. If a server's round-trip time is more than
+// LocalThreshold slower than the fastest, the driver will not send queries to that server.
+func (c *ClientOptions) SetLocalThreshold(d time.Duration) *ClientOptions {
+	c.ConnString.LocalThreshold = d
+	c.ConnString.LocalThresholdSet = true
+
+	return c
+}
+
+// SetMaxConnIdleTime specifies the maximum amount of time that a connection can remain idle
+// in a connection pool before being removed and closed.
+func (c *ClientOptions) SetMaxConnIdleTime(d time.Duration) *ClientOptions {
+	c.ConnString.MaxConnIdleTime = d
+	c.ConnString.MaxConnIdleTimeSet = true
+
+	return c
+}
+
+// SetMaxPoolSize specifies the max size of a server's connection pool.
+func (c *ClientOptions) SetMaxPoolSize(u uint16) *ClientOptions {
+	c.ConnString.MaxPoolSize = u
+	c.ConnString.MaxPoolSizeSet = true
+
+	return c
+}
+
+// SetReadConcern specifies the read concern.
+func (c *ClientOptions) SetReadConcern(rc *readconcern.ReadConcern) *ClientOptions {
+	c.ReadConcern = rc
+
+	return c
+}
+
+// SetReadPreference specifies the read preference.
+func (c *ClientOptions) SetReadPreference(rp *readpref.ReadPref) *ClientOptions {
+	c.ReadPreference = rp
+
+	return c
+}
+
+// SetRegistry specifies the bsoncodec.Registry.
+func (c *ClientOptions) SetRegistry(registry *bsoncodec.Registry) *ClientOptions {
+	c.Registry = registry
+
+	// add registry to the server options so that it will be used for the cursors built by this client
+	c.TopologyOptions = append(
+		c.TopologyOptions,
+		topology.WithServerOptions(func(opts ...topology.ServerOption) []topology.ServerOption {
+			return append(
+				opts,
+				topology.WithRegistry(func(*bsoncodec.Registry) *bsoncodec.Registry {
+					return registry
+				}),
+			)
+		}),
+	)
+
+	return c
+}
+
+// SetReplicaSet specifies the name of the replica set of the cluster.
+func (c *ClientOptions) SetReplicaSet(s string) *ClientOptions {
+	c.ConnString.ReplicaSet = s
+
+	return c
+}
+
+// SetRetryWrites specifies whether the client has retryable writes enabled.
+func (c *ClientOptions) SetRetryWrites(b bool) *ClientOptions {
+	c.RetryWrites = &b
+
+	return c
+}
+
+// SetServerSelectionTimeout specifies the maximum amount of time to block for server selection.
+func (c *ClientOptions) SetServerSelectionTimeout(d time.Duration) *ClientOptions {
+	c.ConnString.ServerSelectionTimeout = d
+	c.ConnString.ServerSelectionTimeoutSet = true
+
+	return c
+}
+
+// SetSingle specifies whether the driver should connect directly to the server instead of
+// auto-discovering other servers in the cluster.
+func (c *ClientOptions) SetSingle(b bool) *ClientOptions {
+	if b {
+		c.ConnString.Connect = connstring.SingleConnect
+	} else {
+		c.ConnString.Connect = connstring.AutoConnect
+	}
+	c.ConnString.ConnectSet = true
+
+	return c
+}
+
+// SetSocketTimeout specifies the amount of time to attempt to send or receive on a socket
+// before the attempt times out.
+func (c *ClientOptions) SetSocketTimeout(d time.Duration) *ClientOptions {
+	c.ConnString.SocketTimeout = d
+	c.ConnString.SocketTimeoutSet = true
+
+	return c
+}
+
+// SetSSL sets SSL options.
+func (c *ClientOptions) SetSSL(ssl *SSLOpt) *ClientOptions {
+	c.ConnString.SSL = ssl.Enabled
+	c.ConnString.SSLSet = true
+
+	if ssl.ClientCertificateKeyFile != "" {
+		c.ConnString.SSLClientCertificateKeyFile = ssl.ClientCertificateKeyFile
+		c.ConnString.SSLClientCertificateKeyFileSet = true
+	}
+
+	if ssl.ClientCertificateKeyPassword != nil {
+		c.ConnString.SSLClientCertificateKeyPassword = ssl.ClientCertificateKeyPassword
+		c.ConnString.SSLClientCertificateKeyPasswordSet = true
+	}
+
+	c.ConnString.SSLInsecure = ssl.Insecure
+	c.ConnString.SSLInsecureSet = true
+
+	if ssl.CaFile != "" {
+		c.ConnString.SSLCaFile = ssl.CaFile
+		c.ConnString.SSLCaFileSet = true
+	}
+
+	return c
+}
+
+// SetWriteConcern sets the write concern.
+func (c *ClientOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *ClientOptions {
+	c.WriteConcern = wc
+
+	return c
+}
+
+// MergeClientOptions combines the given connstring and *ClientOptions into a single *ClientOptions in a last one wins
+// fashion. The given connstring will be used for the default options, which can be overwritten using the given
+// *ClientOptions.
+func MergeClientOptions(cs connstring.ConnString, opts ...*ClientOptions) *ClientOptions {
+	c := Client()
+	c.ConnString = cs
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		c.TopologyOptions = append(c.TopologyOptions, opt.TopologyOptions...)
+
+		if an := opt.ConnString.AppName; an != "" {
+			c.ConnString.AppName = an
+		}
+		if am := opt.ConnString.AuthMechanism; len(am) != 0 {
+			c.ConnString.AuthMechanism = am
+		}
+		if amp := opt.ConnString.AuthMechanismProperties; amp != nil {
+			c.ConnString.AuthMechanismProperties = amp
+		}
+		if as := opt.ConnString.AuthSource; len(as) != 0 {
+			c.ConnString.AuthSource = as
+		}
+		if u := opt.ConnString.Username; len(u) != 0 {
+			c.ConnString.Username = u
+		}
+		if p := opt.ConnString.Password; len(p) != 0 {
+			c.ConnString.Password = p
+		}
+		if opt.ConnString.ConnectTimeoutSet {
+			c.ConnString.ConnectTimeoutSet = true
+			c.ConnString.ConnectTimeout = opt.ConnString.ConnectTimeout
+		}
+		if opt.ConnString.HeartbeatIntervalSet {
+			c.ConnString.HeartbeatIntervalSet = true
+			c.ConnString.HeartbeatInterval = opt.ConnString.HeartbeatInterval
+		}
+		if h := opt.ConnString.Hosts; h != nil {
+			c.ConnString.Hosts = h
+		}
+		if opt.ConnString.LocalThresholdSet {
+			c.ConnString.LocalThresholdSet = true
+			c.ConnString.LocalThreshold = opt.ConnString.LocalThreshold
+		}
+		if opt.ConnString.MaxConnIdleTimeSet {
+			c.ConnString.MaxConnIdleTimeSet = true
+			c.ConnString.MaxConnIdleTime = opt.ConnString.MaxConnIdleTime
+		}
+		if opt.ConnString.MaxPoolSizeSet {
+			c.ConnString.MaxPoolSizeSet = true
+			c.ConnString.MaxPoolSize = opt.ConnString.MaxPoolSize
+		}
+		if opt.ReadConcern != nil {
+			c.ReadConcern = opt.ReadConcern
+		}
+		if opt.ReadPreference != nil {
+			c.ReadPreference = opt.ReadPreference
+		}
+		if opt.Registry != nil {
+			c.Registry = opt.Registry
+		}
+		if rs := opt.ConnString.ReplicaSet; rs != "" {
+			c.ConnString.ReplicaSet = rs
+		}
+		if opt.RetryWrites != nil {
+			c.RetryWrites = opt.RetryWrites
+		}
+		if opt.ConnString.ServerSelectionTimeoutSet {
+			c.ConnString.ServerSelectionTimeoutSet = true
+			c.ConnString.ServerSelectionTimeout = opt.ConnString.ServerSelectionTimeout
+		}
+		if opt.ConnString.ConnectSet {
+			c.ConnString.ConnectSet = true
+			c.ConnString.Connect = opt.ConnString.Connect
+		}
+		if opt.ConnString.SocketTimeoutSet {
+			c.ConnString.SocketTimeoutSet = true
+			c.ConnString.SocketTimeout = opt.ConnString.SocketTimeout
+		}
+		if opt.ConnString.SSLSet {
+			c.ConnString.SSLSet = true
+			c.ConnString.SSL = opt.ConnString.SSL
+		}
+		if opt.ConnString.SSLClientCertificateKeyFileSet {
+			c.ConnString.SSLClientCertificateKeyFileSet = true
+			c.ConnString.SSLClientCertificateKeyFile = opt.ConnString.SSLClientCertificateKeyFile
+		}
+		if opt.ConnString.SSLClientCertificateKeyPasswordSet {
+			c.ConnString.SSLClientCertificateKeyPasswordSet = true
+			c.ConnString.SSLClientCertificateKeyPassword = opt.ConnString.SSLClientCertificateKeyPassword
+		}
+		if opt.ConnString.SSLInsecureSet {
+			c.ConnString.SSLInsecureSet = true
+			c.ConnString.SSLInsecure = opt.ConnString.SSLInsecure
+		}
+		if opt.ConnString.SSLCaFileSet {
+			c.ConnString.SSLCaFileSet = true
+			c.ConnString.SSLCaFile = opt.ConnString.SSLCaFile
+		}
+		if opt.WriteConcern != nil {
+			c.WriteConcern = opt.WriteConcern
+		}
+	}
+
+	return c
+}
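+
+// A usage sketch for the merge above (the URI is illustrative, and
+// connstring.Parse is assumed from the imported connstring package):
+//
+//	cs, _ := connstring.Parse("mongodb://localhost:27017")
+//	merged := MergeClientOptions(cs, Client().SetAppName("example"))
+//	// merged.ConnString.AppName == "example"; other fields come from cs.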
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/collectionoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/collectionoptions.go
new file mode 100644
index 0000000..3415505
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/collectionoptions.go
@@ -0,0 +1,77 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+)
+
+// CollectionOptions represents all possible options to configure a Collection.
+type CollectionOptions struct {
+	ReadConcern    *readconcern.ReadConcern   // The read concern for operations in the collection.
+	WriteConcern   *writeconcern.WriteConcern // The write concern for operations in the collection.
+	ReadPreference *readpref.ReadPref         // The read preference for operations in the collection.
+	Registry       *bsoncodec.Registry        // The registry to be used to construct BSON encoders and decoders for the collection.
+}
+
+// Collection creates a new CollectionOptions instance
+func Collection() *CollectionOptions {
+	return &CollectionOptions{}
+}
+
+// SetReadConcern sets the read concern for the collection.
+func (c *CollectionOptions) SetReadConcern(rc *readconcern.ReadConcern) *CollectionOptions {
+	c.ReadConcern = rc
+	return c
+}
+
+// SetWriteConcern sets the write concern for the collection.
+func (c *CollectionOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *CollectionOptions {
+	c.WriteConcern = wc
+	return c
+}
+
+// SetReadPreference sets the read preference for the collection.
+func (c *CollectionOptions) SetReadPreference(rp *readpref.ReadPref) *CollectionOptions {
+	c.ReadPreference = rp
+	return c
+}
+
+// SetRegistry sets the bsoncodec Registry for the collection.
+func (c *CollectionOptions) SetRegistry(r *bsoncodec.Registry) *CollectionOptions {
+	c.Registry = r
+	return c
+}
+
+// MergeCollectionOptions combines the *CollectionOptions arguments into a single *CollectionOptions in a last one wins
+// fashion.
+func MergeCollectionOptions(opts ...*CollectionOptions) *CollectionOptions {
+	c := Collection()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ReadConcern != nil {
+			c.ReadConcern = opt.ReadConcern
+		}
+		if opt.WriteConcern != nil {
+			c.WriteConcern = opt.WriteConcern
+		}
+		if opt.ReadPreference != nil {
+			c.ReadPreference = opt.ReadPreference
+		}
+		if opt.Registry != nil {
+			c.Registry = opt.Registry
+		}
+	}
+
+	return c
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/countoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/countoptions.go
new file mode 100644
index 0000000..be3baab
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/countoptions.go
@@ -0,0 +1,81 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "time"
+
+// CountOptions represents all possible options to the count() function
+type CountOptions struct {
+	Collation *Collation     // Specifies a collation
+	Hint      interface{}    // The index to use
+	Limit     *int64         // The maximum number of documents to count
+	MaxTime   *time.Duration // The maximum amount of time to allow the operation to run
+	Skip      *int64         // The number of documents to skip before counting
+}
+
+// Count returns a pointer to a new CountOptions
+func Count() *CountOptions {
+	return &CountOptions{}
+}
+
+// SetCollation specifies a collation
+// Valid for server versions >= 3.4
+func (co *CountOptions) SetCollation(c *Collation) *CountOptions {
+	co.Collation = c
+	return co
+}
+
+// SetHint specifies the index to use
+func (co *CountOptions) SetHint(h interface{}) *CountOptions {
+	co.Hint = h
+	return co
+}
+
+// SetLimit specifies the maximum number of documents to count
+func (co *CountOptions) SetLimit(i int64) *CountOptions {
+	co.Limit = &i
+	return co
+}
+
+// SetMaxTime specifies the maximum amount of time to allow the operation to run
+func (co *CountOptions) SetMaxTime(d time.Duration) *CountOptions {
+	co.MaxTime = &d
+	return co
+}
+
+// SetSkip specifies the number of documents to skip before counting
+func (co *CountOptions) SetSkip(i int64) *CountOptions {
+	co.Skip = &i
+	return co
+}
+
+// MergeCountOptions combines the given CountOptions into a single CountOptions in a last-one-wins fashion.
+func MergeCountOptions(opts ...*CountOptions) *CountOptions {
+	countOpts := Count()
+	for _, co := range opts {
+		if co == nil {
+			continue
+		}
+		if co.Collation != nil {
+			countOpts.Collation = co.Collation
+		}
+		if co.Hint != nil {
+			countOpts.Hint = co.Hint
+		}
+		if co.Limit != nil {
+			countOpts.Limit = co.Limit
+		}
+		if co.MaxTime != nil {
+			countOpts.MaxTime = co.MaxTime
+		}
+		if co.Skip != nil {
+			countOpts.Skip = co.Skip
+		}
+	}
+
+	return countOpts
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/dboptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/dboptions.go
new file mode 100644
index 0000000..989cb13
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/dboptions.go
@@ -0,0 +1,77 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+)
+
+// DatabaseOptions represents all possible options to configure a Database.
+type DatabaseOptions struct {
+	ReadConcern    *readconcern.ReadConcern   // The read concern for operations in the database.
+	WriteConcern   *writeconcern.WriteConcern // The write concern for operations in the database.
+	ReadPreference *readpref.ReadPref         // The read preference for operations in the database.
+	Registry       *bsoncodec.Registry        // The registry to be used to construct BSON encoders and decoders for the database.
+}
+
+// Database creates a new DatabaseOptions instance
+func Database() *DatabaseOptions {
+	return &DatabaseOptions{}
+}
+
+// SetReadConcern sets the read concern for the database.
+func (d *DatabaseOptions) SetReadConcern(rc *readconcern.ReadConcern) *DatabaseOptions {
+	d.ReadConcern = rc
+	return d
+}
+
+// SetWriteConcern sets the write concern for the database.
+func (d *DatabaseOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *DatabaseOptions {
+	d.WriteConcern = wc
+	return d
+}
+
+// SetReadPreference sets the read preference for the database.
+func (d *DatabaseOptions) SetReadPreference(rp *readpref.ReadPref) *DatabaseOptions {
+	d.ReadPreference = rp
+	return d
+}
+
+// SetRegistry sets the bsoncodec Registry for the database.
+func (d *DatabaseOptions) SetRegistry(r *bsoncodec.Registry) *DatabaseOptions {
+	d.Registry = r
+	return d
+}
+
+// MergeDatabaseOptions combines the *DatabaseOptions arguments into a single *DatabaseOptions in a last one wins
+// fashion.
+func MergeDatabaseOptions(opts ...*DatabaseOptions) *DatabaseOptions {
+	d := Database()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ReadConcern != nil {
+			d.ReadConcern = opt.ReadConcern
+		}
+		if opt.WriteConcern != nil {
+			d.WriteConcern = opt.WriteConcern
+		}
+		if opt.ReadPreference != nil {
+			d.ReadPreference = opt.ReadPreference
+		}
+		if opt.Registry != nil {
+			d.Registry = opt.Registry
+		}
+	}
+
+	return d
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/deleteoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/deleteoptions.go
new file mode 100644
index 0000000..919d6b8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/deleteoptions.go
@@ -0,0 +1,39 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// DeleteOptions represents all possible options to the deleteOne() and deleteMany() functions
+type DeleteOptions struct {
+	Collation *Collation // Specifies a collation
+}
+
+// Delete returns a pointer to a new DeleteOptions
+func Delete() *DeleteOptions {
+	return &DeleteOptions{}
+}
+
+// SetCollation specifies a collation
+// Valid for servers >= 3.4.
+func (do *DeleteOptions) SetCollation(c *Collation) *DeleteOptions {
+	do.Collation = c
+	return do
+}
+
+// MergeDeleteOptions combines the given DeleteOptions into a single DeleteOptions in a last-one-wins fashion.
+func MergeDeleteOptions(opts ...*DeleteOptions) *DeleteOptions {
+	dOpts := Delete()
+	for _, do := range opts {
+		if do == nil {
+			continue
+		}
+		if do.Collation != nil {
+			dOpts.Collation = do.Collation
+		}
+	}
+
+	return dOpts
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/distinctoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/distinctoptions.go
new file mode 100644
index 0000000..3b3f588
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/distinctoptions.go
@@ -0,0 +1,51 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "time"
+
+// DistinctOptions represents all possible options to the distinct() function
+type DistinctOptions struct {
+	Collation *Collation     // Specifies a collation
+	MaxTime   *time.Duration // The maximum amount of time to allow the operation to run
+}
+
+// Distinct returns a pointer to a new DistinctOptions
+func Distinct() *DistinctOptions {
+	return &DistinctOptions{}
+}
+
+// SetCollation specifies a collation
+// Valid for server versions >= 3.4
+func (do *DistinctOptions) SetCollation(c *Collation) *DistinctOptions {
+	do.Collation = c
+	return do
+}
+
+// SetMaxTime specifies the maximum amount of time to allow the operation to run
+func (do *DistinctOptions) SetMaxTime(d time.Duration) *DistinctOptions {
+	do.MaxTime = &d
+	return do
+}
+
+// MergeDistinctOptions combines the given DistinctOptions into a single DistinctOptions in a last-one-wins fashion.
+func MergeDistinctOptions(opts ...*DistinctOptions) *DistinctOptions {
+	distinctOpts := Distinct()
+	for _, do := range opts {
+		if do == nil {
+			continue
+		}
+		if do.Collation != nil {
+			distinctOpts.Collation = do.Collation
+		}
+		if do.MaxTime != nil {
+			distinctOpts.MaxTime = do.MaxTime
+		}
+	}
+
+	return distinctOpts
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/estimatedcountoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/estimatedcountoptions.go
new file mode 100644
index 0000000..f43bb9f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/estimatedcountoptions.go
@@ -0,0 +1,42 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "time"
+
+// EstimatedDocumentCountOptions represents all possible options to the estimatedDocumentCount() function
+type EstimatedDocumentCountOptions struct {
+	MaxTime *time.Duration // The maximum amount of time to allow the operation to run
+}
+
+// EstimatedDocumentCount returns a pointer to a new EstimatedDocumentCountOptions
+func EstimatedDocumentCount() *EstimatedDocumentCountOptions {
+	return &EstimatedDocumentCountOptions{}
+}
+
+// SetMaxTime specifies the maximum amount of time to allow the operation to run
+func (eco *EstimatedDocumentCountOptions) SetMaxTime(d time.Duration) *EstimatedDocumentCountOptions {
+	eco.MaxTime = &d
+	return eco
+}
+
+// MergeEstimatedDocumentCountOptions combines the given *EstimatedDocumentCountOptions into a single
+// *EstimatedDocumentCountOptions in a last one wins fashion.
+func MergeEstimatedDocumentCountOptions(opts ...*EstimatedDocumentCountOptions) *EstimatedDocumentCountOptions {
+	e := EstimatedDocumentCount()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+
+		if opt.MaxTime != nil {
+			e.MaxTime = opt.MaxTime
+		}
+	}
+
+	return e
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/findoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/findoptions.go
new file mode 100644
index 0000000..4ba6133
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/findoptions.go
@@ -0,0 +1,693 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"time"
+)
+
+// FindOptions represents all possible options to the find() function.
+type FindOptions struct {
+	AllowPartialResults *bool          // If true, allows partial results to be returned if some shards are down.
+	BatchSize           *int32         // Specifies the number of documents to return in every batch.
+	Collation           *Collation     // Specifies a collation to be used
+	Comment             *string        // Specifies a string to help trace the operation through the database.
+	CursorType          *CursorType    // Specifies the type of cursor to use
+	Hint                interface{}    // Specifies the index to use.
+	Limit               *int64         // Sets a limit on the number of results to return.
+	Max                 interface{}    // Sets an exclusive upper bound for a specific index
+	MaxAwaitTime        *time.Duration // Specifies the maximum amount of time for the server to wait on new documents.
+	MaxTime             *time.Duration // Specifies the maximum amount of time to allow the query to run.
+	Min                 interface{}    // Specifies the inclusive lower bound for a specific index.
+	NoCursorTimeout     *bool          // If true, prevents cursors from timing out after an inactivity period.
+	OplogReplay         *bool          // For internal use only; this option should not be set.
+	Projection          interface{}    // Limits the fields returned for all documents.
+	ReturnKey           *bool          // If true, only returns index keys for all result documents.
+	ShowRecordID        *bool          // If true, a $recordId field with the record identifier will be added to the returned documents.
+	Skip                *int64         // Specifies the number of documents to skip before returning
+	Snapshot            *bool          // If true, prevents the cursor from returning a document more than once because of an intervening write operation.
+	Sort                interface{}    // Specifies the order in which to return results.
+}
+
+// Find creates a new FindOptions instance.
+func Find() *FindOptions {
+	return &FindOptions{}
+}
+
+// SetAllowPartialResults sets whether partial results can be returned if some shards are down.
+// For server versions < 3.2, this defaults to false.
+func (f *FindOptions) SetAllowPartialResults(b bool) *FindOptions {
+	f.AllowPartialResults = &b
+	return f
+}
+
+// SetBatchSize sets the number of documents to return in each batch.
+func (f *FindOptions) SetBatchSize(i int32) *FindOptions {
+	f.BatchSize = &i
+	return f
+}
+
+// SetCollation specifies a Collation to use for the Find operation.
+// Valid for server versions >= 3.4
+func (f *FindOptions) SetCollation(collation *Collation) *FindOptions {
+	f.Collation = collation
+	return f
+}
+
+// SetComment specifies a string to help trace the operation through the database.
+func (f *FindOptions) SetComment(comment string) *FindOptions {
+	f.Comment = &comment
+	return f
+}
+
+// SetCursorType specifies the type of cursor to use.
+func (f *FindOptions) SetCursorType(ct CursorType) *FindOptions {
+	f.CursorType = &ct
+	return f
+}
+
+// SetHint specifies the index to use.
+func (f *FindOptions) SetHint(hint interface{}) *FindOptions {
+	f.Hint = hint
+	return f
+}
+
+// SetLimit specifies a limit on the number of results.
+// A negative limit implies that only 1 batch should be returned.
+func (f *FindOptions) SetLimit(i int64) *FindOptions {
+	f.Limit = &i
+	return f
+}
+
+// SetMax specifies an exclusive upper bound for a specific index.
+func (f *FindOptions) SetMax(max interface{}) *FindOptions {
+	f.Max = max
+	return f
+}
+
+// SetMaxAwaitTime specifies the max amount of time for the server to wait on new documents.
+// If the cursor type is not TailableAwait, this option is ignored.
+// For server versions < 3.2, this option is ignored.
+func (f *FindOptions) SetMaxAwaitTime(d time.Duration) *FindOptions {
+	f.MaxAwaitTime = &d
+	return f
+}
+
+// SetMaxTime specifies the max time to allow the query to run.
+func (f *FindOptions) SetMaxTime(d time.Duration) *FindOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetMin specifies the inclusive lower bound for a specific index.
+func (f *FindOptions) SetMin(min interface{}) *FindOptions {
+	f.Min = min
+	return f
+}
+
+// SetNoCursorTimeout specifies whether or not cursors should time out after a period of inactivity.
+// For server versions < 3.2, this defaults to false.
+func (f *FindOptions) SetNoCursorTimeout(b bool) *FindOptions {
+	f.NoCursorTimeout = &b
+	return f
+}
+
+// SetOplogReplay sets the OplogReplay option, which is for internal use only and should not be set.
+// For server versions < 3.2, this defaults to false.
+func (f *FindOptions) SetOplogReplay(b bool) *FindOptions {
+	f.OplogReplay = &b
+	return f
+}
+
+// SetProjection adds an option to limit the fields returned for all documents.
+func (f *FindOptions) SetProjection(projection interface{}) *FindOptions {
+	f.Projection = projection
+	return f
+}
+
+// SetReturnKey adds an option to only return index keys for all result documents.
+func (f *FindOptions) SetReturnKey(b bool) *FindOptions {
+	f.ReturnKey = &b
+	return f
+}
+
+// SetShowRecordID adds an option to determine whether to return the record identifier for each document.
+// If true, a $recordId field will be added to each returned document.
+func (f *FindOptions) SetShowRecordID(b bool) *FindOptions {
+	f.ShowRecordID = &b
+	return f
+}
+
+// SetSkip specifies the number of documents to skip before returning.
+// For server versions < 3.2, this defaults to 0.
+func (f *FindOptions) SetSkip(i int64) *FindOptions {
+	f.Skip = &i
+	return f
+}
+
+// SetSnapshot prevents the cursor from returning a document more than once because of an intervening write operation.
+func (f *FindOptions) SetSnapshot(b bool) *FindOptions {
+	f.Snapshot = &b
+	return f
+}
+
+// SetSort specifies the order in which to return documents.
+func (f *FindOptions) SetSort(sort interface{}) *FindOptions {
+	f.Sort = sort
+	return f
+}
+
+// MergeFindOptions combines the argued FindOptions into a single FindOptions in a last-one-wins fashion
+func MergeFindOptions(opts ...*FindOptions) *FindOptions {
+	fo := Find()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.AllowPartialResults != nil {
+			fo.AllowPartialResults = opt.AllowPartialResults
+		}
+		if opt.BatchSize != nil {
+			fo.BatchSize = opt.BatchSize
+		}
+		if opt.Collation != nil {
+			fo.Collation = opt.Collation
+		}
+		if opt.Comment != nil {
+			fo.Comment = opt.Comment
+		}
+		if opt.CursorType != nil {
+			fo.CursorType = opt.CursorType
+		}
+		if opt.Hint != nil {
+			fo.Hint = opt.Hint
+		}
+		if opt.Limit != nil {
+			fo.Limit = opt.Limit
+		}
+		if opt.Max != nil {
+			fo.Max = opt.Max
+		}
+		if opt.MaxAwaitTime != nil {
+			fo.MaxAwaitTime = opt.MaxAwaitTime
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.Min != nil {
+			fo.Min = opt.Min
+		}
+		if opt.NoCursorTimeout != nil {
+			fo.NoCursorTimeout = opt.NoCursorTimeout
+		}
+		if opt.OplogReplay != nil {
+			fo.OplogReplay = opt.OplogReplay
+		}
+		if opt.Projection != nil {
+			fo.Projection = opt.Projection
+		}
+		if opt.ReturnKey != nil {
+			fo.ReturnKey = opt.ReturnKey
+		}
+		if opt.ShowRecordID != nil {
+			fo.ShowRecordID = opt.ShowRecordID
+		}
+		if opt.Skip != nil {
+			fo.Skip = opt.Skip
+		}
+		if opt.Snapshot != nil {
+			fo.Snapshot = opt.Snapshot
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+	}
+
+	return fo
+}
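+
+// Editorial sketch (illustrative only; "defaults" is a hypothetical
+// caller-owned value): the setters return the receiver, so options are
+// usually built fluently and then merged, with later non-nil fields winning:
+//
+//	opts := Find().
+//		SetLimit(100).
+//		SetBatchSize(10).
+//		SetMaxTime(2 * time.Second)
+//	merged := MergeFindOptions(defaults, opts) // nil entries are skipped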
+
+// FindOneOptions represents all possible options to the findOne() function.
+type FindOneOptions struct {
+	AllowPartialResults *bool          // If true, allows partial results to be returned if some shards are down.
+	BatchSize           *int32         // Specifies the number of documents to return in every batch.
+	Collation           *Collation     // Specifies a collation to be used
+	Comment             *string        // Specifies a string to help trace the operation through the database.
+	CursorType          *CursorType    // Specifies the type of cursor to use
+	Hint                interface{}    // Specifies the index to use.
+	Max                 interface{}    // Sets an exclusive upper bound for a specific index
+	MaxAwaitTime        *time.Duration // Specifies the maximum amount of time for the server to wait on new documents.
+	MaxTime             *time.Duration // Specifies the maximum amount of time to allow the query to run.
+	Min                 interface{}    // Specifies the inclusive lower bound for a specific index.
+	NoCursorTimeout     *bool          // If true, prevents cursors from timing out after an inactivity period.
+	OplogReplay         *bool          // For internal use only; this option should not be set.
+	Projection          interface{}    // Limits the fields returned for all documents.
+	ReturnKey           *bool          // If true, only returns index keys for all result documents.
+	ShowRecordID        *bool          // If true, a $recordId field with the record identifier will be added to the returned documents.
+	Skip                *int64         // Specifies the number of documents to skip before returning
+	Snapshot            *bool          // If true, prevents the cursor from returning a document more than once because of an intervening write operation.
+	Sort                interface{}    // Specifies the order in which to return results.
+}
+
+// FindOne creates a new FindOneOptions instance.
+func FindOne() *FindOneOptions {
+	return &FindOneOptions{}
+}
+
+// SetAllowPartialResults sets whether partial results can be returned if some shards are down.
+func (f *FindOneOptions) SetAllowPartialResults(b bool) *FindOneOptions {
+	f.AllowPartialResults = &b
+	return f
+}
+
+// SetBatchSize sets the number of documents to return in each batch.
+func (f *FindOneOptions) SetBatchSize(i int32) *FindOneOptions {
+	f.BatchSize = &i
+	return f
+}
+
+// SetCollation specifies a Collation to use for the Find operation.
+func (f *FindOneOptions) SetCollation(collation *Collation) *FindOneOptions {
+	f.Collation = collation
+	return f
+}
+
+// SetComment specifies a string to help trace the operation through the database.
+func (f *FindOneOptions) SetComment(comment string) *FindOneOptions {
+	f.Comment = &comment
+	return f
+}
+
+// SetCursorType specifies the type of cursor to use.
+func (f *FindOneOptions) SetCursorType(ct CursorType) *FindOneOptions {
+	f.CursorType = &ct
+	return f
+}
+
+// SetHint specifies the index to use.
+func (f *FindOneOptions) SetHint(hint interface{}) *FindOneOptions {
+	f.Hint = hint
+	return f
+}
+
+// SetMax specifies an exclusive upper bound for a specific index.
+func (f *FindOneOptions) SetMax(max interface{}) *FindOneOptions {
+	f.Max = max
+	return f
+}
+
+// SetMaxAwaitTime specifies the max amount of time for the server to wait on new documents.
+// For server versions < 3.2, this option is ignored.
+func (f *FindOneOptions) SetMaxAwaitTime(d time.Duration) *FindOneOptions {
+	f.MaxAwaitTime = &d
+	return f
+}
+
+// SetMaxTime specifies the max time to allow the query to run.
+func (f *FindOneOptions) SetMaxTime(d time.Duration) *FindOneOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetMin specifies the inclusive lower bound for a specific index.
+func (f *FindOneOptions) SetMin(min interface{}) *FindOneOptions {
+	f.Min = min
+	return f
+}
+
+// SetNoCursorTimeout specifies whether or not cursors should time out after a period of inactivity.
+func (f *FindOneOptions) SetNoCursorTimeout(b bool) *FindOneOptions {
+	f.NoCursorTimeout = &b
+	return f
+}
+
+// SetOplogReplay sets the OplogReplay option, which is for internal use only and should not be set.
+func (f *FindOneOptions) SetOplogReplay(b bool) *FindOneOptions {
+	f.OplogReplay = &b
+	return f
+}
+
+// SetProjection adds an option to limit the fields returned for all documents.
+func (f *FindOneOptions) SetProjection(projection interface{}) *FindOneOptions {
+	f.Projection = projection
+	return f
+}
+
+// SetReturnKey adds an option to only return index keys for all result documents.
+func (f *FindOneOptions) SetReturnKey(b bool) *FindOneOptions {
+	f.ReturnKey = &b
+	return f
+}
+
+// SetShowRecordID adds an option to determine whether to return the record identifier for each document.
+// If true, a $recordId field will be added to each returned document.
+func (f *FindOneOptions) SetShowRecordID(b bool) *FindOneOptions {
+	f.ShowRecordID = &b
+	return f
+}
+
+// SetSkip specifies the number of documents to skip before returning.
+func (f *FindOneOptions) SetSkip(i int64) *FindOneOptions {
+	f.Skip = &i
+	return f
+}
+
+// SetSnapshot prevents the cursor from returning a document more than once because of an intervening write operation.
+func (f *FindOneOptions) SetSnapshot(b bool) *FindOneOptions {
+	f.Snapshot = &b
+	return f
+}
+
+// SetSort specifies the order in which to return documents.
+func (f *FindOneOptions) SetSort(sort interface{}) *FindOneOptions {
+	f.Sort = sort
+	return f
+}
+
+// MergeFindOneOptions combines the argued FindOneOptions into a single FindOneOptions in a last-one-wins fashion
+func MergeFindOneOptions(opts ...*FindOneOptions) *FindOneOptions {
+	fo := FindOne()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.AllowPartialResults != nil {
+			fo.AllowPartialResults = opt.AllowPartialResults
+		}
+		if opt.BatchSize != nil {
+			fo.BatchSize = opt.BatchSize
+		}
+		if opt.Collation != nil {
+			fo.Collation = opt.Collation
+		}
+		if opt.Comment != nil {
+			fo.Comment = opt.Comment
+		}
+		if opt.CursorType != nil {
+			fo.CursorType = opt.CursorType
+		}
+		if opt.Hint != nil {
+			fo.Hint = opt.Hint
+		}
+		if opt.Max != nil {
+			fo.Max = opt.Max
+		}
+		if opt.MaxAwaitTime != nil {
+			fo.MaxAwaitTime = opt.MaxAwaitTime
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.Min != nil {
+			fo.Min = opt.Min
+		}
+		if opt.NoCursorTimeout != nil {
+			fo.NoCursorTimeout = opt.NoCursorTimeout
+		}
+		if opt.OplogReplay != nil {
+			fo.OplogReplay = opt.OplogReplay
+		}
+		if opt.Projection != nil {
+			fo.Projection = opt.Projection
+		}
+		if opt.ReturnKey != nil {
+			fo.ReturnKey = opt.ReturnKey
+		}
+		if opt.ShowRecordID != nil {
+			fo.ShowRecordID = opt.ShowRecordID
+		}
+		if opt.Skip != nil {
+			fo.Skip = opt.Skip
+		}
+		if opt.Snapshot != nil {
+			fo.Snapshot = opt.Snapshot
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+	}
+
+	return fo
+}
+
+// FindOneAndReplaceOptions represents all possible options to the findOneAndReplace() function.
+type FindOneAndReplaceOptions struct {
+	BypassDocumentValidation *bool           // If true, allows the write to opt out of document-level validation.
+	Collation                *Collation      // Specifies a collation to be used
+	MaxTime                  *time.Duration  // Specifies the maximum amount of time to allow the query to run.
+	Projection               interface{}     // Limits the fields returned for all documents.
+	ReturnDocument           *ReturnDocument // Specifies whether the original or updated document should be returned.
+	Sort                     interface{}     // Specifies the order in which to return results.
+	Upsert                   *bool           // If true, creates a new document if no document matches the query.
+}
+
+// FindOneAndReplace creates a new FindOneAndReplaceOptions instance.
+func FindOneAndReplace() *FindOneAndReplaceOptions {
+	return &FindOneAndReplaceOptions{}
+}
+
+// SetBypassDocumentValidation specifies whether or not the write should opt out of document-level validation.
+// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
+func (f *FindOneAndReplaceOptions) SetBypassDocumentValidation(b bool) *FindOneAndReplaceOptions {
+	f.BypassDocumentValidation = &b
+	return f
+}
+
+// SetCollation specifies a Collation to use for the Find operation.
+func (f *FindOneAndReplaceOptions) SetCollation(collation *Collation) *FindOneAndReplaceOptions {
+	f.Collation = collation
+	return f
+}
+
+// SetMaxTime specifies the max time to allow the query to run.
+func (f *FindOneAndReplaceOptions) SetMaxTime(d time.Duration) *FindOneAndReplaceOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetProjection adds an option to limit the fields returned for all documents.
+func (f *FindOneAndReplaceOptions) SetProjection(projection interface{}) *FindOneAndReplaceOptions {
+	f.Projection = projection
+	return f
+}
+
+// SetReturnDocument specifies whether the original or updated document should be returned.
+// If set to Before, the original document will be returned. If set to After, the updated document
+// will be returned.
+func (f *FindOneAndReplaceOptions) SetReturnDocument(rd ReturnDocument) *FindOneAndReplaceOptions {
+	f.ReturnDocument = &rd
+	return f
+}
+
+// SetSort specifies the order in which to return documents.
+func (f *FindOneAndReplaceOptions) SetSort(sort interface{}) *FindOneAndReplaceOptions {
+	f.Sort = sort
+	return f
+}
+
+// SetUpsert specifies if a new document should be created if no document matches the query.
+func (f *FindOneAndReplaceOptions) SetUpsert(b bool) *FindOneAndReplaceOptions {
+	f.Upsert = &b
+	return f
+}
+
+// MergeFindOneAndReplaceOptions combines the argued FindOneAndReplaceOptions into a single FindOneAndReplaceOptions in a last-one-wins fashion
+func MergeFindOneAndReplaceOptions(opts ...*FindOneAndReplaceOptions) *FindOneAndReplaceOptions {
+	fo := FindOneAndReplace()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.BypassDocumentValidation != nil {
+			fo.BypassDocumentValidation = opt.BypassDocumentValidation
+		}
+		if opt.Collation != nil {
+			fo.Collation = opt.Collation
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.Projection != nil {
+			fo.Projection = opt.Projection
+		}
+		if opt.ReturnDocument != nil {
+			fo.ReturnDocument = opt.ReturnDocument
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+		if opt.Upsert != nil {
+			fo.Upsert = opt.Upsert
+		}
+	}
+
+	return fo
+}
+
+// FindOneAndUpdateOptions represents all possible options to the findOneAndUpdate() function.
+type FindOneAndUpdateOptions struct {
+	ArrayFilters             *ArrayFilters   // A set of filters specifying to which array elements an update should apply.
+	BypassDocumentValidation *bool           // If true, allows the write to opt out of document-level validation.
+	Collation                *Collation      // Specifies a collation to be used
+	MaxTime                  *time.Duration  // Specifies the maximum amount of time to allow the query to run.
+	Projection               interface{}     // Limits the fields returned for all documents.
+	ReturnDocument           *ReturnDocument // Specifies whether the original or updated document should be returned.
+	Sort                     interface{}     // Specifies the order in which to return results.
+	Upsert                   *bool           // If true, creates a new document if no document matches the query.
+}
+
+// FindOneAndUpdate creates a new FindOneAndUpdateOptions instance.
+func FindOneAndUpdate() *FindOneAndUpdateOptions {
+	return &FindOneAndUpdateOptions{}
+}
+
+// SetBypassDocumentValidation specifies whether or not the write should opt out of document-level validation.
+func (f *FindOneAndUpdateOptions) SetBypassDocumentValidation(b bool) *FindOneAndUpdateOptions {
+	f.BypassDocumentValidation = &b
+	return f
+}
+
+// SetArrayFilters specifies a set of filters determining to which array elements an update should apply.
+func (f *FindOneAndUpdateOptions) SetArrayFilters(filters ArrayFilters) *FindOneAndUpdateOptions {
+	f.ArrayFilters = &filters
+	return f
+}
+
+// SetCollation specifies a Collation to use for the Find operation.
+func (f *FindOneAndUpdateOptions) SetCollation(collation *Collation) *FindOneAndUpdateOptions {
+	f.Collation = collation
+	return f
+}
+
+// SetMaxTime specifies the max time to allow the query to run.
+func (f *FindOneAndUpdateOptions) SetMaxTime(d time.Duration) *FindOneAndUpdateOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetProjection adds an option to limit the fields returned for all documents.
+func (f *FindOneAndUpdateOptions) SetProjection(projection interface{}) *FindOneAndUpdateOptions {
+	f.Projection = projection
+	return f
+}
+
+// SetReturnDocument specifies whether the original or updated document should be returned.
+// If set to Before, the original document will be returned. If set to After, the updated document
+// will be returned.
+func (f *FindOneAndUpdateOptions) SetReturnDocument(rd ReturnDocument) *FindOneAndUpdateOptions {
+	f.ReturnDocument = &rd
+	return f
+}
+
+// SetSort specifies the order in which to return documents.
+func (f *FindOneAndUpdateOptions) SetSort(sort interface{}) *FindOneAndUpdateOptions {
+	f.Sort = sort
+	return f
+}
+
+// SetUpsert specifies if a new document should be created if no document matches the query.
+func (f *FindOneAndUpdateOptions) SetUpsert(b bool) *FindOneAndUpdateOptions {
+	f.Upsert = &b
+	return f
+}
+
+// MergeFindOneAndUpdateOptions combines the argued FindOneAndUpdateOptions into a single FindOneAndUpdateOptions in a last-one-wins fashion
+func MergeFindOneAndUpdateOptions(opts ...*FindOneAndUpdateOptions) *FindOneAndUpdateOptions {
+	fo := FindOneAndUpdate()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ArrayFilters != nil {
+			fo.ArrayFilters = opt.ArrayFilters
+		}
+		if opt.BypassDocumentValidation != nil {
+			fo.BypassDocumentValidation = opt.BypassDocumentValidation
+		}
+		if opt.Collation != nil {
+			fo.Collation = opt.Collation
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.Projection != nil {
+			fo.Projection = opt.Projection
+		}
+		if opt.ReturnDocument != nil {
+			fo.ReturnDocument = opt.ReturnDocument
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+		if opt.Upsert != nil {
+			fo.Upsert = opt.Upsert
+		}
+	}
+
+	return fo
+}
+
+// FindOneAndDeleteOptions represents all possible options to the findOneAndDelete() function.
+type FindOneAndDeleteOptions struct {
+	Collation  *Collation     // Specifies a collation to be used
+	MaxTime    *time.Duration // Specifies the maximum amount of time to allow the query to run.
+	Projection interface{}    // Limits the fields returned for all documents.
+	Sort       interface{}    // Specifies the order in which to return results.
+}
+
+// FindOneAndDelete creates a new FindOneAndDeleteOptions instance.
+func FindOneAndDelete() *FindOneAndDeleteOptions {
+	return &FindOneAndDeleteOptions{}
+}
+
+// SetCollation specifies a Collation to use for the Find operation.
+// Valid for server versions >= 3.4
+func (f *FindOneAndDeleteOptions) SetCollation(collation *Collation) *FindOneAndDeleteOptions {
+	f.Collation = collation
+	return f
+}
+
+// SetMaxTime specifies the max time to allow the query to run.
+func (f *FindOneAndDeleteOptions) SetMaxTime(d time.Duration) *FindOneAndDeleteOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetProjection adds an option to limit the fields returned for all documents.
+func (f *FindOneAndDeleteOptions) SetProjection(projection interface{}) *FindOneAndDeleteOptions {
+	f.Projection = projection
+	return f
+}
+
+// SetSort specifies the order in which to return documents.
+func (f *FindOneAndDeleteOptions) SetSort(sort interface{}) *FindOneAndDeleteOptions {
+	f.Sort = sort
+	return f
+}
+
+// MergeFindOneAndDeleteOptions combines the argued FindOneAndDeleteOptions into a single FindOneAndDeleteOptions in a last-one-wins fashion
+func MergeFindOneAndDeleteOptions(opts ...*FindOneAndDeleteOptions) *FindOneAndDeleteOptions {
+	fo := FindOneAndDelete()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Collation != nil {
+			fo.Collation = opt.Collation
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.Projection != nil {
+			fo.Projection = opt.Projection
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+	}
+
+	return fo
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/gridfsoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/gridfsoptions.go
new file mode 100644
index 0000000..232a1c8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/gridfsoptions.go
@@ -0,0 +1,268 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
+// DefaultName is the default name for a GridFS bucket.
+var DefaultName = "fs"
+
+// DefaultChunkSize is the default size of each file chunk in bytes.
+var DefaultChunkSize int32 = 255 * 1000
+
+// DefaultRevision is the default revision number for a download by name operation.
+var DefaultRevision int32 = -1
+
+// BucketOptions represents all possible options to configure a GridFS bucket.
+type BucketOptions struct {
+	Name           *string                    // The bucket name. Defaults to "fs".
+	ChunkSizeBytes *int32                     // The chunk size in bytes. Defaults to 255KB.
+	WriteConcern   *writeconcern.WriteConcern // The write concern for the bucket. Defaults to the write concern of the database.
+	ReadConcern    *readconcern.ReadConcern   // The read concern for the bucket. Defaults to the read concern of the database.
+	ReadPreference *readpref.ReadPref         // The read preference for the bucket. Defaults to the read preference of the database.
+}
+
+// GridFSBucket creates a new *BucketOptions
+func GridFSBucket() *BucketOptions {
+	return &BucketOptions{
+		Name:           &DefaultName,
+		ChunkSizeBytes: &DefaultChunkSize,
+	}
+}
+
+// SetName sets the name for the bucket. Defaults to "fs" if not set.
+func (b *BucketOptions) SetName(name string) *BucketOptions {
+	b.Name = &name
+	return b
+}
+
+// SetChunkSizeBytes sets the chunk size in bytes for the bucket. Defaults to 255KB if not set.
+func (b *BucketOptions) SetChunkSizeBytes(i int32) *BucketOptions {
+	b.ChunkSizeBytes = &i
+	return b
+}
+
+// SetWriteConcern sets the write concern for the bucket.
+func (b *BucketOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *BucketOptions {
+	b.WriteConcern = wc
+	return b
+}
+
+// SetReadConcern sets the read concern for the bucket.
+func (b *BucketOptions) SetReadConcern(rc *readconcern.ReadConcern) *BucketOptions {
+	b.ReadConcern = rc
+	return b
+}
+
+// SetReadPreference sets the read preference for the bucket.
+func (b *BucketOptions) SetReadPreference(rp *readpref.ReadPref) *BucketOptions {
+	b.ReadPreference = rp
+	return b
+}
+
+// MergeBucketOptions combines the given *BucketOptions into a single *BucketOptions.
+// If the name or chunk size is not set in any of the given *BucketOptions, the resulting *BucketOptions will have
+// name "fs" and chunk size 255KB.
+func MergeBucketOptions(opts ...*BucketOptions) *BucketOptions {
+	b := GridFSBucket()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Name != nil {
+			b.Name = opt.Name
+		}
+		if opt.ChunkSizeBytes != nil {
+			b.ChunkSizeBytes = opt.ChunkSizeBytes
+		}
+		if opt.WriteConcern != nil {
+			b.WriteConcern = opt.WriteConcern
+		}
+		if opt.ReadConcern != nil {
+			b.ReadConcern = opt.ReadConcern
+		}
+		if opt.ReadPreference != nil {
+			b.ReadPreference = opt.ReadPreference
+		}
+	}
+
+	return b
+}
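+
+// Editorial sketch: GridFSBucket() pre-populates the defaults, so merging
+// even nil options yields a usable configuration:
+//
+//	b := MergeBucketOptions(nil, GridFSBucket().SetChunkSizeBytes(1024*1024))
+//	// *b.Name == "fs", *b.ChunkSizeBytes == 1048576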
+
+// UploadOptions represents all possible options for a GridFS upload operation.
+type UploadOptions struct {
+	ChunkSizeBytes *int32    // Chunk size in bytes. Defaults to the chunk size of the bucket.
+	Metadata       bsonx.Doc // User data for the 'metadata' field of the files collection document.
+}
+
+// GridFSUpload creates a new *UploadOptions
+func GridFSUpload() *UploadOptions {
+	return &UploadOptions{}
+}
+
+// SetChunkSizeBytes sets the chunk size in bytes for the upload. Defaults to 255KB if not set.
+func (u *UploadOptions) SetChunkSizeBytes(i int32) *UploadOptions {
+	u.ChunkSizeBytes = &i
+	return u
+}
+
+// SetMetadata specifies the metadata for the upload.
+func (u *UploadOptions) SetMetadata(doc bsonx.Doc) *UploadOptions {
+	u.Metadata = doc
+	return u
+}
+
+// MergeUploadOptions combines the given *UploadOptions into a single *UploadOptions.
+// If the chunk size is not set in any of the given *UploadOptions, the resulting *UploadOptions will have chunk size
+// 255KB.
+func MergeUploadOptions(opts ...*UploadOptions) *UploadOptions {
+	u := GridFSUpload()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ChunkSizeBytes != nil {
+			u.ChunkSizeBytes = opt.ChunkSizeBytes
+		}
+		if opt.Metadata != nil {
+			u.Metadata = opt.Metadata
+		}
+	}
+
+	return u
+}
+
+// NameOptions represents all options that can be used for a GridFS download by name operation.
+type NameOptions struct {
+	Revision *int32 // Which revision to download (revisions are documents sharing a filename, distinguished by uploadDate). Defaults to -1 (the most recent revision).
+}
+
+// GridFSName creates a new *NameOptions
+func GridFSName() *NameOptions {
+	return &NameOptions{}
+}
+
+// SetRevision specifies which revision of the file to retrieve. Defaults to -1.
+// Revision numbers are defined as follows:
+//
+//	 0 = the original stored file
+//	 1 = the first revision
+//	 2 = the second revision
+//	 etc.
+//	-2 = the second most recent revision
+//	-1 = the most recent revision
+func (n *NameOptions) SetRevision(r int32) *NameOptions {
+	n.Revision = &r
+	return n
+}
+
+// MergeNameOptions combines the given *NameOptions into a single *NameOptions in a last one wins fashion.
+func MergeNameOptions(opts ...*NameOptions) *NameOptions {
+	n := GridFSName()
+	n.Revision = &DefaultRevision
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Revision != nil {
+			n.Revision = opt.Revision
+		}
+	}
+
+	return n
+}
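+
+// Editorial sketch of revision selection using the numbering documented on
+// SetRevision:
+//
+//	orig := GridFSName().SetRevision(0) // the original stored file
+//	n := MergeNameOptions(orig, GridFSName())
+//	// *n.Revision == 0; an unset Revision never overrides an earlier one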
+
+// GridFSFindOptions represents all options for a GridFS find operation.
+type GridFSFindOptions struct {
+	BatchSize       *int32
+	Limit           *int32
+	MaxTime         *time.Duration
+	NoCursorTimeout *bool
+	Skip            *int32
+	Sort            interface{}
+}
+
+// GridFSFind creates a new GridFSFindOptions instance.
+func GridFSFind() *GridFSFindOptions {
+	return &GridFSFindOptions{}
+}
+
+// SetBatchSize sets the number of documents to return in each batch.
+func (f *GridFSFindOptions) SetBatchSize(i int32) *GridFSFindOptions {
+	f.BatchSize = &i
+	return f
+}
+
+// SetLimit specifies a limit on the number of results.
+// A negative limit implies that only 1 batch should be returned.
+func (f *GridFSFindOptions) SetLimit(i int32) *GridFSFindOptions {
+	f.Limit = &i
+	return f
+}
+
+// SetMaxTime specifies the max time to allow the query to run.
+func (f *GridFSFindOptions) SetMaxTime(d time.Duration) *GridFSFindOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetNoCursorTimeout specifies whether or not cursors should time out after a period of inactivity.
+func (f *GridFSFindOptions) SetNoCursorTimeout(b bool) *GridFSFindOptions {
+	f.NoCursorTimeout = &b
+	return f
+}
+
+// SetSkip specifies the number of documents to skip before returning.
+func (f *GridFSFindOptions) SetSkip(i int32) *GridFSFindOptions {
+	f.Skip = &i
+	return f
+}
+
+// SetSort specifies the order in which to return documents.
+func (f *GridFSFindOptions) SetSort(sort interface{}) *GridFSFindOptions {
+	f.Sort = sort
+	return f
+}
+
+// MergeGridFSFindOptions combines the argued GridFSFindOptions into a single GridFSFindOptions in a last-one-wins fashion
+func MergeGridFSFindOptions(opts ...*GridFSFindOptions) *GridFSFindOptions {
+	fo := GridFSFind()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.BatchSize != nil {
+			fo.BatchSize = opt.BatchSize
+		}
+		if opt.Limit != nil {
+			fo.Limit = opt.Limit
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.NoCursorTimeout != nil {
+			fo.NoCursorTimeout = opt.NoCursorTimeout
+		}
+		if opt.Skip != nil {
+			fo.Skip = opt.Skip
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+	}
+
+	return fo
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/indexoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/indexoptions.go
new file mode 100644
index 0000000..2fda698
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/indexoptions.go
@@ -0,0 +1,326 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"time"
+)
+
+// CreateIndexesOptions represents all possible options for the createIndexes() function.
+type CreateIndexesOptions struct {
+	MaxTime *time.Duration // The maximum amount of time to allow the query to run.
+}
+
+// CreateIndexes creates a new CreateIndexesOptions instance.
+func CreateIndexes() *CreateIndexesOptions {
+	return &CreateIndexesOptions{}
+}
+
+// SetMaxTime specifies the maximum amount of time to allow the query to run.
+func (c *CreateIndexesOptions) SetMaxTime(d time.Duration) *CreateIndexesOptions {
+	c.MaxTime = &d
+	return c
+}
+
+// MergeCreateIndexesOptions combines the given *CreateIndexesOptions into a single *CreateIndexesOptions in a last one
+// wins fashion.
+func MergeCreateIndexesOptions(opts ...*CreateIndexesOptions) *CreateIndexesOptions {
+	c := CreateIndexes()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.MaxTime != nil {
+			c.MaxTime = opt.MaxTime
+		}
+	}
+
+	return c
+}
+
+// DropIndexesOptions represents all possible options for the dropIndexes() function.
+type DropIndexesOptions struct {
+	MaxTime *time.Duration
+}
+
+// DropIndexes creates a new DropIndexesOptions instance.
+func DropIndexes() *DropIndexesOptions {
+	return &DropIndexesOptions{}
+}
+
+// SetMaxTime specifies the maximum amount of time to allow the query to run.
+func (d *DropIndexesOptions) SetMaxTime(duration time.Duration) *DropIndexesOptions {
+	d.MaxTime = &duration
+	return d
+}
+
+// MergeDropIndexesOptions combines the given *DropIndexesOptions into a single *DropIndexesOptions in a last one
+// wins fashion.
+func MergeDropIndexesOptions(opts ...*DropIndexesOptions) *DropIndexesOptions {
+	c := DropIndexes()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.MaxTime != nil {
+			c.MaxTime = opt.MaxTime
+		}
+	}
+
+	return c
+}
+
+// ListIndexesOptions represents all possible options for the listIndexes() function.
+type ListIndexesOptions struct {
+	BatchSize *int32
+	MaxTime   *time.Duration
+}
+
+// ListIndexes creates a new ListIndexesOptions instance.
+func ListIndexes() *ListIndexesOptions {
+	return &ListIndexesOptions{}
+}
+
+// SetBatchSize specifies the number of documents to return in every batch.
+func (l *ListIndexesOptions) SetBatchSize(i int32) *ListIndexesOptions {
+	l.BatchSize = &i
+	return l
+}
+
+// SetMaxTime specifies the maximum amount of time to allow the query to run.
+func (l *ListIndexesOptions) SetMaxTime(d time.Duration) *ListIndexesOptions {
+	l.MaxTime = &d
+	return l
+}
+
+// MergeListIndexesOptions combines the given *ListIndexesOptions into a single *ListIndexesOptions in a last one
+// wins fashion.
+func MergeListIndexesOptions(opts ...*ListIndexesOptions) *ListIndexesOptions {
+	c := ListIndexes()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.BatchSize != nil {
+			c.BatchSize = opt.BatchSize
+		}
+		if opt.MaxTime != nil {
+			c.MaxTime = opt.MaxTime
+		}
+	}
+
+	return c
+}
+
+// IndexOptions represents all possible options to configure a new index.
+type IndexOptions struct {
+	Background              *bool
+	ExpireAfterSeconds      *int32
+	Name                    *string
+	Sparse                  *bool
+	StorageEngine           interface{}
+	Unique                  *bool
+	Version                 *int32
+	DefaultLanguage         *string
+	LanguageOverride        *string
+	TextVersion             *int32
+	Weights                 interface{}
+	SphereVersion           *int32
+	Bits                    *int32
+	Max                     *float64
+	Min                     *float64
+	BucketSize              *int32
+	PartialFilterExpression interface{}
+	Collation               *Collation
+}
+
+// Index creates a new *IndexOptions
+func Index() *IndexOptions {
+	return &IndexOptions{}
+}
+
+// SetBackground sets the background option. If true, the server will create the index in the background and not block
+// other tasks
+func (i *IndexOptions) SetBackground(background bool) *IndexOptions {
+	i.Background = &background
+	return i
+}
+
+// SetExpireAfterSeconds specifies the number of seconds for a document to remain in a collection.
+func (i *IndexOptions) SetExpireAfterSeconds(seconds int32) *IndexOptions {
+	i.ExpireAfterSeconds = &seconds
+	return i
+}
+
+// SetName specifies a name for the index.
+// If not set, a name will be generated in the format "[field]_[direction]".
+// If multiple indexes are created for the same key pattern with different collations, a name must be provided to avoid
+// ambiguity.
+func (i *IndexOptions) SetName(name string) *IndexOptions {
+	i.Name = &name
+	return i
+}
+
+// SetSparse sets the sparse option.
+// If true, the index will only reference documents with the specified field in the index.
+func (i *IndexOptions) SetSparse(sparse bool) *IndexOptions {
+	i.Sparse = &sparse
+	return i
+}
+
+// SetStorageEngine specifies the storage engine to use.
+// Valid for server versions >= 3.0
+func (i *IndexOptions) SetStorageEngine(engine interface{}) *IndexOptions {
+	i.StorageEngine = engine
+	return i
+}
+
+// SetUnique forces the index to be unique.
+func (i *IndexOptions) SetUnique(unique bool) *IndexOptions {
+	i.Unique = &unique
+	return i
+}
+
+// SetVersion specifies the index version number, either 0 or 1.
+func (i *IndexOptions) SetVersion(version int32) *IndexOptions {
+	i.Version = &version
+	return i
+}
+
+// SetDefaultLanguage specifies the default language for text indexes.
+// If not set, this will default to English.
+func (i *IndexOptions) SetDefaultLanguage(language string) *IndexOptions {
+	i.DefaultLanguage = &language
+	return i
+}
+
+// SetLanguageOverride specifies the field in the document to override the language.
+func (i *IndexOptions) SetLanguageOverride(override string) *IndexOptions {
+	i.LanguageOverride = &override
+	return i
+}
+
+// SetTextVersion specifies the text index version number.
+// MongoDB version 2.4 can only support version 1.
+// MongoDB versions 2.6 and higher can support versions 1 or 2.
+func (i *IndexOptions) SetTextVersion(version int32) *IndexOptions {
+	i.TextVersion = &version
+	return i
+}
+
+// SetWeights specifies fields in the index and their corresponding weight values.
+func (i *IndexOptions) SetWeights(weights interface{}) *IndexOptions {
+	i.Weights = weights
+	return i
+}
+
+// SetSphereVersion specifies the 2dsphere index version number.
+// MongoDB version 2.4 can only support version 1.
+// MongoDB versions 2.6 and higher can support versions 1 or 2.
+func (i *IndexOptions) SetSphereVersion(version int32) *IndexOptions {
+	i.SphereVersion = &version
+	return i
+}
+
+// SetBits specifies the precision of the stored geo hash in the 2d index, from 1 to 32.
+func (i *IndexOptions) SetBits(bits int32) *IndexOptions {
+	i.Bits = &bits
+	return i
+}
+
+// SetMax specifies the maximum boundary for latitude and longitude in the 2d index.
+func (i *IndexOptions) SetMax(max float64) *IndexOptions {
+	i.Max = &max
+	return i
+}
+
+// SetMin specifies the minimum boundary for latitude and longitude in the 2d index.
+func (i *IndexOptions) SetMin(min float64) *IndexOptions {
+	i.Min = &min
+	return i
+}
+
+// SetBucketSize specifies the number of units within which to group the location values in a geo haystack index.
+func (i *IndexOptions) SetBucketSize(bucketSize int32) *IndexOptions {
+	i.BucketSize = &bucketSize
+	return i
+}
+
+// SetPartialFilterExpression specifies a filter for use in a partial index. Only documents that match the filter
+// expression are included in the index.
+func (i *IndexOptions) SetPartialFilterExpression(expression interface{}) *IndexOptions {
+	i.PartialFilterExpression = expression
+	return i
+}
+
+// SetCollation specifies a Collation to use for the operation.
+// Valid for server versions >= 3.4
+func (i *IndexOptions) SetCollation(collation *Collation) *IndexOptions {
+	i.Collation = collation
+	return i
+}
+
+// MergeIndexOptions combines the given *IndexOptions into a single *IndexOptions in a last one wins fashion.
+func MergeIndexOptions(opts ...*IndexOptions) *IndexOptions {
+	i := Index()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Background != nil {
+			i.Background = opt.Background
+		}
+		if opt.ExpireAfterSeconds != nil {
+			i.ExpireAfterSeconds = opt.ExpireAfterSeconds
+		}
+		if opt.Name != nil {
+			i.Name = opt.Name
+		}
+		if opt.Sparse != nil {
+			i.Sparse = opt.Sparse
+		}
+		if opt.StorageEngine != nil {
+			i.StorageEngine = opt.StorageEngine
+		}
+		if opt.Unique != nil {
+			i.Unique = opt.Unique
+		}
+		if opt.Version != nil {
+			i.Version = opt.Version
+		}
+		if opt.DefaultLanguage != nil {
+			i.DefaultLanguage = opt.DefaultLanguage
+		}
+		if opt.LanguageOverride != nil {
+			i.LanguageOverride = opt.LanguageOverride
+		}
+		if opt.TextVersion != nil {
+			i.TextVersion = opt.TextVersion
+		}
+		if opt.Weights != nil {
+			i.Weights = opt.Weights
+		}
+		if opt.SphereVersion != nil {
+			i.SphereVersion = opt.SphereVersion
+		}
+		if opt.Bits != nil {
+			i.Bits = opt.Bits
+		}
+		if opt.Max != nil {
+			i.Max = opt.Max
+		}
+		if opt.Min != nil {
+			i.Min = opt.Min
+		}
+		if opt.BucketSize != nil {
+			i.BucketSize = opt.BucketSize
+		}
+		if opt.PartialFilterExpression != nil {
+			i.PartialFilterExpression = opt.PartialFilterExpression
+		}
+		if opt.Collation != nil {
+			i.Collation = opt.Collation
+		}
+	}
+
+	return i
+}
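+
+// Editorial sketch: a unique, sparse index configuration built with the
+// fluent setters above ("email_1" is an illustrative index name):
+//
+//	idx := Index().
+//		SetName("email_1").
+//		SetUnique(true).
+//		SetSparse(true)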
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/insertoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/insertoptions.go
new file mode 100644
index 0000000..064ede3
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/insertoptions.go
@@ -0,0 +1,84 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// InsertOneOptions represents all possible options to the insertOne() function.
+type InsertOneOptions struct {
+	BypassDocumentValidation *bool // If true, allows the write to opt-out of document level validation
+}
+
+// InsertOne returns a pointer to a new InsertOneOptions
+func InsertOne() *InsertOneOptions {
+	return &InsertOneOptions{}
+}
+
+// SetBypassDocumentValidation allows the write to opt-out of document level validation.
+// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
+func (ioo *InsertOneOptions) SetBypassDocumentValidation(b bool) *InsertOneOptions {
+	ioo.BypassDocumentValidation = &b
+	return ioo
+}
+
+// MergeInsertOneOptions combines the argued InsertOneOptions into a single InsertOneOptions in a last-one-wins fashion
+func MergeInsertOneOptions(opts ...*InsertOneOptions) *InsertOneOptions {
+	ioOpts := InsertOne()
+	for _, ioo := range opts {
+		if ioo == nil {
+			continue
+		}
+		if ioo.BypassDocumentValidation != nil {
+			ioOpts.BypassDocumentValidation = ioo.BypassDocumentValidation
+		}
+	}
+
+	return ioOpts
+}
+
+// InsertManyOptions represents all possible options to the insertMany() function.
+type InsertManyOptions struct {
+	BypassDocumentValidation *bool // If true, allows the write to opt-out of document level validation
+	Ordered                  *bool // If true, when an insert fails, return without performing the remaining inserts. Defaults to true.
+}
+
+// InsertMany returns a pointer to a new InsertManyOptions
+func InsertMany() *InsertManyOptions {
+	return &InsertManyOptions{
+		Ordered: &DefaultOrdered,
+	}
+}
+
+// SetBypassDocumentValidation allows the write to opt-out of document level validation.
+// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
+func (imo *InsertManyOptions) SetBypassDocumentValidation(b bool) *InsertManyOptions {
+	imo.BypassDocumentValidation = &b
+	return imo
+}
+
+// SetOrdered configures the ordered option. If true, when a write fails, the function will return without attempting
+// remaining writes. Defaults to true.
+func (imo *InsertManyOptions) SetOrdered(b bool) *InsertManyOptions {
+	imo.Ordered = &b
+	return imo
+}
+
+// MergeInsertManyOptions combines the argued InsertManyOptions into a single InsertManyOptions in a last-one-wins fashion
+func MergeInsertManyOptions(opts ...*InsertManyOptions) *InsertManyOptions {
+	imOpts := InsertMany()
+	for _, imo := range opts {
+		if imo == nil {
+			continue
+		}
+		if imo.BypassDocumentValidation != nil {
+			imOpts.BypassDocumentValidation = imo.BypassDocumentValidation
+		}
+		if imo.Ordered != nil {
+			imOpts.Ordered = imo.Ordered
+		}
+	}
+
+	return imOpts
+}
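+
+// Editorial sketch: Ordered defaults to true via InsertMany(), so it must be
+// overridden explicitly to request unordered writes:
+//
+//	imo := MergeInsertManyOptions(InsertMany().SetOrdered(false))
+//	// *imo.Ordered == false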
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/listcollectionsoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/listcollectionsoptions.go
new file mode 100644
index 0000000..e44ad4a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/listcollectionsoptions.go
@@ -0,0 +1,39 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// ListCollectionsOptions represents all possible options for a listCollections command.
+type ListCollectionsOptions struct {
+	NameOnly *bool // If true, only the collection names will be returned.
+}
+
+// ListCollections creates a new *ListCollectionsOptions
+func ListCollections() *ListCollectionsOptions {
+	return &ListCollectionsOptions{}
+}
+
+// SetNameOnly specifies whether to return only the collection names.
+func (lc *ListCollectionsOptions) SetNameOnly(b bool) *ListCollectionsOptions {
+	lc.NameOnly = &b
+	return lc
+}
+
+// MergeListCollectionsOptions combines the given *ListCollectionsOptions into a single *ListCollectionsOptions in a
+// last one wins fashion.
+func MergeListCollectionsOptions(opts ...*ListCollectionsOptions) *ListCollectionsOptions {
+	lc := ListCollections()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.NameOnly != nil {
+			lc.NameOnly = opt.NameOnly
+		}
+	}
+
+	return lc
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/listdatabasesoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/listdatabasesoptions.go
new file mode 100644
index 0000000..5efb6e8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/listdatabasesoptions.go
@@ -0,0 +1,39 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// ListDatabasesOptions represents all possible options for a listDatabases command.
+type ListDatabasesOptions struct {
+	NameOnly *bool // If true, only the database names will be returned.
+}
+
+// ListDatabases creates a new *ListDatabasesOptions
+func ListDatabases() *ListDatabasesOptions {
+	return &ListDatabasesOptions{}
+}
+
+// SetNameOnly specifies whether to return only the database names.
+func (ld *ListDatabasesOptions) SetNameOnly(b bool) *ListDatabasesOptions {
+	ld.NameOnly = &b
+	return ld
+}
+
+// MergeListDatabasesOptions combines the given *ListDatabasesOptions into a single *ListDatabasesOptions in a last one
+// wins fashion.
+func MergeListDatabasesOptions(opts ...*ListDatabasesOptions) *ListDatabasesOptions {
+	ld := ListDatabases()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.NameOnly != nil {
+			ld.NameOnly = opt.NameOnly
+		}
+	}
+
+	return ld
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/mongooptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/mongooptions.go
new file mode 100644
index 0000000..22f383b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/mongooptions.go
@@ -0,0 +1,163 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
+// Collation allows users to specify language-specific rules for string comparison, such as
+// rules for lettercase and accent marks.
+type Collation struct {
+	Locale          string `bson:",omitempty"` // The locale
+	CaseLevel       bool   `bson:",omitempty"` // The case level
+	CaseFirst       string `bson:",omitempty"` // The case ordering
+	Strength        int    `bson:",omitempty"` // The number of comparison levels to use
+	NumericOrdering bool   `bson:",omitempty"` // Whether to order numbers based on numerical order and not collation order
+	Alternate       string `bson:",omitempty"` // Whether spaces and punctuation are considered base characters
+	MaxVariable     string `bson:",omitempty"` // Which characters are affected by alternate: "shifted"
+	Normalization   bool   `bson:",omitempty"` // Causes text to be normalized into Unicode NFD
+	Backwards       bool   `bson:",omitempty"` // Causes secondary differences to be considered in reverse order, as it is done in the French language
+}
+
+// ToDocument converts the Collation to a *bsonx.Document
+func (co *Collation) ToDocument() bsonx.Doc {
+	doc := bsonx.Doc{}
+	if co.Locale != "" {
+		doc = append(doc, bsonx.Elem{"locale", bsonx.String(co.Locale)})
+	}
+	if co.CaseLevel {
+		doc = append(doc, bsonx.Elem{"caseLevel", bsonx.Boolean(true)})
+	}
+	if co.CaseFirst != "" {
+		doc = append(doc, bsonx.Elem{"caseFirst", bsonx.String(co.CaseFirst)})
+	}
+	if co.Strength != 0 {
+		doc = append(doc, bsonx.Elem{"strength", bsonx.Int32(int32(co.Strength))})
+	}
+	if co.NumericOrdering {
+		doc = append(doc, bsonx.Elem{"numericOrdering", bsonx.Boolean(true)})
+	}
+	if co.Alternate != "" {
+		doc = append(doc, bsonx.Elem{"alternate", bsonx.String(co.Alternate)})
+	}
+	if co.MaxVariable != "" {
+		doc = append(doc, bsonx.Elem{"maxVariable", bsonx.String(co.MaxVariable)})
+	}
+	if co.Normalization {
+		doc = append(doc, bsonx.Elem{"normalization", bsonx.Boolean(co.Normalization)})
+	}
+	if co.Backwards {
+		doc = append(doc, bsonx.Elem{"backwards", bsonx.Boolean(true)})
+	}
+	return doc
+}
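+
+// Editorial sketch: only non-zero fields are emitted by ToDocument, so
+//
+//	c := &Collation{Locale: "fr", Strength: 2}
+//
+// converts to a two-element document: {"locale": "fr", "strength": 2}.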
+
+// CursorType specifies whether a cursor should close when the last data is retrieved. See
+// NonTailable, Tailable, and TailableAwait.
+type CursorType int8
+
+const (
+	// NonTailable specifies that a cursor should close after retrieving the last data.
+	NonTailable CursorType = iota
+	// Tailable specifies that a cursor should not close when the last data is retrieved and can be resumed later.
+	Tailable
+	// TailableAwait specifies that a cursor should not close when the last data is retrieved and
+	// that it should block for a certain amount of time for new data before returning no data.
+	TailableAwait
+)
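+
+// Editorial sketch: TailableAwait is typically paired with a MaxAwaitTime on
+// a find against a capped collection (options construction only):
+//
+//	opts := Find().SetCursorType(TailableAwait).SetMaxAwaitTime(time.Second)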
+
+// ReturnDocument specifies whether a findAndUpdate operation should return the document as it was
+// before the update or as it is after the update.
+type ReturnDocument int8
+
+const (
+	// Before specifies that findAndUpdate should return the document as it was before the update.
+	Before ReturnDocument = iota
+	// After specifies that findAndUpdate should return the document as it is after the update.
+	After
+)
+
+// FullDocument specifies whether a change stream should include a copy of the entire document that was changed,
+// as it exists some time after the change occurred.
+type FullDocument string
+
+const (
+	// Default does not include a document copy
+	Default FullDocument = "default"
+	// UpdateLookup includes a delta describing the changes to the document and a copy of the entire document that
+	// was changed
+	UpdateLookup FullDocument = "updateLookup"
+)
+
+// ArrayFilters is used to hold filters for the array filters CRUD option. If a registry is nil, bson.DefaultRegistry
+// will be used when converting the filter interfaces to BSON.
+type ArrayFilters struct {
+	Registry *bsoncodec.Registry // The registry to use for converting filters. Defaults to bson.DefaultRegistry.
+	Filters  []interface{}       // The filters to apply
+}
+
+// ToArray builds a bsonx.Arr from the provided ArrayFilters.
+func (af *ArrayFilters) ToArray() (bsonx.Arr, error) {
+	docs := make([]bsonx.Doc, 0, len(af.Filters))
+	for _, f := range af.Filters {
+		d, err := transformDocument(af.Registry, f)
+		if err != nil {
+			return nil, err
+		}
+		docs = append(docs, d)
+	}
+
+	arr := bsonx.Arr{}
+	for _, doc := range docs {
+		arr = append(arr, bsonx.Document(doc))
+	}
+
+	return arr, nil
+}
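+
+// Editorial sketch (the filter content is illustrative): each filter document
+// constrains a positional identifier used in the corresponding update:
+//
+//	af := ArrayFilters{Filters: []interface{}{
+//		bson.D{{"elem.grade", bson.D{{"$gte", 85}}}},
+//	}}
+//	arr, _ := af.ToArray() // a one-element bsonx.Arr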
+
+// MarshalError is returned when attempting to transform a value into a document
+// results in an error.
+type MarshalError struct {
+	Value interface{}
+	Err   error
+}
+
+// Error implements the error interface.
+func (me MarshalError) Error() string {
+	return fmt.Sprintf("cannot transform type %s to a *bsonx.Document", reflect.TypeOf(me.Value))
+}
+
+var defaultRegistry = bson.DefaultRegistry
+
+func transformDocument(registry *bsoncodec.Registry, val interface{}) (bsonx.Doc, error) {
+	if val == nil {
+		return bsonx.Doc{}, nil
+	}
+	reg := defaultRegistry
+	if registry != nil {
+		reg = registry
+	}
+
+	if bs, ok := val.([]byte); ok {
+		// Slight optimization so we'll just use MarshalBSON and not go through the codec machinery.
+		val = bson.Raw(bs)
+	}
+
+	// TODO(skriptble): Use a pool of these instead.
+	buf := make([]byte, 0, 256)
+	b, err := bson.MarshalAppendWithRegistry(reg, buf, val)
+	if err != nil {
+		return nil, MarshalError{Value: val, Err: err}
+	}
+	return bsonx.ReadDoc(b)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/replaceoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/replaceoptions.go
new file mode 100644
index 0000000..7a8c2ba
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/replaceoptions.go
@@ -0,0 +1,60 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// ReplaceOptions represents all possible options to the replaceOne() function
+type ReplaceOptions struct {
+	BypassDocumentValidation *bool      // If true, allows the write to opt-out of document level validation
+	Collation                *Collation // Specifies a collation
+	Upsert                   *bool      // When true, creates a new document if no document matches the query
+}
+
+// Replace returns a pointer to a new ReplaceOptions
+func Replace() *ReplaceOptions {
+	return &ReplaceOptions{}
+}
+
+// SetBypassDocumentValidation allows the write to opt-out of document level validation.
+// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
+func (ro *ReplaceOptions) SetBypassDocumentValidation(b bool) *ReplaceOptions {
+	ro.BypassDocumentValidation = &b
+	return ro
+}
+
+// SetCollation specifies a collation.
+// Valid for servers >= 3.4
+func (ro *ReplaceOptions) SetCollation(c *Collation) *ReplaceOptions {
+	ro.Collation = c
+	return ro
+}
+
+// SetUpsert allows the creation of a new document if no document matches the query.
+func (ro *ReplaceOptions) SetUpsert(b bool) *ReplaceOptions {
+	ro.Upsert = &b
+	return ro
+}
+
+// MergeReplaceOptions combines the argued ReplaceOptions into a single ReplaceOptions in a last-one-wins fashion
+func MergeReplaceOptions(opts ...*ReplaceOptions) *ReplaceOptions {
+	rOpts := Replace()
+	for _, ro := range opts {
+		if ro == nil {
+			continue
+		}
+		if ro.BypassDocumentValidation != nil {
+			rOpts.BypassDocumentValidation = ro.BypassDocumentValidation
+		}
+		if ro.Collation != nil {
+			rOpts.Collation = ro.Collation
+		}
+		if ro.Upsert != nil {
+			rOpts.Upsert = ro.Upsert
+		}
+	}
+
+	return rOpts
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/runcmdoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/runcmdoptions.go
new file mode 100644
index 0000000..c7c696d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/runcmdoptions.go
@@ -0,0 +1,40 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "github.com/mongodb/mongo-go-driver/mongo/readpref"
+
+// RunCmdOptions represents all possible options for a runCommand operation.
+type RunCmdOptions struct {
+	ReadPreference *readpref.ReadPref // The read preference for the operation.
+}
+
+// RunCmd creates a new *RunCmdOptions
+func RunCmd() *RunCmdOptions {
+	return &RunCmdOptions{}
+}
+
+// SetReadPreference sets the read preference for the operation.
+func (rc *RunCmdOptions) SetReadPreference(rp *readpref.ReadPref) *RunCmdOptions {
+	rc.ReadPreference = rp
+	return rc
+}
+
+// MergeRunCmdOptions combines the given *RunCmdOptions into one *RunCmdOptions in a last-one-wins fashion.
+func MergeRunCmdOptions(opts ...*RunCmdOptions) *RunCmdOptions {
+	rc := RunCmd()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ReadPreference != nil {
+			rc.ReadPreference = opt.ReadPreference
+		}
+	}
+
+	return rc
+}
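+
+// A usage sketch (editor's addition, not part of the upstream file): directing
+// a runCommand at a secondary when one is available:
+//
+//	opts := RunCmd().SetReadPreference(readpref.SecondaryPreferred())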
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/sessionoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/sessionoptions.go
new file mode 100644
index 0000000..ffe45e6
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/sessionoptions.go
@@ -0,0 +1,79 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+)
+
+// DefaultCausalConsistency is the default value for the CausalConsistency option.
+var DefaultCausalConsistency = true
+
+// SessionOptions represents all possible options for creating a new session.
+type SessionOptions struct {
+	CausalConsistency     *bool                      // Specifies if reads should be causally consistent. Defaults to true.
+	DefaultReadConcern    *readconcern.ReadConcern   // The default read concern for transactions started in the session.
+	DefaultReadPreference *readpref.ReadPref         // The default read preference for transactions started in the session.
+	DefaultWriteConcern   *writeconcern.WriteConcern // The default write concern for transactions started in the session.
+}
+
+// Session creates a new *SessionOptions
+func Session() *SessionOptions {
+	return &SessionOptions{
+		CausalConsistency: &DefaultCausalConsistency,
+	}
+}
+
+// SetCausalConsistency specifies if a session should be causally consistent. Defaults to true.
+func (s *SessionOptions) SetCausalConsistency(b bool) *SessionOptions {
+	s.CausalConsistency = &b
+	return s
+}
+
+// SetDefaultReadConcern sets the default read concern for transactions started in a session.
+func (s *SessionOptions) SetDefaultReadConcern(rc *readconcern.ReadConcern) *SessionOptions {
+	s.DefaultReadConcern = rc
+	return s
+}
+
+// SetDefaultReadPreference sets the default read preference for transactions started in a session.
+func (s *SessionOptions) SetDefaultReadPreference(rp *readpref.ReadPref) *SessionOptions {
+	s.DefaultReadPreference = rp
+	return s
+}
+
+// SetDefaultWriteConcern sets the default write concern for transactions started in a session.
+func (s *SessionOptions) SetDefaultWriteConcern(wc *writeconcern.WriteConcern) *SessionOptions {
+	s.DefaultWriteConcern = wc
+	return s
+}
+
+// MergeSessionOptions combines the given *SessionOptions into a single *SessionOptions in a last-one-wins fashion.
+func MergeSessionOptions(opts ...*SessionOptions) *SessionOptions {
+	s := Session()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.CausalConsistency != nil {
+			s.CausalConsistency = opt.CausalConsistency
+		}
+		if opt.DefaultReadConcern != nil {
+			s.DefaultReadConcern = opt.DefaultReadConcern
+		}
+		if opt.DefaultReadPreference != nil {
+			s.DefaultReadPreference = opt.DefaultReadPreference
+		}
+		if opt.DefaultWriteConcern != nil {
+			s.DefaultWriteConcern = opt.DefaultWriteConcern
+		}
+	}
+
+	return s
+}
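+
+// A usage sketch (editor's addition, not part of the upstream file): Session()
+// already defaults CausalConsistency to true, so only overrides need setting:
+//
+//	opts := Session().
+//		SetCausalConsistency(false).
+//		SetDefaultReadConcern(readconcern.Majority())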
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/transactionoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/transactionoptions.go
new file mode 100644
index 0000000..5aec1b9
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/transactionoptions.go
@@ -0,0 +1,65 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+)
+
+// TransactionOptions represents all possible options for starting a transaction.
+type TransactionOptions struct {
+	ReadConcern    *readconcern.ReadConcern   // The read concern for the transaction. Defaults to the session's read concern.
+	ReadPreference *readpref.ReadPref         // The read preference for the transaction. Defaults to the session's read preference.
+	WriteConcern   *writeconcern.WriteConcern // The write concern for the transaction. Defaults to the session's write concern.
+}
+
+// Transaction creates a new *TransactionOptions
+func Transaction() *TransactionOptions {
+	return &TransactionOptions{}
+}
+
+// SetReadConcern sets the read concern for the transaction.
+func (t *TransactionOptions) SetReadConcern(rc *readconcern.ReadConcern) *TransactionOptions {
+	t.ReadConcern = rc
+	return t
+}
+
+// SetReadPreference sets the read preference for the transaction.
+func (t *TransactionOptions) SetReadPreference(rp *readpref.ReadPref) *TransactionOptions {
+	t.ReadPreference = rp
+	return t
+}
+
+// SetWriteConcern sets the write concern for the transaction.
+func (t *TransactionOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *TransactionOptions {
+	t.WriteConcern = wc
+	return t
+}
+
+// MergeTransactionOptions combines the given *TransactionOptions into a single *TransactionOptions in a
+// last-one-wins fashion.
+func MergeTransactionOptions(opts ...*TransactionOptions) *TransactionOptions {
+	t := Transaction()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ReadConcern != nil {
+			t.ReadConcern = opt.ReadConcern
+		}
+		if opt.ReadPreference != nil {
+			t.ReadPreference = opt.ReadPreference
+		}
+		if opt.WriteConcern != nil {
+			t.WriteConcern = opt.WriteConcern
+		}
+	}
+
+	return t
+}
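+
+// A usage sketch (editor's addition, not part of the upstream file): requesting
+// a majority write concern for a transaction:
+//
+//	opts := Transaction().SetWriteConcern(writeconcern.New(writeconcern.WMajority()))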
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/updateoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/updateoptions.go
new file mode 100644
index 0000000..468ccda
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/updateoptions.go
@@ -0,0 +1,71 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// UpdateOptions represents all possible options to the updateOne() and updateMany() functions
+type UpdateOptions struct {
+	ArrayFilters             *ArrayFilters // A set of filters specifying to which array elements an update should apply
+	BypassDocumentValidation *bool         // If true, allows the write to opt-out of document level validation
+	Collation                *Collation    // Specifies a collation
+	Upsert                   *bool         // When true, creates a new document if no document matches the query
+}
+
+// Update returns a pointer to a new UpdateOptions
+func Update() *UpdateOptions {
+	return &UpdateOptions{}
+}
+
+// SetArrayFilters specifies a set of filters that determine which array elements an update applies to.
+// Valid for server versions >= 3.6.
+func (uo *UpdateOptions) SetArrayFilters(af ArrayFilters) *UpdateOptions {
+	uo.ArrayFilters = &af
+	return uo
+}
+
+// SetBypassDocumentValidation allows the write to opt-out of document level validation.
+// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
+func (uo *UpdateOptions) SetBypassDocumentValidation(b bool) *UpdateOptions {
+	uo.BypassDocumentValidation = &b
+	return uo
+}
+
+// SetCollation specifies a collation.
+// Valid for server versions >= 3.4.
+func (uo *UpdateOptions) SetCollation(c *Collation) *UpdateOptions {
+	uo.Collation = c
+	return uo
+}
+
+// SetUpsert allows the creation of a new document if no document matches the query
+func (uo *UpdateOptions) SetUpsert(b bool) *UpdateOptions {
+	uo.Upsert = &b
+	return uo
+}
+
+// MergeUpdateOptions combines the given UpdateOptions into a single UpdateOptions in a last-one-wins fashion
+func MergeUpdateOptions(opts ...*UpdateOptions) *UpdateOptions {
+	uOpts := Update()
+	for _, uo := range opts {
+		if uo == nil {
+			continue
+		}
+		if uo.ArrayFilters != nil {
+			uOpts.ArrayFilters = uo.ArrayFilters
+		}
+		if uo.BypassDocumentValidation != nil {
+			uOpts.BypassDocumentValidation = uo.BypassDocumentValidation
+		}
+		if uo.Collation != nil {
+			uOpts.Collation = uo.Collation
+		}
+		if uo.Upsert != nil {
+			uOpts.Upsert = uo.Upsert
+		}
+	}
+
+	return uOpts
+}
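+
+// A merge sketch (editor's addition, not part of the upstream file); callerOpts
+// is a hypothetical *UpdateOptions supplied elsewhere:
+//
+//	merged := MergeUpdateOptions(callerOpts, Update().SetUpsert(true))
+//	// the later SetUpsert(true) wins over any Upsert set in callerOpts.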
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/readconcern/readconcern.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/readconcern/readconcern.go
new file mode 100644
index 0000000..c185fff
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/readconcern/readconcern.go
@@ -0,0 +1,77 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package readconcern
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// ReadConcern for replica sets and replica set shards determines which data to return from a query.
+type ReadConcern struct {
+	level string
+}
+
+// Option is an option to provide when creating a ReadConcern.
+type Option func(concern *ReadConcern)
+
+// Level creates an option that sets the level of a ReadConcern.
+func Level(level string) Option {
+	return func(concern *ReadConcern) {
+		concern.level = level
+	}
+}
+
+// Local specifies that the query should return the instance’s most recent data.
+func Local() *ReadConcern {
+	return New(Level("local"))
+}
+
+// Majority specifies that the query should return the instance’s most recent data acknowledged as
+// having been written to a majority of members in the replica set.
+func Majority() *ReadConcern {
+	return New(Level("majority"))
+}
+
+// Linearizable specifies that the query should return data that reflects all successful writes
+// issued with a write concern of "majority" and acknowledged prior to the start of the read operation.
+func Linearizable() *ReadConcern {
+	return New(Level("linearizable"))
+}
+
+// Available specifies that the query should return data from the instance with no guarantee
+// that the data has been written to a majority of the replica set members (i.e. may be rolled back).
+func Available() *ReadConcern {
+	return New(Level("available"))
+}
+
+// Snapshot specifies a read concern of level "snapshot", which is only
+// available for operations within multi-document transactions.
+func Snapshot() *ReadConcern {
+	return New(Level("snapshot"))
+}
+
+// New constructs a new read concern from the given options.
+func New(options ...Option) *ReadConcern {
+	concern := &ReadConcern{}
+
+	for _, option := range options {
+		option(concern)
+	}
+
+	return concern
+}
+
+// MarshalBSONValue implements the bson.ValueMarshaler interface.
+func (rc *ReadConcern) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	var elems []byte
+
+	if len(rc.level) > 0 {
+		elems = bsoncore.AppendStringElement(elems, "level", rc.level)
+	}
+
+	return bsontype.EmbeddedDocument, bsoncore.BuildDocument(nil, elems), nil
+}
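+
+// A marshalling sketch (editor's addition, not part of the upstream file):
+//
+//	rc := Majority()
+//	_, doc, _ := rc.MarshalBSONValue()
+//	// doc holds the bytes of the embedded document {"level": "majority"}.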
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/mode.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/mode.go
new file mode 100644
index 0000000..e7030c6
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/mode.go
@@ -0,0 +1,56 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package readpref
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Mode indicates the user's preference on reads.
+type Mode uint8
+
+// Mode constants
+const (
+	_ Mode = iota
+	// PrimaryMode indicates that only a primary is
+	// considered for reading. This is the default
+	// mode.
+	PrimaryMode
+	// PrimaryPreferredMode indicates that if a primary
+	// is available, use it; otherwise, eligible
+	// secondaries will be considered.
+	PrimaryPreferredMode
+	// SecondaryMode indicates that only secondaries
+	// should be considered.
+	SecondaryMode
+	// SecondaryPreferredMode indicates that only secondaries
+	// should be considered when one is available. If none
+	// are available, then a primary will be considered.
+	SecondaryPreferredMode
+	// NearestMode indicates that all primaries and secondaries
+	// will be considered.
+	NearestMode
+)
+
+// ModeFromString returns the Mode corresponding to the given mode string.
+func ModeFromString(mode string) (Mode, error) {
+	switch strings.ToLower(mode) {
+	case "primary":
+		return PrimaryMode, nil
+	case "primarypreferred":
+		return PrimaryPreferredMode, nil
+	case "secondary":
+		return SecondaryMode, nil
+	case "secondarypreferred":
+		return SecondaryPreferredMode, nil
+	case "nearest":
+		return NearestMode, nil
+	}
+	return Mode(0), fmt.Errorf("unknown read preference %v", mode)
+}
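+
+// A parsing sketch (editor's addition, not part of the upstream file); matching
+// is case-insensitive, so connection-string casing is accepted:
+//
+//	mode, err := ModeFromString("secondaryPreferred")
+//	// mode == SecondaryPreferredMode, err == nil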
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/options.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/options.go
new file mode 100644
index 0000000..a81cf3e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/options.go
@@ -0,0 +1,60 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package readpref
+
+import (
+	"errors"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/tag"
+)
+
+// ErrInvalidTagSet indicates that an invalid set of tags was specified.
+var ErrInvalidTagSet = errors.New("an even number of tags must be specified")
+
+// Option configures a read preference.
+type Option func(*ReadPref) error
+
+// WithMaxStaleness sets the maximum staleness a
+// server is allowed to have.
+func WithMaxStaleness(ms time.Duration) Option {
+	return func(rp *ReadPref) error {
+		rp.maxStaleness = ms
+		rp.maxStalenessSet = true
+		return nil
+	}
+}
+
+// WithTags sets a single tag set used to match
+// a server. The last call to WithTags or WithTagSets
+// overrides all previous calls to either method.
+func WithTags(tags ...string) Option {
+	return func(rp *ReadPref) error {
+		length := len(tags)
+		if length < 2 || length%2 != 0 {
+			return ErrInvalidTagSet
+		}
+
+		tagset := make(tag.Set, 0, length/2)
+
+		for i := 1; i < length; i += 2 {
+			tagset = append(tagset, tag.Tag{Name: tags[i-1], Value: tags[i]})
+		}
+
+		return WithTagSets(tagset)(rp)
+	}
+}
+
+// WithTagSets sets the tag sets used to match
+// a server. The last call to WithTags or WithTagSets
+// overrides all previous calls to either method.
+func WithTagSets(tagSets ...tag.Set) Option {
+	return func(rp *ReadPref) error {
+		rp.tagSets = tagSets
+		return nil
+	}
+}
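+
+// A usage sketch (editor's addition, not part of the upstream file): WithTags
+// takes flattened name/value pairs, so an odd count yields ErrInvalidTagSet:
+//
+//	rp, err := New(SecondaryMode, WithTags("dc", "ny", "rack", "1"))
+//	// rp matches secondaries tagged with dc=ny and rack=1.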
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/readpref.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/readpref.go
new file mode 100644
index 0000000..0d624ff
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/readpref.go
@@ -0,0 +1,99 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package readpref
+
+import (
+	"errors"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/tag"
+)
+
+var (
+	errInvalidReadPreference = errors.New("cannot specify tags or max staleness on primary")
+)
+
+var primary = ReadPref{mode: PrimaryMode}
+
+// Primary constructs a read preference with a PrimaryMode.
+func Primary() *ReadPref {
+	return &primary
+}
+
+// PrimaryPreferred constructs a read preference with a PrimaryPreferredMode.
+func PrimaryPreferred(opts ...Option) *ReadPref {
+	// New only returns an error with a mode of Primary
+	rp, _ := New(PrimaryPreferredMode, opts...)
+	return rp
+}
+
+// SecondaryPreferred constructs a read preference with a SecondaryPreferredMode.
+func SecondaryPreferred(opts ...Option) *ReadPref {
+	// New only returns an error with a mode of Primary
+	rp, _ := New(SecondaryPreferredMode, opts...)
+	return rp
+}
+
+// Secondary constructs a read preference with a SecondaryMode.
+func Secondary(opts ...Option) *ReadPref {
+	// New only returns an error with a mode of Primary
+	rp, _ := New(SecondaryMode, opts...)
+	return rp
+}
+
+// Nearest constructs a read preference with a NearestMode.
+func Nearest(opts ...Option) *ReadPref {
+	// New only returns an error with a mode of Primary
+	rp, _ := New(NearestMode, opts...)
+	return rp
+}
+
+// New creates a new ReadPref.
+func New(mode Mode, opts ...Option) (*ReadPref, error) {
+	rp := &ReadPref{
+		mode: mode,
+	}
+
+	if mode == PrimaryMode && len(opts) != 0 {
+		return nil, errInvalidReadPreference
+	}
+
+	for _, opt := range opts {
+		err := opt(rp)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return rp, nil
+}
+
+// ReadPref determines which servers are considered suitable for read operations.
+type ReadPref struct {
+	maxStaleness    time.Duration
+	maxStalenessSet bool
+	mode            Mode
+	tagSets         []tag.Set
+}
+
+// MaxStaleness is the maximum amount of time to allow
+// a server to be considered eligible for selection. The
+// second return value indicates if this value has been set.
+func (r *ReadPref) MaxStaleness() (time.Duration, bool) {
+	return r.maxStaleness, r.maxStalenessSet
+}
+
+// Mode indicates the mode of the read preference.
+func (r *ReadPref) Mode() Mode {
+	return r.mode
+}
+
+// TagSets are multiple tag sets indicating
+// which servers should be considered.
+func (r *ReadPref) TagSets() []tag.Set {
+	return r.tagSets
+}
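+
+// A construction sketch (editor's addition, not part of the upstream file);
+// Primary() rejects options, so staleness bounds pair with secondary modes:
+//
+//	rp := SecondaryPreferred(WithMaxStaleness(90 * time.Second))
+//	ms, set := rp.MaxStaleness() // 90*time.Second, true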
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/results.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/results.go
new file mode 100644
index 0000000..b4bcd02
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/results.go
@@ -0,0 +1,139 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// BulkWriteResult holds the result of a bulk write operation.
+type BulkWriteResult struct {
+	InsertedCount int64
+	MatchedCount  int64
+	ModifiedCount int64
+	DeletedCount  int64
+	UpsertedCount int64
+	UpsertedIDs   map[int64]interface{}
+}
+
+// InsertOneResult is a result of an InsertOne operation.
+//
+// InsertedID will be a Go type that corresponds to a BSON type.
+type InsertOneResult struct {
+	// The identifier that was inserted.
+	InsertedID interface{}
+}
+
+// InsertManyResult is a result of an InsertMany operation.
+type InsertManyResult struct {
+	// The _id fields of the inserted documents, in the order the documents were inserted.
+	InsertedIDs []interface{}
+}
+
+// DeleteResult is a result of a DeleteOne operation.
+type DeleteResult struct {
+	// The number of documents that were deleted.
+	DeletedCount int64 `bson:"n"`
+}
+
+// ListDatabasesResult is a result of a ListDatabases operation. Each specification
+// describes a database on the server.
+type ListDatabasesResult struct {
+	Databases []DatabaseSpecification
+	TotalSize int64
+}
+
+func (ldr ListDatabasesResult) fromResult(res result.ListDatabases) ListDatabasesResult {
+	ldr.Databases = make([]DatabaseSpecification, 0, len(res.Databases))
+	for _, spec := range res.Databases {
+		ldr.Databases = append(
+			ldr.Databases,
+			DatabaseSpecification{Name: spec.Name, SizeOnDisk: spec.SizeOnDisk, Empty: spec.Empty},
+		)
+	}
+	ldr.TotalSize = res.TotalSize
+	return ldr
+}
+
+// DatabaseSpecification is the information for a single database returned
+// from a ListDatabases operation.
+type DatabaseSpecification struct {
+	Name       string
+	SizeOnDisk int64
+	Empty      bool
+}
+
+// UpdateResult is a result of an update operation.
+//
+// UpsertedID will be a Go type that corresponds to a BSON type.
+type UpdateResult struct {
+	// The number of documents that matched the filter.
+	MatchedCount int64
+	// The number of documents that were modified.
+	ModifiedCount int64
+	// The number of documents that were upserted.
+	UpsertedCount int64
+	// The identifier of the inserted document if an upsert took place.
+	UpsertedID interface{}
+}
+
+// UnmarshalBSON implements the bson.Unmarshaler interface.
+func (result *UpdateResult) UnmarshalBSON(b []byte) error {
+	elems, err := bson.Raw(b).Elements()
+	if err != nil {
+		return err
+	}
+
+	for _, elem := range elems {
+		switch elem.Key() {
+		case "n":
+			switch elem.Value().Type {
+			case bson.TypeInt32:
+				result.MatchedCount = int64(elem.Value().Int32())
+			case bson.TypeInt64:
+				result.MatchedCount = elem.Value().Int64()
+			default:
+				return fmt.Errorf("Received invalid type for n, should be Int32 or Int64, received %s", elem.Value().Type)
+			}
+		case "nModified":
+			switch elem.Value().Type {
+			case bson.TypeInt32:
+				result.ModifiedCount = int64(elem.Value().Int32())
+			case bson.TypeInt64:
+				result.ModifiedCount = elem.Value().Int64()
+			default:
+				return fmt.Errorf("Received invalid type for nModified, should be Int32 or Int64, received %s", elem.Value().Type)
+			}
+		case "upserted":
+			switch elem.Value().Type {
+			case bson.TypeArray:
+				e, err := elem.Value().Array().IndexErr(0)
+				if err != nil {
+					break
+				}
+				if e.Value().Type != bson.TypeEmbeddedDocument {
+					break
+				}
+				var d struct {
+					ID interface{} `bson:"_id"`
+				}
+				err = bson.Unmarshal(e.Value().Document(), &d)
+				if err != nil {
+					return err
+				}
+				result.UpsertedID = d.ID
+			default:
+				return fmt.Errorf("Received invalid type for upserted, should be Array, received %s", elem.Value().Type)
+			}
+		}
+	}
+
+	return nil
+}
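+
+// Editor's note (not part of the upstream file): the decoder above expects a
+// server reply shaped like
+//
+//	{"n": 1, "nModified": 1, "upserted": [{"_id": <id>}]}
+//
+// where n and nModified may arrive as either int32 or int64, and only the
+// first _id in the upserted array is surfaced as UpdateResult.UpsertedID.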
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/session.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/session.go
new file mode 100644
index 0000000..381714d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/session.go
@@ -0,0 +1,181 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// ErrWrongClient is returned when a user attempts to pass in a session created by a different client than
+// the method call is using.
+var ErrWrongClient = errors.New("session was not created by this client")
+
+// SessionContext is a hybrid interface. It combines a context.Context with
+// a mongo.Session. This type can be used as a regular context.Context or
+// Session type. It is not goroutine safe and should not be used in multiple goroutines concurrently.
+type SessionContext interface {
+	context.Context
+	Session
+}
+
+type sessionContext struct {
+	context.Context
+	Session
+}
+
+type sessionKey struct {
+}
+
+// Session is the interface that represents a sequential set of operations executed by an application.
+// Instances of this interface can be used to run transactions against the server
+// and to enable causally consistent behavior for applications.
+type Session interface {
+	EndSession(context.Context)
+	StartTransaction(...*options.TransactionOptions) error
+	AbortTransaction(context.Context) error
+	CommitTransaction(context.Context) error
+	ClusterTime() bson.Raw
+	AdvanceClusterTime(bson.Raw) error
+	OperationTime() *primitive.Timestamp
+	AdvanceOperationTime(*primitive.Timestamp) error
+	session()
+}
+
+// sessionImpl represents a set of sequential operations executed by an application that are related in some way.
+type sessionImpl struct {
+	*session.Client
+	topo                *topology.Topology
+	didCommitAfterStart bool // true if commit was called after start with no other operations
+}
+
+// EndSession ends the session.
+func (s *sessionImpl) EndSession(ctx context.Context) {
+	if s.TransactionInProgress() {
+		// ignore all errors aborting during an end session
+		_ = s.AbortTransaction(ctx)
+	}
+	s.Client.EndSession()
+}
+
+// StartTransaction starts a transaction for this session.
+func (s *sessionImpl) StartTransaction(opts ...*options.TransactionOptions) error {
+	err := s.CheckStartTransaction()
+	if err != nil {
+		return err
+	}
+
+	s.didCommitAfterStart = false
+
+	topts := options.MergeTransactionOptions(opts...)
+	coreOpts := &session.TransactionOptions{
+		ReadConcern:    topts.ReadConcern,
+		ReadPreference: topts.ReadPreference,
+		WriteConcern:   topts.WriteConcern,
+	}
+
+	return s.Client.StartTransaction(coreOpts)
+}
+
+// AbortTransaction aborts the session's transaction, returning any errors and error codes
+func (s *sessionImpl) AbortTransaction(ctx context.Context) error {
+	err := s.CheckAbortTransaction()
+	if err != nil {
+		return err
+	}
+
+	cmd := command.AbortTransaction{
+		Session: s.Client,
+	}
+
+	s.Aborting = true
+	_, err = driver.AbortTransaction(ctx, cmd, s.topo, description.WriteSelector())
+
+	_ = s.Client.AbortTransaction()
+	return err
+}
+
+// CommitTransaction commits the session's transaction.
+func (s *sessionImpl) CommitTransaction(ctx context.Context) error {
+	err := s.CheckCommitTransaction()
+	if err != nil {
+		return err
+	}
+
+	// Do not run the commit command if the transaction is in started state
+	if s.TransactionStarting() || s.didCommitAfterStart {
+		s.didCommitAfterStart = true
+		return s.Client.CommitTransaction()
+	}
+
+	if s.Client.TransactionCommitted() {
+		s.RetryingCommit = true
+	}
+
+	cmd := command.CommitTransaction{
+		Session: s.Client,
+	}
+
+	// Hack to ensure that session stays in committed state
+	if s.TransactionCommitted() {
+		s.Committing = true
+		defer func() {
+			s.Committing = false
+		}()
+	}
+	_, err = driver.CommitTransaction(ctx, cmd, s.topo, description.WriteSelector())
+	if err == nil {
+		return s.Client.CommitTransaction()
+	}
+	return err
+}
+
+func (s *sessionImpl) ClusterTime() bson.Raw {
+	return s.Client.ClusterTime
+}
+
+func (s *sessionImpl) AdvanceClusterTime(d bson.Raw) error {
+	return s.Client.AdvanceClusterTime(d)
+}
+
+func (s *sessionImpl) OperationTime() *primitive.Timestamp {
+	return s.Client.OperationTime
+}
+
+func (s *sessionImpl) AdvanceOperationTime(ts *primitive.Timestamp) error {
+	return s.Client.AdvanceOperationTime(ts)
+}
+
+func (*sessionImpl) session() {
+}
+
+// sessionFromContext checks for a sessionImpl in the given context and, if one
+// exists, returns its underlying *session.Client
+func sessionFromContext(ctx context.Context) *session.Client {
+	s := ctx.Value(sessionKey{})
+	if ses, ok := s.(*sessionImpl); ses != nil && ok {
+		return ses.Client
+	}
+
+	return nil
+}
+
+func contextWithSession(ctx context.Context, sess Session) SessionContext {
+	return &sessionContext{
+		Context: context.WithValue(ctx, sessionKey{}, sess),
+		Session: sess,
+	}
+}
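+
+// A flow sketch (editor's addition, not part of the upstream file): the stored
+// Session must be a *sessionImpl for the reverse lookup to succeed:
+//
+//	sctx := contextWithSession(ctx, sess) // sess is a *sessionImpl
+//	sc := sessionFromContext(sctx)        // recovers sess.Client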
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/single_result.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/single_result.go
new file mode 100644
index 0000000..9a929db
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/single_result.go
@@ -0,0 +1,93 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+)
+
+// ErrNoDocuments is returned by Decode when an operation that returns a
+// SingleResult doesn't return any documents.
+var ErrNoDocuments = errors.New("mongo: no documents in result")
+
+// SingleResult represents a single document returned from an operation. If
+// the operation returned an error, the Err method of SingleResult will
+// return that error.
+type SingleResult struct {
+	err error
+	cur *Cursor
+	rdr bson.Raw
+	reg *bsoncodec.Registry
+}
+
+// Decode will attempt to decode the first document into v. If there was an
+// error from the operation that created this SingleResult then the error
+// will be returned. If there were no returned documents, ErrNoDocuments is
+// returned.
+func (sr *SingleResult) Decode(v interface{}) error {
+	if sr.err != nil {
+		return sr.err
+	}
+	if sr.reg == nil {
+		return bson.ErrNilRegistry
+	}
+	switch {
+	case sr.rdr != nil:
+		if v == nil {
+			return nil
+		}
+		return bson.UnmarshalWithRegistry(sr.reg, sr.rdr, v)
+	case sr.cur != nil:
+		defer sr.cur.Close(context.TODO())
+		if !sr.cur.Next(context.TODO()) {
+			if err := sr.cur.Err(); err != nil {
+				return err
+			}
+			return ErrNoDocuments
+		}
+		if v == nil {
+			return nil
+		}
+		return sr.cur.Decode(v)
+	}
+
+	return ErrNoDocuments
+}
+
+// DecodeBytes will return a copy of the document as a bson.Raw. If there was an
+// error from the operation that created this SingleResult then the error
+// will be returned. If there were no returned documents, ErrNoDocuments is
+// returned.
+func (sr *SingleResult) DecodeBytes() (bson.Raw, error) {
+	switch {
+	case sr.err != nil:
+		return nil, sr.err
+	case sr.rdr != nil:
+		return sr.rdr, nil
+	case sr.cur != nil:
+		defer sr.cur.Close(context.TODO())
+		if !sr.cur.Next(context.TODO()) {
+			if err := sr.cur.Err(); err != nil {
+				return nil, err
+			}
+			return nil, ErrNoDocuments
+		}
+		return sr.cur.Current, nil
+	}
+
+	return nil, ErrNoDocuments
+}
+
+// Err will return the error from the operation that created this SingleResult.
+// If there was no error, nil is returned.
+func (sr *SingleResult) Err() error {
+	return sr.err
+}
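+
+// A usage sketch (editor's addition, not part of the upstream file), assuming
+// a hypothetical collection handle coll and filter document filter:
+//
+//	var doc bson.Raw
+//	err := coll.FindOne(ctx, filter).Decode(&doc)
+//	if err == ErrNoDocuments {
+//		// the filter matched no documents
+//	}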
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/util.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/util.go
new file mode 100644
index 0000000..270fa24
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/util.go
@@ -0,0 +1,7 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/writeconcern/writeconcern.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/writeconcern/writeconcern.go
new file mode 100644
index 0000000..234ba19
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/writeconcern/writeconcern.go
@@ -0,0 +1,186 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package writeconcern
+
+import (
+	"errors"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// ErrInconsistent indicates that an inconsistent write concern was specified.
+var ErrInconsistent = errors.New("a write concern cannot have both w=0 and j=true")
+
+// ErrEmptyWriteConcern indicates that a write concern has no fields set.
+var ErrEmptyWriteConcern = errors.New("a write concern must have at least one field set")
+
+// ErrNegativeW indicates that a negative integer `w` field was specified.
+var ErrNegativeW = errors.New("write concern `w` field cannot be a negative number")
+
+// ErrNegativeWTimeout indicates that a negative WTimeout was specified.
+var ErrNegativeWTimeout = errors.New("write concern `wtimeout` field cannot be negative")
+
+// WriteConcern describes the level of acknowledgement requested from MongoDB for write operations
+// to a standalone mongod or to replica sets or to sharded clusters.
+type WriteConcern struct {
+	w        interface{}
+	j        bool
+	wTimeout time.Duration
+}
+
+// Option is an option to provide when creating a WriteConcern.
+type Option func(concern *WriteConcern)
+
+// New constructs a new WriteConcern.
+func New(options ...Option) *WriteConcern {
+	concern := &WriteConcern{}
+
+	for _, option := range options {
+		option(concern)
+	}
+
+	return concern
+}
+
+// W requests acknowledgement that write operations propagate to the specified number of mongod
+// instances.
+func W(w int) Option {
+	return func(concern *WriteConcern) {
+		concern.w = w
+	}
+}
+
+// WMajority requests acknowledgement that write operations propagate to the majority of mongod
+// instances.
+func WMajority() Option {
+	return func(concern *WriteConcern) {
+		concern.w = "majority"
+	}
+}
+
+// WTagSet requests acknowledgement that write operations propagate to the specified mongod
+// instance.
+func WTagSet(tag string) Option {
+	return func(concern *WriteConcern) {
+		concern.w = tag
+	}
+}
+
+// J requests acknowledgement from MongoDB that write operations are written to
+// the journal.
+func J(j bool) Option {
+	return func(concern *WriteConcern) {
+		concern.j = j
+	}
+}
+
+// WTimeout specifies a time limit for the write concern.
+func WTimeout(d time.Duration) Option {
+	return func(concern *WriteConcern) {
+		concern.wTimeout = d
+	}
+}
+
+// MarshalBSONValue implements the bson.ValueMarshaler interface.
+func (wc *WriteConcern) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	if !wc.IsValid() {
+		return bsontype.Type(0), nil, ErrInconsistent
+	}
+
+	var elems []byte
+
+	if wc.w != nil {
+		switch t := wc.w.(type) {
+		case int:
+			if t < 0 {
+				return bsontype.Type(0), nil, ErrNegativeW
+			}
+
+			elems = bsoncore.AppendInt32Element(elems, "w", int32(t))
+		case string:
+			elems = bsoncore.AppendStringElement(elems, "w", string(t))
+		}
+	}
+
+	if wc.j {
+		elems = bsoncore.AppendBooleanElement(elems, "j", wc.j)
+	}
+
+	if wc.wTimeout < 0 {
+		return bsontype.Type(0), nil, ErrNegativeWTimeout
+	}
+
+	if wc.wTimeout != 0 {
+		elems = bsoncore.AppendInt64Element(elems, "wtimeout", int64(wc.wTimeout/time.Millisecond))
+	}
+
+	if len(elems) == 0 {
+		return bsontype.Type(0), nil, ErrEmptyWriteConcern
+	}
+	return bsontype.EmbeddedDocument, bsoncore.BuildDocument(nil, elems), nil
+}
+
+// AcknowledgedValue returns true if a BSON RawValue for a write concern represents an acknowledged write concern.
+// The element's value must be a document representing a write concern.
+func AcknowledgedValue(rawv bson.RawValue) bool {
+	doc, ok := bsoncore.Value{Type: rawv.Type, Data: rawv.Value}.DocumentOK()
+	if !ok {
+		return false
+	}
+
+	val, err := doc.LookupErr("w")
+	if err != nil {
+		// key w not found --> acknowledged
+		return true
+	}
+
+	i32, ok := val.Int32OK()
+	if !ok {
+		return false
+	}
+	return i32 != 0
+}
+
+// Acknowledged indicates whether or not a write with the given write concern will be acknowledged.
+func (wc *WriteConcern) Acknowledged() bool {
+	if wc == nil || wc.j {
+		return true
+	}
+
+	switch v := wc.w.(type) {
+	case int:
+		if v == 0 {
+			return false
+		}
+	}
+
+	return true
+}
+
+// IsValid checks whether the write concern is valid; it is invalid only when
+// journaling is requested (j=true) together with w=0.
+func (wc *WriteConcern) IsValid() bool {
+	if !wc.j {
+		return true
+	}
+
+	switch v := wc.w.(type) {
+	case int:
+		if v == 0 {
+			return false
+		}
+	}
+
+	return true
+}
+
+// AckWrite returns true if a write concern represents an acknowledged write
+func AckWrite(wc *WriteConcern) bool {
+	return wc == nil || wc.Acknowledged()
+}
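+
+// An acknowledgement sketch (editor's addition, not part of the upstream file):
+// w=0 without journaling is the only unacknowledged shape:
+//
+//	AckWrite(New(W(0)))                          // false
+//	AckWrite(New(W(2), WTimeout(5*time.Second))) // true
+//	AckWrite(nil)                                // true (nil defaults to acknowledged)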
diff --git a/vendor/github.com/mongodb/mongo-go-driver/tag/tag.go b/vendor/github.com/mongodb/mongo-go-driver/tag/tag.go
new file mode 100644
index 0000000..c10bd3e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/tag/tag.go
@@ -0,0 +1,57 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package tag
+
+// Tag is a name/value pair.
+type Tag struct {
+	Name  string
+	Value string
+}
+
+// NewTagSetFromMap creates a new tag set from a map.
+func NewTagSetFromMap(m map[string]string) Set {
+	var set Set
+	for k, v := range m {
+		set = append(set, Tag{Name: k, Value: v})
+	}
+
+	return set
+}
+
+// NewTagSetsFromMaps creates new tag sets from maps.
+func NewTagSetsFromMaps(maps []map[string]string) []Set {
+	sets := make([]Set, 0, len(maps))
+	for _, m := range maps {
+		sets = append(sets, NewTagSetFromMap(m))
+	}
+	return sets
+}
+
+// Set is an ordered list of Tags.
+type Set []Tag
+
+// Contains indicates whether the name/value pair exists in the tagset.
+func (ts Set) Contains(name, value string) bool {
+	for _, t := range ts {
+		if t.Name == name && t.Value == value {
+			return true
+		}
+	}
+
+	return false
+}
+
+// ContainsAll indicates whether all the name/value pairs exist in the tagset.
+func (ts Set) ContainsAll(other []Tag) bool {
+	for _, ot := range other {
+		if !ts.Contains(ot.Name, ot.Value) {
+			return false
+		}
+	}
+
+	return true
+}
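+
+// A usage sketch (editor's addition, not part of the upstream file):
+//
+//	set := NewTagSetFromMap(map[string]string{"dc": "ny"})
+//	set.Contains("dc", "ny") // true
+//	set.Contains("dc", "sf") // false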
diff --git a/vendor/github.com/mongodb/mongo-go-driver/version/version.go b/vendor/github.com/mongodb/mongo-go-driver/version/version.go
new file mode 100644
index 0000000..1aee619
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/version/version.go
@@ -0,0 +1,10 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package version
+
+// Driver is the current version of the driver.
+var Driver = "v0.3.0"
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/array.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/array.go
new file mode 100644
index 0000000..8001d70
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/array.go
@@ -0,0 +1,97 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strconv"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// ErrNilArray indicates that an operation was attempted on a nil *Array.
+var ErrNilArray = errors.New("array is nil")
+
+// Arr represents an array in BSON.
+type Arr []Val
+
+// String implements the fmt.Stringer interface.
+func (a Arr) String() string {
+	var buf bytes.Buffer
+	buf.Write([]byte("bson.Array["))
+	for idx, val := range a {
+		if idx > 0 {
+			buf.Write([]byte(", "))
+		}
+		fmt.Fprintf(&buf, "%s", val)
+	}
+	buf.WriteByte(']')
+
+	return buf.String()
+}
+
+// MarshalBSONValue implements the bsoncodec.ValueMarshaler interface.
+func (a Arr) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	if a == nil {
+		// TODO: Should we do this?
+		return bsontype.Null, nil, nil
+	}
+
+	idx, dst := bsoncore.ReserveLength(nil)
+	for idx, value := range a {
+		t, data, _ := value.MarshalBSONValue() // MarshalBSONValue on Val never returns an error.
+		dst = append(dst, byte(t))
+		dst = append(dst, strconv.Itoa(idx)...)
+		dst = append(dst, 0x00)
+		dst = append(dst, data...)
+	}
+	dst = append(dst, 0x00)
+	dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:])))
+	return bsontype.Array, dst, nil
+}
+
+// UnmarshalBSONValue implements the bsoncodec.ValueUnmarshaler interface.
+func (a *Arr) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
+	if a == nil {
+		return ErrNilArray
+	}
+	*a = (*a)[:0]
+
+	elements, err := bsoncore.Document(data).Elements()
+	if err != nil {
+		return err
+	}
+
+	for _, elem := range elements {
+		var val Val
+		rawval := elem.Value()
+		err = val.UnmarshalBSONValue(rawval.Type, rawval.Data)
+		if err != nil {
+			return err
+		}
+		*a = append(*a, val)
+	}
+	return nil
+}
+
+// Equal compares this document to another, returning true if they are equal.
+func (a Arr) Equal(a2 Arr) bool {
+	if len(a) != len(a2) {
+		return false
+	}
+	for idx := range a {
+		if !a[idx].Equal(a2[idx]) {
+			return false
+		}
+	}
+	return true
+}
+
+func (Arr) idoc() {}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/bsoncore.go
new file mode 100644
index 0000000..8d2946a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/bsoncore.go
@@ -0,0 +1,791 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsoncore contains functions that can be used to encode and decode BSON
+// elements and values to or from a slice of bytes. These functions are aimed at
+// allowing low level manipulation of BSON and can be used to build a higher
+// level BSON library.
+//
+// The Read* functions within this package return the values of the element and
+// a boolean indicating if the values are valid. A boolean was used instead of
+// an error because any error that would be returned would be the same: not
+// enough bytes. This library attempts to do no validation; it will only return
+// false if there are not enough bytes for an item to be read. For example, the
+// ReadDocument function checks the length: if that length is larger than the
+// number of bytes available, it returns false; if there are enough bytes, it
+// returns those bytes and true. It is the consumer's responsibility to
+// validate those bytes.
+//
+// The Append* functions within this package will append the type value to the
+// given dst slice. If the slice has enough capacity, it will not grow the
+// slice. The Append*Element functions within this package operate in the same
+// way, but additionally append the BSON type and the key before the value.
+package bsoncore
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// AppendType will append t to dst and return the extended buffer.
+func AppendType(dst []byte, t bsontype.Type) []byte { return append(dst, byte(t)) }
+
+// AppendKey will append key to dst and return the extended buffer.
+func AppendKey(dst []byte, key string) []byte { return append(dst, key+string(0x00)...) }
+
+// AppendHeader will append Type t and key to dst and return the extended
+// buffer.
+func AppendHeader(dst []byte, t bsontype.Type, key string) []byte {
+	dst = AppendType(dst, t)
+	dst = append(dst, key...)
+	return append(dst, 0x00)
+	// return append(AppendType(dst, t), key+string(0x00)...)
+}
+
+// TODO(skriptble): All of the Read* functions should return src resliced to start just after what
+// was read.
+
+// ReadType will return the first byte of the provided []byte as a type. If
+// there is no available byte, false is returned.
+func ReadType(src []byte) (bsontype.Type, []byte, bool) {
+	if len(src) < 1 {
+		return 0, src, false
+	}
+	return bsontype.Type(src[0]), src[1:], true
+}
+
+// ReadKey will read a key from src. The 0x00 byte will not be present
+// in the returned string. If there are not enough bytes available, false is
+// returned.
+func ReadKey(src []byte) (string, []byte, bool) { return readcstring(src) }
+
+// ReadKeyBytes will read a key from src as bytes. The 0x00 byte will
+// not be present in the returned string. If there are not enough bytes
+// available, false is returned.
+func ReadKeyBytes(src []byte) ([]byte, []byte, bool) { return readcstringbytes(src) }
+
+// ReadHeader will read a type byte and a key from src. If both of these
+// values cannot be read, false is returned.
+func ReadHeader(src []byte) (t bsontype.Type, key string, rem []byte, ok bool) {
+	t, rem, ok = ReadType(src)
+	if !ok {
+		return 0, "", src, false
+	}
+	key, rem, ok = ReadKey(rem)
+	if !ok {
+		return 0, "", src, false
+	}
+
+	return t, key, rem, true
+}
+
+// ReadHeaderBytes will read a type and a key from src and return the remaining
+// bytes as rem. If either the type or key cannot be read, ok will be false.
+func ReadHeaderBytes(src []byte) (header []byte, rem []byte, ok bool) {
+	if len(src) < 1 {
+		return nil, src, false
+	}
+	idx := bytes.IndexByte(src[1:], 0x00)
+	if idx == -1 {
+		return nil, src, false
+	}
+	return src[:idx], src[idx+1:], true
+}
+
+// ReadElement reads the next full element from src. It returns the element, the remaining bytes in
+// the slice, and a boolean indicating if the read was successful.
+func ReadElement(src []byte) (Element, []byte, bool) {
+	if len(src) < 1 {
+		return nil, src, false
+	}
+	t := bsontype.Type(src[0])
+	idx := bytes.IndexByte(src[1:], 0x00)
+	if idx == -1 {
+		return nil, src, false
+	}
+	length, ok := valueLength(src[idx+2:], t) // We add 2 here because we called IndexByte with src[1:]
+	if !ok {
+		return nil, src, false
+	}
+	elemLength := 1 + idx + 1 + int(length)
+	if elemLength > len(src) {
+		return nil, src, false
+	}
+	return src[:elemLength], src[elemLength:], true
+}
+
+// ReadValue reads the next value as the provided types and returns a Value, the remaining bytes,
+// and a boolean indicating if the read was successful.
+func ReadValue(src []byte, t bsontype.Type) (Value, []byte, bool) {
+	data, rem, ok := readValue(src, t)
+	if !ok {
+		return Value{}, src, false
+	}
+	return Value{Type: t, Data: data}, rem, true
+}
+
+// AppendDouble will append f to dst and return the extended buffer.
+func AppendDouble(dst []byte, f float64) []byte {
+	return appendu64(dst, math.Float64bits(f))
+}
+
+// AppendDoubleElement will append a BSON double element using key and f to dst
+// and return the extended buffer.
+func AppendDoubleElement(dst []byte, key string, f float64) []byte {
+	return AppendDouble(AppendHeader(dst, bsontype.Double, key), f)
+}
+
+// ReadDouble will read a float64 from src. If there are not enough bytes it
+// will return false.
+func ReadDouble(src []byte) (float64, []byte, bool) {
+	bits, src, ok := readu64(src)
+	if !ok {
+		return 0, src, false
+	}
+	return math.Float64frombits(bits), src, true
+}
+
+// AppendString will append s to dst and return the extended buffer.
+func AppendString(dst []byte, s string) []byte {
+	return appendstring(dst, s)
+}
+
+// AppendStringElement will append a BSON string element using key and val to dst
+// and return the extended buffer.
+func AppendStringElement(dst []byte, key, val string) []byte {
+	return AppendString(AppendHeader(dst, bsontype.String, key), val)
+}
+
+// ReadString will read a string from src. If there are not enough bytes it
+// will return false.
+func ReadString(src []byte) (string, []byte, bool) {
+	return readstring(src)
+}
+
+// AppendDocumentStart reserves a document's length and returns the index where the length begins.
+// This index can later be used to write the length of the document.
+//
+// TODO(skriptble): We really need AppendDocumentStart and AppendDocumentEnd.
+// AppendDocumentStart would handle calling ReserveLength and providing the index of the start of
+// the document. AppendDocumentEnd would handle taking that start index, adding the null byte,
+// calculating the length, and filling in the length at the start of the document.
+func AppendDocumentStart(dst []byte) (index int32, b []byte) { return ReserveLength(dst) }
+
+// AppendDocumentStartInline functions the same as AppendDocumentStart but takes a pointer to the
+// index int32 which allows this function to be used inline.
+func AppendDocumentStartInline(dst []byte, index *int32) []byte {
+	idx, doc := AppendDocumentStart(dst)
+	*index = idx
+	return doc
+}
+
+// AppendDocumentElementStart writes a document element header and then reserves the length bytes.
+func AppendDocumentElementStart(dst []byte, key string) (index int32, b []byte) {
+	return AppendDocumentStart(AppendHeader(dst, bsontype.EmbeddedDocument, key))
+}
+
+// AppendDocumentEnd writes the null byte for a document and updates the length of the document.
+// The index should be the beginning of the document's length bytes.
+func AppendDocumentEnd(dst []byte, index int32) ([]byte, error) {
+	if int(index) > len(dst)-4 {
+		return dst, fmt.Errorf("not enough bytes available after index to write length")
+	}
+	dst = append(dst, 0x00)
+	dst = UpdateLength(dst, index, int32(len(dst[index:])))
+	return dst, nil
+}
+
+// AppendDocument will append doc to dst and return the extended buffer.
+func AppendDocument(dst []byte, doc []byte) []byte { return append(dst, doc...) }
+
+// AppendDocumentElement will append a BSON embedded document element using key
+// and doc to dst and return the extended buffer.
+func AppendDocumentElement(dst []byte, key string, doc []byte) []byte {
+	return AppendDocument(AppendHeader(dst, bsontype.EmbeddedDocument, key), doc)
+}
+
+// BuildDocument will create a document with the given elements and will append it to dst.
+func BuildDocument(dst []byte, elems []byte) []byte {
+	idx, dst := ReserveLength(dst)
+	dst = append(dst, elems...)
+	dst = append(dst, 0x00)
+	dst = UpdateLength(dst, idx, int32(len(dst[idx:])))
+	return dst
+}
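+
+// A building sketch (editor's addition, not part of the upstream file):
+// assembling the document {"hello": "world"} from raw elements:
+//
+//	elems := AppendStringElement(nil, "hello", "world")
+//	doc := BuildDocument(nil, elems)
+//	// doc now contains the length prefix, the element, and the 0x00 terminator.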
+
+// ReadDocument will read a document from src. If there are not enough bytes it
+// will return false.
+func ReadDocument(src []byte) (doc Document, rem []byte, ok bool) { return readLengthBytes(src) }
+
+// AppendArrayStart appends the length bytes to an array and then returns the index of the start
+// of those length bytes.
+func AppendArrayStart(dst []byte) (index int32, b []byte) { return ReserveLength(dst) }
+
+// AppendArrayElementStart appends an array element header and then the length bytes for an array,
+// returning the index where the length starts.
+func AppendArrayElementStart(dst []byte, key string) (index int32, b []byte) {
+	return AppendArrayStart(AppendHeader(dst, bsontype.Array, key))
+}
+
+// AppendArrayEnd appends the null byte to an array and calculates the length, inserting that
+// calculated length starting at index.
+func AppendArrayEnd(dst []byte, index int32) ([]byte, error) { return AppendDocumentEnd(dst, index) }
+
+// AppendArray will append arr to dst and return the extended buffer.
+func AppendArray(dst []byte, arr []byte) []byte { return append(dst, arr...) }
+
+// AppendArrayElement will append a BSON array element using key and arr to dst
+// and return the extended buffer.
+func AppendArrayElement(dst []byte, key string, arr []byte) []byte {
+	return AppendArray(AppendHeader(dst, bsontype.Array, key), arr)
+}
+
+// ReadArray will read an array from src. If there are not enough bytes it
+// will return false.
+func ReadArray(src []byte) (arr Document, rem []byte, ok bool) { return readLengthBytes(src) }
+
+// AppendBinary will append subtype and b to dst and return the extended buffer.
+func AppendBinary(dst []byte, subtype byte, b []byte) []byte {
+	if subtype == 0x02 {
+		return appendBinarySubtype2(dst, subtype, b)
+	}
+	dst = append(appendLength(dst, int32(len(b))), subtype)
+	return append(dst, b...)
+}
+
+// AppendBinaryElement will append a BSON binary element using key, subtype, and
+// b to dst and return the extended buffer.
+func AppendBinaryElement(dst []byte, key string, subtype byte, b []byte) []byte {
+	return AppendBinary(AppendHeader(dst, bsontype.Binary, key), subtype, b)
+}
+
+// ReadBinary will read a subtype and bin from src. If there are not enough bytes it
+// will return false.
+func ReadBinary(src []byte) (subtype byte, bin []byte, rem []byte, ok bool) {
+	length, rem, ok := ReadLength(src)
+	if !ok {
+		return 0x00, nil, src, false
+	}
+	if len(rem) < 1 { // subtype
+		return 0x00, nil, src, false
+	}
+	subtype, rem = rem[0], rem[1:]
+
+	if len(rem) < int(length) {
+		return 0x00, nil, src, false
+	}
+
+	if subtype == 0x02 {
+		length, rem, ok = ReadLength(rem)
+		if !ok || len(rem) < int(length) {
+			return 0x00, nil, src, false
+		}
+	}
+
+	return subtype, rem[:length], rem[length:], true
+}
+
+// AppendUndefinedElement will append a BSON undefined element using key to dst
+// and return the extended buffer.
+func AppendUndefinedElement(dst []byte, key string) []byte {
+	return AppendHeader(dst, bsontype.Undefined, key)
+}
+
+// AppendObjectID will append oid to dst and return the extended buffer.
+func AppendObjectID(dst []byte, oid primitive.ObjectID) []byte { return append(dst, oid[:]...) }
+
+// AppendObjectIDElement will append a BSON ObjectID element using key and oid to dst
+// and return the extended buffer.
+func AppendObjectIDElement(dst []byte, key string, oid primitive.ObjectID) []byte {
+	return AppendObjectID(AppendHeader(dst, bsontype.ObjectID, key), oid)
+}
+
+// ReadObjectID will read an ObjectID from src. If there are not enough bytes it
+// will return false.
+func ReadObjectID(src []byte) (primitive.ObjectID, []byte, bool) {
+	if len(src) < 12 {
+		return primitive.ObjectID{}, src, false
+	}
+	var oid primitive.ObjectID
+	copy(oid[:], src[0:12])
+	return oid, src[12:], true
+}
+
+// AppendBoolean will append b to dst and return the extended buffer.
+func AppendBoolean(dst []byte, b bool) []byte {
+	if b {
+		return append(dst, 0x01)
+	}
+	return append(dst, 0x00)
+}
+
+// AppendBooleanElement will append a BSON boolean element using key and b to dst
+// and return the extended buffer.
+func AppendBooleanElement(dst []byte, key string, b bool) []byte {
+	return AppendBoolean(AppendHeader(dst, bsontype.Boolean, key), b)
+}
+
+// ReadBoolean will read a bool from src. If there are not enough bytes it
+// will return false.
+func ReadBoolean(src []byte) (bool, []byte, bool) {
+	if len(src) < 1 {
+		return false, src, false
+	}
+
+	return src[0] == 0x01, src[1:], true
+}
+
+// AppendDateTime will append dt to dst and return the extended buffer.
+func AppendDateTime(dst []byte, dt int64) []byte { return appendi64(dst, dt) }
+
+// AppendDateTimeElement will append a BSON datetime element using key and dt to dst
+// and return the extended buffer.
+func AppendDateTimeElement(dst []byte, key string, dt int64) []byte {
+	return AppendDateTime(AppendHeader(dst, bsontype.DateTime, key), dt)
+}
+
+// ReadDateTime will read an int64 datetime from src. If there are not enough bytes it
+// will return false.
+func ReadDateTime(src []byte) (int64, []byte, bool) { return readi64(src) }
+
+// AppendTime will append time as a BSON DateTime to dst and return the extended buffer.
+func AppendTime(dst []byte, t time.Time) []byte {
+	return AppendDateTime(dst, t.Unix()*1000+int64(t.Nanosecond()/1e6))
+}
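+
+// For example (illustrative value), a time of 1s and 500ms past the epoch is
+// stored as the int64 millisecond count 1500:
+//
+//	dst := AppendTime(nil, time.Unix(1, 500000000)) // appends int64(1500)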
+
+// AppendTimeElement will append a BSON datetime element using key and t to dst
+// and return the extended buffer.
+func AppendTimeElement(dst []byte, key string, t time.Time) []byte {
+	return AppendTime(AppendHeader(dst, bsontype.DateTime, key), t)
+}
+
+// ReadTime will read a time.Time datetime from src. If there are not enough bytes it
+// will return false.
+func ReadTime(src []byte) (time.Time, []byte, bool) {
+	dt, rem, ok := readi64(src)
+	return time.Unix(dt/1e3, dt%1e3*1e6), rem, ok
+}
+
+// AppendNullElement will append a BSON null element using key to dst
+// and return the extended buffer.
+func AppendNullElement(dst []byte, key string) []byte { return AppendHeader(dst, bsontype.Null, key) }
+
+// AppendRegex will append pattern and options to dst and return the extended buffer.
+func AppendRegex(dst []byte, pattern, options string) []byte {
+	return append(dst, pattern+string(0x00)+options+string(0x00)...)
+}
+
+// AppendRegexElement will append a BSON regex element using key, pattern, and
+// options to dst and return the extended buffer.
+func AppendRegexElement(dst []byte, key, pattern, options string) []byte {
+	return AppendRegex(AppendHeader(dst, bsontype.Regex, key), pattern, options)
+}
+
+// ReadRegex will read a pattern and options from src. If there are not enough bytes it
+// will return false.
+func ReadRegex(src []byte) (pattern, options string, rem []byte, ok bool) {
+	pattern, rem, ok = readcstring(src)
+	if !ok {
+		return "", "", src, false
+	}
+	options, rem, ok = readcstring(rem)
+	if !ok {
+		return "", "", src, false
+	}
+	return pattern, options, rem, true
+}
+
+// AppendDBPointer will append ns and oid to dst and return the extended buffer.
+func AppendDBPointer(dst []byte, ns string, oid primitive.ObjectID) []byte {
+	return append(appendstring(dst, ns), oid[:]...)
+}
+
+// AppendDBPointerElement will append a BSON DBPointer element using key, ns,
+// and oid to dst and return the extended buffer.
+func AppendDBPointerElement(dst []byte, key, ns string, oid primitive.ObjectID) []byte {
+	return AppendDBPointer(AppendHeader(dst, bsontype.DBPointer, key), ns, oid)
+}
+
+// ReadDBPointer will read a ns and oid from src. If there are not enough bytes it
+// will return false.
+func ReadDBPointer(src []byte) (ns string, oid primitive.ObjectID, rem []byte, ok bool) {
+	ns, rem, ok = readstring(src)
+	if !ok {
+		return "", primitive.ObjectID{}, src, false
+	}
+	oid, rem, ok = ReadObjectID(rem)
+	if !ok {
+		return "", primitive.ObjectID{}, src, false
+	}
+	return ns, oid, rem, true
+}
+
+// AppendJavaScript will append js to dst and return the extended buffer.
+func AppendJavaScript(dst []byte, js string) []byte { return appendstring(dst, js) }
+
+// AppendJavaScriptElement will append a BSON JavaScript element using key and
+// js to dst and return the extended buffer.
+func AppendJavaScriptElement(dst []byte, key, js string) []byte {
+	return AppendJavaScript(AppendHeader(dst, bsontype.JavaScript, key), js)
+}
+
+// ReadJavaScript will read a js string from src. If there are not enough bytes it
+// will return false.
+func ReadJavaScript(src []byte) (js string, rem []byte, ok bool) { return readstring(src) }
+
+// AppendSymbol will append symbol to dst and return the extended buffer.
+func AppendSymbol(dst []byte, symbol string) []byte { return appendstring(dst, symbol) }
+
+// AppendSymbolElement will append a BSON symbol element using key and symbol to dst
+// and return the extended buffer.
+func AppendSymbolElement(dst []byte, key, symbol string) []byte {
+	return AppendSymbol(AppendHeader(dst, bsontype.Symbol, key), symbol)
+}
+
+// ReadSymbol will read a symbol string from src. If there are not enough bytes it
+// will return false.
+func ReadSymbol(src []byte) (symbol string, rem []byte, ok bool) { return readstring(src) }
+
+// AppendCodeWithScope will append code and scope to dst and return the extended buffer.
+func AppendCodeWithScope(dst []byte, code string, scope []byte) []byte {
+	length := int32(4 + 4 + len(code) + 1 + len(scope)) // length of cws, length of code, code, 0x00, scope
+	dst = appendLength(dst, length)
+
+	return append(appendstring(dst, code), scope...)
+}
+
+// AppendCodeWithScopeElement will append a BSON code with scope element using
+// key, code, and scope to dst
+// and return the extended buffer.
+func AppendCodeWithScopeElement(dst []byte, key, code string, scope []byte) []byte {
+	return AppendCodeWithScope(AppendHeader(dst, bsontype.CodeWithScope, key), code, scope)
+}
+
+// ReadCodeWithScope will read code and scope from src. If there are not enough bytes it
+// will return false.
+func ReadCodeWithScope(src []byte) (code string, scope []byte, rem []byte, ok bool) {
+	length, rem, ok := ReadLength(src)
+	if !ok || len(src) < int(length) {
+		return "", nil, src, false
+	}
+
+	code, rem, ok = readstring(rem)
+	if !ok {
+		return "", nil, src, false
+	}
+
+	scope, rem, ok = ReadDocument(rem)
+	if !ok {
+		return "", nil, src, false
+	}
+	return code, scope, rem, true
+}
+
+// AppendInt32 will append i32 to dst and return the extended buffer.
+func AppendInt32(dst []byte, i32 int32) []byte { return appendi32(dst, i32) }
+
+// AppendInt32Element will append a BSON int32 element using key and i32 to dst
+// and return the extended buffer.
+func AppendInt32Element(dst []byte, key string, i32 int32) []byte {
+	return AppendInt32(AppendHeader(dst, bsontype.Int32, key), i32)
+}
+
+// ReadInt32 will read an int32 from src. If there are not enough bytes it
+// will return false.
+func ReadInt32(src []byte) (int32, []byte, bool) { return readi32(src) }
+
+// AppendTimestamp will append t and i to dst and return the extended buffer.
+func AppendTimestamp(dst []byte, t, i uint32) []byte {
+	return appendu32(appendu32(dst, i), t) // i is the lower 4 bytes, t is the higher 4 bytes
+}
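+
+// A sketch of the resulting little-endian layout (illustrative values):
+//
+//	buf := AppendTimestamp(nil, 1, 2)
+//	// buf == []byte{0x02, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00}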
+
+// AppendTimestampElement will append a BSON timestamp element using key, t, and
+// i to dst and return the extended buffer.
+func AppendTimestampElement(dst []byte, key string, t, i uint32) []byte {
+	return AppendTimestamp(AppendHeader(dst, bsontype.Timestamp, key), t, i)
+}
+
+// ReadTimestamp will read t and i from src. If there are not enough bytes it
+// will return false.
+func ReadTimestamp(src []byte) (t, i uint32, rem []byte, ok bool) {
+	i, rem, ok = readu32(src)
+	if !ok {
+		return 0, 0, src, false
+	}
+	t, rem, ok = readu32(rem)
+	if !ok {
+		return 0, 0, src, false
+	}
+	return t, i, rem, true
+}
+
+// AppendInt64 will append i64 to dst and return the extended buffer.
+func AppendInt64(dst []byte, i64 int64) []byte { return appendi64(dst, i64) }
+
+// AppendInt64Element will append a BSON int64 element using key and i64 to dst
+// and return the extended buffer.
+func AppendInt64Element(dst []byte, key string, i64 int64) []byte {
+	return AppendInt64(AppendHeader(dst, bsontype.Int64, key), i64)
+}
+
+// ReadInt64 will read an int64 from src. If there are not enough bytes it
+// will return false.
+func ReadInt64(src []byte) (int64, []byte, bool) { return readi64(src) }
+
+// AppendDecimal128 will append d128 to dst and return the extended buffer.
+func AppendDecimal128(dst []byte, d128 primitive.Decimal128) []byte {
+	high, low := d128.GetBytes()
+	return appendu64(appendu64(dst, low), high)
+}
+
+// AppendDecimal128Element will append a BSON Decimal128 element using key and
+// d128 to dst and return the extended buffer.
+func AppendDecimal128Element(dst []byte, key string, d128 primitive.Decimal128) []byte {
+	return AppendDecimal128(AppendHeader(dst, bsontype.Decimal128, key), d128)
+}
+
+// ReadDecimal128 will read a primitive.Decimal128 from src. If there are not enough bytes it
+// will return false.
+func ReadDecimal128(src []byte) (primitive.Decimal128, []byte, bool) {
+	l, rem, ok := readu64(src)
+	if !ok {
+		return primitive.Decimal128{}, src, false
+	}
+
+	h, rem, ok := readu64(rem)
+	if !ok {
+		return primitive.Decimal128{}, src, false
+	}
+
+	return primitive.NewDecimal128(h, l), rem, true
+}
+
+// AppendMaxKeyElement will append a BSON max key element using key to dst
+// and return the extended buffer.
+func AppendMaxKeyElement(dst []byte, key string) []byte {
+	return AppendHeader(dst, bsontype.MaxKey, key)
+}
+
+// AppendMinKeyElement will append a BSON min key element using key to dst
+// and return the extended buffer.
+func AppendMinKeyElement(dst []byte, key string) []byte {
+	return AppendHeader(dst, bsontype.MinKey, key)
+}
+
+// EqualValue will return true if the two values are equal.
+func EqualValue(t1, t2 bsontype.Type, v1, v2 []byte) bool {
+	if t1 != t2 {
+		return false
+	}
+	v1, _, ok := readValue(v1, t1)
+	if !ok {
+		return false
+	}
+	v2, _, ok = readValue(v2, t2)
+	if !ok {
+		return false
+	}
+	return bytes.Equal(v1, v2)
+}
+
+// valueLength will determine the length of the next value contained in src as if it
+// is type t. The returned bool will be false if there are not enough bytes in src for
+// a value of type t.
+func valueLength(src []byte, t bsontype.Type) (int32, bool) {
+	var length int32
+	ok := true
+	switch t {
+	case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope:
+		length, _, ok = ReadLength(src)
+	case bsontype.Binary:
+		length, _, ok = ReadLength(src)
+		length += 4 + 1 // binary length + subtype byte
+	case bsontype.Boolean:
+		length = 1
+	case bsontype.DBPointer:
+		length, _, ok = ReadLength(src)
+		length += 4 + 12 // string length + ObjectID length
+	case bsontype.DateTime, bsontype.Double, bsontype.Int64, bsontype.Timestamp:
+		length = 8
+	case bsontype.Decimal128:
+		length = 16
+	case bsontype.Int32:
+		length = 4
+	case bsontype.JavaScript, bsontype.String, bsontype.Symbol:
+		length, _, ok = ReadLength(src)
+		length += 4
+	case bsontype.MaxKey, bsontype.MinKey, bsontype.Null, bsontype.Undefined:
+		length = 0
+	case bsontype.ObjectID:
+		length = 12
+	case bsontype.Regex:
+		regex := bytes.IndexByte(src, 0x00)
+		if regex < 0 {
+			ok = false
+			break
+		}
+		pattern := bytes.IndexByte(src[regex+1:], 0x00)
+		if pattern < 0 {
+			ok = false
+			break
+		}
+		length = int32(int64(regex) + 1 + int64(pattern) + 1)
+	default:
+		ok = false
+	}
+
+	return length, ok
+}
+
+func readValue(src []byte, t bsontype.Type) ([]byte, []byte, bool) {
+	length, ok := valueLength(src, t)
+	if !ok || int(length) > len(src) {
+		return nil, src, false
+	}
+
+	return src[:length], src[length:], true
+}
+
+// ReserveLength reserves the space required for a length and returns the index at which to write the length
+// and the []byte with reserved space.
+func ReserveLength(dst []byte) (int32, []byte) {
+	index := len(dst)
+	return int32(index), append(dst, 0x00, 0x00, 0x00, 0x00)
+}
+
+// UpdateLength updates the length at index with length and returns the []byte.
+func UpdateLength(dst []byte, index, length int32) []byte {
+	dst[index] = byte(length)
+	dst[index+1] = byte(length >> 8)
+	dst[index+2] = byte(length >> 16)
+	dst[index+3] = byte(length >> 24)
+	return dst
+}
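+
+// A minimal document-building sketch using the reserve/update pair above;
+// the key "x" is arbitrary:
+//
+//	idx, dst := ReserveLength(nil)                // reserve 4 bytes up front
+//	dst = AppendInt32Element(dst, "x", 1)         // append the elements
+//	dst = append(dst, 0x00)                       // document null terminator
+//	dst = UpdateLength(dst, idx, int32(len(dst))) // backfill the real length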
+
+func appendLength(dst []byte, l int32) []byte { return appendi32(dst, l) }
+
+func appendi32(dst []byte, i32 int32) []byte {
+	return append(dst, byte(i32), byte(i32>>8), byte(i32>>16), byte(i32>>24))
+}
+
+// ReadLength reads an int32 length from src and returns the length and the remaining bytes. If
+// there aren't enough bytes to read a valid length, src is returned unmodified and the returned
+// bool will be false.
+func ReadLength(src []byte) (int32, []byte, bool) { return readi32(src) }
+
+func readi32(src []byte) (int32, []byte, bool) {
+	if len(src) < 4 {
+		return 0, src, false
+	}
+
+	return (int32(src[0]) | int32(src[1])<<8 | int32(src[2])<<16 | int32(src[3])<<24), src[4:], true
+}
+
+func appendi64(dst []byte, i64 int64) []byte {
+	return append(dst,
+		byte(i64), byte(i64>>8), byte(i64>>16), byte(i64>>24),
+		byte(i64>>32), byte(i64>>40), byte(i64>>48), byte(i64>>56),
+	)
+}
+
+func readi64(src []byte) (int64, []byte, bool) {
+	if len(src) < 8 {
+		return 0, src, false
+	}
+	i64 := (int64(src[0]) | int64(src[1])<<8 | int64(src[2])<<16 | int64(src[3])<<24 |
+		int64(src[4])<<32 | int64(src[5])<<40 | int64(src[6])<<48 | int64(src[7])<<56)
+	return i64, src[8:], true
+}
+
+func appendu32(dst []byte, u32 uint32) []byte {
+	return append(dst, byte(u32), byte(u32>>8), byte(u32>>16), byte(u32>>24))
+}
+
+func readu32(src []byte) (uint32, []byte, bool) {
+	if len(src) < 4 {
+		return 0, src, false
+	}
+
+	return (uint32(src[0]) | uint32(src[1])<<8 | uint32(src[2])<<16 | uint32(src[3])<<24), src[4:], true
+}
+
+func appendu64(dst []byte, u64 uint64) []byte {
+	return append(dst,
+		byte(u64), byte(u64>>8), byte(u64>>16), byte(u64>>24),
+		byte(u64>>32), byte(u64>>40), byte(u64>>48), byte(u64>>56),
+	)
+}
+
+func readu64(src []byte) (uint64, []byte, bool) {
+	if len(src) < 8 {
+		return 0, src, false
+	}
+	u64 := (uint64(src[0]) | uint64(src[1])<<8 | uint64(src[2])<<16 | uint64(src[3])<<24 |
+		uint64(src[4])<<32 | uint64(src[5])<<40 | uint64(src[6])<<48 | uint64(src[7])<<56)
+	return u64, src[8:], true
+}
+
+// keep in sync with readcstringbytes
+func readcstring(src []byte) (string, []byte, bool) {
+	idx := bytes.IndexByte(src, 0x00)
+	if idx < 0 {
+		return "", src, false
+	}
+	return string(src[:idx]), src[idx+1:], true
+}
+
+// keep in sync with readcstring
+func readcstringbytes(src []byte) ([]byte, []byte, bool) {
+	idx := bytes.IndexByte(src, 0x00)
+	if idx < 0 {
+		return nil, src, false
+	}
+	return src[:idx], src[idx+1:], true
+}
+
+func appendstring(dst []byte, s string) []byte {
+	l := int32(len(s) + 1)
+	dst = appendLength(dst, l)
+	dst = append(dst, s...)
+	return append(dst, 0x00)
+}
+
+func readstring(src []byte) (string, []byte, bool) {
+	l, rem, ok := ReadLength(src)
+	if !ok {
+		return "", src, false
+	}
+	if len(src[4:]) < int(l) {
+		return "", src, false
+	}
+
+	return string(rem[:l-1]), rem[l:], true
+}
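+
+// As a worked example of the format handled above, the BSON string "ab" is
+// encoded as a little-endian int32 length counting the bytes after it,
+// trailing null included, followed by those bytes:
+//
+//	s, _, ok := readstring([]byte{0x03, 0x00, 0x00, 0x00, 'a', 'b', 0x00})
+//	// s == "ab", ok == true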
+
+// readLengthBytes attempts to read a length and that number of bytes. This
+// function requires that the length include the four bytes for itself.
+func readLengthBytes(src []byte) ([]byte, []byte, bool) {
+	l, _, ok := ReadLength(src)
+	if !ok {
+		return nil, src, false
+	}
+	if len(src) < int(l) {
+		return nil, src, false
+	}
+	return src[:l], src[l:], true
+}
+
+func appendBinarySubtype2(dst []byte, subtype byte, b []byte) []byte {
+	dst = appendLength(dst, int32(len(b)+4)) // The bytes we'll encode need to be 4 larger for the length bytes
+	dst = append(dst, subtype)
+	dst = appendLength(dst, int32(len(b)))
+	return append(dst, b...)
+}
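+
+// Illustrative layout for the subtype 0x02 special case above: the payload is
+// wrapped in a second, inner length, so a one-byte payload 0xFF encodes as
+//
+//	// 05 00 00 00  02  01 00 00 00  FF
+//	// outer length st  inner length payload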
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/document.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/document.go
new file mode 100644
index 0000000..e655a51
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/document.go
@@ -0,0 +1,396 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/go-stack/stack"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+// DocumentValidationError is an error type returned when attempting to validate a document.
+type DocumentValidationError string
+
+func (dve DocumentValidationError) Error() string { return string(dve) }
+
+// NewDocumentLengthError creates and returns an error for when the length of a document exceeds the
+// bytes available.
+func NewDocumentLengthError(length, rem int) error {
+	return DocumentValidationError(
+		fmt.Sprintf("document length exceeds available bytes. length=%d remainingBytes=%d", length, rem),
+	)
+}
+
+// InsufficientBytesError indicates that there were not enough bytes to read the next component.
+type InsufficientBytesError struct {
+	Source    []byte
+	Remaining []byte
+	Stack     stack.CallStack
+}
+
+// NewInsufficientBytesError creates a new InsufficientBytesError with the given Document, remaining
+// bytes, and the current stack.
+func NewInsufficientBytesError(src, rem []byte) InsufficientBytesError {
+	return InsufficientBytesError{Source: src, Remaining: rem, Stack: stack.Trace().TrimRuntime()}
+}
+
+// Error implements the error interface.
+func (ibe InsufficientBytesError) Error() string {
+	return "too few bytes to read next component"
+}
+
+// ErrorStack returns a string representing the stack at the point where the error occurred.
+func (ibe InsufficientBytesError) ErrorStack() string {
+	s := bytes.NewBufferString("too few bytes to read next component: [")
+
+	for i, call := range ibe.Stack {
+		if i != 0 {
+			s.WriteString(", ")
+		}
+
+		// go vet doesn't like %k even though it's part of stack's API, so we move the format
+		// string so it doesn't complain. (We also can't make it a constant, or go vet still
+		// complains.)
+		callFormat := "%k.%n %v"
+
+		s.WriteString(fmt.Sprintf(callFormat, call, call, call))
+	}
+
+	s.WriteRune(']')
+
+	return s.String()
+}
+
+// Equal checks that err2 is also an InsufficientBytesError.
+func (ibe InsufficientBytesError) Equal(err2 error) bool {
+	switch err2.(type) {
+	case InsufficientBytesError:
+		return true
+	default:
+		return false
+	}
+}
+
+// InvalidDepthTraversalError is returned when attempting a recursive Lookup when one component of
+// the path is neither an embedded document nor an array.
+type InvalidDepthTraversalError struct {
+	Key  string
+	Type bsontype.Type
+}
+
+func (idte InvalidDepthTraversalError) Error() string {
+	return fmt.Sprintf(
+		"attempt to traverse into %s, but it's type is %s, not %s nor %s",
+		idte.Key, idte.Type, bsontype.EmbeddedDocument, bsontype.Array,
+	)
+}
+
+// ErrMissingNull is returned when a document's last byte is not null.
+const ErrMissingNull DocumentValidationError = "document end is missing null byte"
+
+// ErrNilReader indicates that an operation was attempted on a nil io.Reader.
+var ErrNilReader = errors.New("nil reader")
+
+// ErrInvalidLength indicates that a length in a binary representation of a BSON document is invalid.
+var ErrInvalidLength = errors.New("document length is invalid")
+
+// ErrEmptyKey indicates that no key was provided to a Lookup method.
+var ErrEmptyKey = errors.New("empty key provided")
+
+// ErrElementNotFound indicates that an Element matching a certain condition does not exist.
+var ErrElementNotFound = errors.New("element not found")
+
+// ErrOutOfBounds indicates that an index provided to access something was invalid.
+var ErrOutOfBounds = errors.New("out of bounds")
+
+// Document is a raw bytes representation of a BSON document.
+type Document []byte
+
+// NewDocumentFromReader reads a document from r. This function only validates that the length is
+// correct and that the document ends with a null byte.
+func NewDocumentFromReader(r io.Reader) (Document, error) {
+	if r == nil {
+		return nil, ErrNilReader
+	}
+
+	var lengthBytes [4]byte
+
+	// ReadFull guarantees that we will have read at least len(lengthBytes) if err == nil
+	_, err := io.ReadFull(r, lengthBytes[:])
+	if err != nil {
+		return nil, err
+	}
+
+	length, _, _ := readi32(lengthBytes[:]) // ignore ok since we always have enough bytes to read a length
+	if length < 0 {
+		return nil, ErrInvalidLength
+	}
+	document := make([]byte, length)
+
+	copy(document, lengthBytes[:])
+
+	_, err = io.ReadFull(r, document[4:])
+	if err != nil {
+		return nil, err
+	}
+
+	if document[length-1] != 0x00 {
+		return nil, ErrMissingNull
+	}
+
+	return document, nil
+}
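+
+// A minimal usage sketch, reading the empty document (little-endian length 5
+// followed by the terminating null byte):
+//
+//	doc, err := NewDocumentFromReader(bytes.NewReader([]byte{0x05, 0x00, 0x00, 0x00, 0x00}))
+//	// err == nil, len(doc) == 5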
+
+// Lookup searches the document, potentially recursively, for the given key. If there are multiple
+// keys provided, this method will recurse down, as long as the top and intermediate nodes are
+// either documents or arrays. If an error occurs or if the value doesn't exist, an empty Value is
+// returned.
+func (d Document) Lookup(key ...string) Value {
+	val, _ := d.LookupErr(key...)
+	return val
+}
+
+// LookupErr is the same as Lookup, except it returns an error in addition to an empty Value.
+func (d Document) LookupErr(key ...string) (Value, error) {
+	if len(key) < 1 {
+		return Value{}, ErrEmptyKey
+	}
+	length, rem, ok := ReadLength(d)
+	if !ok {
+		return Value{}, NewInsufficientBytesError(d, rem)
+	}
+
+	length -= 4
+
+	var elem Element
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return Value{}, NewInsufficientBytesError(d, rem)
+		}
+		if elem.Key() != key[0] {
+			continue
+		}
+		if len(key) > 1 {
+			tt := bsontype.Type(elem[0])
+			switch tt {
+			case bsontype.EmbeddedDocument:
+				val, err := elem.Value().Document().LookupErr(key[1:]...)
+				if err != nil {
+					return Value{}, err
+				}
+				return val, nil
+			case bsontype.Array:
+				val, err := elem.Value().Array().LookupErr(key[1:]...)
+				if err != nil {
+					return Value{}, err
+				}
+				return val, nil
+			default:
+				return Value{}, InvalidDepthTraversalError{Key: elem.Key(), Type: tt}
+			}
+		}
+		return elem.ValueErr()
+	}
+	return Value{}, ErrElementNotFound
+}
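+
+// For instance, given a Document doc equivalent to {"a": {"b": 1}}, a
+// recursive lookup descends through the embedded document (the keys here are
+// illustrative):
+//
+//	val, err := doc.LookupErr("a", "b")
+//	// on success, val.Type == bsontype.Int32 and val.Int32() == 1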
+
+// Index searches for and retrieves the element at the given index. This method will panic if
+// the document is invalid or if the index is out of bounds.
+func (d Document) Index(index uint) Element {
+	elem, err := d.IndexErr(index)
+	if err != nil {
+		panic(err)
+	}
+	return elem
+}
+
+// IndexErr searches for and retrieves the element at the given index.
+func (d Document) IndexErr(index uint) (Element, error) {
+	length, rem, ok := ReadLength(d)
+	if !ok {
+		return nil, NewInsufficientBytesError(d, rem)
+	}
+
+	length -= 4
+
+	var current uint
+	var elem Element
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return nil, NewInsufficientBytesError(d, rem)
+		}
+		if current != index {
+			current++
+			continue
+		}
+		return elem, nil
+	}
+	return nil, ErrOutOfBounds
+}
+
+// DebugString outputs a human readable version of Document. It will attempt to stringify the
+// valid components of the document even if the entire document is not valid.
+func (d Document) DebugString() string {
+	if len(d) < 5 {
+		return "<malformed>"
+	}
+	var buf bytes.Buffer
+	buf.WriteString("Document")
+	length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length
+	buf.WriteByte('(')
+	buf.WriteString(strconv.Itoa(int(length)))
+	length -= 4
+	buf.WriteString("){")
+	var elem Element
+	var ok bool
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			buf.WriteString(fmt.Sprintf("<malformed (%d)>", length))
+			break
+		}
+		fmt.Fprintf(&buf, "%s ", elem.DebugString())
+	}
+	buf.WriteByte('}')
+
+	return buf.String()
+}
+
+// String outputs an ExtendedJSON version of Document. If the document is not valid, this method
+// returns an empty string.
+func (d Document) String() string {
+	if len(d) < 5 {
+		return ""
+	}
+	var buf bytes.Buffer
+	buf.WriteByte('{')
+
+	length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length
+
+	length -= 4
+
+	var elem Element
+	var ok bool
+	first := true
+	for length > 1 {
+		if !first {
+			buf.WriteByte(',')
+		}
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return ""
+		}
+		fmt.Fprintf(&buf, "%s", elem.String())
+		first = false
+	}
+	buf.WriteByte('}')
+
+	return buf.String()
+}
+
+// Elements returns this document as a slice of elements. The returned slice will contain valid
+// elements. If the document is not valid, the elements up to the invalid point will be returned
+// along with an error.
+func (d Document) Elements() ([]Element, error) {
+	length, rem, ok := ReadLength(d)
+	if !ok {
+		return nil, NewInsufficientBytesError(d, rem)
+	}
+
+	length -= 4
+
+	var elem Element
+	var elems []Element
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return elems, NewInsufficientBytesError(d, rem)
+		}
+		if err := elem.Validate(); err != nil {
+			return elems, err
+		}
+		elems = append(elems, elem)
+	}
+	return elems, nil
+}
+
+// Values returns this document as a slice of values. The returned slice will contain valid values.
+// If the document is not valid, the values up to the invalid point will be returned along with an
+// error.
+func (d Document) Values() ([]Value, error) {
+	length, rem, ok := ReadLength(d)
+	if !ok {
+		return nil, NewInsufficientBytesError(d, rem)
+	}
+
+	length -= 4
+
+	var elem Element
+	var vals []Value
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return vals, NewInsufficientBytesError(d, rem)
+		}
+		if err := elem.Value().Validate(); err != nil {
+			return vals, err
+		}
+		vals = append(vals, elem.Value())
+	}
+	return vals, nil
+}
+
+// Validate validates the document and ensures the elements contained within are valid.
+func (d Document) Validate() error {
+	length, rem, ok := ReadLength(d)
+	if !ok {
+		return NewInsufficientBytesError(d, rem)
+	}
+	if int(length) > len(d) {
+		return d.lengtherror(int(length), len(d))
+	}
+	if d[length-1] != 0x00 {
+		return ErrMissingNull
+	}
+
+	length -= 4
+	var elem Element
+
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return NewInsufficientBytesError(d, rem)
+		}
+		err := elem.Validate()
+		if err != nil {
+			return err
+		}
+	}
+
+	if len(rem) < 1 || rem[0] != 0x00 {
+		return ErrMissingNull
+	}
+	return nil
+}
+
+func (Document) lengtherror(length, rem int) error {
+	return DocumentValidationError(fmt.Sprintf("document length exceeds available bytes. length=%d remainingBytes=%d", length, rem))
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/element.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/element.go
new file mode 100644
index 0000000..ccf9075
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/element.go
@@ -0,0 +1,152 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+// MalformedElementError represents a class of errors that RawElement methods return.
+type MalformedElementError string
+
+func (mee MalformedElementError) Error() string { return string(mee) }
+
+// ErrElementMissingKey is returned when a RawElement is missing a key.
+const ErrElementMissingKey MalformedElementError = "element is missing key"
+
+// ErrElementMissingType is returned when a RawElement is missing a type.
+const ErrElementMissingType MalformedElementError = "element is missing type"
+
+// Element is a raw bytes representation of a BSON element.
+type Element []byte
+
+// Key returns the key for this element. If the element is not valid, this method returns an empty
+// string. If knowing if the element is valid is important, use KeyErr.
+func (e Element) Key() string {
+	key, _ := e.KeyErr()
+	return key
+}
+
+// KeyBytes returns the key for this element as a []byte. If the element is not valid, this method
+// returns nil. If knowing if the element is valid is important, use KeyBytesErr. This method
+// will not include the null byte at the end of the key in the slice of bytes.
+func (e Element) KeyBytes() []byte {
+	key, _ := e.KeyBytesErr()
+	return key
+}
+
+// KeyErr returns the key for this element, returning an error if the element is not valid.
+func (e Element) KeyErr() (string, error) {
+	key, err := e.KeyBytesErr()
+	return string(key), err
+}
+
+// KeyBytesErr returns the key for this element as a []byte, returning an error if the element is
+// not valid.
+func (e Element) KeyBytesErr() ([]byte, error) {
+	if len(e) <= 0 {
+		return nil, ErrElementMissingType
+	}
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return nil, ErrElementMissingKey
+	}
+	return e[1 : idx+1], nil
+}
+
+// Validate ensures the element is a valid BSON element.
+func (e Element) Validate() error {
+	if len(e) < 1 {
+		return ErrElementMissingType
+	}
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return ErrElementMissingKey
+	}
+	return Value{Type: bsontype.Type(e[0]), Data: e[idx+2:]}.Validate()
+}
+
+// CompareKey will compare this element's key to key. This method makes it easy to compare keys
+// without needing to allocate a string. The key may be null terminated. If a valid key cannot be
+// read this method will return false.
+func (e Element) CompareKey(key []byte) bool {
+	if len(e) < 2 {
+		return false
+	}
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return false
+	}
+	if index := bytes.IndexByte(key, 0x00); index > -1 {
+		key = key[:index]
+	}
+	return bytes.Equal(e[1:idx+1], key)
+}
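+
+// A usage sketch; the element below is the encoding of "x": int32(1), chosen
+// for illustration:
+//
+//	e := Element{0x10, 'x', 0x00, 0x01, 0x00, 0x00, 0x00}
+//	e.CompareKey([]byte("x"))     // true; no string allocation needed
+//	e.CompareKey([]byte("x\x00")) // also true; a trailing null is permitted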
+
+// Value returns the value of this element. If the element is not valid, this method returns an
+// empty Value. If knowing if the element is valid is important, use ValueErr.
+func (e Element) Value() Value {
+	val, _ := e.ValueErr()
+	return val
+}
+
+// ValueErr returns the value for this element, returning an error if the element is not valid.
+func (e Element) ValueErr() (Value, error) {
+	if len(e) <= 0 {
+		return Value{}, ErrElementMissingType
+	}
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return Value{}, ErrElementMissingKey
+	}
+
+	val, rem, exists := ReadValue(e[idx+2:], bsontype.Type(e[0]))
+	if !exists {
+		return Value{}, NewInsufficientBytesError(e, rem)
+	}
+	return val, nil
+}
+
+// String implements the fmt.Stringer interface. The output will be in extended JSON format.
+func (e Element) String() string {
+	if len(e) <= 0 {
+		return ""
+	}
+	t := bsontype.Type(e[0])
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return ""
+	}
+	key, valBytes := []byte(e[1:idx+1]), []byte(e[idx+2:])
+	val, _, valid := ReadValue(valBytes, t)
+	if !valid {
+		return ""
+	}
+	return fmt.Sprintf(`"%s": %v`, key, val)
+}
+
+// DebugString outputs a human readable version of Element. It will attempt to stringify the
+// valid components of the element even if the entire element is not valid.
+func (e Element) DebugString() string {
+	if len(e) <= 0 {
+		return "<malformed>"
+	}
+	t := bsontype.Type(e[0])
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return fmt.Sprintf(`bson.Element{[%s]<malformed>}`, t)
+	}
+	key, valBytes := []byte(e[1:idx+1]), []byte(e[idx+2:])
+	val, _, valid := ReadValue(valBytes, t)
+	if !valid {
+		return fmt.Sprintf(`bson.Element{[%s]"%s": <malformed>}`, t, key)
+	}
+	return fmt.Sprintf(`bson.Element{[%s]"%s": %v}`, t, key, val)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/tables.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/tables.go
new file mode 100644
index 0000000..9fd903f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/tables.go
@@ -0,0 +1,223 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/golang/go by The Go Authors
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package bsoncore
+
+import "unicode/utf8"
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML <script> tags, without any additional escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), the backslash character ("\"), HTML opening and closing
+// tags ("<" and ">"), and the ampersand ("&").
+var htmlSafeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      false,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      false,
+	'=':      true,
+	'>':      false,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/value.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/value.go
new file mode 100644
index 0000000..f0593d2
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/value.go
@@ -0,0 +1,901 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// ElementTypeError indicates that a method to obtain a BSON value of one type was called on a bson.Value of a different type.
+type ElementTypeError struct {
+	Method string
+	Type   bsontype.Type
+}
+
+// Error implements the error interface.
+func (ete ElementTypeError) Error() string {
+	return "Call of " + ete.Method + " on " + ete.Type.String() + " type"
+}
+
+// Value represents a BSON value with a type and raw bytes.
+type Value struct {
+	Type bsontype.Type
+	Data []byte
+}
+
+// Validate ensures the value is a valid BSON value.
+func (v Value) Validate() error {
+	_, _, valid := readValue(v.Data, v.Type)
+	if !valid {
+		return NewInsufficientBytesError(v.Data, v.Data)
+	}
+	return nil
+}
+
+// IsNumber returns true if the type of v is a numeric BSON type.
+func (v Value) IsNumber() bool {
+	switch v.Type {
+	case bsontype.Double, bsontype.Int32, bsontype.Int64, bsontype.Decimal128:
+		return true
+	default:
+		return false
+	}
+}
+
+// AsInt32 returns a BSON number as an int32. If the BSON type is not a numeric one, this method
+// will panic.
+//
+// TODO(skriptble): Add support for Decimal128.
+func (v Value) AsInt32() int32 { return 0 }
+
+// AsInt32OK functions the same as AsInt32 but returns a boolean instead of panicking. False
+// indicates an error.
+//
+// TODO(skriptble): Add support for Decimal128.
+func (v Value) AsInt32OK() (int32, bool) { return 0, false }
+
+// AsInt64 returns a BSON number as an int64. If the BSON type is not a numeric one, this method
+// will panic.
+//
+// TODO(skriptble): Add support for Decimal128.
+func (v Value) AsInt64() int64 { return 0 }
+
+// AsInt64OK functions the same as AsInt64 but returns a boolean instead of panicking. False
+// indicates an error.
+//
+// TODO(skriptble): Add support for Decimal128.
+func (v Value) AsInt64OK() (int64, bool) { return 0, false }
+
+// AsFloat64 returns a BSON number as a float64. If the BSON type is not a numeric one, this method
+// will panic.
+//
+// TODO(skriptble): Add support for Decimal128.
+func (v Value) AsFloat64() float64 { return 0 }
+
+// AsFloat64OK functions the same as AsFloat64 but returns a boolean instead of panicking. False
+// indicates an error.
+//
+// TODO(skriptble): Add support for Decimal128.
+func (v Value) AsFloat64OK() (float64, bool) { return 0, false }
+
+// Add will add this value to another. This is currently only implemented for strings and numbers.
+// If either value is a string, the other type is coerced into a string and added to the other.
+//
+// This method will alter v and will attempt to reuse the []byte of v. If the []byte is too small,
+// it will be expanded.
+func (v *Value) Add(v2 Value) error { return nil }
+
+// Equal compares v to v2 and returns true if they are equal.
+func (v Value) Equal(v2 Value) bool {
+	if v.Type != v2.Type {
+		return false
+	}
+
+	return bytes.Equal(v.Data, v2.Data)
+}
+
+// String implements the fmt.Stringer interface. This method will return values in extended JSON
+// format. If the value is not valid, this returns an empty string.
+func (v Value) String() string {
+	switch v.Type {
+	case bsontype.Double:
+		f64, ok := v.DoubleOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$numberDouble":"%s"}`, formatDouble(f64))
+	case bsontype.String:
+		str, ok := v.StringValueOK()
+		if !ok {
+			return ""
+		}
+		return escapeString(str)
+	case bsontype.EmbeddedDocument:
+		doc, ok := v.DocumentOK()
+		if !ok {
+			return ""
+		}
+		return doc.String()
+	case bsontype.Array:
+		arr, ok := v.ArrayOK()
+		if !ok {
+			return ""
+		}
+		return docAsArray(arr, false)
+	case bsontype.Binary:
+		subtype, data, ok := v.BinaryOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$binary":{"base64":"%s","subType":"%02x"}}`, base64.StdEncoding.EncodeToString(data), subtype)
+	case bsontype.Undefined:
+		return `{"$undefined":true}`
+	case bsontype.ObjectID:
+		oid, ok := v.ObjectIDOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$oid":%s}`, oid.Hex())
+	case bsontype.Boolean:
+		b, ok := v.BooleanOK()
+		if !ok {
+			return ""
+		}
+		return strconv.FormatBool(b)
+	case bsontype.DateTime:
+		dt, ok := v.DateTimeOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$date":{"$numberLong":"%d"}}`, dt)
+	case bsontype.Null:
+		return "null"
+	case bsontype.Regex:
+		pattern, options, ok := v.RegexOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(
+			`{"$regularExpression":{"pattern":%s,"options":"%s"}}`,
+			escapeString(pattern), sortStringAlphebeticAscending(options),
+		)
+	case bsontype.DBPointer:
+		ns, pointer, ok := v.DBPointerOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$dbPointer":{"$ref":%s,"$id":{"$oid":"%s"}}}`, escapeString(ns), pointer.Hex())
+	case bsontype.JavaScript:
+		js, ok := v.JavaScriptOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$code":%s}`, escapeString(js))
+	case bsontype.Symbol:
+		symbol, ok := v.SymbolOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$symbol":%s}`, escapeString(symbol))
+	case bsontype.CodeWithScope:
+		code, scope, ok := v.CodeWithScopeOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$code":%s,"$scope":%s}`, code, scope)
+	case bsontype.Int32:
+		i32, ok := v.Int32OK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$numberInt":"%d"}`, i32)
+	case bsontype.Timestamp:
+		t, i, ok := v.TimestampOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$timestamp":{"t":"%s","i":"%s"}}`, strconv.FormatUint(uint64(t), 10), strconv.FormatUint(uint64(i), 10))
+	case bsontype.Int64:
+		i64, ok := v.Int64OK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$numberLong":"%d"}`, i64)
+	case bsontype.Decimal128:
+		d128, ok := v.Decimal128OK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$numberDecimal":"%s"}`, d128.String())
+	case bsontype.MinKey:
+		return `{"$minKey":1}`
+	case bsontype.MaxKey:
+		return `{"$maxKey":1}`
+	default:
+		return ""
+	}
+}
+
+// DebugString outputs a human readable version of Value. It will attempt to stringify the
+// valid components of the value even if the entire value is not valid.
+func (v Value) DebugString() string {
+	switch v.Type {
+	case bsontype.String:
+		str, ok := v.StringValueOK()
+		if !ok {
+			return "<malformed>"
+		}
+		return escapeString(str)
+	case bsontype.EmbeddedDocument:
+		doc, ok := v.DocumentOK()
+		if !ok {
+			return "<malformed>"
+		}
+		return doc.DebugString()
+	case bsontype.Array:
+		arr, ok := v.ArrayOK()
+		if !ok {
+			return "<malformed>"
+		}
+		return docAsArray(arr, true)
+	case bsontype.CodeWithScope:
+		code, scope, ok := v.CodeWithScopeOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$code":%s,"$scope":%s}`, code, scope.DebugString())
+	default:
+		str := v.String()
+		if str == "" {
+			return "<malformed>"
+		}
+		return str
+	}
+}
+
+// Double returns the float64 value this Value represents.
+// It panics if v's BSON type is not bsontype.Double.
+func (v Value) Double() float64 {
+	if v.Type != bsontype.Double {
+		panic(ElementTypeError{"bsoncore.Value.Double", v.Type})
+	}
+	f64, _, ok := ReadDouble(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return f64
+}
+
+// DoubleOK is the same as Double, but returns a boolean instead of panicking.
+func (v Value) DoubleOK() (float64, bool) {
+	if v.Type != bsontype.Double {
+		return 0, false
+	}
+	f64, _, ok := ReadDouble(v.Data)
+	if !ok {
+		return 0, false
+	}
+	return f64, true
+}
+
+// StringValue returns the string value this Value represents.
+// It panics if v's BSON type is not bsontype.String.
+//
+// NOTE: This method is called StringValue to avoid a collision with the String method which
+// implements the fmt.Stringer interface.
+func (v Value) StringValue() string {
+	if v.Type != bsontype.String {
+		panic(ElementTypeError{"bsoncore.Value.StringValue", v.Type})
+	}
+	str, _, ok := ReadString(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return str
+}
+
+// StringValueOK is the same as StringValue, but returns a boolean instead of
+// panicking.
+func (v Value) StringValueOK() (string, bool) {
+	if v.Type != bsontype.String {
+		return "", false
+	}
+	str, _, ok := ReadString(v.Data)
+	if !ok {
+		return "", false
+	}
+	return str, true
+}
+
+// Document returns the BSON document the Value represents as a Document. It panics if the
+// value is a BSON type other than document.
+func (v Value) Document() Document {
+	if v.Type != bsontype.EmbeddedDocument {
+		panic(ElementTypeError{"bsoncore.Value.Document", v.Type})
+	}
+	doc, _, ok := ReadDocument(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return doc
+}
+
+// DocumentOK is the same as Document, except it returns a boolean
+// instead of panicking.
+func (v Value) DocumentOK() (Document, bool) {
+	if v.Type != bsontype.EmbeddedDocument {
+		return nil, false
+	}
+	doc, _, ok := ReadDocument(v.Data)
+	if !ok {
+		return nil, false
+	}
+	return doc, true
+}
+
+// Array returns the BSON array the Value represents as an Array. It panics if the
+// value is a BSON type other than array.
+func (v Value) Array() Document {
+	if v.Type != bsontype.Array {
+		panic(ElementTypeError{"bsoncore.Value.Array", v.Type})
+	}
+	arr, _, ok := ReadArray(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return arr
+}
+
+// ArrayOK is the same as Array, except it returns a boolean instead
+// of panicking.
+func (v Value) ArrayOK() (Document, bool) {
+	if v.Type != bsontype.Array {
+		return nil, false
+	}
+	arr, _, ok := ReadArray(v.Data)
+	if !ok {
+		return nil, false
+	}
+	return arr, true
+}
+
+// Binary returns the BSON binary value the Value represents. It panics if the value is a BSON type
+// other than binary.
+func (v Value) Binary() (subtype byte, data []byte) {
+	if v.Type != bsontype.Binary {
+		panic(ElementTypeError{"bsoncore.Value.Binary", v.Type})
+	}
+	subtype, data, _, ok := ReadBinary(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return subtype, data
+}
+
+// BinaryOK is the same as Binary, except it returns a boolean instead of
+// panicking.
+func (v Value) BinaryOK() (subtype byte, data []byte, ok bool) {
+	if v.Type != bsontype.Binary {
+		return 0x00, nil, false
+	}
+	subtype, data, _, ok = ReadBinary(v.Data)
+	if !ok {
+		return 0x00, nil, false
+	}
+	return subtype, data, true
+}
+
+// ObjectID returns the BSON objectid value the Value represents. It panics if the value is a BSON
+// type other than objectid.
+func (v Value) ObjectID() primitive.ObjectID {
+	if v.Type != bsontype.ObjectID {
+		panic(ElementTypeError{"bsoncore.Value.ObjectID", v.Type})
+	}
+	oid, _, ok := ReadObjectID(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return oid
+}
+
+// ObjectIDOK is the same as ObjectID, except it returns a boolean instead of
+// panicking.
+func (v Value) ObjectIDOK() (primitive.ObjectID, bool) {
+	if v.Type != bsontype.ObjectID {
+		return primitive.ObjectID{}, false
+	}
+	oid, _, ok := ReadObjectID(v.Data)
+	if !ok {
+		return primitive.ObjectID{}, false
+	}
+	return oid, true
+}
+
+// Boolean returns the boolean value the Value represents. It panics if the
+// value is a BSON type other than boolean.
+func (v Value) Boolean() bool {
+	if v.Type != bsontype.Boolean {
+		panic(ElementTypeError{"bsoncore.Value.Boolean", v.Type})
+	}
+	b, _, ok := ReadBoolean(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return b
+}
+
+// BooleanOK is the same as Boolean, except it returns a boolean instead of
+// panicking.
+func (v Value) BooleanOK() (bool, bool) {
+	if v.Type != bsontype.Boolean {
+		return false, false
+	}
+	b, _, ok := ReadBoolean(v.Data)
+	if !ok {
+		return false, false
+	}
+	return b, true
+}
+
+// DateTime returns the BSON datetime value the Value represents, in
+// milliseconds since the Unix epoch. It panics if the value is a BSON type other than datetime.
+func (v Value) DateTime() int64 {
+	if v.Type != bsontype.DateTime {
+		panic(ElementTypeError{"bsoncore.Value.DateTime", v.Type})
+	}
+	dt, _, ok := ReadDateTime(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return dt
+}
+
+// DateTimeOK is the same as DateTime, except it returns a boolean instead of
+// panicking.
+func (v Value) DateTimeOK() (int64, bool) {
+	if v.Type != bsontype.DateTime {
+		return 0, false
+	}
+	dt, _, ok := ReadDateTime(v.Data)
+	if !ok {
+		return 0, false
+	}
+	return dt, true
+}
+
+// Time returns the BSON datetime value the Value represents. It panics if the value is a BSON
+// type other than datetime.
+func (v Value) Time() time.Time {
+	if v.Type != bsontype.DateTime {
+		panic(ElementTypeError{"bsoncore.Value.Time", v.Type})
+	}
+	dt, _, ok := ReadDateTime(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return time.Unix(int64(dt)/1000, int64(dt)%1000*1000000)
+}
+
+// TimeOK is the same as Time, except it returns a boolean instead of
+// panicking.
+func (v Value) TimeOK() (time.Time, bool) {
+	if v.Type != bsontype.DateTime {
+		return time.Time{}, false
+	}
+	dt, _, ok := ReadDateTime(v.Data)
+	if !ok {
+		return time.Time{}, false
+	}
+	return time.Unix(int64(dt)/1000, int64(dt)%1000*1000000), true
+}
+
+// Regex returns the BSON regex value the Value represents. It panics if the value is a BSON
+// type other than regex.
+func (v Value) Regex() (pattern, options string) {
+	if v.Type != bsontype.Regex {
+		panic(ElementTypeError{"bsoncore.Value.Regex", v.Type})
+	}
+	pattern, options, _, ok := ReadRegex(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return pattern, options
+}
+
+// RegexOK is the same as Regex, except it returns a boolean instead of
+// panicking.
+func (v Value) RegexOK() (pattern, options string, ok bool) {
+	if v.Type != bsontype.Regex {
+		return "", "", false
+	}
+	pattern, options, _, ok = ReadRegex(v.Data)
+	if !ok {
+		return "", "", false
+	}
+	return pattern, options, true
+}
+
+// DBPointer returns the BSON dbpointer value the Value represents. It panics if the value is a BSON
+// type other than DBPointer.
+func (v Value) DBPointer() (string, primitive.ObjectID) {
+	if v.Type != bsontype.DBPointer {
+		panic(ElementTypeError{"bsoncore.Value.DBPointer", v.Type})
+	}
+	ns, pointer, _, ok := ReadDBPointer(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return ns, pointer
+}
+
+// DBPointerOK is the same as DBPointer, except that it returns a boolean
+// instead of panicking.
+func (v Value) DBPointerOK() (string, primitive.ObjectID, bool) {
+	if v.Type != bsontype.DBPointer {
+		return "", primitive.ObjectID{}, false
+	}
+	ns, pointer, _, ok := ReadDBPointer(v.Data)
+	if !ok {
+		return "", primitive.ObjectID{}, false
+	}
+	return ns, pointer, true
+}
+
+// JavaScript returns the BSON JavaScript code value the Value represents. It panics if the value is
+// a BSON type other than JavaScript code.
+func (v Value) JavaScript() string {
+	if v.Type != bsontype.JavaScript {
+		panic(ElementTypeError{"bsoncore.Value.JavaScript", v.Type})
+	}
+	js, _, ok := ReadJavaScript(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return js
+}
+
+// JavaScriptOK is the same as JavaScript, except that it returns a boolean
+// instead of panicking.
+func (v Value) JavaScriptOK() (string, bool) {
+	if v.Type != bsontype.JavaScript {
+		return "", false
+	}
+	js, _, ok := ReadJavaScript(v.Data)
+	if !ok {
+		return "", false
+	}
+	return js, true
+}
+
+// Symbol returns the BSON symbol value the Value represents. It panics if the value is a BSON
+// type other than symbol.
+func (v Value) Symbol() string {
+	if v.Type != bsontype.Symbol {
+		panic(ElementTypeError{"bsoncore.Value.Symbol", v.Type})
+	}
+	symbol, _, ok := ReadSymbol(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return symbol
+}
+
+// SymbolOK is the same as Symbol, except that it returns a boolean
+// instead of panicking.
+func (v Value) SymbolOK() (string, bool) {
+	if v.Type != bsontype.Symbol {
+		return "", false
+	}
+	symbol, _, ok := ReadSymbol(v.Data)
+	if !ok {
+		return "", false
+	}
+	return symbol, true
+}
+
+// CodeWithScope returns the BSON JavaScript code with scope the Value represents.
+// It panics if the value is a BSON type other than JavaScript code with scope.
+func (v Value) CodeWithScope() (string, Document) {
+	if v.Type != bsontype.CodeWithScope {
+		panic(ElementTypeError{"bsoncore.Value.CodeWithScope", v.Type})
+	}
+	code, scope, _, ok := ReadCodeWithScope(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return code, scope
+}
+
+// CodeWithScopeOK is the same as CodeWithScope, except that it returns a boolean instead of
+// panicking.
+func (v Value) CodeWithScopeOK() (string, Document, bool) {
+	if v.Type != bsontype.CodeWithScope {
+		return "", nil, false
+	}
+	code, scope, _, ok := ReadCodeWithScope(v.Data)
+	if !ok {
+		return "", nil, false
+	}
+	return code, scope, true
+}
+
+// Int32 returns the int32 the Value represents. It panics if the value is a BSON type other than
+// int32.
+func (v Value) Int32() int32 {
+	if v.Type != bsontype.Int32 {
+		panic(ElementTypeError{"bsoncore.Value.Int32", v.Type})
+	}
+	i32, _, ok := ReadInt32(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return i32
+}
+
+// Int32OK is the same as Int32, except that it returns a boolean instead of
+// panicking.
+func (v Value) Int32OK() (int32, bool) {
+	if v.Type != bsontype.Int32 {
+		return 0, false
+	}
+	i32, _, ok := ReadInt32(v.Data)
+	if !ok {
+		return 0, false
+	}
+	return i32, true
+}
+
+// Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a
+// BSON type other than timestamp.
+func (v Value) Timestamp() (t, i uint32) {
+	if v.Type != bsontype.Timestamp {
+		panic(ElementTypeError{"bsoncore.Value.Timestamp", v.Type})
+	}
+	t, i, _, ok := ReadTimestamp(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return t, i
+}
+
+// TimestampOK is the same as Timestamp, except that it returns a boolean
+// instead of panicking.
+func (v Value) TimestampOK() (t, i uint32, ok bool) {
+	if v.Type != bsontype.Timestamp {
+		return 0, 0, false
+	}
+	t, i, _, ok = ReadTimestamp(v.Data)
+	if !ok {
+		return 0, 0, false
+	}
+	return t, i, true
+}
+
+// Int64 returns the int64 the Value represents. It panics if the value is a BSON type other than
+// int64.
+func (v Value) Int64() int64 {
+	if v.Type != bsontype.Int64 {
+		panic(ElementTypeError{"bsoncore.Value.Int64", v.Type})
+	}
+	i64, _, ok := ReadInt64(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return i64
+}
+
+// Int64OK is the same as Int64, except that it returns a boolean instead of
+// panicking.
+func (v Value) Int64OK() (int64, bool) {
+	if v.Type != bsontype.Int64 {
+		return 0, false
+	}
+	i64, _, ok := ReadInt64(v.Data)
+	if !ok {
+		return 0, false
+	}
+	return i64, true
+}
+
+// Decimal128 returns the decimal the Value represents. It panics if the value is a BSON type other than
+// decimal.
+func (v Value) Decimal128() primitive.Decimal128 {
+	if v.Type != bsontype.Decimal128 {
+		panic(ElementTypeError{"bsoncore.Value.Decimal128", v.Type})
+	}
+	d128, _, ok := ReadDecimal128(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return d128
+}
+
+// Decimal128OK is the same as Decimal128, except that it returns a boolean
+// instead of panicking.
+func (v Value) Decimal128OK() (primitive.Decimal128, bool) {
+	if v.Type != bsontype.Decimal128 {
+		return primitive.Decimal128{}, false
+	}
+	d128, _, ok := ReadDecimal128(v.Data)
+	if !ok {
+		return primitive.Decimal128{}, false
+	}
+	return d128, true
+}
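+
+// The accessors above all follow the same pattern: the plain method panics on a
+// type mismatch or truncated data, while the *OK variant reports failure through
+// a boolean instead. A minimal sketch:
+//
+//	if i32, ok := val.Int32OK(); ok {
+//		fmt.Println(i32) // safe: val holds a BSON int32
+//	}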
+
+var hexChars = "0123456789abcdef"
+
+func escapeString(s string) string {
+	escapeHTML := true
+	var buf bytes.Buffer
+	buf.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
+				i++
+				continue
+			}
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				buf.WriteByte('\\')
+				buf.WriteByte(b)
+			case '\n':
+				buf.WriteByte('\\')
+				buf.WriteByte('n')
+			case '\r':
+				buf.WriteByte('\\')
+				buf.WriteByte('r')
+			case '\t':
+				buf.WriteByte('\\')
+				buf.WriteByte('t')
+			case '\b':
+				buf.WriteByte('\\')
+				buf.WriteByte('b')
+			case '\f':
+				buf.WriteByte('\\')
+				buf.WriteByte('f')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				buf.WriteString(`\u00`)
+				buf.WriteByte(hexChars[b>>4])
+				buf.WriteByte(hexChars[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\u202`)
+			buf.WriteByte(hexChars[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		buf.WriteString(s[start:])
+	}
+	buf.WriteByte('"')
+	return buf.String()
+}
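+
+// Illustrative behavior of escapeString (a sketch; outputs follow the switch
+// above): quotes and backslashes get a backslash escape, and HTML-relevant bytes
+// such as '<' are emitted as \u00XX sequences:
+//
+//	escapeString(`a"b<`) == `"a\"b\u003c"`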
+
+func formatDouble(f float64) string {
+	var s string
+	if math.IsInf(f, 1) {
+		s = "Infinity"
+	} else if math.IsInf(f, -1) {
+		s = "-Infinity"
+	} else if math.IsNaN(f) {
+		s = "NaN"
+	} else {
+		// Print exactly one decimal place for integers; otherwise, print as many as are
+		// necessary to represent the value exactly.
+		s = strconv.FormatFloat(f, 'G', -1, 64)
+		if !strings.ContainsRune(s, '.') {
+			s += ".0"
+		}
+	}
+
+	return s
+}
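+
+// Illustrative outputs for formatDouble (a sketch):
+//
+//	formatDouble(3)            // "3.0" (integral values get one decimal place)
+//	formatDouble(0.5)          // "0.5"
+//	formatDouble(math.NaN())   // "NaN"
+//	formatDouble(math.Inf(-1)) // "-Infinity"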
+
+type sortableString []rune
+
+func (ss sortableString) Len() int {
+	return len(ss)
+}
+
+func (ss sortableString) Less(i, j int) bool {
+	return ss[i] < ss[j]
+}
+
+func (ss sortableString) Swap(i, j int) {
+	oldI := ss[i]
+	ss[i] = ss[j]
+	ss[j] = oldI
+}
+
+func sortStringAlphebeticAscending(s string) string {
+	ss := sortableString([]rune(s))
+	sort.Sort(ss)
+	return string([]rune(ss))
+}
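+
+// For example (a sketch): sortStringAlphebeticAscending("banana") returns "aaabnn",
+// sorting by rune value, which is ascending alphabetical order for ASCII letters.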
+
+func docAsArray(d Document, debug bool) string {
+	if len(d) < 5 {
+		return ""
+	}
+	var buf bytes.Buffer
+	buf.WriteByte('[')
+
+	length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length
+
+	length -= 4
+
+	var elem Element
+	var ok bool
+	first := true
+	for length > 1 {
+		if !first {
+			buf.WriteByte(',')
+		}
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return ""
+		}
+		if debug {
+			fmt.Fprintf(&buf, "%s ", elem.Value().DebugString())
+		} else {
+			fmt.Fprintf(&buf, "%s", elem.Value())
+		}
+		first = false
+	}
+	buf.WriteByte(']')
+
+	return buf.String()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/constructor.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/constructor.go
new file mode 100644
index 0000000..28374ba
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/constructor.go
@@ -0,0 +1,166 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"encoding/binary"
+	"math"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// IDoc is the interface implemented by Doc and MDoc. It allows either of these types to be provided
+// to the Document function to create a Value.
+type IDoc interface {
+	idoc()
+}
+
+// Double constructs a BSON double Value.
+func Double(f64 float64) Val {
+	v := Val{t: bsontype.Double}
+	binary.LittleEndian.PutUint64(v.bootstrap[0:8], math.Float64bits(f64))
+	return v
+}
+
+// String constructs a BSON string Value.
+func String(str string) Val { return Val{t: bsontype.String}.writestring(str) }
+
+// Document constructs a Value from the given IDoc. If nil is provided, a BSON Null value will be
+// returned.
+func Document(doc IDoc) Val {
+	var v Val
+	switch tt := doc.(type) {
+	case Doc:
+		if tt == nil {
+			v.t = bsontype.Null
+			break
+		}
+		v.t = bsontype.EmbeddedDocument
+		v.primitive = tt
+	case MDoc:
+		if tt == nil {
+			v.t = bsontype.Null
+			break
+		}
+		v.t = bsontype.EmbeddedDocument
+		v.primitive = tt
+	default:
+		v.t = bsontype.Null
+	}
+	return v
+}
+
+// Array constructs a Value from arr. If arr is nil, a BSON Null value is returned.
+func Array(arr Arr) Val {
+	if arr == nil {
+		return Val{t: bsontype.Null}
+	}
+	return Val{t: bsontype.Array, primitive: arr}
+}
+
+// Binary constructs a BSON binary Value.
+func Binary(subtype byte, data []byte) Val {
+	return Val{t: bsontype.Binary, primitive: primitive.Binary{Subtype: subtype, Data: data}}
+}
+
+// Undefined constructs a BSON undefined Value.
+func Undefined() Val { return Val{t: bsontype.Undefined} }
+
+// ObjectID constructs a BSON objectid Value.
+func ObjectID(oid primitive.ObjectID) Val {
+	v := Val{t: bsontype.ObjectID}
+	copy(v.bootstrap[0:12], oid[:])
+	return v
+}
+
+// Boolean constructs a BSON boolean Value.
+func Boolean(b bool) Val {
+	v := Val{t: bsontype.Boolean}
+	if b {
+		v.bootstrap[0] = 0x01
+	}
+	return v
+}
+
+// DateTime constructs a BSON datetime Value.
+func DateTime(dt int64) Val { return Val{t: bsontype.DateTime}.writei64(dt) }
+
+// Time constructs a BSON datetime Value.
+func Time(t time.Time) Val {
+	return Val{t: bsontype.DateTime}.writei64(t.Unix()*1e3 + int64(t.Nanosecond()/1e6))
+}
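+
+// Time stores the same representation as DateTime: milliseconds since the Unix
+// epoch. For example (a sketch):
+//
+//	Time(time.Unix(1, 500000000)) // equivalent to DateTime(1500)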
+
+// Null constructs a BSON null Value.
+func Null() Val { return Val{t: bsontype.Null} }
+
+// Regex constructs a BSON regex Value.
+func Regex(pattern, options string) Val {
+	regex := primitive.Regex{Pattern: pattern, Options: options}
+	return Val{t: bsontype.Regex, primitive: regex}
+}
+
+// DBPointer constructs a BSON dbpointer Value.
+func DBPointer(ns string, ptr primitive.ObjectID) Val {
+	dbptr := primitive.DBPointer{DB: ns, Pointer: ptr}
+	return Val{t: bsontype.DBPointer, primitive: dbptr}
+}
+
+// JavaScript constructs a BSON javascript Value.
+func JavaScript(js string) Val {
+	return Val{t: bsontype.JavaScript}.writestring(js)
+}
+
+// Symbol constructs a BSON symbol Value.
+func Symbol(symbol string) Val {
+	return Val{t: bsontype.Symbol}.writestring(symbol)
+}
+
+// CodeWithScope constructs a BSON code with scope Value.
+func CodeWithScope(code string, scope IDoc) Val {
+	cws := primitive.CodeWithScope{Code: primitive.JavaScript(code), Scope: scope}
+	return Val{t: bsontype.CodeWithScope, primitive: cws}
+}
+
+// Int32 constructs a BSON int32 Value.
+func Int32(i32 int32) Val {
+	v := Val{t: bsontype.Int32}
+	v.bootstrap[0] = byte(i32)
+	v.bootstrap[1] = byte(i32 >> 8)
+	v.bootstrap[2] = byte(i32 >> 16)
+	v.bootstrap[3] = byte(i32 >> 24)
+	return v
+}
+
+// Timestamp constructs a BSON timestamp Value.
+func Timestamp(t, i uint32) Val {
+	v := Val{t: bsontype.Timestamp}
+	v.bootstrap[0] = byte(i)
+	v.bootstrap[1] = byte(i >> 8)
+	v.bootstrap[2] = byte(i >> 16)
+	v.bootstrap[3] = byte(i >> 24)
+	v.bootstrap[4] = byte(t)
+	v.bootstrap[5] = byte(t >> 8)
+	v.bootstrap[6] = byte(t >> 16)
+	v.bootstrap[7] = byte(t >> 24)
+	return v
+}
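+
+// The layout above matches the BSON wire format for timestamps: the increment i
+// occupies the low 4 bytes and the seconds value t the high 4 bytes, both
+// little-endian. For example (a sketch), Timestamp(42, 1) stores i=1 in
+// bootstrap[0:4] and t=42 in bootstrap[4:8].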
+
+// Int64 constructs a BSON int64 Value.
+func Int64(i64 int64) Val { return Val{t: bsontype.Int64}.writei64(i64) }
+
+// Decimal128 constructs a BSON decimal128 Value.
+func Decimal128(d128 primitive.Decimal128) Val {
+	return Val{t: bsontype.Decimal128, primitive: d128}
+}
+
+// MinKey constructs a BSON minkey Value.
+func MinKey() Val { return Val{t: bsontype.MinKey} }
+
+// MaxKey constructs a BSON maxkey Value.
+func MaxKey() Val { return Val{t: bsontype.MaxKey} }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/document.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/document.go
new file mode 100644
index 0000000..f2209a0
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/document.go
@@ -0,0 +1,305 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// ErrNilDocument indicates that an operation was attempted on a nil *bson.Document.
+var ErrNilDocument = errors.New("document is nil")
+
+// KeyNotFound is an error type returned from the Lookup methods on Doc and MDoc. This type
+// contains information about which key was not found, and whether it was truly missing or whether
+// a component of the key other than the last was not a document or an array.
+type KeyNotFound struct {
+	Key   []string      // The keys that were searched for.
+	Depth uint          // Which key was either not found or was of an incorrect type.
+	Type  bsontype.Type // The type of the element that was found but was of an incorrect type.
+}
+
+func (knf KeyNotFound) Error() string {
+	depth := knf.Depth
+	if depth >= uint(len(knf.Key)) {
+		depth = uint(len(knf.Key)) - 1
+	}
+
+	if len(knf.Key) == 0 {
+		return "no keys were provided for lookup"
+	}
+
+	if knf.Type != bsontype.Type(0) {
+		return fmt.Sprintf(`key "%s" was found but was not valid to traverse BSON type %s`, knf.Key[depth], knf.Type)
+	}
+
+	return fmt.Sprintf(`key "%s" was not found`, knf.Key[depth])
+}
+
+// Doc is a type safe, concise BSON document representation.
+type Doc []Elem
+
+// ReadDoc will create a Doc using the provided slice of bytes. If the
+// slice of bytes is not a valid BSON document, this method will return an error.
+func ReadDoc(b []byte) (Doc, error) {
+	doc := make(Doc, 0)
+	err := doc.UnmarshalBSON(b)
+	if err != nil {
+		return nil, err
+	}
+	return doc, nil
+}
+
+// Copy makes a shallow copy of this document.
+func (d Doc) Copy() Doc {
+	d2 := make(Doc, len(d))
+	copy(d2, d)
+	return d2
+}
+
+// Append adds an element to the end of the document, creating it from the key and value provided.
+func (d Doc) Append(key string, val Val) Doc {
+	return append(d, Elem{Key: key, Value: val})
+}
+
+// Prepend adds an element to the beginning of the document, creating it from the key and value provided.
+func (d Doc) Prepend(key string, val Val) Doc {
+	// TODO: should we just modify d itself instead of doing an alloc here?
+	return append(Doc{{Key: key, Value: val}}, d...)
+}
+
+// Set replaces an element of a document. If an element with a matching key is
+// found, the element will be replaced with the one provided. If the document
+// does not have an element with that key, the element is appended to the
+// document instead.
+func (d Doc) Set(key string, val Val) Doc {
+	idx := d.IndexOf(key)
+	if idx == -1 {
+		return append(d, Elem{Key: key, Value: val})
+	}
+	d[idx] = Elem{Key: key, Value: val}
+	return d
+}
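+
+// Illustrative usage of the builder-style methods above (a sketch); note that the
+// returned Doc must be reassigned, since append may reallocate:
+//
+//	d := Doc{{Key: "a", Value: Int32(1)}}
+//	d = d.Set("a", Int32(2))    // replaces the existing "a" element
+//	d = d.Set("b", String("x")) // no "b" element yet, so it is appended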
+
+// IndexOf returns the index of the first element with a key of key, or -1 if no element with that
+// key was found.
+func (d Doc) IndexOf(key string) int {
+	for i, e := range d {
+		if e.Key == key {
+			return i
+		}
+	}
+	return -1
+}
+
+// Delete removes the element with key if it exists and returns the updated Doc.
+func (d Doc) Delete(key string) Doc {
+	idx := d.IndexOf(key)
+	if idx == -1 {
+		return d
+	}
+	return append(d[:idx], d[idx+1:]...)
+}
+
+// Lookup searches the document and potentially subdocuments or arrays for the
+// provided key. Each key provided to this method represents a layer of depth.
+//
+// This method will return an empty Value if the key does not exist. To know if the key actually
+// exists, use LookupErr.
+func (d Doc) Lookup(key ...string) Val {
+	val, _ := d.LookupErr(key...)
+	return val
+}
+
+// LookupErr searches the document and potentially subdocuments or arrays for the
+// provided key. Each key provided to this method represents a layer of depth.
+func (d Doc) LookupErr(key ...string) (Val, error) {
+	elem, err := d.LookupElementErr(key...)
+	return elem.Value, err
+}
+
+// LookupElement searches the document and potentially subdocuments or arrays for the
+// provided key. Each key provided to this method represents a layer of depth.
+//
+// This method will return an empty Element if the key does not exist. To know if the key actually
+// exists, use LookupElementErr.
+func (d Doc) LookupElement(key ...string) Elem {
+	elem, _ := d.LookupElementErr(key...)
+	return elem
+}
+
+// LookupElementErr searches the document and potentially subdocuments for the
+// provided key. Each key provided to this method represents a layer of depth.
+func (d Doc) LookupElementErr(key ...string) (Elem, error) {
+	// KeyNotFound operates by being created where the error happens and then having its Depth
+	// incremented by 1 as each function unwinds. Whenever this function returns a KeyNotFound, it
+	// also assigns the full key slice it received to the error's Key field. This ensures that both
+	// the proper depth and the proper keys are reported.
+	if len(key) == 0 {
+		return Elem{}, KeyNotFound{Key: key}
+	}
+
+	var elem Elem
+	var err error
+	idx := d.IndexOf(key[0])
+	if idx == -1 {
+		return Elem{}, KeyNotFound{Key: key}
+	}
+
+	elem = d[idx]
+	if len(key) == 1 {
+		return elem, nil
+	}
+
+	switch elem.Value.Type() {
+	case bsontype.EmbeddedDocument:
+		switch tt := elem.Value.primitive.(type) {
+		case Doc:
+			elem, err = tt.LookupElementErr(key[1:]...)
+		case MDoc:
+			elem, err = tt.LookupElementErr(key[1:]...)
+		}
+	default:
+		return Elem{}, KeyNotFound{Type: elem.Value.Type()}
+	}
+	switch tt := err.(type) {
+	case KeyNotFound:
+		tt.Depth++
+		tt.Key = key
+		return Elem{}, tt
+	case nil:
+		return elem, nil
+	default:
+		return Elem{}, err // We can't actually hit this.
+	}
+}
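+
+// Illustrative lookups (a sketch): each variadic key descends one level into an
+// embedded document:
+//
+//	d := Doc{{Key: "a", Value: Document(Doc{{Key: "b", Value: Int32(1)}})}}
+//	v, err := d.LookupErr("a", "b") // v.Int32() == 1, err == nil
+//	_, err = d.LookupErr("a", "c")  // err is a KeyNotFound with Depth 1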
+
+// MarshalBSONValue implements the bsoncodec.ValueMarshaler interface.
+//
+// This method will never return an error.
+func (d Doc) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	if d == nil {
+		// TODO: Should we do this?
+		return bsontype.Null, nil, nil
+	}
+	data, _ := d.MarshalBSON()
+	return bsontype.EmbeddedDocument, data, nil
+}
+
+// MarshalBSON implements the Marshaler interface.
+//
+// This method will never return an error.
+func (d Doc) MarshalBSON() ([]byte, error) { return d.AppendMarshalBSON(nil) }
+
+// AppendMarshalBSON marshals Doc to BSON bytes, appending to dst.
+//
+// This method will never return an error.
+func (d Doc) AppendMarshalBSON(dst []byte) ([]byte, error) {
+	idx, dst := bsoncore.ReserveLength(dst)
+	for _, elem := range d {
+		t, data, _ := elem.Value.MarshalBSONValue() // Value.MarshalBSONValue never returns an error.
+		dst = append(dst, byte(t))
+		dst = append(dst, elem.Key...)
+		dst = append(dst, 0x00)
+		dst = append(dst, data...)
+	}
+	dst = append(dst, 0x00)
+	dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:])))
+	return dst, nil
+}
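+
+// The output above is the standard BSON document layout: a 4-byte little-endian
+// total length, each element as a type byte, a C-string key, and the value bytes,
+// then a trailing 0x00. For example (a sketch, bytes in hex):
+//
+//	Doc{{Key: "a", Value: Int32(1)}}.MarshalBSON()
+//	// 0c 00 00 00 10 61 00 01 00 00 00 00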
+
+// UnmarshalBSON implements the Unmarshaler interface.
+func (d *Doc) UnmarshalBSON(b []byte) error {
+	if d == nil {
+		return ErrNilDocument
+	}
+
+	if err := bsoncore.Document(b).Validate(); err != nil {
+		return err
+	}
+
+	elems, err := bsoncore.Document(b).Elements()
+	if err != nil {
+		return err
+	}
+	var val Val
+	for _, elem := range elems {
+		rawv := elem.Value()
+		err = val.UnmarshalBSONValue(rawv.Type, rawv.Data)
+		if err != nil {
+			return err
+		}
+		*d = d.Append(elem.Key(), val)
+	}
+	return nil
+}
+
+// UnmarshalBSONValue implements the bson.ValueUnmarshaler interface.
+func (d *Doc) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
+	if t != bsontype.EmbeddedDocument {
+		return fmt.Errorf("cannot unmarshal %s into a bsonx.Doc", t)
+	}
+	return d.UnmarshalBSON(data)
+}
+
+// Equal compares this document to another, returning true if they are equal.
+func (d Doc) Equal(id IDoc) bool {
+	switch tt := id.(type) {
+	case Doc:
+		d2 := tt
+		if len(d) != len(d2) {
+			return false
+		}
+		for idx := range d {
+			if !d[idx].Equal(d2[idx]) {
+				return false
+			}
+		}
+	case MDoc:
+		unique := make(map[string]struct{}, 0)
+		for _, elem := range d {
+			unique[elem.Key] = struct{}{}
+			val, ok := tt[elem.Key]
+			if !ok {
+				return false
+			}
+			if !val.Equal(elem.Value) {
+				return false
+			}
+		}
+		if len(unique) != len(tt) {
+			return false
+		}
+	case nil:
+		return d == nil
+	default:
+		return false
+	}
+
+	return true
+}
+
+// String implements the fmt.Stringer interface.
+func (d Doc) String() string {
+	var buf bytes.Buffer
+	buf.Write([]byte("bson.Document{"))
+	for idx, elem := range d {
+		if idx > 0 {
+			buf.Write([]byte(", "))
+		}
+		fmt.Fprintf(&buf, "%v", elem)
+	}
+	buf.WriteByte('}')
+
+	return buf.String()
+}
+
+func (Doc) idoc() {}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/element.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/element.go
new file mode 100644
index 0000000..b45dbea
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/element.go
@@ -0,0 +1,53 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+const validateMaxDepthDefault = 2048
+
+// ElementTypeError specifies that a method to obtain a BSON value of one type was called on a
+// bson.Value of a different, incorrect type.
+//
+// TODO: rename this ValueTypeError.
+type ElementTypeError struct {
+	Method string
+	Type   bsontype.Type
+}
+
+// Error implements the error interface.
+func (ete ElementTypeError) Error() string {
+	return "Call of " + ete.Method + " on " + ete.Type.String() + " type"
+}
+
+// Elem represents a BSON element.
+//
+// NOTE: Elem cannot be the value of a map nor a property of a struct without special handling.
+// The default encoders and decoders will not process Elem correctly. To do so would require
+// information loss, since an Elem contains a key, but the keys used when encoding a struct are
+// the struct field names. Instead of using an Elem, use a Val as a value in a map or a
+// property of a struct.
+type Elem struct {
+	Key   string
+	Value Val
+}
+
+// Equal compares e and e2 and returns true if they are equal.
+func (e Elem) Equal(e2 Elem) bool {
+	if e.Key != e2.Key {
+		return false
+	}
+	return e.Value.Equal(e2.Value)
+}
+
+func (e Elem) String() string {
+	// TODO(GODRIVER-612): When bsoncore has appenders for extended JSON use that here.
+	return fmt.Sprintf(`bson.Element{"%s": %v}`, e.Key, e.Value)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/mdocument.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/mdocument.go
new file mode 100644
index 0000000..e483556
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/mdocument.go
@@ -0,0 +1,231 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// MDoc is an unordered, type safe, concise BSON document representation. This type should not be
+// used if you require ordering of elements or support for duplicate keys.
+type MDoc map[string]Val
+
+// ReadMDoc will create an MDoc using the provided slice of bytes. If the
+// slice of bytes is not a valid BSON document, this method will return an error.
+func ReadMDoc(b []byte) (MDoc, error) {
+	doc := make(MDoc, 0)
+	err := doc.UnmarshalBSON(b)
+	if err != nil {
+		return nil, err
+	}
+	return doc, nil
+}
+
+// Copy makes a shallow copy of this document.
+func (d MDoc) Copy() MDoc {
+	d2 := make(MDoc, len(d))
+	for k, v := range d {
+		d2[k] = v
+	}
+	return d2
+}
+
+// Lookup searches the document and potentially subdocuments or arrays for the
+// provided key. Each key provided to this method represents a layer of depth.
+//
+// This method will return an empty Value if the key does not exist. To know if the key actually
+// exists, use LookupErr.
+func (d MDoc) Lookup(key ...string) Val {
+	val, _ := d.LookupErr(key...)
+	return val
+}
+
+// LookupErr searches the document and potentially subdocuments or arrays for the
+// provided key. Each key provided to this method represents a layer of depth.
+func (d MDoc) LookupErr(key ...string) (Val, error) {
+	elem, err := d.LookupElementErr(key...)
+	return elem.Value, err
+}
+
+// LookupElement searches the document and potentially subdocuments or arrays for the
+// provided key. Each key provided to this method represents a layer of depth.
+//
+// This method will return an empty Element if the key does not exist. To know if the key actually
+// exists, use LookupElementErr.
+func (d MDoc) LookupElement(key ...string) Elem {
+	elem, _ := d.LookupElementErr(key...)
+	return elem
+}
+
+// LookupElementErr searches the document and potentially subdocuments for the
+// provided key. Each key provided to this method represents a layer of depth.
+func (d MDoc) LookupElementErr(key ...string) (Elem, error) {
+	// KeyNotFound operates by being created where the error happens and then having its Depth
+	// incremented by 1 as each function unwinds. Whenever this function returns a KeyNotFound, it
+	// also assigns the full key slice it received to the error's Key field. This ensures that both
+	// the proper depth and the proper keys are reported.
+	if len(key) == 0 {
+		return Elem{}, KeyNotFound{Key: key}
+	}
+
+	var elem Elem
+	var err error
+	val, ok := d[key[0]]
+	if !ok {
+		return Elem{}, KeyNotFound{Key: key}
+	}
+
+	if len(key) == 1 {
+		return Elem{Key: key[0], Value: val}, nil
+	}
+
+	switch val.Type() {
+	case bsontype.EmbeddedDocument:
+		switch tt := val.primitive.(type) {
+		case Doc:
+			elem, err = tt.LookupElementErr(key[1:]...)
+		case MDoc:
+			elem, err = tt.LookupElementErr(key[1:]...)
+		}
+	default:
+		return Elem{}, KeyNotFound{Type: val.Type()}
+	}
+	switch tt := err.(type) {
+	case KeyNotFound:
+		tt.Depth++
+		tt.Key = key
+		return Elem{}, tt
+	case nil:
+		return elem, nil
+	default:
+		return Elem{}, err // We can't actually hit this.
+	}
+}
+
+// MarshalBSONValue implements the bsoncodec.ValueMarshaler interface.
+//
+// This method will never return an error.
+func (d MDoc) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	if d == nil {
+		// TODO: Should we do this?
+		return bsontype.Null, nil, nil
+	}
+	data, _ := d.MarshalBSON()
+	return bsontype.EmbeddedDocument, data, nil
+}
+
+// MarshalBSON implements the Marshaler interface.
+//
+// This method will never return an error.
+func (d MDoc) MarshalBSON() ([]byte, error) { return d.AppendMarshalBSON(nil) }
+
+// AppendMarshalBSON marshals MDoc to BSON bytes, appending to dst.
+//
+// This method will never return an error.
+func (d MDoc) AppendMarshalBSON(dst []byte) ([]byte, error) {
+	idx, dst := bsoncore.ReserveLength(dst)
+	for k, v := range d {
+		t, data, _ := v.MarshalBSONValue() // Value.MarshalBSONValue never returns an error.
+		dst = append(dst, byte(t))
+		dst = append(dst, k...)
+		dst = append(dst, 0x00)
+		dst = append(dst, data...)
+	}
+	dst = append(dst, 0x00)
+	dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:])))
+	return dst, nil
+}
+
+// UnmarshalBSON implements the Unmarshaler interface.
+func (d *MDoc) UnmarshalBSON(b []byte) error {
+	if d == nil {
+		return ErrNilDocument
+	}
+
+	if err := bsoncore.Document(b).Validate(); err != nil {
+		return err
+	}
+
+	elems, err := bsoncore.Document(b).Elements()
+	if err != nil {
+		return err
+	}
+	var val Val
+	for _, elem := range elems {
+		rawv := elem.Value()
+		err = val.UnmarshalBSONValue(rawv.Type, rawv.Data)
+		if err != nil {
+			return err
+		}
+		(*d)[elem.Key()] = val
+	}
+	return nil
+}
+
+// Equal compares this document to another, returning true if they are equal.
+func (d MDoc) Equal(id IDoc) bool {
+	switch tt := id.(type) {
+	case MDoc:
+		d2 := tt
+		if len(d) != len(d2) {
+			return false
+		}
+		for key, value := range d {
+			value2, ok := d2[key]
+			if !ok {
+				return false
+			}
+			if !value.Equal(value2) {
+				return false
+			}
+		}
+	case Doc:
+		unique := make(map[string]struct{}, 0)
+		for _, elem := range tt {
+			unique[elem.Key] = struct{}{}
+			val, ok := d[elem.Key]
+			if !ok {
+				return false
+			}
+			if !val.Equal(elem.Value) {
+				return false
+			}
+		}
+		if len(unique) != len(d) {
+			return false
+		}
+	case nil:
+		return d == nil
+	default:
+		return false
+	}
+
+	return true
+}
+
+// String implements the fmt.Stringer interface.
+func (d MDoc) String() string {
+	var buf bytes.Buffer
+	buf.Write([]byte("bson.Document{"))
+	first := true
+	for key, value := range d {
+		if !first {
+			buf.Write([]byte(", "))
+		}
+		fmt.Fprintf(&buf, "%v", Elem{Key: key, Value: value})
+		first = false
+	}
+	buf.WriteByte('}')
+
+	return buf.String()
+}
+
+func (MDoc) idoc() {}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/primitive_codecs.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/primitive_codecs.go
new file mode 100644
index 0000000..b34f07d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/primitive_codecs.go
@@ -0,0 +1,638 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+var primitiveCodecs PrimitiveCodecs
+
+var tDocument = reflect.TypeOf((Doc)(nil))
+var tMDoc = reflect.TypeOf((MDoc)(nil))
+var tArray = reflect.TypeOf((Arr)(nil))
+var tValue = reflect.TypeOf(Val{})
+var tElementSlice = reflect.TypeOf(([]Elem)(nil))
+
+// PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types
+// defined in this package.
+type PrimitiveCodecs struct{}
+
+// RegisterPrimitiveCodecs will register the encode and decode methods attached to PrimitiveCodecs
+// with the provided RegistryBuilder. It panics if rb is nil.
+func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) {
+	if rb == nil {
+		panic(errors.New("argument to RegisterPrimitiveCodecs must not be nil"))
+	}
+
+	rb.
+		RegisterEncoder(tDocument, bsoncodec.ValueEncoderFunc(pc.DocumentEncodeValue)).
+		RegisterEncoder(tArray, bsoncodec.ValueEncoderFunc(pc.ArrayEncodeValue)).
+		RegisterEncoder(tValue, bsoncodec.ValueEncoderFunc(pc.ValueEncodeValue)).
+		RegisterEncoder(tElementSlice, bsoncodec.ValueEncoderFunc(pc.ElementSliceEncodeValue)).
+		RegisterDecoder(tDocument, bsoncodec.ValueDecoderFunc(pc.DocumentDecodeValue)).
+		RegisterDecoder(tArray, bsoncodec.ValueDecoderFunc(pc.ArrayDecodeValue)).
+		RegisterDecoder(tValue, bsoncodec.ValueDecoderFunc(pc.ValueDecodeValue)).
+		RegisterDecoder(tElementSlice, bsoncodec.ValueDecoderFunc(pc.ElementSliceDecodeValue))
+}
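+
+// Typical wiring (a sketch mirroring NewRegistryBuilder in registry.go): the
+// primitive codecs are registered on top of the driver's default encoders and
+// decoders before the registry is built:
+//
+//	rb := bsoncodec.NewRegistryBuilder()
+//	bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
+//	bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
+//	PrimitiveCodecs{}.RegisterPrimitiveCodecs(rb)
+//	registry := rb.Build()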
+
+// DocumentEncodeValue is the ValueEncoderFunc for Doc.
+func (pc PrimitiveCodecs) DocumentEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDocument {
+		return bsoncodec.ValueEncoderError{Name: "DocumentEncodeValue", Types: []reflect.Type{tDocument}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	doc := val.Interface().(Doc)
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return pc.encodeDocument(ec, dw, doc)
+}
+
+// DocumentDecodeValue is the ValueDecoderFunc for Doc.
+func (pc PrimitiveCodecs) DocumentDecodeValue(dctx bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tDocument {
+		return bsoncodec.ValueDecoderError{Name: "DocumentDecodeValue", Types: []reflect.Type{tDocument}, Received: val}
+	}
+
+	return pc.documentDecodeValue(dctx, vr, val.Addr().Interface().(*Doc))
+}
+
+func (pc PrimitiveCodecs) documentDecodeValue(dctx bsoncodec.DecodeContext, vr bsonrw.ValueReader, doc *Doc) error {
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	return pc.decodeDocument(dctx, dr, doc)
+}
+
+// ArrayEncodeValue is the ValueEncoderFunc for Arr.
+func (pc PrimitiveCodecs) ArrayEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tArray {
+		return bsoncodec.ValueEncoderError{Name: "ArrayEncodeValue", Types: []reflect.Type{tArray}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	arr := val.Interface().(Arr)
+
+	aw, err := vw.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	for _, val := range arr {
+		dvw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		err = pc.encodeValue(ec, dvw, val)
+
+		if err != nil {
+			return err
+		}
+	}
+
+	return aw.WriteArrayEnd()
+}
+
+// ArrayDecodeValue is the ValueDecoderFunc for Arr.
+func (pc PrimitiveCodecs) ArrayDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tArray {
+		return bsoncodec.ValueDecoderError{Name: "ArrayDecodeValue", Types: []reflect.Type{tArray}, Received: val}
+	}
+
+	ar, err := vr.ReadArray()
+	if err != nil {
+		return err
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(tArray, 0, 0))
+	}
+	val.SetLen(0)
+
+	for {
+		vr, err := ar.ReadValue()
+		if err == bsonrw.ErrEOA {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		var elem Val
+		err = pc.valueDecodeValue(dc, vr, &elem)
+		if err != nil {
+			return err
+		}
+
+		val.Set(reflect.Append(val, reflect.ValueOf(elem)))
+	}
+
+	return nil
+}
+
+// ElementSliceEncodeValue is the ValueEncoderFunc for []Elem.
+func (pc PrimitiveCodecs) ElementSliceEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tElementSlice {
+		return bsoncodec.ValueEncoderError{Name: "ElementSliceEncodeValue", Types: []reflect.Type{tElementSlice}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	return pc.DocumentEncodeValue(ec, vw, val.Convert(tDocument))
+}
+
+// ElementSliceDecodeValue is the ValueDecoderFunc for []Elem.
+func (pc PrimitiveCodecs) ElementSliceDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tElementSlice {
+		return bsoncodec.ValueDecoderError{Name: "ElementSliceDecodeValue", Types: []reflect.Type{tElementSlice}, Received: val}
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(val.Type(), 0, 0))
+	}
+
+	val.SetLen(0)
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+	elems := make([]reflect.Value, 0)
+	for {
+		key, vr, err := dr.ReadElement()
+		if err == bsonrw.ErrEOD {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		var elem Elem
+		err = pc.elementDecodeValue(dc, vr, key, &elem)
+		if err != nil {
+			return err
+		}
+
+		elems = append(elems, reflect.ValueOf(elem))
+	}
+
+	val.Set(reflect.Append(val, elems...))
+	return nil
+}
+
+// ValueEncodeValue is the ValueEncoderFunc for Val.
+func (pc PrimitiveCodecs) ValueEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tValue {
+		return bsoncodec.ValueEncoderError{Name: "ValueEncodeValue", Types: []reflect.Type{tValue}, Received: val}
+	}
+
+	v := val.Interface().(Val)
+
+	return pc.encodeValue(ec, vw, v)
+}
+
+// ValueDecodeValue is the ValueDecoderFunc for Val.
+func (pc PrimitiveCodecs) ValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tValue {
+		return bsoncodec.ValueDecoderError{Name: "ValueDecodeValue", Types: []reflect.Type{tValue}, Received: val}
+	}
+
+	return pc.valueDecodeValue(dc, vr, val.Addr().Interface().(*Val))
+}
+
+// encodeDocument is a separate function because CodeWithScope returns a DocumentWriter, and we
+// need to apply the same logic that we would for a document without being able to use a Codec.
+func (pc PrimitiveCodecs) encodeDocument(ec bsoncodec.EncodeContext, dw bsonrw.DocumentWriter, doc Doc) error {
+	for _, elem := range doc {
+		dvw, err := dw.WriteDocumentElement(elem.Key)
+		if err != nil {
+			return err
+		}
+
+		err = pc.encodeValue(ec, dvw, elem.Value)
+
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// DecodeDocument handles decoding into a Doc from a bsonrw.DocumentReader.
+func (pc PrimitiveCodecs) DecodeDocument(dctx bsoncodec.DecodeContext, dr bsonrw.DocumentReader, pdoc *Doc) error {
+	return pc.decodeDocument(dctx, dr, pdoc)
+}
+
+func (pc PrimitiveCodecs) decodeDocument(dctx bsoncodec.DecodeContext, dr bsonrw.DocumentReader, pdoc *Doc) error {
+	if *pdoc == nil {
+		*pdoc = make(Doc, 0)
+	}
+	*pdoc = (*pdoc)[:0]
+	for {
+		key, vr, err := dr.ReadElement()
+		if err == bsonrw.ErrEOD {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		var elem Elem
+		err = pc.elementDecodeValue(dctx, vr, key, &elem)
+		if err != nil {
+			return err
+		}
+
+		*pdoc = append(*pdoc, elem)
+	}
+	return nil
+}
+
+func (pc PrimitiveCodecs) elementDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, key string, elem *Elem) error {
+	var val Val
+	switch vr.Type() {
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return err
+		}
+		val = Double(f64)
+	case bsontype.String:
+		str, err := vr.ReadString()
+		if err != nil {
+			return err
+		}
+		val = String(str)
+	case bsontype.EmbeddedDocument:
+		var embeddedDoc Doc
+		err := pc.documentDecodeValue(dc, vr, &embeddedDoc)
+		if err != nil {
+			return err
+		}
+		val = Document(embeddedDoc)
+	case bsontype.Array:
+		arr := reflect.New(tArray).Elem()
+		err := pc.ArrayDecodeValue(dc, vr, arr)
+		if err != nil {
+			return err
+		}
+		val = Array(arr.Interface().(Arr))
+	case bsontype.Binary:
+		data, subtype, err := vr.ReadBinary()
+		if err != nil {
+			return err
+		}
+		val = Binary(subtype, data)
+	case bsontype.Undefined:
+		err := vr.ReadUndefined()
+		if err != nil {
+			return err
+		}
+		val = Undefined()
+	case bsontype.ObjectID:
+		oid, err := vr.ReadObjectID()
+		if err != nil {
+			return err
+		}
+		val = ObjectID(oid)
+	case bsontype.Boolean:
+		b, err := vr.ReadBoolean()
+		if err != nil {
+			return err
+		}
+		val = Boolean(b)
+	case bsontype.DateTime:
+		dt, err := vr.ReadDateTime()
+		if err != nil {
+			return err
+		}
+		val = DateTime(dt)
+	case bsontype.Null:
+		err := vr.ReadNull()
+		if err != nil {
+			return err
+		}
+		val = Null()
+	case bsontype.Regex:
+		pattern, options, err := vr.ReadRegex()
+		if err != nil {
+			return err
+		}
+		val = Regex(pattern, options)
+	case bsontype.DBPointer:
+		ns, pointer, err := vr.ReadDBPointer()
+		if err != nil {
+			return err
+		}
+		val = DBPointer(ns, pointer)
+	case bsontype.JavaScript:
+		js, err := vr.ReadJavascript()
+		if err != nil {
+			return err
+		}
+		val = JavaScript(js)
+	case bsontype.Symbol:
+		symbol, err := vr.ReadSymbol()
+		if err != nil {
+			return err
+		}
+		val = Symbol(symbol)
+	case bsontype.CodeWithScope:
+		code, scope, err := vr.ReadCodeWithScope()
+		if err != nil {
+			return err
+		}
+		var doc Doc
+		err = pc.decodeDocument(dc, scope, &doc)
+		if err != nil {
+			return err
+		}
+		val = CodeWithScope(code, doc)
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return err
+		}
+		val = Int32(i32)
+	case bsontype.Timestamp:
+		t, i, err := vr.ReadTimestamp()
+		if err != nil {
+			return err
+		}
+		val = Timestamp(t, i)
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return err
+		}
+		val = Int64(i64)
+	case bsontype.Decimal128:
+		d128, err := vr.ReadDecimal128()
+		if err != nil {
+			return err
+		}
+		val = Decimal128(d128)
+	case bsontype.MinKey:
+		err := vr.ReadMinKey()
+		if err != nil {
+			return err
+		}
+		val = MinKey()
+	case bsontype.MaxKey:
+		err := vr.ReadMaxKey()
+		if err != nil {
+			return err
+		}
+		val = MaxKey()
+	default:
+		return fmt.Errorf("Cannot read unknown BSON type %s", vr.Type())
+	}
+
+	*elem = Elem{Key: key, Value: val}
+	return nil
+}
+
+// encodeValue does no validation; callers must perform validation on val before calling
+// this method.
+func (pc PrimitiveCodecs) encodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val Val) error {
+	var err error
+	switch val.Type() {
+	case bsontype.Double:
+		err = vw.WriteDouble(val.Double())
+	case bsontype.String:
+		err = vw.WriteString(val.StringValue())
+	case bsontype.EmbeddedDocument:
+		var encoder bsoncodec.ValueEncoder
+		encoder, err = ec.LookupEncoder(tDocument)
+		if err != nil {
+			break
+		}
+		err = encoder.EncodeValue(ec, vw, reflect.ValueOf(val.Document()))
+	case bsontype.Array:
+		var encoder bsoncodec.ValueEncoder
+		encoder, err = ec.LookupEncoder(tArray)
+		if err != nil {
+			break
+		}
+		err = encoder.EncodeValue(ec, vw, reflect.ValueOf(val.Array()))
+	case bsontype.Binary:
+		// TODO: FIX THIS (╯°□°)╯︵ ┻━┻
+		subtype, data := val.Binary()
+		err = vw.WriteBinaryWithSubtype(data, subtype)
+	case bsontype.Undefined:
+		err = vw.WriteUndefined()
+	case bsontype.ObjectID:
+		err = vw.WriteObjectID(val.ObjectID())
+	case bsontype.Boolean:
+		err = vw.WriteBoolean(val.Boolean())
+	case bsontype.DateTime:
+		err = vw.WriteDateTime(val.DateTime())
+	case bsontype.Null:
+		err = vw.WriteNull()
+	case bsontype.Regex:
+		err = vw.WriteRegex(val.Regex())
+	case bsontype.DBPointer:
+		err = vw.WriteDBPointer(val.DBPointer())
+	case bsontype.JavaScript:
+		err = vw.WriteJavascript(val.JavaScript())
+	case bsontype.Symbol:
+		err = vw.WriteSymbol(val.Symbol())
+	case bsontype.CodeWithScope:
+		code, scope := val.CodeWithScope()
+
+		var cwsw bsonrw.DocumentWriter
+		cwsw, err = vw.WriteCodeWithScope(code)
+		if err != nil {
+			break
+		}
+
+		err = pc.encodeDocument(ec, cwsw, scope)
+	case bsontype.Int32:
+		err = vw.WriteInt32(val.Int32())
+	case bsontype.Timestamp:
+		err = vw.WriteTimestamp(val.Timestamp())
+	case bsontype.Int64:
+		err = vw.WriteInt64(val.Int64())
+	case bsontype.Decimal128:
+		err = vw.WriteDecimal128(val.Decimal128())
+	case bsontype.MinKey:
+		err = vw.WriteMinKey()
+	case bsontype.MaxKey:
+		err = vw.WriteMaxKey()
+	default:
+		err = fmt.Errorf("%T is not a valid BSON type to encode", val.Type())
+	}
+
+	return err
+}
+
+func (pc PrimitiveCodecs) valueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val *Val) error {
+	switch vr.Type() {
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return err
+		}
+		*val = Double(f64)
+	case bsontype.String:
+		str, err := vr.ReadString()
+		if err != nil {
+			return err
+		}
+		*val = String(str)
+	case bsontype.EmbeddedDocument:
+		var embeddedDoc Doc
+		err := pc.documentDecodeValue(dc, vr, &embeddedDoc)
+		if err != nil {
+			return err
+		}
+		*val = Document(embeddedDoc)
+	case bsontype.Array:
+		arr := reflect.New(tArray).Elem()
+		err := pc.ArrayDecodeValue(dc, vr, arr)
+		if err != nil {
+			return err
+		}
+		*val = Array(arr.Interface().(Arr))
+	case bsontype.Binary:
+		data, subtype, err := vr.ReadBinary()
+		if err != nil {
+			return err
+		}
+		*val = Binary(subtype, data)
+	case bsontype.Undefined:
+		err := vr.ReadUndefined()
+		if err != nil {
+			return err
+		}
+		*val = Undefined()
+	case bsontype.ObjectID:
+		oid, err := vr.ReadObjectID()
+		if err != nil {
+			return err
+		}
+		*val = ObjectID(oid)
+	case bsontype.Boolean:
+		b, err := vr.ReadBoolean()
+		if err != nil {
+			return err
+		}
+		*val = Boolean(b)
+	case bsontype.DateTime:
+		dt, err := vr.ReadDateTime()
+		if err != nil {
+			return err
+		}
+		*val = DateTime(dt)
+	case bsontype.Null:
+		err := vr.ReadNull()
+		if err != nil {
+			return err
+		}
+		*val = Null()
+	case bsontype.Regex:
+		pattern, options, err := vr.ReadRegex()
+		if err != nil {
+			return err
+		}
+		*val = Regex(pattern, options)
+	case bsontype.DBPointer:
+		ns, pointer, err := vr.ReadDBPointer()
+		if err != nil {
+			return err
+		}
+		*val = DBPointer(ns, pointer)
+	case bsontype.JavaScript:
+		js, err := vr.ReadJavascript()
+		if err != nil {
+			return err
+		}
+		*val = JavaScript(js)
+	case bsontype.Symbol:
+		symbol, err := vr.ReadSymbol()
+		if err != nil {
+			return err
+		}
+		*val = Symbol(symbol)
+	case bsontype.CodeWithScope:
+		code, scope, err := vr.ReadCodeWithScope()
+		if err != nil {
+			return err
+		}
+		var scopeDoc Doc
+		err = pc.decodeDocument(dc, scope, &scopeDoc)
+		if err != nil {
+			return err
+		}
+		*val = CodeWithScope(code, scopeDoc)
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return err
+		}
+		*val = Int32(i32)
+	case bsontype.Timestamp:
+		t, i, err := vr.ReadTimestamp()
+		if err != nil {
+			return err
+		}
+		*val = Timestamp(t, i)
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return err
+		}
+		*val = Int64(i64)
+	case bsontype.Decimal128:
+		d128, err := vr.ReadDecimal128()
+		if err != nil {
+			return err
+		}
+		*val = Decimal128(d128)
+	case bsontype.MinKey:
+		err := vr.ReadMinKey()
+		if err != nil {
+			return err
+		}
+		*val = MinKey()
+	case bsontype.MaxKey:
+		err := vr.ReadMaxKey()
+		if err != nil {
+			return err
+		}
+		*val = MaxKey()
+	default:
+		return fmt.Errorf("Cannot read unknown BSON type %s", vr.Type())
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/registry.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/registry.go
new file mode 100644
index 0000000..83dfaa5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/registry.go
@@ -0,0 +1,22 @@
+package bsonx
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+)
+
+// DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the
+// primitive codecs.
+var DefaultRegistry = NewRegistryBuilder().Build()
+
+// NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and
+// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the
+// PrimitiveCodecs type in this package.
+func NewRegistryBuilder() *bsoncodec.RegistryBuilder {
+	rb := bsoncodec.NewRegistryBuilder()
+	bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
+	bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
+	bson.PrimitiveCodecs{}.RegisterPrimitiveCodecs(rb)
+	primitiveCodecs.RegisterPrimitiveCodecs(rb)
+	return rb
+}
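+
+// Illustrative usage (a sketch): most callers can use DefaultRegistry directly,
+// while a customized registry starts from the same builder:
+//
+//	rb := NewRegistryBuilder()
+//	// register any additional codecs on rb here
+//	registry := rb.Build()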
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/value.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/value.go
new file mode 100644
index 0000000..033d9d8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/value.go
@@ -0,0 +1,899 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// Val represents a BSON value.
+type Val struct {
+	// NOTE: The bootstrap is a small amount of space that lives on the stack. At 15 bytes it
+	// doesn't make this type any larger, since the struct would otherwise contain 7 bytes of
+	// padding, and we want at least an int64's worth of space to store small values (e.g. boolean,
+	// double, int64, etc...). The primitive property is where all of the larger values go. They
+	// will use either Go primitives or the primitive.* types.
+	t         bsontype.Type
+	bootstrap [15]byte
+	primitive interface{}
+}
+
+func (v Val) reset() Val {
+	v.primitive = nil // clear out any pointers so we don't accidentally stop them from being garbage collected.
+	v.t = bsontype.Type(0)
+	v.bootstrap[0] = 0x00
+	v.bootstrap[1] = 0x00
+	v.bootstrap[2] = 0x00
+	v.bootstrap[3] = 0x00
+	v.bootstrap[4] = 0x00
+	v.bootstrap[5] = 0x00
+	v.bootstrap[6] = 0x00
+	v.bootstrap[7] = 0x00
+	v.bootstrap[8] = 0x00
+	v.bootstrap[9] = 0x00
+	v.bootstrap[10] = 0x00
+	v.bootstrap[11] = 0x00
+	v.bootstrap[12] = 0x00
+	v.bootstrap[13] = 0x00
+	v.bootstrap[14] = 0x00
+	return v
+}
+
+func (v Val) string() string {
+	if v.primitive != nil {
+		return v.primitive.(string)
+	}
+	// For inline strings, bootstrap[0] holds the length and the bytes follow immediately after.
+	length := uint8(v.bootstrap[0])
+	return string(v.bootstrap[1 : length+1])
+}
+
+func (v Val) writestring(str string) Val {
+	switch {
+	case len(str) < 15:
+		v.bootstrap[0] = uint8(len(str))
+		copy(v.bootstrap[1:], str)
+	default:
+		v.primitive = str
+	}
+	return v
+}
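+
+// This is a small-string optimization (a sketch of the effect): strings shorter
+// than 15 bytes live inline in the bootstrap array, with bootstrap[0] holding the
+// length, so no heap allocation is needed:
+//
+//	String("hi")                      // stored in bootstrap; primitive == nil
+//	String("longer than 15 bytes...") // stored in the primitive field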
+
+func (v Val) i64() int64 {
+	return int64(v.bootstrap[0]) | int64(v.bootstrap[1])<<8 | int64(v.bootstrap[2])<<16 |
+		int64(v.bootstrap[3])<<24 | int64(v.bootstrap[4])<<32 | int64(v.bootstrap[5])<<40 |
+		int64(v.bootstrap[6])<<48 | int64(v.bootstrap[7])<<56
+}
+
+func (v Val) writei64(i64 int64) Val {
+	v.bootstrap[0] = byte(i64)
+	v.bootstrap[1] = byte(i64 >> 8)
+	v.bootstrap[2] = byte(i64 >> 16)
+	v.bootstrap[3] = byte(i64 >> 24)
+	v.bootstrap[4] = byte(i64 >> 32)
+	v.bootstrap[5] = byte(i64 >> 40)
+	v.bootstrap[6] = byte(i64 >> 48)
+	v.bootstrap[7] = byte(i64 >> 56)
+	return v
+}
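+
+// i64 and writei64 round-trip a little-endian encoding through the first 8
+// bootstrap bytes. For example (a sketch), Int64(1) sets bootstrap[0] to 0x01 and
+// leaves bytes 1 through 7 zero, and Int64(1).i64() == 1.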
+
+// IsZero returns true if this value is zero or a BSON null.
+func (v Val) IsZero() bool { return v.t == bsontype.Type(0) || v.t == bsontype.Null }
+
+func (v Val) String() string {
+	// TODO(GODRIVER-612): When bsoncore has appenders for extended JSON use that here.
+	return fmt.Sprintf("%v", v.Interface())
+}
+
+// Interface returns the Go value of this Value as an empty interface.
+//
+// This method will return primitive.Null{} if the value is empty or of an unrecognized type;
+// otherwise it will return a Go primitive or a primitive.* instance.
+func (v Val) Interface() interface{} {
+	switch v.Type() {
+	case bsontype.Double:
+		return v.Double()
+	case bsontype.String:
+		return v.StringValue()
+	case bsontype.EmbeddedDocument:
+		switch v.primitive.(type) {
+		case Doc:
+			return v.primitive.(Doc)
+		case MDoc:
+			return v.primitive.(MDoc)
+		default:
+			return primitive.Null{}
+		}
+	case bsontype.Array:
+		return v.Array()
+	case bsontype.Binary:
+		return v.primitive.(primitive.Binary)
+	case bsontype.Undefined:
+		return primitive.Undefined{}
+	case bsontype.ObjectID:
+		return v.ObjectID()
+	case bsontype.Boolean:
+		return v.Boolean()
+	case bsontype.DateTime:
+		return v.DateTime()
+	case bsontype.Null:
+		return primitive.Null{}
+	case bsontype.Regex:
+		return v.primitive.(primitive.Regex)
+	case bsontype.DBPointer:
+		return v.primitive.(primitive.DBPointer)
+	case bsontype.JavaScript:
+		return v.JavaScript()
+	case bsontype.Symbol:
+		return v.Symbol()
+	case bsontype.CodeWithScope:
+		return v.primitive.(primitive.CodeWithScope)
+	case bsontype.Int32:
+		return v.Int32()
+	case bsontype.Timestamp:
+		t, i := v.Timestamp()
+		return primitive.Timestamp{T: t, I: i}
+	case bsontype.Int64:
+		return v.Int64()
+	case bsontype.Decimal128:
+		return v.Decimal128()
+	case bsontype.MinKey:
+		return primitive.MinKey{}
+	case bsontype.MaxKey:
+		return primitive.MaxKey{}
+	default:
+		return primitive.Null{}
+	}
+}
+
+// MarshalBSONValue implements the bsoncodec.ValueMarshaler interface.
+func (v Val) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	return v.MarshalAppendBSONValue(nil)
+}
+
+// MarshalAppendBSONValue is similar to MarshalBSONValue, but allows the caller to specify a slice
+// to add the bytes to.
+func (v Val) MarshalAppendBSONValue(dst []byte) (bsontype.Type, []byte, error) {
+	t := v.Type()
+	switch v.Type() {
+	case bsontype.Double:
+		dst = bsoncore.AppendDouble(dst, v.Double())
+	case bsontype.String:
+		dst = bsoncore.AppendString(dst, v.String())
+	case bsontype.EmbeddedDocument:
+		switch v.primitive.(type) {
+		case Doc:
+			t, dst, _ = v.primitive.(Doc).MarshalBSONValue() // Doc.MarshalBSONValue never returns an error.
+		case MDoc:
+			t, dst, _ = v.primitive.(MDoc).MarshalBSONValue() // MDoc.MarshalBSONValue never returns an error.
+		}
+	case bsontype.Array:
+		t, dst, _ = v.Array().MarshalBSONValue() // Arr.MarshalBSON never returns an error.
+	case bsontype.Binary:
+		subtype, bindata := v.Binary()
+		dst = bsoncore.AppendBinary(dst, subtype, bindata)
+	case bsontype.Undefined:
+	case bsontype.ObjectID:
+		dst = bsoncore.AppendObjectID(dst, v.ObjectID())
+	case bsontype.Boolean:
+		dst = bsoncore.AppendBoolean(dst, v.Boolean())
+	case bsontype.DateTime:
+		dst = bsoncore.AppendDateTime(dst, int64(v.DateTime()))
+	case bsontype.Null:
+	case bsontype.Regex:
+		pattern, options := v.Regex()
+		dst = bsoncore.AppendRegex(dst, pattern, options)
+	case bsontype.DBPointer:
+		ns, ptr := v.DBPointer()
+		dst = bsoncore.AppendDBPointer(dst, ns, ptr)
+	case bsontype.JavaScript:
+		dst = bsoncore.AppendJavaScript(dst, string(v.JavaScript()))
+	case bsontype.Symbol:
+		dst = bsoncore.AppendSymbol(dst, string(v.Symbol()))
+	case bsontype.CodeWithScope:
+		code, doc := v.CodeWithScope()
+		var scope []byte
+		scope, _ = doc.MarshalBSON() // Doc.MarshalBSON never returns an error.
+		dst = bsoncore.AppendCodeWithScope(dst, code, scope)
+	case bsontype.Int32:
+		dst = bsoncore.AppendInt32(dst, v.Int32())
+	case bsontype.Timestamp:
+		t, i := v.Timestamp()
+		dst = bsoncore.AppendTimestamp(dst, t, i)
+	case bsontype.Int64:
+		dst = bsoncore.AppendInt64(dst, v.Int64())
+	case bsontype.Decimal128:
+		dst = bsoncore.AppendDecimal128(dst, v.Decimal128())
+	case bsontype.MinKey:
+	case bsontype.MaxKey:
+	default:
+		panic(fmt.Errorf("invalid BSON type %v", t))
+	}
+
+	return t, dst, nil
+}
+
+// UnmarshalBSONValue implements the bsoncodec.ValueUnmarshaler interface.
+func (v *Val) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
+	if v == nil {
+		return errors.New("cannot unmarshal into nil Value")
+	}
+	var err error
+	var ok = true
+	var rem []byte
+	switch t {
+	case bsontype.Double:
+		var f64 float64
+		f64, rem, ok = bsoncore.ReadDouble(data)
+		*v = Double(f64)
+	case bsontype.String:
+		var str string
+		str, rem, ok = bsoncore.ReadString(data)
+		*v = String(str)
+	case bsontype.EmbeddedDocument:
+		var raw []byte
+		var doc Doc
+		raw, rem, ok = bsoncore.ReadDocument(data)
+		doc, err = ReadDoc(raw)
+		*v = Document(doc)
+	case bsontype.Array:
+		var raw []byte
+		arr := make(Arr, 0)
+		raw, rem, ok = bsoncore.ReadArray(data)
+		err = arr.UnmarshalBSONValue(t, raw)
+		*v = Array(arr)
+	case bsontype.Binary:
+		var subtype byte
+		var bindata []byte
+		subtype, bindata, rem, ok = bsoncore.ReadBinary(data)
+		*v = Binary(subtype, bindata)
+	case bsontype.Undefined:
+		*v = Undefined()
+	case bsontype.ObjectID:
+		var oid primitive.ObjectID
+		oid, rem, ok = bsoncore.ReadObjectID(data)
+		*v = ObjectID(oid)
+	case bsontype.Boolean:
+		var b bool
+		b, rem, ok = bsoncore.ReadBoolean(data)
+		*v = Boolean(b)
+	case bsontype.DateTime:
+		var dt int64
+		dt, rem, ok = bsoncore.ReadDateTime(data)
+		*v = DateTime(dt)
+	case bsontype.Null:
+		*v = Null()
+	case bsontype.Regex:
+		var pattern, options string
+		pattern, options, rem, ok = bsoncore.ReadRegex(data)
+		*v = Regex(pattern, options)
+	case bsontype.DBPointer:
+		var ns string
+		var ptr primitive.ObjectID
+		ns, ptr, rem, ok = bsoncore.ReadDBPointer(data)
+		*v = DBPointer(ns, ptr)
+	case bsontype.JavaScript:
+		var js string
+		js, rem, ok = bsoncore.ReadJavaScript(data)
+		*v = JavaScript(js)
+	case bsontype.Symbol:
+		var symbol string
+		symbol, rem, ok = bsoncore.ReadSymbol(data)
+		*v = Symbol(symbol)
+	case bsontype.CodeWithScope:
+		var raw []byte
+		var code string
+		var scope Doc
+		code, raw, rem, ok = bsoncore.ReadCodeWithScope(data)
+		scope, err = ReadDoc(raw)
+		*v = CodeWithScope(code, scope)
+	case bsontype.Int32:
+		var i32 int32
+		i32, rem, ok = bsoncore.ReadInt32(data)
+		*v = Int32(i32)
+	case bsontype.Timestamp:
+		var i, t uint32
+		t, i, rem, ok = bsoncore.ReadTimestamp(data)
+		*v = Timestamp(t, i)
+	case bsontype.Int64:
+		var i64 int64
+		i64, rem, ok = bsoncore.ReadInt64(data)
+		*v = Int64(i64)
+	case bsontype.Decimal128:
+		var d128 primitive.Decimal128
+		d128, rem, ok = bsoncore.ReadDecimal128(data)
+		*v = Decimal128(d128)
+	case bsontype.MinKey:
+		*v = MinKey()
+	case bsontype.MaxKey:
+		*v = MaxKey()
+	default:
+		err = fmt.Errorf("invalid BSON type %v", t)
+	}
+
+	if !ok && err == nil {
+		err = bsoncore.NewInsufficientBytesError(data, rem)
+	}
+
+	return err
+}
+
+// Type returns the BSON type of this value.
+func (v Val) Type() bsontype.Type {
+	if v.t == bsontype.Type(0) {
+		return bsontype.Null
+	}
+	return v.t
+}
+
+// IsNumber returns true if the type of v is a numeric BSON type.
+func (v Val) IsNumber() bool {
+	switch v.Type() {
+	case bsontype.Double, bsontype.Int32, bsontype.Int64, bsontype.Decimal128:
+		return true
+	default:
+		return false
+	}
+}
+
+// Double returns the BSON double value the Value represents. It panics if the value is a BSON type
+// other than double.
+func (v Val) Double() float64 {
+	if v.t != bsontype.Double {
+		panic(ElementTypeError{"bson.Value.Double", v.t})
+	}
+	return math.Float64frombits(binary.LittleEndian.Uint64(v.bootstrap[0:8]))
+}
+
+// DoubleOK is the same as Double, but returns a boolean instead of panicking.
+func (v Val) DoubleOK() (float64, bool) {
+	if v.t != bsontype.Double {
+		return 0, false
+	}
+	return math.Float64frombits(binary.LittleEndian.Uint64(v.bootstrap[0:8])), true
+}
+
+// StringValue returns the BSON string the Value represents. It panics if the value is a BSON type
+// other than string.
+//
+// NOTE: This method is called StringValue to avoid it implementing the
+// fmt.Stringer interface.
+func (v Val) StringValue() string {
+	if v.t != bsontype.String {
+		panic(ElementTypeError{"bson.Value.StringValue", v.t})
+	}
+	return v.string()
+}
+
+// StringValueOK is the same as StringValue, but returns a boolean instead of
+// panicking.
+func (v Val) StringValueOK() (string, bool) {
+	if v.t != bsontype.String {
+		return "", false
+	}
+	return v.string(), true
+}
+
+func (v Val) asDoc() Doc {
+	doc, ok := v.primitive.(Doc)
+	if ok {
+		return doc
+	}
+	mdoc := v.primitive.(MDoc)
+	for k, v := range mdoc {
+		doc = append(doc, Elem{k, v})
+	}
+	return doc
+}
+
+func (v Val) asMDoc() MDoc {
+	mdoc, ok := v.primitive.(MDoc)
+	if ok {
+		return mdoc
+	}
+	doc := v.primitive.(Doc)
+	mdoc = make(MDoc, len(doc)) // allocate first; assigning into a nil map would panic
+	for _, elem := range doc {
+		mdoc[elem.Key] = elem.Value
+	}
+	return mdoc
+}
+
+// Document returns the BSON embedded document value the Value represents. It panics if the value
+// is a BSON type other than embedded document.
+func (v Val) Document() Doc {
+	if v.t != bsontype.EmbeddedDocument {
+		panic(ElementTypeError{"bson.Value.Document", v.t})
+	}
+	return v.asDoc()
+}
+
+// DocumentOK is the same as Document, except it returns a boolean
+// instead of panicking.
+func (v Val) DocumentOK() (Doc, bool) {
+	if v.t != bsontype.EmbeddedDocument {
+		return nil, false
+	}
+	return v.asDoc(), true
+}
+
+// MDocument returns the BSON embedded document value the Value represents. It panics if the value
+// is a BSON type other than embedded document.
+func (v Val) MDocument() MDoc {
+	if v.t != bsontype.EmbeddedDocument {
+		panic(ElementTypeError{"bson.Value.MDocument", v.t})
+	}
+	return v.asMDoc()
+}
+
+// MDocumentOK is the same as MDocument, except it returns a boolean
+// instead of panicking.
+func (v Val) MDocumentOK() (MDoc, bool) {
+	if v.t != bsontype.EmbeddedDocument {
+		return nil, false
+	}
+	return v.asMDoc(), true
+}
+
+// Array returns the BSON array value the Value represents. It panics if the value is a BSON type
+// other than array.
+func (v Val) Array() Arr {
+	if v.t != bsontype.Array {
+		panic(ElementTypeError{"bson.Value.Array", v.t})
+	}
+	return v.primitive.(Arr)
+}
+
+// ArrayOK is the same as Array, except it returns a boolean
+// instead of panicking.
+func (v Val) ArrayOK() (Arr, bool) {
+	if v.t != bsontype.Array {
+		return nil, false
+	}
+	return v.primitive.(Arr), true
+}
+
+// Binary returns the BSON binary value the Value represents. It panics if the value is a BSON type
+// other than binary.
+func (v Val) Binary() (byte, []byte) {
+	if v.t != bsontype.Binary {
+		panic(ElementTypeError{"bson.Value.Binary", v.t})
+	}
+	bin := v.primitive.(primitive.Binary)
+	return bin.Subtype, bin.Data
+}
+
+// BinaryOK is the same as Binary, except it returns a boolean instead of
+// panicking.
+func (v Val) BinaryOK() (byte, []byte, bool) {
+	if v.t != bsontype.Binary {
+		return 0x00, nil, false
+	}
+	bin := v.primitive.(primitive.Binary)
+	return bin.Subtype, bin.Data, true
+}
+
+// Undefined returns the BSON undefined the Value represents. It panics if the value is a BSON type
+// other than undefined.
+func (v Val) Undefined() {
+	if v.t != bsontype.Undefined {
+		panic(ElementTypeError{"bson.Value.Undefined", v.t})
+	}
+	return
+}
+
+// UndefinedOK is the same as Undefined, except it returns a boolean instead of
+// panicking.
+func (v Val) UndefinedOK() bool {
+	return v.t == bsontype.Undefined
+}
+
+// ObjectID returns the BSON ObjectID the Value represents. It panics if the value is a BSON type
+// other than ObjectID.
+func (v Val) ObjectID() primitive.ObjectID {
+	if v.t != bsontype.ObjectID {
+		panic(ElementTypeError{"bson.Value.ObjectID", v.t})
+	}
+	var oid primitive.ObjectID
+	copy(oid[:], v.bootstrap[:12])
+	return oid
+}
+
+// ObjectIDOK is the same as ObjectID, except it returns a boolean instead of
+// panicking.
+func (v Val) ObjectIDOK() (primitive.ObjectID, bool) {
+	if v.t != bsontype.ObjectID {
+		return primitive.ObjectID{}, false
+	}
+	var oid primitive.ObjectID
+	copy(oid[:], v.bootstrap[:12])
+	return oid, true
+}
+
+// Boolean returns the BSON boolean the Value represents. It panics if the value is a BSON type
+// other than boolean.
+func (v Val) Boolean() bool {
+	if v.t != bsontype.Boolean {
+		panic(ElementTypeError{"bson.Value.Boolean", v.t})
+	}
+	return v.bootstrap[0] == 0x01
+}
+
+// BooleanOK is the same as Boolean, except it returns a boolean instead of
+// panicking.
+func (v Val) BooleanOK() (bool, bool) {
+	if v.t != bsontype.Boolean {
+		return false, false
+	}
+	return v.bootstrap[0] == 0x01, true
+}
+
+// DateTime returns the BSON datetime the Value represents. It panics if the value is a BSON type
+// other than datetime.
+func (v Val) DateTime() int64 {
+	if v.t != bsontype.DateTime {
+		panic(ElementTypeError{"bson.Value.DateTime", v.t})
+	}
+	return v.i64()
+}
+
+// DateTimeOK is the same as DateTime, except it returns a boolean instead of
+// panicking.
+func (v Val) DateTimeOK() (int64, bool) {
+	if v.t != bsontype.DateTime {
+		return 0, false
+	}
+	return v.i64(), true
+}
+
+// Time returns the BSON datetime the Value represents as time.Time. It panics if the value is a BSON
+// type other than datetime.
+func (v Val) Time() time.Time {
+	if v.t != bsontype.DateTime {
+		panic(ElementTypeError{"bson.Value.Time", v.t})
+	}
+	i := v.i64()
+	return time.Unix(i/1000, i%1000*1000000)
+}
+
+// TimeOK is the same as Time, except it returns a boolean instead of
+// panicking.
+func (v Val) TimeOK() (time.Time, bool) {
+	if v.t != bsontype.DateTime {
+		return time.Time{}, false
+	}
+	i := v.i64()
+	return time.Unix(i/1000, i%1000*1000000), true
+}
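
A BSON datetime is an int64 count of milliseconds since the Unix epoch, which Time splits into seconds and nanoseconds. A standalone sketch of the same conversion:

```go
package main

import (
	"fmt"
	"time"
)

// msToTime mirrors Val.Time: split epoch milliseconds into seconds and
// nanoseconds for time.Unix.
func msToTime(ms int64) time.Time {
	return time.Unix(ms/1000, ms%1000*1000000)
}

func main() {
	fmt.Println(msToTime(1500000000123).UTC()) // 2017-07-14 02:40:00.123 +0000 UTC
}
```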
+
+// Null returns the BSON null the Value represents. It panics if the value is a BSON type
+// other than null.
+func (v Val) Null() {
+	if v.t != bsontype.Null && v.t != bsontype.Type(0) {
+		panic(ElementTypeError{"bson.Value.Null", v.t})
+	}
+	return
+}
+
+// NullOK is the same as Null, except it returns a boolean instead of
+// panicking.
+func (v Val) NullOK() bool {
+	return v.t == bsontype.Null || v.t == bsontype.Type(0)
+}
+
+// Regex returns the BSON regex the Value represents. It panics if the value is a BSON type
+// other than regex.
+func (v Val) Regex() (pattern, options string) {
+	if v.t != bsontype.Regex {
+		panic(ElementTypeError{"bson.Value.Regex", v.t})
+	}
+	regex := v.primitive.(primitive.Regex)
+	return regex.Pattern, regex.Options
+}
+
+// RegexOK is the same as Regex, except that it returns a boolean
+// instead of panicking.
+func (v Val) RegexOK() (pattern, options string, ok bool) {
+	if v.t != bsontype.Regex {
+		return "", "", false
+	}
+	regex := v.primitive.(primitive.Regex)
+	return regex.Pattern, regex.Options, true
+}
+
+// DBPointer returns the BSON dbpointer the Value represents. It panics if the value is a BSON type
+// other than dbpointer.
+func (v Val) DBPointer() (string, primitive.ObjectID) {
+	if v.t != bsontype.DBPointer {
+		panic(ElementTypeError{"bson.Value.DBPointer", v.t})
+	}
+	dbptr := v.primitive.(primitive.DBPointer)
+	return dbptr.DB, dbptr.Pointer
+}
+
+// DBPointerOK is the same as DBPointer, except that it returns a boolean
+// instead of panicking.
+func (v Val) DBPointerOK() (string, primitive.ObjectID, bool) {
+	if v.t != bsontype.DBPointer {
+		return "", primitive.ObjectID{}, false
+	}
+	dbptr := v.primitive.(primitive.DBPointer)
+	return dbptr.DB, dbptr.Pointer, true
+}
+
+// JavaScript returns the BSON JavaScript the Value represents. It panics if the value is a BSON type
+// other than JavaScript.
+func (v Val) JavaScript() string {
+	if v.t != bsontype.JavaScript {
+		panic(ElementTypeError{"bson.Value.JavaScript", v.t})
+	}
+	return v.string()
+}
+
+// JavaScriptOK is the same as JavaScript, except that it returns a boolean
+// instead of panicking.
+func (v Val) JavaScriptOK() (string, bool) {
+	if v.t != bsontype.JavaScript {
+		return "", false
+	}
+	return v.string(), true
+}
+
+// Symbol returns the BSON symbol the Value represents. It panics if the value is a BSON type
+// other than symbol.
+func (v Val) Symbol() string {
+	if v.t != bsontype.Symbol {
+		panic(ElementTypeError{"bson.Value.Symbol", v.t})
+	}
+	return v.string()
+}
+
+// SymbolOK is the same as Symbol, except that it returns a boolean
+// instead of panicking.
+func (v Val) SymbolOK() (string, bool) {
+	if v.t != bsontype.Symbol {
+		return "", false
+	}
+	return v.string(), true
+}
+
+// CodeWithScope returns the BSON code with scope value the Value represents. It panics if the
+// value is a BSON type other than code with scope.
+func (v Val) CodeWithScope() (string, Doc) {
+	if v.t != bsontype.CodeWithScope {
+		panic(ElementTypeError{"bson.Value.CodeWithScope", v.t})
+	}
+	cws := v.primitive.(primitive.CodeWithScope)
+	return string(cws.Code), cws.Scope.(Doc)
+}
+
+// CodeWithScopeOK is the same as CodeWithScope,
+// except that it returns a boolean instead of panicking.
+func (v Val) CodeWithScopeOK() (string, Doc, bool) {
+	if v.t != bsontype.CodeWithScope {
+		return "", nil, false
+	}
+	cws := v.primitive.(primitive.CodeWithScope)
+	return string(cws.Code), cws.Scope.(Doc), true
+}
+
+// Int32 returns the BSON int32 the Value represents. It panics if the value is a BSON type
+// other than int32.
+func (v Val) Int32() int32 {
+	if v.t != bsontype.Int32 {
+		panic(ElementTypeError{"bson.Value.Int32", v.t})
+	}
+	return int32(v.bootstrap[0]) | int32(v.bootstrap[1])<<8 |
+		int32(v.bootstrap[2])<<16 | int32(v.bootstrap[3])<<24
+}
+
+// Int32OK is the same as Int32, except that it returns a boolean instead of
+// panicking.
+func (v Val) Int32OK() (int32, bool) {
+	if v.t != bsontype.Int32 {
+		return 0, false
+	}
+	return int32(v.bootstrap[0]) | int32(v.bootstrap[1])<<8 |
+			int32(v.bootstrap[2])<<16 | int32(v.bootstrap[3])<<24,
+		true
+}
+
+// Timestamp returns the BSON timestamp the Value represents. It panics if the value is a
+// BSON type other than timestamp.
+func (v Val) Timestamp() (t, i uint32) {
+	if v.t != bsontype.Timestamp {
+		panic(ElementTypeError{"bson.Value.Timestamp", v.t})
+	}
+	return uint32(v.bootstrap[4]) | uint32(v.bootstrap[5])<<8 |
+			uint32(v.bootstrap[6])<<16 | uint32(v.bootstrap[7])<<24,
+		uint32(v.bootstrap[0]) | uint32(v.bootstrap[1])<<8 |
+			uint32(v.bootstrap[2])<<16 | uint32(v.bootstrap[3])<<24
+}
+
+// TimestampOK is the same as Timestamp, except that it returns a boolean
+// instead of panicking.
+func (v Val) TimestampOK() (t uint32, i uint32, ok bool) {
+	if v.t != bsontype.Timestamp {
+		return 0, 0, false
+	}
+	return uint32(v.bootstrap[4]) | uint32(v.bootstrap[5])<<8 |
+			uint32(v.bootstrap[6])<<16 | uint32(v.bootstrap[7])<<24,
+		uint32(v.bootstrap[0]) | uint32(v.bootstrap[1])<<8 |
+			uint32(v.bootstrap[2])<<16 | uint32(v.bootstrap[3])<<24,
+		true
+}
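
The timestamp accessors decode the bootstrap array by hand: the increment i lives in bytes 0-3 and the seconds t in bytes 4-7, both little-endian, matching the BSON wire layout. The same decoding with encoding/binary, on a hypothetical bootstrap array:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Hypothetical bootstrap contents: increment in bytes 0-3,
	// seconds in bytes 4-7, both little-endian.
	var bootstrap [15]byte
	binary.LittleEndian.PutUint32(bootstrap[0:4], 42)         // i
	binary.LittleEndian.PutUint32(bootstrap[4:8], 1500000000) // t

	t := binary.LittleEndian.Uint32(bootstrap[4:8])
	i := binary.LittleEndian.Uint32(bootstrap[0:4])
	fmt.Println(t, i) // 1500000000 42
}
```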
+
+// Int64 returns the BSON int64 the Value represents. It panics if the value is a BSON type
+// other than int64.
+func (v Val) Int64() int64 {
+	if v.t != bsontype.Int64 {
+		panic(ElementTypeError{"bson.Value.Int64", v.t})
+	}
+	return v.i64()
+}
+
+// Int64OK is the same as Int64, except that it returns a boolean instead of
+// panicking.
+func (v Val) Int64OK() (int64, bool) {
+	if v.t != bsontype.Int64 {
+		return 0, false
+	}
+	return v.i64(), true
+}
+
+// Decimal128 returns the BSON decimal128 value the Value represents. It panics if the value is a
+// BSON type other than decimal128.
+func (v Val) Decimal128() primitive.Decimal128 {
+	if v.t != bsontype.Decimal128 {
+		panic(ElementTypeError{"bson.Value.Decimal128", v.t})
+	}
+	return v.primitive.(primitive.Decimal128)
+}
+
+// Decimal128OK is the same as Decimal128, except that it returns a boolean
+// instead of panicking.
+func (v Val) Decimal128OK() (primitive.Decimal128, bool) {
+	if v.t != bsontype.Decimal128 {
+		return primitive.Decimal128{}, false
+	}
+	return v.primitive.(primitive.Decimal128), true
+}
+
+// MinKey returns the BSON minkey the Value represents. It panics if the value is a BSON type
+// other than minkey.
+func (v Val) MinKey() {
+	if v.t != bsontype.MinKey {
+		panic(ElementTypeError{"bson.Value.MinKey", v.t})
+	}
+	return
+}
+
+// MinKeyOK is the same as MinKey, except it returns a boolean instead of
+// panicking.
+func (v Val) MinKeyOK() bool {
+	return v.t == bsontype.MinKey
+}
+
+// MaxKey returns the BSON maxkey the Value represents. It panics if the value is a BSON type
+// other than maxkey.
+func (v Val) MaxKey() {
+	if v.t != bsontype.MaxKey {
+		panic(ElementTypeError{"bson.Value.MaxKey", v.t})
+	}
+	return
+}
+
+// MaxKeyOK is the same as MaxKey, except it returns a boolean instead of
+// panicking.
+func (v Val) MaxKeyOK() bool {
+	return v.t == bsontype.MaxKey
+}
+
+// Equal compares v to v2 and returns true if they are equal. Unknown BSON types are
+// never equal. Two empty values are equal.
+func (v Val) Equal(v2 Val) bool {
+	if v.Type() != v2.Type() {
+		return false
+	}
+	if v.IsZero() && v2.IsZero() {
+		return true
+	}
+
+	switch v.Type() {
+	case bsontype.Double, bsontype.DateTime, bsontype.Timestamp, bsontype.Int64:
+		return bytes.Equal(v.bootstrap[0:8], v2.bootstrap[0:8])
+	case bsontype.String:
+		return v.string() == v2.string()
+	case bsontype.EmbeddedDocument:
+		return v.equalDocs(v2)
+	case bsontype.Array:
+		return v.Array().Equal(v2.Array())
+	case bsontype.Binary:
+		return v.primitive.(primitive.Binary).Equal(v2.primitive.(primitive.Binary))
+	case bsontype.Undefined:
+		return true
+	case bsontype.ObjectID:
+		return bytes.Equal(v.bootstrap[0:12], v2.bootstrap[0:12])
+	case bsontype.Boolean:
+		return v.bootstrap[0] == v2.bootstrap[0]
+	case bsontype.Null:
+		return true
+	case bsontype.Regex:
+		return v.primitive.(primitive.Regex).Equal(v2.primitive.(primitive.Regex))
+	case bsontype.DBPointer:
+		return v.primitive.(primitive.DBPointer).Equal(v2.primitive.(primitive.DBPointer))
+	case bsontype.JavaScript:
+		return v.JavaScript() == v2.JavaScript()
+	case bsontype.Symbol:
+		return v.Symbol() == v2.Symbol()
+	case bsontype.CodeWithScope:
+		code1, scope1 := v.primitive.(primitive.CodeWithScope).Code, v.primitive.(primitive.CodeWithScope).Scope
+		code2, scope2 := v2.primitive.(primitive.CodeWithScope).Code, v2.primitive.(primitive.CodeWithScope).Scope
+		return code1 == code2 && v.equalInterfaceDocs(scope1, scope2)
+	case bsontype.Int32:
+		return v.Int32() == v2.Int32()
+	case bsontype.Decimal128:
+		h, l := v.Decimal128().GetBytes()
+		h2, l2 := v2.Decimal128().GetBytes()
+		return h == h2 && l == l2
+	case bsontype.MinKey:
+		return true
+	case bsontype.MaxKey:
+		return true
+	default:
+		return false
+	}
+}
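
Equal short-circuits on the BSON type before comparing payloads, so numerically identical values of different types are never equal. A brief sketch, assuming the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/bsonx"
)

func main() {
	a := bsonx.Int32(7)
	b := bsonx.Int32(7)
	c := bsonx.Int64(7)

	fmt.Println(a.Equal(b)) // true: same type, same bytes
	fmt.Println(a.Equal(c)) // false: types differ, payload never compared
}
```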
+
+func (v Val) equalDocs(v2 Val) bool {
+	_, ok1 := v.primitive.(MDoc)
+	_, ok2 := v2.primitive.(MDoc)
+	if ok1 || ok2 {
+		return v.asMDoc().Equal(v2.asMDoc())
+	}
+	return v.asDoc().Equal(v2.asDoc())
+}
+
+func (Val) equalInterfaceDocs(i, i2 interface{}) bool {
+	switch d := i.(type) {
+	case MDoc:
+		d2, ok := i2.(IDoc)
+		if !ok {
+			return false
+		}
+		return d.Equal(d2)
+	case Doc:
+		d2, ok := i2.(IDoc)
+		if !ok {
+			return false
+		}
+		return d.Equal(d2)
+	case nil:
+		return i2 == nil
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/README.md b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/README.md
new file mode 100644
index 0000000..7588f4a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/README.md
@@ -0,0 +1,11 @@
+MongoDB Go Driver Low-Level Driver Library
+==========================================
+The packages within this library allow users to build applications using a low-level driver
+interface. Knowledge of the internals of a MongoDB driver is assumed, so this library contains
+advanced features. The aim of this library is to provide an easy-to-use, high-performance
+implementation of a low-level driver.
+
+This library's API is experimental and subject to change. Packages may be changed or removed without
+notice. These APIs are not stable and do not guarantee backward compatibility.
+
+**THIS LIBRARY IS EXPERIMENTAL AND SUBJECT TO CHANGE.**
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/abort_transaction.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/abort_transaction.go
new file mode 100644
index 0000000..4798b05
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/abort_transaction.go
@@ -0,0 +1,71 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// AbortTransaction handles the full cycle dispatch and execution of aborting a transaction
+// against the provided topology.
+func AbortTransaction(
+	ctx context.Context,
+	cmd command.AbortTransaction,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+) (result.TransactionResult, error) {
+	res, err := abortTransaction(ctx, cmd, topo, selector, nil)
+	if cerr, ok := err.(command.Error); ok && err != nil {
+		// Retry if appropriate
+		if cerr.Retryable() {
+			res, err = abortTransaction(ctx, cmd, topo, selector, cerr)
+		}
+	}
+	return res, err
+}
+
+func abortTransaction(
+	ctx context.Context,
+	cmd command.AbortTransaction,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	oldErr error,
+) (result.TransactionResult, error) {
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		// If retrying server selection, return the original error if it fails
+		if oldErr != nil {
+			return result.TransactionResult{}, oldErr
+		}
+		return result.TransactionResult{}, err
+	}
+
+	desc := ss.Description()
+
+	if oldErr != nil && (!topo.SupportsSessions() || !description.SessionsSupported(desc.WireVersion)) {
+		// Assuming we are retrying (oldErr != nil),
+		// if server doesn't support retryable writes, return the original error
+		// Conditions for retry write support are the same as that of sessions
+		return result.TransactionResult{}, oldErr
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.TransactionResult{}, oldErr
+		}
+		return result.TransactionResult{}, err
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
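
The wrapper retries exactly once when the first attempt fails with a retryable command error, and the helper deliberately returns the original error if the retry itself fails before reaching the server. A self-contained sketch of that retry-once shape (retryableErr and runOnceWithRetry are illustrative stand-ins, not driver types):

```go
package main

import "fmt"

// retryableErr stands in for a command error whose Retryable method
// reports whether one more attempt is worthwhile.
type retryableErr struct{ msg string }

func (e retryableErr) Error() string   { return e.msg }
func (e retryableErr) Retryable() bool { return true }

// runOnceWithRetry mirrors the wrapper above: run the operation once and,
// if the failure is retryable, run it exactly one more time, passing the
// original error so the second attempt can fall back to it.
func runOnceWithRetry(op func(oldErr error) error) error {
	err := op(nil)
	if cerr, ok := err.(interface{ Retryable() bool }); ok && cerr.Retryable() {
		err = op(err)
	}
	return err
}

func main() {
	attempts := 0
	err := runOnceWithRetry(func(oldErr error) error {
		attempts++
		if attempts == 1 {
			return retryableErr{"transient network error"}
		}
		return nil
	})
	fmt.Println(attempts, err) // 2 <nil>
}
```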
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/aggregate.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/aggregate.go
new file mode 100644
index 0000000..fd6c949
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/aggregate.go
@@ -0,0 +1,207 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Aggregate handles the full cycle dispatch and execution of an aggregate command against the provided
+// topology.
+func Aggregate(
+	ctx context.Context,
+	cmd command.Aggregate,
+	topo *topology.Topology,
+	readSelector, writeSelector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	registry *bsoncodec.Registry,
+	opts ...*options.AggregateOptions,
+) (*BatchCursor, error) {
+
+	dollarOut := cmd.HasDollarOut()
+
+	var ss *topology.SelectedServer
+	var err error
+	if dollarOut {
+		ss, err = topo.SelectServer(ctx, writeSelector)
+	} else {
+		ss, err = topo.SelectServer(ctx, readSelector)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	defer conn.Close()
+
+	rp, err := getReadPrefBasedOnTransaction(cmd.ReadPref, cmd.Session)
+	if err != nil {
+		return nil, err
+	}
+	cmd.ReadPref = rp
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	aggOpts := options.MergeAggregateOptions(opts...)
+
+	if aggOpts.AllowDiskUse != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"allowDiskUse", bsonx.Boolean(*aggOpts.AllowDiskUse)})
+	}
+	var batchSize int32
+	if aggOpts.BatchSize != nil {
+		elem := bsonx.Elem{"batchSize", bsonx.Int32(*aggOpts.BatchSize)}
+		cmd.Opts = append(cmd.Opts, elem)
+		cmd.CursorOpts = append(cmd.CursorOpts, elem)
+		batchSize = *aggOpts.BatchSize
+	}
+	if aggOpts.BypassDocumentValidation != nil && desc.WireVersion.Includes(4) {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"bypassDocumentValidation", bsonx.Boolean(*aggOpts.BypassDocumentValidation)})
+	}
+	if aggOpts.Collation != nil {
+		if desc.WireVersion.Max < 5 {
+			return nil, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(aggOpts.Collation.ToDocument())})
+	}
+	if aggOpts.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*aggOpts.MaxTime / time.Millisecond))})
+	}
+	if aggOpts.MaxAwaitTime != nil {
+		// specified as maxTimeMS on getMore commands
+		cmd.CursorOpts = append(cmd.CursorOpts, bsonx.Elem{
+			"maxTimeMS", bsonx.Int64(int64(*aggOpts.MaxAwaitTime / time.Millisecond)),
+		})
+	}
+	if aggOpts.Comment != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"comment", bsonx.String(*aggOpts.Comment)})
+	}
+	if aggOpts.Hint != nil {
+		hintElem, err := interfaceToElement("hint", aggOpts.Hint, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.Opts = append(cmd.Opts, hintElem)
+	}
+
+	res, err := cmd.RoundTrip(ctx, desc, conn)
+	if err != nil {
+		closeImplicitSession(cmd.Session)
+		return nil, err
+	}
+
+	if desc.WireVersion.Max < 4 {
+		return buildLegacyCommandBatchCursor(res, batchSize, ss.Server)
+	}
+
+	return NewBatchCursor(bsoncore.Document(res), cmd.Session, cmd.Clock, ss.Server, cmd.CursorOpts...)
+}
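
Each aggregate option is appended as a BSON element only when its pointer is set, and durations are converted to whole milliseconds for maxTimeMS. A condensed sketch of that mapping, reusing the bsonx element literals seen above:

```go
package main

import (
	"fmt"
	"time"

	"github.com/mongodb/mongo-go-driver/x/bsonx"
)

func main() {
	var opts bsonx.Doc
	allowDiskUse := true
	maxTime := 2 * time.Second

	// Unset options (nil pointers in AggregateOptions) append nothing;
	// set ones become one element each.
	opts = append(opts, bsonx.Elem{"allowDiskUse", bsonx.Boolean(allowDiskUse)})
	opts = append(opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(maxTime / time.Millisecond))})

	fmt.Println(opts[1].Key, opts[1].Value.Int64()) // maxTimeMS 2000
}
```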
+
+func buildLegacyCommandBatchCursor(rdr bson.Raw, batchSize int32, server *topology.Server) (*BatchCursor, error) {
+	firstBatchDocs, ns, cursorID, err := getCursorValues(rdr)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewLegacyBatchCursor(ns, cursorID, firstBatchDocs, 0, batchSize, server)
+}
+
+// get the firstBatch, cursor ID, and namespace from a bson.Raw
+//
+// TODO(GODRIVER-617): Change the documents return value into []bsoncore.Document.
+func getCursorValues(result bson.Raw) ([]bson.Raw, command.Namespace, int64, error) {
+	cur, err := result.LookupErr("cursor")
+	if err != nil {
+		return nil, command.Namespace{}, 0, err
+	}
+	if cur.Type != bson.TypeEmbeddedDocument {
+		return nil, command.Namespace{}, 0, fmt.Errorf("cursor should be an embedded document but it is a BSON %s", cur.Type)
+	}
+
+	elems, err := cur.Document().Elements()
+	if err != nil {
+		return nil, command.Namespace{}, 0, err
+	}
+
+	var ok bool
+	var batch []bson.Raw
+	var namespace command.Namespace
+	var cursorID int64
+
+	for _, elem := range elems {
+		switch elem.Key() {
+		case "firstBatch":
+			arr, ok := elem.Value().ArrayOK()
+			if !ok {
+				return nil, command.Namespace{}, 0, fmt.Errorf("firstBatch should be an array but it is a BSON %s", elem.Value().Type)
+			}
+
+			vals, err := arr.Values()
+			if err != nil {
+				return nil, command.Namespace{}, 0, err
+			}
+
+			for _, val := range vals {
+				if val.Type != bsontype.EmbeddedDocument {
+					return nil, command.Namespace{}, 0, fmt.Errorf("element of cursor batch is not a document, but at %s", val.Type)
+				}
+				batch = append(batch, val.Value)
+			}
+		case "ns":
+			if elem.Value().Type != bson.TypeString {
+				return nil, command.Namespace{}, 0, fmt.Errorf("namespace should be a string but it is a BSON %s", elem.Value().Type)
+			}
+			namespace = command.ParseNamespace(elem.Value().StringValue())
+			err = namespace.Validate()
+			if err != nil {
+				return nil, command.Namespace{}, 0, err
+			}
+		case "id":
+			cursorID, ok = elem.Value().Int64OK()
+			if !ok {
+				return nil, command.Namespace{}, 0, fmt.Errorf("id should be an int64 but it is a BSON %s", elem.Value().Type)
+			}
+		}
+	}
+
+	return batch, namespace, cursorID, nil
+}
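
The "ns" field is a single "<db>.<collection>" string, and a collection name may itself contain dots, so only the first dot separates database from collection. A hypothetical stand-in for command.ParseNamespace showing that split:

```go
package main

import (
	"fmt"
	"strings"
)

// splitNamespace is an illustrative stand-in for command.ParseNamespace:
// split "<db>.<collection>" on the first dot only.
func splitNamespace(ns string) (db, coll string) {
	idx := strings.Index(ns, ".")
	if idx == -1 {
		return ns, ""
	}
	return ns[:idx], ns[idx+1:]
}

func main() {
	db, coll := splitNamespace("mydb.my.coll")
	fmt.Println(db, coll) // mydb my.coll
}
```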
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/auth.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/auth.go
new file mode 100644
index 0000000..2ea1bfc
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/auth.go
@@ -0,0 +1,169 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// AuthenticatorFactory constructs an authenticator.
+type AuthenticatorFactory func(cred *Cred) (Authenticator, error)
+
+var authFactories = make(map[string]AuthenticatorFactory)
+
+func init() {
+	RegisterAuthenticatorFactory("", newDefaultAuthenticator)
+	RegisterAuthenticatorFactory(SCRAMSHA1, newScramSHA1Authenticator)
+	RegisterAuthenticatorFactory(SCRAMSHA256, newScramSHA256Authenticator)
+	RegisterAuthenticatorFactory(MONGODBCR, newMongoDBCRAuthenticator)
+	RegisterAuthenticatorFactory(PLAIN, newPlainAuthenticator)
+	RegisterAuthenticatorFactory(GSSAPI, newGSSAPIAuthenticator)
+	RegisterAuthenticatorFactory(MongoDBX509, newMongoDBX509Authenticator)
+}
+
+// CreateAuthenticator creates an authenticator.
+func CreateAuthenticator(name string, cred *Cred) (Authenticator, error) {
+	if f, ok := authFactories[name]; ok {
+		return f(cred)
+	}
+
+	return nil, newAuthError(fmt.Sprintf("unknown authenticator: %s", name), nil)
+}
+
+// RegisterAuthenticatorFactory registers the authenticator factory.
+func RegisterAuthenticatorFactory(name string, factory AuthenticatorFactory) {
+	authFactories[name] = factory
+}
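
Mechanism names map to factories, so new mechanisms can be plugged in without touching CreateAuthenticator. A sketch of registering a custom (no-op) mechanism, assuming the vendored import paths:

```go
package main

import (
	"context"

	"github.com/mongodb/mongo-go-driver/x/mongo/driver/auth"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// noopAuthenticator is a hypothetical mechanism that accepts every connection.
type noopAuthenticator struct{}

func (noopAuthenticator) Auth(context.Context, description.Server, wiremessage.ReadWriter) error {
	return nil
}

func main() {
	// Register under a mechanism name, then look it up by that name.
	auth.RegisterAuthenticatorFactory("NOOP", func(*auth.Cred) (auth.Authenticator, error) {
		return noopAuthenticator{}, nil
	})

	a, err := auth.CreateAuthenticator("NOOP", &auth.Cred{})
	_, _ = a, err
}
```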
+
+// // Opener returns a connection opener that will open and authenticate the connection.
+// func Opener(opener conn.Opener, authenticator Authenticator) conn.Opener {
+// 	return func(ctx context.Context, addr model.Addr, opts ...conn.Option) (conn.Connection, error) {
+// 		return NewConnection(ctx, authenticator, opener, addr, opts...)
+// 	}
+// }
+//
+// // NewConnection opens a connection and authenticates it.
+// func NewConnection(ctx context.Context, authenticator Authenticator, opener conn.Opener, addr model.Addr, opts ...conn.Option) (conn.Connection, error) {
+// 	conn, err := opener(ctx, addr, opts...)
+// 	if err != nil {
+// 		if conn != nil {
+// 			// Ignore any error that occurs since we're already returning a different one.
+// 			_ = conn.Close()
+// 		}
+// 		return nil, err
+// 	}
+//
+// 	err = authenticator.Auth(ctx, conn)
+// 	if err != nil {
+// 		// Ignore any error that occurs since we're already returning a different one.
+// 		_ = conn.Close()
+// 		return nil, err
+// 	}
+//
+// 	return conn, nil
+// }
+
+// Configurer creates a connection configurer for the given authenticator.
+//
+// TODO(skriptble): Fully implement this once this package is moved over to the new connection type.
+// func Configurer(configurer connection.Configurer, authenticator Authenticator) connection.Configurer {
+// 	return connection.ConfigurerFunc(func(ctx context.Context, conn connection.Connection) (connection.Connection, error) {
+// 		err := authenticator.Auth(ctx, conn)
+// 		if err != nil {
+// 			conn.Close()
+// 			return nil, err
+// 		}
+// 		if configurer == nil {
+// 			return conn, nil
+// 		}
+// 		return configurer.Configure(ctx, conn)
+// 	})
+// }
+
+// HandshakeOptions packages options that can be passed to the Handshaker()
+// function.  DBUser is optional but must be of the form <dbname.username>;
+// if non-empty, then the connection will do SASL mechanism negotiation.
+type HandshakeOptions struct {
+	AppName       string
+	Authenticator Authenticator
+	Compressors   []string
+	DBUser        string
+}
+
+// Handshaker creates a connection handshaker for the given authenticator.
+func Handshaker(h connection.Handshaker, options *HandshakeOptions) connection.Handshaker {
+	return connection.HandshakerFunc(func(ctx context.Context, addr address.Address, rw wiremessage.ReadWriter) (description.Server, error) {
+		desc, err := (&command.Handshake{
+			Client:             command.ClientDoc(options.AppName),
+			Compressors:        options.Compressors,
+			SaslSupportedMechs: options.DBUser,
+		}).Handshake(ctx, addr, rw)
+
+		if err != nil {
+			return description.Server{}, newAuthError("handshake failure", err)
+		}
+
+		err = options.Authenticator.Auth(ctx, desc, rw)
+		if err != nil {
+			return description.Server{}, newAuthError("auth error", err)
+		}
+		if h == nil {
+			return desc, nil
+		}
+		return h.Handshake(ctx, addr, rw)
+	})
+}
+
+// Authenticator handles authenticating a connection.
+type Authenticator interface {
+	// Auth authenticates the connection.
+	Auth(context.Context, description.Server, wiremessage.ReadWriter) error
+}
+
+func newAuthError(msg string, inner error) error {
+	return &Error{
+		message: msg,
+		inner:   inner,
+	}
+}
+
+func newError(err error, mech string) error {
+	return &Error{
+		message: fmt.Sprintf("unable to authenticate using mechanism \"%s\"", mech),
+		inner:   err,
+	}
+}
+
+// Error is an error that occurred during authentication.
+type Error struct {
+	message string
+	inner   error
+}
+
+func (e *Error) Error() string {
+	if e.inner == nil {
+		return e.message
+	}
+	return fmt.Sprintf("%s: %s", e.message, e.inner)
+}
+
+// Inner returns the wrapped error.
+func (e *Error) Inner() error {
+	return e.inner
+}
+
+// Message returns the message.
+func (e *Error) Message() string {
+	return e.message
+}
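
Since every failure funnels through newAuthError or newError, callers can type-assert back to *Error to separate the driver-side message from the wrapped cause. For example:

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/mongo/driver/auth"
)

func main() {
	// CreateAuthenticator wraps unknown mechanisms in an *auth.Error.
	_, err := auth.CreateAuthenticator("NO-SUCH-MECH", &auth.Cred{})
	if aerr, ok := err.(*auth.Error); ok {
		fmt.Println(aerr.Message()) // unknown authenticator: NO-SUCH-MECH
		fmt.Println(aerr.Inner())   // <nil>
	}
}
```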
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/cred.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/cred.go
new file mode 100644
index 0000000..7b2b8f1
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/cred.go
@@ -0,0 +1,16 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+// Cred is a user's credential.
+type Cred struct {
+	Source      string
+	Username    string
+	Password    string
+	PasswordSet bool
+	Props       map[string]string
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/default.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/default.go
new file mode 100644
index 0000000..52d07e9
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/default.go
@@ -0,0 +1,67 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+func newDefaultAuthenticator(cred *Cred) (Authenticator, error) {
+	return &DefaultAuthenticator{
+		Cred: cred,
+	}, nil
+}
+
+// DefaultAuthenticator uses SCRAM-SHA-1 or MONGODB-CR depending
+// on the server version.
+type DefaultAuthenticator struct {
+	Cred *Cred
+}
+
+// Auth authenticates the connection.
+func (a *DefaultAuthenticator) Auth(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter) error {
+	var actual Authenticator
+	var err error
+
+	switch chooseAuthMechanism(desc) {
+	case SCRAMSHA256:
+		actual, err = newScramSHA256Authenticator(a.Cred)
+	case SCRAMSHA1:
+		actual, err = newScramSHA1Authenticator(a.Cred)
+	default:
+		actual, err = newMongoDBCRAuthenticator(a.Cred)
+	}
+
+	if err != nil {
+		return newAuthError("error creating authenticator", err)
+	}
+
+	return actual.Auth(ctx, desc, rw)
+}
+
+// If a server advertises a list of supported mechanisms, we choose
+// SCRAM-SHA-256 if it is included and otherwise must use SCRAM-SHA-1.
+// If no list is advertised, we decide based on the wire version.
+func chooseAuthMechanism(desc description.Server) string {
+	if desc.SaslSupportedMechs != nil {
+		for _, v := range desc.SaslSupportedMechs {
+			if v == SCRAMSHA256 {
+				return v
+			}
+		}
+		return SCRAMSHA1
+	}
+
+	if err := description.ScramSHA1Supported(desc.WireVersion); err == nil {
+		return SCRAMSHA1
+	}
+
+	return MONGODBCR
+}
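
The selection logic is small enough to restate: an advertised mechanism list wins (preferring SCRAM-SHA-256), and only in its absence does the wire version decide. A standalone mirror of that decision, with the server inputs as plain parameters:

```go
package main

import "fmt"

// chooseMechanism mirrors chooseAuthMechanism above: prefer SCRAM-SHA-256
// from an advertised list, fall back to SCRAM-SHA-1 when a list exists,
// and otherwise decide from wire-version support.
func chooseMechanism(saslSupportedMechs []string, scramSHA1Supported bool) string {
	if saslSupportedMechs != nil {
		for _, m := range saslSupportedMechs {
			if m == "SCRAM-SHA-256" {
				return m
			}
		}
		return "SCRAM-SHA-1"
	}
	if scramSHA1Supported {
		return "SCRAM-SHA-1"
	}
	return "MONGODB-CR"
}

func main() {
	fmt.Println(chooseMechanism([]string{"SCRAM-SHA-1", "SCRAM-SHA-256"}, true)) // SCRAM-SHA-256
	fmt.Println(chooseMechanism(nil, false))                                     // MONGODB-CR
}
```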
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/doc.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/doc.go
new file mode 100644
index 0000000..9db65cf
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/doc.go
@@ -0,0 +1,23 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package auth is not for public use.
+//
+// The API for packages in the 'private' directory has no stability
+// guarantee.
+//
+// The packages within the 'private' directory would normally be put into an
+// 'internal' directory to prohibit their use outside the 'mongo' directory.
+// However, some MongoDB tools require very low-level access to the building
+// blocks of a driver, so we have placed them under 'private' to allow these
+// packages to be imported by projects that need them.
+//
+// These package APIs may be modified in backwards-incompatible ways at any
+// time.
+//
+// You are strongly discouraged from directly using any packages
+// under 'private'.
+package auth
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi.go
new file mode 100644
index 0000000..f324957
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi.go
@@ -0,0 +1,52 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build gssapi
+//+build windows linux darwin
+
+package auth
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// GSSAPI is the mechanism name for GSSAPI.
+const GSSAPI = "GSSAPI"
+
+func newGSSAPIAuthenticator(cred *Cred) (Authenticator, error) {
+	if cred.Source != "" && cred.Source != "$external" {
+		return nil, newAuthError("GSSAPI source must be empty or $external", nil)
+	}
+
+	return &GSSAPIAuthenticator{
+		Username:    cred.Username,
+		Password:    cred.Password,
+		PasswordSet: cred.PasswordSet,
+		Props:       cred.Props,
+	}, nil
+}
+
+// GSSAPIAuthenticator uses the GSSAPI algorithm over SASL to authenticate a connection.
+type GSSAPIAuthenticator struct {
+	Username    string
+	Password    string
+	PasswordSet bool
+	Props       map[string]string
+}
+
+// Auth authenticates the connection.
+func (a *GSSAPIAuthenticator) Auth(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter) error {
+	client, err := gssapi.New(desc.Addr.String(), a.Username, a.Password, a.PasswordSet, a.Props)
+
+	if err != nil {
+		return newAuthError("error creating gssapi", err)
+	}
+	return ConductSaslConversation(ctx, desc, rw, "$external", client)
+}
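
ConductSaslConversation drives the client through Start, repeated Next calls, and Completed. A toy version of that loop with a one-step client, to show the protocol shape (all names here are illustrative, not driver API):

```go
package main

import "fmt"

// saslClient is the shape the conversation loop needs: Start yields the
// first payload, Next answers each server challenge, Completed ends the loop.
type saslClient interface {
	Start() (mech string, payload []byte, err error)
	Next(challenge []byte) ([]byte, error)
	Completed() bool
}

func conduct(client saslClient, server func([]byte) []byte) error {
	_, payload, err := client.Start()
	if err != nil {
		return err
	}
	for !client.Completed() {
		challenge := server(payload)
		if payload, err = client.Next(challenge); err != nil {
			return err
		}
	}
	return nil
}

type oneStepClient struct{ done bool }

func (c *oneStepClient) Start() (string, []byte, error) { return "TOY", []byte("hello"), nil }
func (c *oneStepClient) Next([]byte) ([]byte, error)    { c.done = true; return nil, nil }
func (c *oneStepClient) Completed() bool                { return c.done }

func main() {
	err := conduct(&oneStepClient{}, func(in []byte) []byte { return in })
	fmt.Println("conversation complete:", err == nil) // conversation complete: true
}
```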
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi_not_enabled.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi_not_enabled.go
new file mode 100644
index 0000000..d88b764
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi_not_enabled.go
@@ -0,0 +1,16 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build !gssapi
+
+package auth
+
+// GSSAPI is the mechanism name for GSSAPI.
+const GSSAPI = "GSSAPI"
+
+func newGSSAPIAuthenticator(cred *Cred) (Authenticator, error) {
+	return nil, newAuthError("GSSAPI support not enabled during build (-tags gssapi)", nil)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi_not_supported.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi_not_supported.go
new file mode 100644
index 0000000..55caa28
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi_not_supported.go
@@ -0,0 +1,21 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build gssapi,!windows,!linux,!darwin
+
+package auth
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// GSSAPI is the mechanism name for GSSAPI.
+const GSSAPI = "GSSAPI"
+
+func newGSSAPIAuthenticator(cred *Cred) (Authenticator, error) {
+	return nil, newAuthError(fmt.Sprintf("GSSAPI is not supported on %s", runtime.GOOS), nil)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss.go
new file mode 100644
index 0000000..366ab7b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss.go
@@ -0,0 +1,168 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build gssapi
+//+build linux darwin
+
+package gssapi
+
+/*
+#cgo linux CFLAGS: -DGOOS_linux
+#cgo linux LDFLAGS: -lgssapi_krb5 -lkrb5
+#cgo darwin CFLAGS: -DGOOS_darwin
+#cgo darwin LDFLAGS: -framework GSS
+#include "gss_wrapper.h"
+*/
+import "C"
+import (
+	"fmt"
+	"net"
+	"runtime"
+	"strings"
+	"unsafe"
+)
+
+// New creates a new SaslClient.
+func New(target, username, password string, passwordSet bool, props map[string]string) (*SaslClient, error) {
+	serviceName := "mongodb"
+
+	for key, value := range props {
+		switch strings.ToUpper(key) {
+		case "CANONICALIZE_HOST_NAME":
+			return nil, fmt.Errorf("CANONICALIZE_HOST_NAME is not supported when using gssapi on %s", runtime.GOOS)
+		case "SERVICE_REALM":
+			return nil, fmt.Errorf("SERVICE_REALM is not supported when using gssapi on %s", runtime.GOOS)
+		case "SERVICE_NAME":
+			serviceName = value
+		default:
+			return nil, fmt.Errorf("unknown mechanism property %s", key)
+		}
+	}
+
+	hostname, _, err := net.SplitHostPort(target)
+	if err != nil {
+		return nil, fmt.Errorf("invalid endpoint (%s) specified: %s", target, err)
+	}
+
+	servicePrincipalName := fmt.Sprintf("%s@%s", serviceName, hostname)
+
+	return &SaslClient{
+		servicePrincipalName: servicePrincipalName,
+		username:             username,
+		password:             password,
+		passwordSet:          passwordSet,
+	}, nil
+}
+
+// SaslClient is a GSSAPI SASL client backed by the platform's GSSAPI
+// libraries via cgo.
+type SaslClient struct {
+	servicePrincipalName string
+	username             string
+	password             string
+	passwordSet          bool
+
+	// state
+	state           C.gssapi_client_state
+	contextComplete bool
+	done            bool
+}
+
+func (sc *SaslClient) Close() {
+	C.gssapi_client_destroy(&sc.state)
+}
+
+func (sc *SaslClient) Start() (string, []byte, error) {
+	const mechName = "GSSAPI"
+
+	cservicePrincipalName := C.CString(sc.servicePrincipalName)
+	defer C.free(unsafe.Pointer(cservicePrincipalName))
+	var cusername *C.char
+	var cpassword *C.char
+	if sc.username != "" {
+		cusername = C.CString(sc.username)
+		defer C.free(unsafe.Pointer(cusername))
+		if sc.passwordSet {
+			cpassword = C.CString(sc.password)
+			defer C.free(unsafe.Pointer(cpassword))
+		}
+	}
+	status := C.gssapi_client_init(&sc.state, cservicePrincipalName, cusername, cpassword)
+
+	if status != C.GSSAPI_OK {
+		return mechName, nil, sc.getError("unable to initialize client")
+	}
+
+	return mechName, nil, nil
+}
+
+func (sc *SaslClient) Next(challenge []byte) ([]byte, error) {
+
+	var buf unsafe.Pointer
+	var bufLen C.size_t
+	var outBuf unsafe.Pointer
+	var outBufLen C.size_t
+
+	if sc.contextComplete {
+		if sc.username == "" {
+			var cusername *C.char
+			status := C.gssapi_client_username(&sc.state, &cusername)
+			if status != C.GSSAPI_OK {
+				return nil, sc.getError("unable to acquire username")
+			}
+			defer C.free(unsafe.Pointer(cusername))
+			sc.username = C.GoString((*C.char)(unsafe.Pointer(cusername)))
+		}
+
+		bytes := append([]byte{1, 0, 0, 0}, []byte(sc.username)...)
+		buf = unsafe.Pointer(&bytes[0])
+		bufLen = C.size_t(len(bytes))
+		status := C.gssapi_client_wrap_msg(&sc.state, buf, bufLen, &outBuf, &outBufLen)
+		if status != C.GSSAPI_OK {
+			return nil, sc.getError("unable to wrap authz")
+		}
+
+		sc.done = true
+	} else {
+		if len(challenge) > 0 {
+			buf = unsafe.Pointer(&challenge[0])
+			bufLen = C.size_t(len(challenge))
+		}
+
+		status := C.gssapi_client_negotiate(&sc.state, buf, bufLen, &outBuf, &outBufLen)
+		switch status {
+		case C.GSSAPI_OK:
+			sc.contextComplete = true
+		case C.GSSAPI_CONTINUE:
+		default:
+			return nil, sc.getError("unable to negotiate with server")
+		}
+	}
+
+	if outBuf != nil {
+		defer C.free(outBuf)
+	}
+
+	return C.GoBytes(outBuf, C.int(outBufLen)), nil
+}
+
+func (sc *SaslClient) Completed() bool {
+	return sc.done
+}
+
+func (sc *SaslClient) getError(prefix string) error {
+	var desc *C.char
+
+	status := C.gssapi_error_desc(sc.state.maj_stat, sc.state.min_stat, &desc)
+	if status != C.GSSAPI_OK {
+		if desc != nil {
+			C.free(unsafe.Pointer(desc))
+		}
+
+		return fmt.Errorf("%s: (%v, %v)", prefix, sc.state.maj_stat, sc.state.min_stat)
+	}
+	defer C.free(unsafe.Pointer(desc))
+
+	return fmt.Errorf("%s: %v(%v,%v)", prefix, C.GoString(desc), int32(sc.state.maj_stat), int32(sc.state.min_stat))
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c
new file mode 100644
index 0000000..0ca591f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c
@@ -0,0 +1,248 @@
+//+build gssapi
+//+build linux darwin
+
+#include <string.h>
+#include <stdio.h>
+#include "gss_wrapper.h"
+
+OM_uint32 gssapi_canonicalize_name(
+    OM_uint32* minor_status, 
+    char *input_name, 
+    gss_OID input_name_type, 
+    gss_name_t *output_name
+)
+{
+    OM_uint32 major_status;
+    gss_name_t imported_name = GSS_C_NO_NAME;
+    gss_buffer_desc buffer = GSS_C_EMPTY_BUFFER;
+
+    buffer.value = input_name;
+    buffer.length = strlen(input_name);
+    major_status = gss_import_name(minor_status, &buffer, input_name_type, &imported_name);
+    if (GSS_ERROR(major_status)) {
+        return major_status;
+    }
+
+    major_status = gss_canonicalize_name(minor_status, imported_name, (gss_OID)gss_mech_krb5, output_name);
+    if (imported_name != GSS_C_NO_NAME) {
+        OM_uint32 ignored;
+        gss_release_name(&ignored, &imported_name);
+    }
+
+    return major_status;
+}
+
+int gssapi_error_desc(
+    OM_uint32 maj_stat, 
+    OM_uint32 min_stat, 
+    char **desc
+)
+{
+    OM_uint32 stat = maj_stat;
+    int stat_type = GSS_C_GSS_CODE;
+    if (min_stat != 0) {
+        stat = min_stat;
+        stat_type = GSS_C_MECH_CODE;
+    }
+
+    OM_uint32 local_maj_stat, local_min_stat;
+    OM_uint32 msg_ctx = 0;
+    gss_buffer_desc desc_buffer;
+    do
+    {
+        local_maj_stat = gss_display_status(
+            &local_min_stat,
+            stat,
+            stat_type,
+            GSS_C_NO_OID,
+            &msg_ctx,
+            &desc_buffer
+        );
+        if (GSS_ERROR(local_maj_stat)) {
+            return GSSAPI_ERROR;
+        }
+
+        if (*desc) {
+            free(*desc);
+        }
+
+        *desc = malloc(desc_buffer.length+1);
+        memcpy(*desc, desc_buffer.value, desc_buffer.length);
+        (*desc)[desc_buffer.length] = '\0';
+
+        gss_release_buffer(&local_min_stat, &desc_buffer);
+    }
+    while(msg_ctx != 0);
+
+    return GSSAPI_OK;
+}
+
+int gssapi_client_init(
+    gssapi_client_state *client,
+    char* spn,
+    char* username,
+    char* password
+)
+{
+    client->cred = GSS_C_NO_CREDENTIAL;
+    client->ctx = GSS_C_NO_CONTEXT;
+
+    client->maj_stat = gssapi_canonicalize_name(&client->min_stat, spn, GSS_C_NT_HOSTBASED_SERVICE, &client->spn);
+    if (GSS_ERROR(client->maj_stat)) {
+        return GSSAPI_ERROR;
+    }
+
+    if (username) {
+        gss_name_t name;
+        client->maj_stat = gssapi_canonicalize_name(&client->min_stat, username, GSS_C_NT_USER_NAME, &name);
+        if (GSS_ERROR(client->maj_stat)) {
+            return GSSAPI_ERROR;
+        }
+
+        if (password) {
+            gss_buffer_desc password_buffer;
+            password_buffer.value = password;
+            password_buffer.length = strlen(password);
+            client->maj_stat = gss_acquire_cred_with_password(&client->min_stat, name, &password_buffer, GSS_C_INDEFINITE, GSS_C_NO_OID_SET, GSS_C_INITIATE, &client->cred, NULL, NULL);
+        } else {
+            client->maj_stat = gss_acquire_cred(&client->min_stat, name, GSS_C_INDEFINITE, GSS_C_NO_OID_SET, GSS_C_INITIATE, &client->cred, NULL, NULL);
+        }
+
+        if (GSS_ERROR(client->maj_stat)) {
+            return GSSAPI_ERROR;
+        }
+
+        OM_uint32 ignored;
+        gss_release_name(&ignored, &name);
+    }
+
+    return GSSAPI_OK;
+}
+
+int gssapi_client_username(
+    gssapi_client_state *client,
+    char** username
+)
+{
+    OM_uint32 ignored;
+    gss_name_t name = GSS_C_NO_NAME;
+
+    client->maj_stat = gss_inquire_context(&client->min_stat, client->ctx, &name, NULL, NULL, NULL, NULL, NULL, NULL);
+    if (GSS_ERROR(client->maj_stat)) {
+        return GSSAPI_ERROR;
+    }
+
+    gss_buffer_desc name_buffer;
+    client->maj_stat = gss_display_name(&client->min_stat, name, &name_buffer, NULL);
+    if (GSS_ERROR(client->maj_stat)) {
+        gss_release_name(&ignored, &name);
+        return GSSAPI_ERROR;
+    }
+
+    *username = malloc(name_buffer.length+1);
+    memcpy(*username, name_buffer.value, name_buffer.length);
+    (*username)[name_buffer.length] = '\0';
+
+    gss_release_buffer(&ignored, &name_buffer);
+    gss_release_name(&ignored, &name);
+    return GSSAPI_OK;
+}
+
+int gssapi_client_negotiate(
+    gssapi_client_state *client,
+    void* input,
+    size_t input_length,
+    void** output,
+    size_t* output_length
+)
+{
+    gss_buffer_desc input_buffer = GSS_C_EMPTY_BUFFER;
+    gss_buffer_desc output_buffer = GSS_C_EMPTY_BUFFER;
+
+    if (input) {
+        input_buffer.value = input;
+        input_buffer.length = input_length;
+    }
+
+    client->maj_stat = gss_init_sec_context(
+        &client->min_stat,
+        client->cred,
+        &client->ctx,
+        client->spn,
+        GSS_C_NO_OID,
+        GSS_C_MUTUAL_FLAG | GSS_C_SEQUENCE_FLAG,
+        0,
+        GSS_C_NO_CHANNEL_BINDINGS,
+        &input_buffer,
+        NULL,
+        &output_buffer,
+        NULL,
+        NULL
+    );
+
+    if (output_buffer.length) {
+        *output = malloc(output_buffer.length);
+        *output_length = output_buffer.length;
+        memcpy(*output, output_buffer.value, output_buffer.length);
+
+        OM_uint32 ignored;
+        gss_release_buffer(&ignored, &output_buffer);
+    }
+
+    if (GSS_ERROR(client->maj_stat)) {
+        return GSSAPI_ERROR;
+    } else if (client->maj_stat == GSS_S_CONTINUE_NEEDED) {
+        return GSSAPI_CONTINUE;
+    }
+
+    return GSSAPI_OK;
+}
+
+int gssapi_client_wrap_msg(
+    gssapi_client_state *client,
+    void* input,
+    size_t input_length,
+    void** output,
+    size_t* output_length 
+)
+{
+    gss_buffer_desc input_buffer = GSS_C_EMPTY_BUFFER;
+    gss_buffer_desc output_buffer = GSS_C_EMPTY_BUFFER;
+
+    input_buffer.value = input;
+    input_buffer.length = input_length;
+
+    client->maj_stat = gss_wrap(&client->min_stat, client->ctx, 0, GSS_C_QOP_DEFAULT, &input_buffer, NULL, &output_buffer);
+
+    if (output_buffer.length) {
+        *output = malloc(output_buffer.length);
+        *output_length = output_buffer.length;
+        memcpy(*output, output_buffer.value, output_buffer.length);
+
+        gss_release_buffer(&client->min_stat, &output_buffer);
+    }
+
+    if (GSS_ERROR(client->maj_stat)) {
+        return GSSAPI_ERROR;
+    }
+
+    return GSSAPI_OK;
+}
+
+int gssapi_client_destroy(
+    gssapi_client_state *client
+)
+{
+    OM_uint32 ignored;
+    if (client->ctx != GSS_C_NO_CONTEXT) {
+        gss_delete_sec_context(&ignored, &client->ctx, GSS_C_NO_BUFFER);
+    }
+
+    if (client->spn != GSS_C_NO_NAME) {
+        gss_release_name(&ignored, &client->spn);
+    }
+
+    if (client->cred != GSS_C_NO_CREDENTIAL) {
+        gss_release_cred(&ignored, &client->cred);
+    }
+
+    return GSSAPI_OK;
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h
new file mode 100644
index 0000000..ca7b907
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h
@@ -0,0 +1,66 @@
+//+build gssapi
+//+build linux darwin
+#ifndef GSS_WRAPPER_H
+#define GSS_WRAPPER_H
+
+#include <stdlib.h>
+#ifdef GOOS_linux
+#include <gssapi/gssapi.h>
+#include <gssapi/gssapi_krb5.h>
+#endif
+#ifdef GOOS_darwin
+#include <GSS/GSS.h>
+#endif
+
+#define GSSAPI_OK 0
+#define GSSAPI_CONTINUE 1
+#define GSSAPI_ERROR 2
+
+typedef struct {
+    gss_name_t spn;
+    gss_cred_id_t cred;
+    gss_ctx_id_t ctx;
+
+    OM_uint32 maj_stat;
+    OM_uint32 min_stat;
+} gssapi_client_state;
+
+int gssapi_error_desc(
+    OM_uint32 maj_stat, 
+    OM_uint32 min_stat, 
+    char **desc
+);
+
+int gssapi_client_init(
+    gssapi_client_state *client,
+    char* spn,
+    char* username,
+    char* password
+);
+
+int gssapi_client_username(
+    gssapi_client_state *client,
+    char** username
+);
+
+int gssapi_client_negotiate(
+    gssapi_client_state *client,
+    void* input,
+    size_t input_length,
+    void** output,
+    size_t* output_length
+);
+
+int gssapi_client_wrap_msg(
+    gssapi_client_state *client,
+    void* input,
+    size_t input_length,
+    void** output,
+    size_t* output_length 
+);
+
+int gssapi_client_destroy(
+    gssapi_client_state *client
+);
+
+#endif
\ No newline at end of file
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi.go
new file mode 100644
index 0000000..dd547f3
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi.go
@@ -0,0 +1,345 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build gssapi,windows
+
+package gssapi
+
+// #include "sspi_wrapper.h"
+import "C"
+import (
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"unsafe"
+)
+
+// New creates a new SaslClient.
+func New(target, username, password string, passwordSet bool, props map[string]string) (*SaslClient, error) {
+	initOnce.Do(initSSPI)
+	if initError != nil {
+		return nil, initError
+	}
+
+	var err error
+	serviceName := "mongodb"
+	serviceRealm := ""
+	canonicalizeHostName := false
+
+	for key, value := range props {
+		switch strings.ToUpper(key) {
+		case "CANONICALIZE_HOST_NAME":
+			canonicalizeHostName, err = strconv.ParseBool(value)
+			if err != nil {
+				return nil, fmt.Errorf("%s must be a boolean (true, false, 0, 1) but got '%s'", key, value)
+			}
+
+		case "SERVICE_REALM":
+			serviceRealm = value
+		case "SERVICE_NAME":
+			serviceName = value
+		}
+	}
+
+	hostname, _, err := net.SplitHostPort(target)
+	if err != nil {
+		return nil, fmt.Errorf("invalid endpoint (%s) specified: %s", target, err)
+	}
+	if canonicalizeHostName {
+		names, err := net.LookupAddr(hostname)
+		if err != nil || len(names) == 0 {
+			return nil, fmt.Errorf("unable to canonicalize hostname: %s", err)
+		}
+		hostname = names[0]
+		if hostname[len(hostname)-1] == '.' {
+			hostname = hostname[:len(hostname)-1]
+		}
+	}
+
+	servicePrincipalName := fmt.Sprintf("%s/%s", serviceName, hostname)
+	if serviceRealm != "" {
+		servicePrincipalName += "@" + serviceRealm
+	}
+
+	return &SaslClient{
+		servicePrincipalName: servicePrincipalName,
+		username:             username,
+		password:             password,
+		passwordSet:          passwordSet,
+	}, nil
+}
+
+// SaslClient is a GSSAPI SASL client backed by Windows SSPI.
+type SaslClient struct {
+	servicePrincipalName string
+	username             string
+	password             string
+	passwordSet          bool
+
+	// state
+	state           C.sspi_client_state
+	contextComplete bool
+	done            bool
+}
+
+func (sc *SaslClient) Close() {
+	C.sspi_client_destroy(&sc.state)
+}
+
+func (sc *SaslClient) Start() (string, []byte, error) {
+	const mechName = "GSSAPI"
+
+	var cusername *C.char
+	var cpassword *C.char
+	if sc.username != "" {
+		cusername = C.CString(sc.username)
+		defer C.free(unsafe.Pointer(cusername))
+		if sc.passwordSet {
+			cpassword = C.CString(sc.password)
+			defer C.free(unsafe.Pointer(cpassword))
+		}
+	}
+	status := C.sspi_client_init(&sc.state, cusername, cpassword)
+
+	if status != C.SSPI_OK {
+		return mechName, nil, sc.getError("unable to intitialize client")
+	}
+
+	return mechName, nil, nil
+}
+
+func (sc *SaslClient) Next(challenge []byte) ([]byte, error) {
+
+	var outBuf C.PVOID
+	var outBufLen C.ULONG
+
+	if sc.contextComplete {
+		if sc.username == "" {
+			var cusername *C.char
+			status := C.sspi_client_username(&sc.state, &cusername)
+			if status != C.SSPI_OK {
+				return nil, sc.getError("unable to acquire username")
+			}
+			defer C.free(unsafe.Pointer(cusername))
+			sc.username = C.GoString((*C.char)(unsafe.Pointer(cusername)))
+		}
+
+		bytes := append([]byte{1, 0, 0, 0}, []byte(sc.username)...)
+		buf := (C.PVOID)(unsafe.Pointer(&bytes[0]))
+		bufLen := C.ULONG(len(bytes))
+		status := C.sspi_client_wrap_msg(&sc.state, buf, bufLen, &outBuf, &outBufLen)
+		if status != C.SSPI_OK {
+			return nil, sc.getError("unable to wrap authz")
+		}
+
+		sc.done = true
+	} else {
+		var buf C.PVOID
+		var bufLen C.ULONG
+		if len(challenge) > 0 {
+			buf = (C.PVOID)(unsafe.Pointer(&challenge[0]))
+			bufLen = C.ULONG(len(challenge))
+		}
+		cservicePrincipalName := C.CString(sc.servicePrincipalName)
+		defer C.free(unsafe.Pointer(cservicePrincipalName))
+
+		status := C.sspi_client_negotiate(&sc.state, cservicePrincipalName, buf, bufLen, &outBuf, &outBufLen)
+		switch status {
+		case C.SSPI_OK:
+			sc.contextComplete = true
+		case C.SSPI_CONTINUE:
+		default:
+			return nil, sc.getError("unable to negotiate with server")
+		}
+	}
+
+	if outBuf != C.PVOID(nil) {
+		defer C.free(unsafe.Pointer(outBuf))
+	}
+
+	return C.GoBytes(unsafe.Pointer(outBuf), C.int(outBufLen)), nil
+}
+
+func (sc *SaslClient) Completed() bool {
+	return sc.done
+}
+
+func (sc *SaslClient) getError(prefix string) error {
+	return getError(prefix, sc.state.status)
+}
+
+var initOnce sync.Once
+var initError error
+
+func initSSPI() {
+	rc := C.sspi_init()
+	if rc != 0 {
+		initError = fmt.Errorf("error initializing sspi: %v", rc)
+	}
+}
+
+func getError(prefix string, status C.SECURITY_STATUS) error {
+	var s string
+	switch status {
+	case C.SEC_E_ALGORITHM_MISMATCH:
+		s = "The client and server cannot communicate because they do not possess a common algorithm."
+	case C.SEC_E_BAD_BINDINGS:
+		s = "The SSPI channel bindings supplied by the client are incorrect."
+	case C.SEC_E_BAD_PKGID:
+		s = "The requested package identifier does not exist."
+	case C.SEC_E_BUFFER_TOO_SMALL:
+		s = "The buffers supplied to the function are not large enough to contain the information."
+	case C.SEC_E_CANNOT_INSTALL:
+		s = "The security package cannot initialize successfully and should not be installed."
+	case C.SEC_E_CANNOT_PACK:
+		s = "The package is unable to pack the context."
+	case C.SEC_E_CERT_EXPIRED:
+		s = "The received certificate has expired."
+	case C.SEC_E_CERT_UNKNOWN:
+		s = "An unknown error occurred while processing the certificate."
+	case C.SEC_E_CERT_WRONG_USAGE:
+		s = "The certificate is not valid for the requested usage."
+	case C.SEC_E_CONTEXT_EXPIRED:
+		s = "The application is referencing a context that has already been closed. A properly written application should not receive this error."
+	case C.SEC_E_CROSSREALM_DELEGATION_FAILURE:
+		s = "The server attempted to make a Kerberos-constrained delegation request for a target outside the server's realm."
+	case C.SEC_E_CRYPTO_SYSTEM_INVALID:
+		s = "The cryptographic system or checksum function is not valid because a required function is unavailable."
+	case C.SEC_E_DECRYPT_FAILURE:
+		s = "The specified data could not be decrypted."
+	case C.SEC_E_DELEGATION_REQUIRED:
+		s = "The requested operation cannot be completed. The computer must be trusted for delegation"
+	case C.SEC_E_DOWNGRADE_DETECTED:
+		s = "The system detected a possible attempt to compromise security. Verify that the server that authenticated you can be contacted."
+	case C.SEC_E_ENCRYPT_FAILURE:
+		s = "The specified data could not be encrypted."
+	case C.SEC_E_ILLEGAL_MESSAGE:
+		s = "The message received was unexpected or badly formatted."
+	case C.SEC_E_INCOMPLETE_CREDENTIALS:
+		s = "The credentials supplied were not complete and could not be verified. The context could not be initialized."
+	case C.SEC_E_INCOMPLETE_MESSAGE:
+		s = "The message supplied was incomplete. The signature was not verified."
+	case C.SEC_E_INSUFFICIENT_MEMORY:
+		s = "Not enough memory is available to complete the request."
+	case C.SEC_E_INTERNAL_ERROR:
+		s = "An error occurred that did not map to an SSPI error code."
+	case C.SEC_E_INVALID_HANDLE:
+		s = "The handle passed to the function is not valid."
+	case C.SEC_E_INVALID_TOKEN:
+		s = "The token passed to the function is not valid."
+	case C.SEC_E_ISSUING_CA_UNTRUSTED:
+		s = "An untrusted certification authority (CA) was detected while processing the smart card certificate used for authentication."
+	case C.SEC_E_ISSUING_CA_UNTRUSTED_KDC:
+		s = "An untrusted CA was detected while processing the domain controller certificate used for authentication. The system event log contains additional information."
+	case C.SEC_E_KDC_CERT_EXPIRED:
+		s = "The domain controller certificate used for smart card logon has expired."
+	case C.SEC_E_KDC_CERT_REVOKED:
+		s = "The domain controller certificate used for smart card logon has been revoked."
+	case C.SEC_E_KDC_INVALID_REQUEST:
+		s = "A request that is not valid was sent to the KDC."
+	case C.SEC_E_KDC_UNABLE_TO_REFER:
+		s = "The KDC was unable to generate a referral for the service requested."
+	case C.SEC_E_KDC_UNKNOWN_ETYPE:
+		s = "The requested encryption type is not supported by the KDC."
+	case C.SEC_E_LOGON_DENIED:
+		s = "The logon has been denied"
+	case C.SEC_E_MAX_REFERRALS_EXCEEDED:
+		s = "The number of maximum ticket referrals has been exceeded."
+	case C.SEC_E_MESSAGE_ALTERED:
+		s = "The message supplied for verification has been altered."
+	case C.SEC_E_MULTIPLE_ACCOUNTS:
+		s = "The received certificate was mapped to multiple accounts."
+	case C.SEC_E_MUST_BE_KDC:
+		s = "The local computer must be a Kerberos domain controller (KDC)"
+	case C.SEC_E_NO_AUTHENTICATING_AUTHORITY:
+		s = "No authority could be contacted for authentication."
+	case C.SEC_E_NO_CREDENTIALS:
+		s = "No credentials are available."
+	case C.SEC_E_NO_IMPERSONATION:
+		s = "No impersonation is allowed for this context."
+	case C.SEC_E_NO_IP_ADDRESSES:
+		s = "Unable to accomplish the requested task because the local computer does not have any IP addresses."
+	case C.SEC_E_NO_KERB_KEY:
+		s = "No Kerberos key was found."
+	case C.SEC_E_NO_PA_DATA:
+		s = "Policy administrator (PA) data is needed to determine the encryption type"
+	case C.SEC_E_NO_S4U_PROT_SUPPORT:
+		s = "The Kerberos subsystem encountered an error. A service for user protocol request was made against a domain controller which does not support service for a user."
+	case C.SEC_E_NO_TGT_REPLY:
+		s = "The client is trying to negotiate a context and the server requires a user-to-user connection"
+	case C.SEC_E_NOT_OWNER:
+		s = "The caller of the function does not own the credentials."
+	case C.SEC_E_OK:
+		s = "The operation completed successfully."
+	case C.SEC_E_OUT_OF_SEQUENCE:
+		s = "The message supplied for verification is out of sequence."
+	case C.SEC_E_PKINIT_CLIENT_FAILURE:
+		s = "The smart card certificate used for authentication is not trusted."
+	case C.SEC_E_PKINIT_NAME_MISMATCH:
+		s = "The client certificate does not contain a valid UPN or does not match the client name in the logon request."
+	case C.SEC_E_QOP_NOT_SUPPORTED:
+		s = "The quality of protection attribute is not supported by this package."
+	case C.SEC_E_REVOCATION_OFFLINE_C:
+		s = "The revocation status of the smart card certificate used for authentication could not be determined."
+	case C.SEC_E_REVOCATION_OFFLINE_KDC:
+		s = "The revocation status of the domain controller certificate used for smart card authentication could not be determined. The system event log contains additional information."
+	case C.SEC_E_SECPKG_NOT_FOUND:
+		s = "The security package was not recognized."
+	case C.SEC_E_SECURITY_QOS_FAILED:
+		s = "The security context could not be established due to a failure in the requested quality of service (for example"
+	case C.SEC_E_SHUTDOWN_IN_PROGRESS:
+		s = "A system shutdown is in progress."
+	case C.SEC_E_SMARTCARD_CERT_EXPIRED:
+		s = "The smart card certificate used for authentication has expired."
+	case C.SEC_E_SMARTCARD_CERT_REVOKED:
+		s = "The smart card certificate used for authentication has been revoked. Additional information may exist in the event log."
+	case C.SEC_E_SMARTCARD_LOGON_REQUIRED:
+		s = "Smart card logon is required and was not used."
+	case C.SEC_E_STRONG_CRYPTO_NOT_SUPPORTED:
+		s = "The other end of the security negotiation requires strong cryptography"
+	case C.SEC_E_TARGET_UNKNOWN:
+		s = "The target was not recognized."
+	case C.SEC_E_TIME_SKEW:
+		s = "The clocks on the client and server computers do not match."
+	case C.SEC_E_TOO_MANY_PRINCIPALS:
+		s = "The KDC reply contained more than one principal name."
+	case C.SEC_E_UNFINISHED_CONTEXT_DELETED:
+		s = "A security context was deleted before the context was completed. This is considered a logon failure."
+	case C.SEC_E_UNKNOWN_CREDENTIALS:
+		s = "The credentials provided were not recognized."
+	case C.SEC_E_UNSUPPORTED_FUNCTION:
+		s = "The requested function is not supported."
+	case C.SEC_E_UNSUPPORTED_PREAUTH:
+		s = "An unsupported preauthentication mechanism was presented to the Kerberos package."
+	case C.SEC_E_UNTRUSTED_ROOT:
+		s = "The certificate chain was issued by an authority that is not trusted."
+	case C.SEC_E_WRONG_CREDENTIAL_HANDLE:
+		s = "The supplied credential handle does not match the credential associated with the security context."
+	case C.SEC_E_WRONG_PRINCIPAL:
+		s = "The target principal name is incorrect."
+	case C.SEC_I_COMPLETE_AND_CONTINUE:
+		s = "The function completed successfully"
+	case C.SEC_I_COMPLETE_NEEDED:
+		s = "The function completed successfully"
+	case C.SEC_I_CONTEXT_EXPIRED:
+		s = "The message sender has finished using the connection and has initiated a shutdown. For information about initiating or recognizing a shutdown"
+	case C.SEC_I_CONTINUE_NEEDED:
+		s = "The function completed successfully"
+	case C.SEC_I_INCOMPLETE_CREDENTIALS:
+		s = "The credentials supplied were not complete and could not be verified. Additional information can be returned from the context."
+	case C.SEC_I_LOCAL_LOGON:
+		s = "The logon was completed"
+	case C.SEC_I_NO_LSA_CONTEXT:
+		s = "There is no LSA mode context associated with this context."
+	case C.SEC_I_RENEGOTIATE:
+		s = "The context data must be renegotiated with the peer."
+	default:
+		return fmt.Errorf("%s: 0x%x", prefix, uint32(status))
+	}
+
+	return fmt.Errorf("%s: %s(0x%x)", prefix, s, uint32(status))
+}
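The file above supplies the Windows half of the driver's GSSAPI support: `Start` acquires SSPI credentials, `Next` alternates `InitializeSecurityContext` rounds until the context is complete and then wraps the authorization message, and `Completed` signals the end. The sketch below is a hedged, standalone illustration of the conversation contract this client is driven by; `sendToServer` is a hypothetical stand-in for the driver's saslStart/saslContinue round trips (shown later in sasl.go), not a real driver function.

```go
// Package sasldemo sketches the loop that drives a SaslClient such as the
// SSPI-backed one above. sendToServer is hypothetical, standing in for the
// driver's saslStart/saslContinue command round trips.
package sasldemo

import "fmt"

type saslClient interface {
	Start() (string, []byte, error)
	Next(challenge []byte) ([]byte, error)
	Completed() bool
}

// converse drives the client until both the server and the client agree the
// exchange is done.
func converse(client saslClient, sendToServer func(payload []byte) (challenge []byte, done bool, err error)) error {
	mech, payload, err := client.Start() // for GSSAPI: acquire SSPI credentials
	if err != nil {
		return fmt.Errorf("%s start failed: %v", mech, err)
	}
	for {
		challenge, done, err := sendToServer(payload)
		if err != nil {
			return err
		}
		if done && client.Completed() {
			return nil // context negotiated and authz message wrapped
		}
		if payload, err = client.Next(challenge); err != nil {
			return err
		}
	}
}
```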
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c
new file mode 100644
index 0000000..9d218bd
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c
@@ -0,0 +1,218 @@
+//+build gssapi,windows
+
+#include "sspi_wrapper.h"
+
+static HINSTANCE sspi_secur32_dll = NULL;
+static PSecurityFunctionTable sspi_functions = NULL;
+static const LPSTR SSPI_PACKAGE_NAME = "kerberos";
+
+int sspi_init()
+{
+	sspi_secur32_dll = LoadLibrary("secur32.dll");
+	if (!sspi_secur32_dll) {
+		return GetLastError();
+	}
+
+	INIT_SECURITY_INTERFACE init_security_interface = (INIT_SECURITY_INTERFACE)GetProcAddress(sspi_secur32_dll, SECURITY_ENTRYPOINT);
+	if (!init_security_interface) {
+		return -1;
+	}
+
+	sspi_functions = (*init_security_interface)();
+	if (!sspi_functions) {
+		return -2;
+	}
+
+	return SSPI_OK;
+}
+
+int sspi_client_init(
+    sspi_client_state *client,
+    char* username,
+    char* password
+)
+{
+	TimeStamp timestamp;
+
+    if (username) {
+        if (password) {
+            SEC_WINNT_AUTH_IDENTITY auth_identity;
+            
+        #ifdef _UNICODE
+            auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_UNICODE;
+        #else
+            auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI;
+        #endif
+            auth_identity.User = (LPSTR) username;
+            auth_identity.UserLength = strlen(username);
+            auth_identity.Password = (LPSTR) password;
+            auth_identity.PasswordLength = strlen(password);
+            auth_identity.Domain = NULL;
+            auth_identity.DomainLength = 0;
+            client->status = sspi_functions->AcquireCredentialsHandle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, &auth_identity, NULL, NULL, &client->cred, &timestamp);
+        } else {
+            client->status = sspi_functions->AcquireCredentialsHandle(username, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, NULL, NULL, NULL, &client->cred, &timestamp);
+        }
+    } else {
+        client->status = sspi_functions->AcquireCredentialsHandle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, NULL, NULL, NULL, &client->cred, &timestamp);
+    }
+
+    if (client->status != SEC_E_OK) {
+        return SSPI_ERROR;
+    }
+
+    return SSPI_OK;
+}
+
+int sspi_client_username(
+    sspi_client_state *client,
+    char** username
+)
+{
+    SecPkgCredentials_Names names;
+	client->status = sspi_functions->QueryCredentialsAttributes(&client->cred, SECPKG_CRED_ATTR_NAMES, &names);
+
+	if (client->status != SEC_E_OK) {
+		return SSPI_ERROR;
+	}
+
+	int len = strlen(names.sUserName) + 1;
+	*username = malloc(len);
+	memcpy(*username, names.sUserName, len);
+
+	sspi_functions->FreeContextBuffer(names.sUserName);
+
+    return SSPI_OK;
+}
+
+int sspi_client_negotiate(
+    sspi_client_state *client,
+    char* spn,
+    PVOID input,
+    ULONG input_length,
+    PVOID* output,
+    ULONG* output_length
+)
+{
+    SecBufferDesc inbuf;
+	SecBuffer in_bufs[1];
+	SecBufferDesc outbuf;
+	SecBuffer out_bufs[1];
+
+	if (client->has_ctx > 0) {
+		inbuf.ulVersion = SECBUFFER_VERSION;
+		inbuf.cBuffers = 1;
+		inbuf.pBuffers = in_bufs;
+		in_bufs[0].pvBuffer = input;
+		in_bufs[0].cbBuffer = input_length;
+		in_bufs[0].BufferType = SECBUFFER_TOKEN;
+	}
+
+	outbuf.ulVersion = SECBUFFER_VERSION;
+	outbuf.cBuffers = 1;
+	outbuf.pBuffers = out_bufs;
+	out_bufs[0].pvBuffer = NULL;
+	out_bufs[0].cbBuffer = 0;
+	out_bufs[0].BufferType = SECBUFFER_TOKEN;
+
+	ULONG context_attr = 0;
+
+	client->status = sspi_functions->InitializeSecurityContext(
+        &client->cred,
+        client->has_ctx > 0 ? &client->ctx : NULL,
+        (LPSTR) spn,
+        ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_MUTUAL_AUTH,
+        0,
+        SECURITY_NETWORK_DREP,
+        client->has_ctx > 0 ? &inbuf : NULL,
+        0,
+        &client->ctx,
+        &outbuf,
+        &context_attr,
+        NULL);
+
+    if (client->status != SEC_E_OK && client->status != SEC_I_CONTINUE_NEEDED) {
+        return SSPI_ERROR;
+    }
+
+    client->has_ctx = 1;
+
+	*output = malloc(out_bufs[0].cbBuffer);
+	*output_length = out_bufs[0].cbBuffer;
+	memcpy(*output, out_bufs[0].pvBuffer, *output_length);
+    sspi_functions->FreeContextBuffer(out_bufs[0].pvBuffer);
+
+    if (client->status == SEC_I_CONTINUE_NEEDED) {
+        return SSPI_CONTINUE;
+    }
+
+    return SSPI_OK;
+}
+
+int sspi_client_wrap_msg(
+    sspi_client_state *client,
+    PVOID input,
+    ULONG input_length,
+    PVOID* output,
+    ULONG* output_length 
+)
+{
+    SecPkgContext_Sizes sizes;
+
+	client->status = sspi_functions->QueryContextAttributes(&client->ctx, SECPKG_ATTR_SIZES, &sizes);
+	if (client->status != SEC_E_OK) {
+		return SSPI_ERROR;
+	}
+
+	char *msg = malloc((sizes.cbSecurityTrailer + input_length + sizes.cbBlockSize) * sizeof(char));
+	memcpy(&msg[sizes.cbSecurityTrailer], input, input_length);
+
+	SecBuffer wrap_bufs[3];
+	SecBufferDesc wrap_buf_desc;
+	wrap_buf_desc.cBuffers = 3;
+	wrap_buf_desc.pBuffers = wrap_bufs;
+	wrap_buf_desc.ulVersion = SECBUFFER_VERSION;
+
+	wrap_bufs[0].cbBuffer = sizes.cbSecurityTrailer;
+	wrap_bufs[0].BufferType = SECBUFFER_TOKEN;
+	wrap_bufs[0].pvBuffer = msg;
+
+	wrap_bufs[1].cbBuffer = input_length;
+	wrap_bufs[1].BufferType = SECBUFFER_DATA;
+	wrap_bufs[1].pvBuffer = msg + sizes.cbSecurityTrailer;
+
+	wrap_bufs[2].cbBuffer = sizes.cbBlockSize;
+	wrap_bufs[2].BufferType = SECBUFFER_PADDING;
+	wrap_bufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + input_length;
+
+	client->status = sspi_functions->EncryptMessage(&client->ctx, SECQOP_WRAP_NO_ENCRYPT, &wrap_buf_desc, 0);
+	if (client->status != SEC_E_OK) {
+		free(msg);
+		return SSPI_ERROR;
+	}
+
+	*output_length = wrap_bufs[0].cbBuffer + wrap_bufs[1].cbBuffer + wrap_bufs[2].cbBuffer;
+	*output = malloc(*output_length);
+
+	memcpy(*output, wrap_bufs[0].pvBuffer, wrap_bufs[0].cbBuffer);
+	memcpy(*output + wrap_bufs[0].cbBuffer, wrap_bufs[1].pvBuffer, wrap_bufs[1].cbBuffer);
+	memcpy(*output + wrap_bufs[0].cbBuffer + wrap_bufs[1].cbBuffer, wrap_bufs[2].pvBuffer, wrap_bufs[2].cbBuffer);
+
+	free(msg);
+
+	return SSPI_OK;
+}
+
+int sspi_client_destroy(
+    sspi_client_state *client
+)
+{
+    if (client->has_ctx > 0) {
+        sspi_functions->DeleteSecurityContext(&client->ctx);
+    }
+
+    sspi_functions->FreeCredentialsHandle(&client->cred);
+
+    return SSPI_OK;
+}
\ No newline at end of file
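For reference, the message assembled by `sspi_client_wrap_msg` above is one contiguous allocation laid out as [security trailer][plaintext][padding], matching the three SECBUFFER_TOKEN/DATA/PADDING entries handed to `EncryptMessage`. The Go sketch below only illustrates that layout arithmetic; the trailer and block sizes are made-up stand-ins for what `QueryContextAttributes(SECPKG_ATTR_SIZES)` would report on a real context.

```go
// Layout sketch for the wrapped SASL message; all sizes are assumptions for
// illustration, not values from a real security context.
package main

import "fmt"

func main() {
	const (
		cbSecurityTrailer = 60 // assumed SECBUFFER_TOKEN size
		inputLength       = 20 // authz message: 4 flag bytes + username
		cbBlockSize       = 8  // assumed SECBUFFER_PADDING size
	)
	// Offsets of the three regions inside the single allocated buffer.
	tokenOffset := 0
	dataOffset := cbSecurityTrailer
	paddingOffset := cbSecurityTrailer + inputLength
	total := cbSecurityTrailer + inputLength + cbBlockSize
	fmt.Println(tokenOffset, dataOffset, paddingOffset, total) // 0 60 80 88
}
```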
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h
new file mode 100644
index 0000000..ee6e9a7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h
@@ -0,0 +1,58 @@
+//+build gssapi,windows
+
+#ifndef SSPI_WRAPPER_H
+#define SSPI_WRAPPER_H
+
+#define SECURITY_WIN32 1  /* Required for SSPI */
+
+#include <windows.h>
+#include <sspi.h>
+
+#define SSPI_OK 0
+#define SSPI_CONTINUE 1
+#define SSPI_ERROR 2
+
+typedef struct {
+    CredHandle cred;
+    CtxtHandle ctx;
+
+    int has_ctx;
+
+    SECURITY_STATUS status;
+} sspi_client_state;
+
+int sspi_init();
+
+int sspi_client_init(
+    sspi_client_state *client,
+    char* username,
+    char* password
+);
+
+int sspi_client_username(
+    sspi_client_state *client,
+    char** username
+);
+
+int sspi_client_negotiate(
+    sspi_client_state *client,
+    char* spn,
+    PVOID input,
+    ULONG input_length,
+    PVOID* output,
+    ULONG* output_length
+);
+
+int sspi_client_wrap_msg(
+    sspi_client_state *client,
+    PVOID input,
+    ULONG input_length,
+    PVOID* output,
+    ULONG* output_length 
+);
+
+int sspi_client_destroy(
+    sspi_client_state *client
+);
+
+#endif
\ No newline at end of file
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/mongodbcr.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/mongodbcr.go
new file mode 100644
index 0000000..89dcf08
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/mongodbcr.go
@@ -0,0 +1,100 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"crypto/md5"
+	"fmt"
+
+	"io"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// MONGODBCR is the mechanism name for MONGODB-CR.
+//
+// The MONGODB-CR authentication mechanism is deprecated in MongoDB 4.0.
+const MONGODBCR = "MONGODB-CR"
+
+func newMongoDBCRAuthenticator(cred *Cred) (Authenticator, error) {
+	return &MongoDBCRAuthenticator{
+		DB:       cred.Source,
+		Username: cred.Username,
+		Password: cred.Password,
+	}, nil
+}
+
+// MongoDBCRAuthenticator uses the MONGODB-CR algorithm to authenticate a connection.
+//
+// The MONGODB-CR authentication mechanism is deprecated in MongoDB 4.0.
+type MongoDBCRAuthenticator struct {
+	DB       string
+	Username string
+	Password string
+}
+
+// Auth authenticates the connection.
+//
+// The MONGODB-CR authentication mechanism is deprecated in MongoDB 4.0.
+func (a *MongoDBCRAuthenticator) Auth(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter) error {
+
+	// Arbiters cannot be authenticated
+	if desc.Kind == description.RSArbiter {
+		return nil
+	}
+
+	db := a.DB
+	if db == "" {
+		db = defaultAuthDB
+	}
+
+	cmd := command.Read{DB: db, Command: bsonx.Doc{{"getnonce", bsonx.Int32(1)}}}
+	ssdesc := description.SelectedServer{Server: desc}
+	rdr, err := cmd.RoundTrip(ctx, ssdesc, rw)
+	if err != nil {
+		return newError(err, MONGODBCR)
+	}
+
+	var getNonceResult struct {
+		Nonce string `bson:"nonce"`
+	}
+
+	err = bson.Unmarshal(rdr, &getNonceResult)
+	if err != nil {
+		return newAuthError("unmarshal error", err)
+	}
+
+	cmd = command.Read{
+		DB: db,
+		Command: bsonx.Doc{
+			{"authenticate", bsonx.Int32(1)},
+			{"user", bsonx.String(a.Username)},
+			{"nonce", bsonx.String(getNonceResult.Nonce)},
+			{"key", bsonx.String(a.createKey(getNonceResult.Nonce))},
+		},
+	}
+	_, err = cmd.RoundTrip(ctx, ssdesc, rw)
+	if err != nil {
+		return newError(err, MONGODBCR)
+	}
+
+	return nil
+}
+
+func (a *MongoDBCRAuthenticator) createKey(nonce string) string {
+	h := md5.New()
+
+	_, _ = io.WriteString(h, nonce)
+	_, _ = io.WriteString(h, a.Username)
+	_, _ = io.WriteString(h, mongoPasswordDigest(a.Username, a.Password))
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
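The key the client sends back is a nested MD5 construction over the server's nonce, the username, and the hex-encoded password digest. A self-contained sketch of the same derivation as `createKey` above, with purely illustrative credentials and nonce:

```go
// Standalone sketch of the MONGODB-CR key derivation; the username,
// password, and nonce values are illustrative only.
package main

import (
	"crypto/md5"
	"fmt"
	"io"
)

func main() {
	username, password := "alice", "secret"
	nonce := "2375531c32080ae8" // as returned by the getnonce command

	// Inner digest: md5(username + ":mongo:" + password), hex-encoded.
	h := md5.New()
	_, _ = io.WriteString(h, username+":mongo:"+password)
	passwordDigest := fmt.Sprintf("%x", h.Sum(nil))

	// Outer key: md5(nonce + username + passwordDigest), sent as "key".
	h = md5.New()
	_, _ = io.WriteString(h, nonce+username+passwordDigest)
	fmt.Printf("key=%x\n", h.Sum(nil))
}
```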
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/plain.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/plain.go
new file mode 100644
index 0000000..3f66aee
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/plain.go
@@ -0,0 +1,56 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// PLAIN is the mechanism name for PLAIN.
+const PLAIN = "PLAIN"
+
+func newPlainAuthenticator(cred *Cred) (Authenticator, error) {
+	return &PlainAuthenticator{
+		Username: cred.Username,
+		Password: cred.Password,
+	}, nil
+}
+
+// PlainAuthenticator uses the PLAIN algorithm over SASL to authenticate a connection.
+type PlainAuthenticator struct {
+	Username string
+	Password string
+}
+
+// Auth authenticates the connection.
+func (a *PlainAuthenticator) Auth(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter) error {
+	return ConductSaslConversation(ctx, desc, rw, "$external", &plainSaslClient{
+		username: a.Username,
+		password: a.Password,
+	})
+}
+
+type plainSaslClient struct {
+	username string
+	password string
+}
+
+func (c *plainSaslClient) Start() (string, []byte, error) {
+	b := []byte("\x00" + c.username + "\x00" + c.password)
+	return PLAIN, b, nil
+}
+
+func (c *plainSaslClient) Next(challenge []byte) ([]byte, error) {
+	return nil, newAuthError("unexpected server challenge", nil)
+}
+
+func (c *plainSaslClient) Completed() bool {
+	return true
+}
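The PLAIN initial response built by `Start` above is simply a NUL-delimited triple of authorization identity (left empty here), username, and password, which is why this mechanism should only be used over TLS. A tiny sketch with made-up credentials:

```go
// Sketch of the PLAIN payload built by plainSaslClient.Start; the
// credentials are illustrative.
package main

import "fmt"

func main() {
	username, password := "alice", "secret"
	payload := []byte("\x00" + username + "\x00" + password) // authzid is empty
	fmt.Printf("%q\n", payload)                              // "\x00alice\x00secret"
}
```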
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/sasl.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/sasl.go
new file mode 100644
index 0000000..5357f48
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/sasl.go
@@ -0,0 +1,120 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// SaslClient is the client piece of a sasl conversation.
+type SaslClient interface {
+	Start() (string, []byte, error)
+	Next(challenge []byte) ([]byte, error)
+	Completed() bool
+}
+
+// SaslClientCloser is a SaslClient that has resources to clean up.
+type SaslClientCloser interface {
+	SaslClient
+	Close()
+}
+
+// ConductSaslConversation handles running a sasl conversation with MongoDB.
+func ConductSaslConversation(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter, db string, client SaslClient) error {
+	// Arbiters cannot be authenticated
+	if desc.Kind == description.RSArbiter {
+		return nil
+	}
+
+	if db == "" {
+		db = defaultAuthDB
+	}
+
+	if closer, ok := client.(SaslClientCloser); ok {
+		defer closer.Close()
+	}
+
+	mech, payload, err := client.Start()
+	if err != nil {
+		return newError(err, mech)
+	}
+
+	saslStartCmd := command.Read{
+		DB: db,
+		Command: bsonx.Doc{
+			{"saslStart", bsonx.Int32(1)},
+			{"mechanism", bsonx.String(mech)},
+			{"payload", bsonx.Binary(0x00, payload)},
+		},
+	}
+
+	type saslResponse struct {
+		ConversationID int    `bson:"conversationId"`
+		Code           int    `bson:"code"`
+		Done           bool   `bson:"done"`
+		Payload        []byte `bson:"payload"`
+	}
+
+	var saslResp saslResponse
+
+	ssdesc := description.SelectedServer{Server: desc}
+	rdr, err := saslStartCmd.RoundTrip(ctx, ssdesc, rw)
+	if err != nil {
+		return newError(err, mech)
+	}
+
+	err = bson.Unmarshal(rdr, &saslResp)
+	if err != nil {
+		return newAuthError("unmarshall error", err)
+	}
+
+	cid := saslResp.ConversationID
+
+	for {
+		if saslResp.Code != 0 {
+			return newError(fmt.Errorf("server returned error on SASL authentication step: code %d", saslResp.Code), mech)
+		}
+
+		if saslResp.Done && client.Completed() {
+			return nil
+		}
+
+		payload, err = client.Next(saslResp.Payload)
+		if err != nil {
+			return newError(err, mech)
+		}
+
+		if saslResp.Done && client.Completed() {
+			return nil
+		}
+
+		saslContinueCmd := command.Read{
+			DB: db,
+			Command: bsonx.Doc{
+				{"saslContinue", bsonx.Int32(1)},
+				{"conversationId", bsonx.Int32(int32(cid))},
+				{"payload", bsonx.Binary(0x00, payload)},
+			},
+		}
+
+		rdr, err = saslContinueCmd.RoundTrip(ctx, ssdesc, rw)
+		if err != nil {
+			return newError(err, mech)
+		}
+
+		err = bson.Unmarshal(rdr, &saslResp)
+		if err != nil {
+			return newAuthError("unmarshal error", err)
+		}
+	}
+}
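On the wire, this loop issues a single saslStart command followed by as many saslContinue commands as the mechanism needs. A sketch of the two command shapes using the same bsonx constructors as above; the payload and conversation id values are illustrative, not taken from a real conversation:

```go
// Sketch of the commands ConductSaslConversation issues; values are
// illustrative stand-ins.
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/bsonx"
)

func main() {
	firstPayload := []byte("n,,n=alice,r=nonce") // e.g. a SCRAM client-first message
	start := bsonx.Doc{
		{"saslStart", bsonx.Int32(1)},
		{"mechanism", bsonx.String("SCRAM-SHA-256")},
		{"payload", bsonx.Binary(0x00, firstPayload)},
	}

	cid := int32(1) // conversationId echoed back from the saslStart response
	next := bsonx.Doc{
		{"saslContinue", bsonx.Int32(1)},
		{"conversationId", bsonx.Int32(cid)},
		{"payload", bsonx.Binary(0x00, []byte("..."))},
	}
	fmt.Println(start, next)
}
```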
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/scram.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/scram.go
new file mode 100644
index 0000000..170488d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/scram.go
@@ -0,0 +1,102 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+	"github.com/xdg/scram"
+	"github.com/xdg/stringprep"
+)
+
+// SCRAMSHA1 holds the mechanism name "SCRAM-SHA-1"
+const SCRAMSHA1 = "SCRAM-SHA-1"
+
+// SCRAMSHA256 holds the mechanism name "SCRAM-SHA-256"
+const SCRAMSHA256 = "SCRAM-SHA-256"
+
+func newScramSHA1Authenticator(cred *Cred) (Authenticator, error) {
+	passdigest := mongoPasswordDigest(cred.Username, cred.Password)
+	client, err := scram.SHA1.NewClientUnprepped(cred.Username, passdigest, "")
+	if err != nil {
+		return nil, newAuthError("error initializing SCRAM-SHA-1 client", err)
+	}
+	client.WithMinIterations(4096)
+	return &ScramAuthenticator{
+		mechanism: SCRAMSHA1,
+		source:    cred.Source,
+		client:    client,
+	}, nil
+}
+
+func newScramSHA256Authenticator(cred *Cred) (Authenticator, error) {
+	passprep, err := stringprep.SASLprep.Prepare(cred.Password)
+	if err != nil {
+		return nil, newAuthError(fmt.Sprintf("error SASLprepping password '%s'", cred.Password), err)
+	}
+	client, err := scram.SHA256.NewClientUnprepped(cred.Username, passprep, "")
+	if err != nil {
+		return nil, newAuthError("error initializing SCRAM-SHA-256 client", err)
+	}
+	client.WithMinIterations(4096)
+	return &ScramAuthenticator{
+		mechanism: SCRAMSHA256,
+		source:    cred.Source,
+		client:    client,
+	}, nil
+}
+
+// ScramAuthenticator uses the SCRAM algorithm over SASL to authenticate a connection.
+type ScramAuthenticator struct {
+	mechanism string
+	source    string
+	client    *scram.Client
+}
+
+// Auth authenticates the connection.
+func (a *ScramAuthenticator) Auth(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter) error {
+	adapter := &scramSaslAdapter{conversation: a.client.NewConversation(), mechanism: a.mechanism}
+	err := ConductSaslConversation(ctx, desc, rw, a.source, adapter)
+	if err != nil {
+		return newAuthError("sasl conversation error", err)
+	}
+	return nil
+}
+
+type scramSaslAdapter struct {
+	mechanism    string
+	conversation *scram.ClientConversation
+}
+
+func (a *scramSaslAdapter) Start() (string, []byte, error) {
+	step, err := a.conversation.Step("")
+	if err != nil {
+		return a.mechanism, nil, err
+	}
+	return a.mechanism, []byte(step), nil
+}
+
+func (a *scramSaslAdapter) Next(challenge []byte) ([]byte, error) {
+	step, err := a.conversation.Step(string(challenge))
+	if err != nil {
+		return nil, err
+	}
+	return []byte(step), nil
+}
+
+func (a *scramSaslAdapter) Completed() bool {
+	return a.conversation.Done()
+}
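The adapter above is a thin shim over github.com/xdg/scram: `conv.Step` and `conv.Done` are exactly what `Next` and `Completed` delegate to. A hedged, standalone sketch of the underlying conversation object, with illustrative credentials:

```go
// Sketch of the scram.Client conversation wrapped by scramSaslAdapter;
// credentials are illustrative.
package main

import (
	"fmt"

	"github.com/xdg/scram"
)

func main() {
	client, err := scram.SHA256.NewClient("alice", "secret", "")
	if err != nil {
		panic(err)
	}
	conv := client.NewConversation()

	clientFirst, err := conv.Step("") // mirrors Start above
	if err != nil {
		panic(err)
	}
	fmt.Println(clientFirst) // e.g. "n,,n=alice,r=<nonce>"
	// Each server challenge would then be fed to conv.Step (mirroring Next)
	// until conv.Done reports the exchange complete (mirroring Completed).
}
```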
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/util.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/util.go
new file mode 100644
index 0000000..36b8c07
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/util.go
@@ -0,0 +1,23 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"crypto/md5"
+	"fmt"
+	"io"
+)
+
+const defaultAuthDB = "admin"
+
+func mongoPasswordDigest(username, password string) string {
+	h := md5.New()
+	_, _ = io.WriteString(h, username)
+	_, _ = io.WriteString(h, ":mongo:")
+	_, _ = io.WriteString(h, password)
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/x509.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/x509.go
new file mode 100644
index 0000000..219513b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/x509.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// MongoDBX509 is the mechanism name for MongoDBX509.
+const MongoDBX509 = "MONGODB-X509"
+
+func newMongoDBX509Authenticator(cred *Cred) (Authenticator, error) {
+	return &MongoDBX509Authenticator{User: cred.Username}, nil
+}
+
+// MongoDBX509Authenticator uses X.509 certificates over TLS to authenticate a connection.
+type MongoDBX509Authenticator struct {
+	User string
+}
+
+// Auth implements the Authenticator interface.
+func (a *MongoDBX509Authenticator) Auth(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter) error {
+	authRequestDoc := bsonx.Doc{
+		{"authenticate", bsonx.Int32(1)},
+		{"mechanism", bsonx.String(MongoDBX509)},
+	}
+
+	if desc.WireVersion == nil || desc.WireVersion.Max < 5 {
+		authRequestDoc = append(authRequestDoc, bsonx.Elem{"user", bsonx.String(a.User)})
+	}
+
+	authCmd := command.Read{DB: "$external", Command: authRequestDoc}
+	ssdesc := description.SelectedServer{Server: desc}
+	_, err := authCmd.RoundTrip(ctx, ssdesc, rw)
+	if err != nil {
+		return newAuthError("round trip error", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/batch_cursor.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/batch_cursor.go
new file mode 100644
index 0000000..da946c3
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/batch_cursor.go
@@ -0,0 +1,424 @@
+package driver
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// BatchCursor is a batch implementation of a cursor. It returns documents in entire batches instead
+// of one at a time. An individual document cursor can be built on top of this batch cursor.
+type BatchCursor struct {
+	clientSession *session.Client
+	clock         *session.ClusterClock
+	namespace     command.Namespace
+	id            int64
+	err           error
+	server        *topology.Server
+	opts          []bsonx.Elem
+	currentBatch  []byte
+	firstBatch    bool
+	batchNumber   int
+
+	// legacy server (< 3.2) fields
+	batchSize   int32
+	limit       int32
+	numReturned int32 // number of docs returned by server
+}
+
+// NewBatchCursor creates a new BatchCursor from the provided parameters.
+func NewBatchCursor(result bsoncore.Document, clientSession *session.Client, clock *session.ClusterClock, server *topology.Server, opts ...bsonx.Elem) (*BatchCursor, error) {
+	cur, err := result.LookupErr("cursor")
+	if err != nil {
+		return nil, err
+	}
+	if cur.Type != bson.TypeEmbeddedDocument {
+		return nil, fmt.Errorf("cursor should be an embedded document but it is a BSON %s", cur.Type)
+	}
+
+	elems, err := cur.Document().Elements()
+	if err != nil {
+		return nil, err
+	}
+	bc := &BatchCursor{
+		clientSession: clientSession,
+		clock:         clock,
+		server:        server,
+		opts:          opts,
+		firstBatch:    true,
+	}
+
+	var ok bool
+	for _, elem := range elems {
+		switch elem.Key() {
+		case "firstBatch":
+			arr, ok := elem.Value().ArrayOK()
+			if !ok {
+				return nil, fmt.Errorf("firstBatch should be an array but it is a BSON %s", elem.Value().Type)
+			}
+			vals, err := arr.Values()
+			if err != nil {
+				return nil, err
+			}
+
+			for _, val := range vals {
+				if val.Type != bsontype.EmbeddedDocument {
+					return nil, fmt.Errorf("element of cursor batch is not a document, but at %s", val.Type)
+				}
+				bc.currentBatch = append(bc.currentBatch, val.Data...)
+			}
+		case "ns":
+			if elem.Value().Type != bson.TypeString {
+				return nil, fmt.Errorf("namespace should be a string but it is a BSON %s", elem.Value().Type)
+			}
+			namespace := command.ParseNamespace(elem.Value().StringValue())
+			err = namespace.Validate()
+			if err != nil {
+				return nil, err
+			}
+			bc.namespace = namespace
+		case "id":
+			bc.id, ok = elem.Value().Int64OK()
+			if !ok {
+				return nil, fmt.Errorf("id should be an int64 but it is a BSON %s", elem.Value().Type)
+			}
+		}
+	}
+
+	// close session if everything fits in first batch
+	if bc.id == 0 {
+		bc.closeImplicitSession()
+	}
+	return bc, nil
+}
+
+// NewEmptyBatchCursor returns a batch cursor that is empty.
+func NewEmptyBatchCursor() *BatchCursor {
+	return &BatchCursor{}
+}
+
+// NewLegacyBatchCursor creates a new BatchCursor for server versions 3.0 and below from the
+// provided parameters.
+//
+// TODO(GODRIVER-617): The batch parameter here should be []bsoncore.Document. Change it to this
+// once we have the new wiremessage package that uses bsoncore instead of bson.
+func NewLegacyBatchCursor(ns command.Namespace, cursorID int64, batch []bson.Raw, limit int32, batchSize int32, server *topology.Server) (*BatchCursor, error) {
+	bc := &BatchCursor{
+		id:          cursorID,
+		server:      server,
+		namespace:   ns,
+		limit:       limit,
+		batchSize:   batchSize,
+		numReturned: int32(len(batch)),
+		firstBatch:  true,
+	}
+
+	// take as many documents from the batch as needed
+	firstBatchSize := int32(len(batch))
+	if limit != 0 && limit < firstBatchSize {
+		firstBatchSize = limit
+	}
+	batch = batch[:firstBatchSize]
+	for _, doc := range batch {
+		bc.currentBatch = append(bc.currentBatch, doc...)
+	}
+
+	return bc, nil
+}
+
+// ID returns the cursor ID for this batch cursor.
+func (bc *BatchCursor) ID() int64 {
+	return bc.id
+}
+
+// Next indicates if there is another batch available. Returning false does not necessarily indicate
+// that the cursor is closed; Next also returns false when the server returns an empty batch. If Next
+// returns true, a valid batch of documents is available for consumption via Batch.
+func (bc *BatchCursor) Next(ctx context.Context) bool {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	if bc.firstBatch {
+		bc.firstBatch = false
+		return true
+	}
+
+	if bc.id == 0 || bc.server == nil {
+		return false
+	}
+
+	if bc.legacy() {
+		bc.legacyGetMore(ctx)
+	} else {
+		bc.getMore(ctx)
+	}
+
+	return len(bc.currentBatch) > 0
+}
+
+// Batch will append the current batch of documents to dst. RequiredBytes can be called to determine
+// the length of the current batch of documents.
+//
+// If there is no batch available, this method does nothing.
+func (bc *BatchCursor) Batch(dst []byte) []byte { return append(dst, bc.currentBatch...) }
+
+// RequiredBytes returns the number of bytes required for the current batch.
+func (bc *BatchCursor) RequiredBytes() int { return len(bc.currentBatch) }
+
+// Err returns the latest error encountered.
+func (bc *BatchCursor) Err() error { return bc.err }
+
+// Close closes this batch cursor.
+func (bc *BatchCursor) Close(ctx context.Context) error {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	if bc.server == nil {
+		return nil
+	}
+
+	if bc.legacy() {
+		return bc.legacyKillCursor(ctx)
+	}
+
+	defer bc.closeImplicitSession()
+	conn, err := bc.server.Connection(ctx)
+	if err != nil {
+		return err
+	}
+
+	_, err = (&command.KillCursors{
+		Clock: bc.clock,
+		NS:    bc.namespace,
+		IDs:   []int64{bc.id},
+	}).RoundTrip(ctx, bc.server.SelectedDescription(), conn)
+	if err != nil {
+		_ = conn.Close() // The command response error is more important here
+		return err
+	}
+
+	bc.id = 0
+	return conn.Close()
+}
+
+func (bc *BatchCursor) closeImplicitSession() {
+	if bc.clientSession != nil && bc.clientSession.SessionType == session.Implicit {
+		bc.clientSession.EndSession()
+	}
+}
+
+func (bc *BatchCursor) clearBatch() {
+	bc.currentBatch = bc.currentBatch[:0]
+}
+
+func (bc *BatchCursor) getMore(ctx context.Context) {
+	bc.clearBatch()
+	if bc.id == 0 {
+		return
+	}
+
+	conn, err := bc.server.Connection(ctx)
+	if err != nil {
+		bc.err = err
+		return
+	}
+
+	response, err := (&command.GetMore{
+		Clock:   bc.clock,
+		ID:      bc.id,
+		NS:      bc.namespace,
+		Opts:    bc.opts,
+		Session: bc.clientSession,
+	}).RoundTrip(ctx, bc.server.SelectedDescription(), conn)
+	if err != nil {
+		_ = conn.Close() // The command response error is more important here
+		bc.err = err
+		return
+	}
+
+	err = conn.Close()
+	if err != nil {
+		bc.err = err
+		return
+	}
+
+	id, err := response.LookupErr("cursor", "id")
+	if err != nil {
+		bc.err = err
+		return
+	}
+	var ok bool
+	bc.id, ok = id.Int64OK()
+	if !ok {
+		bc.err = fmt.Errorf("BSON Type %s is not %s", id.Type, bson.TypeInt64)
+		return
+	}
+
+	// if this is the last getMore, close the session
+	if bc.id == 0 {
+		bc.closeImplicitSession()
+	}
+
+	batch, err := response.LookupErr("cursor", "nextBatch")
+	if err != nil {
+		bc.err = err
+		return
+	}
+	var arr bson.Raw
+	arr, ok = batch.ArrayOK()
+	if !ok {
+		bc.err = fmt.Errorf("BSON Type %s is not %s", batch.Type, bson.TypeArray)
+		return
+	}
+	vals, err := arr.Values()
+	if err != nil {
+		bc.err = err
+		return
+	}
+
+	for _, val := range vals {
+		if val.Type != bsontype.EmbeddedDocument {
+			bc.err = fmt.Errorf("element of cursor batch is not a document, but at %s", val.Type)
+			bc.currentBatch = bc.currentBatch[:0] // don't return a batch on error
+			return
+		}
+		bc.currentBatch = append(bc.currentBatch, val.Value...)
+	}
+}
+
+func (bc *BatchCursor) legacy() bool {
+	return bc.server.Description().WireVersion == nil || bc.server.Description().WireVersion.Max < 4
+}
+
+func (bc *BatchCursor) legacyKillCursor(ctx context.Context) error {
+	conn, err := bc.server.Connection(ctx)
+	if err != nil {
+		return err
+	}
+
+	kc := wiremessage.KillCursors{
+		NumberOfCursorIDs: 1,
+		CursorIDs:         []int64{bc.id},
+		CollectionName:    bc.namespace.Collection,
+		DatabaseName:      bc.namespace.DB,
+	}
+
+	err = conn.WriteWireMessage(ctx, kc)
+	if err != nil {
+		_ = conn.Close()
+		return err
+	}
+
+	err = conn.Close() // no reply from OP_KILL_CURSORS
+	if err != nil {
+		return err
+	}
+
+	bc.id = 0
+	bc.clearBatch()
+	return nil
+}
+
+func (bc *BatchCursor) legacyGetMore(ctx context.Context) {
+	bc.clearBatch()
+	if bc.id == 0 {
+		return
+	}
+
+	conn, err := bc.server.Connection(ctx)
+	if err != nil {
+		bc.err = err
+		return
+	}
+
+	numToReturn := bc.batchSize
+	if bc.limit != 0 && bc.numReturned+bc.batchSize > bc.limit {
+		numToReturn = bc.limit - bc.numReturned
+	}
+	gm := wiremessage.GetMore{
+		FullCollectionName: bc.namespace.DB + "." + bc.namespace.Collection,
+		CursorID:           bc.id,
+		NumberToReturn:     numToReturn,
+	}
+
+	err = conn.WriteWireMessage(ctx, gm)
+	if err != nil {
+		_ = conn.Close()
+		bc.err = err
+		return
+	}
+
+	response, err := conn.ReadWireMessage(ctx)
+	if err != nil {
+		_ = conn.Close()
+		bc.err = err
+		return
+	}
+
+	err = conn.Close()
+	if err != nil {
+		bc.err = err
+		return
+	}
+
+	reply, ok := response.(wiremessage.Reply)
+	if !ok {
+		bc.err = errors.New("did not receive OP_REPLY response")
+		return
+	}
+
+	err = validateGetMoreReply(reply)
+	if err != nil {
+		bc.err = err
+		return
+	}
+
+	bc.id = reply.CursorID
+	bc.numReturned += reply.NumberReturned
+	if bc.limit != 0 && bc.numReturned >= bc.limit {
+		err = bc.Close(ctx)
+		if err != nil {
+			bc.err = err
+			return
+		}
+	}
+
+	for _, doc := range reply.Documents {
+		bc.currentBatch = append(bc.currentBatch, doc...)
+	}
+}
+
+func validateGetMoreReply(reply wiremessage.Reply) error {
+	if int(reply.NumberReturned) != len(reply.Documents) {
+		return command.NewCommandResponseError("malformed OP_REPLY: NumberReturned does not match number of returned documents", nil)
+	}
+
+	if reply.ResponseFlags&wiremessage.CursorNotFound == wiremessage.CursorNotFound {
+		return command.QueryFailureError{
+			Message: "query failure - cursor not found",
+		}
+	}
+	if reply.ResponseFlags&wiremessage.QueryFailure == wiremessage.QueryFailure {
+		return command.QueryFailureError{
+			Message:  "query failure",
+			Response: reply.Documents[0],
+		}
+	}
+
+	return nil
+}
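A caller drains a cursor like this by alternating `Next` and `Batch`. The sketch below codes against a narrowed, hypothetical interface rather than `*BatchCursor` itself, but the concrete type satisfies it:

```go
// Sketch of consuming a batch cursor; batchCursor is an illustrative
// narrowing of *driver.BatchCursor.
package cursordemo

import "context"

type batchCursor interface {
	Next(context.Context) bool
	Batch(dst []byte) []byte
	Err() error
	Close(context.Context) error
}

// drain appends every batch's concatenated BSON documents to one slice.
func drain(ctx context.Context, bc batchCursor) ([]byte, error) {
	defer bc.Close(ctx)
	var raw []byte
	for bc.Next(ctx) { // first call yields the initial batch; later calls run getMore
		raw = bc.Batch(raw)
	}
	return raw, bc.Err()
}
```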
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/bulk_write.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/bulk_write.go
new file mode 100644
index 0000000..4d461d5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/bulk_write.go
@@ -0,0 +1,627 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// BulkWriteError is an error from one operation in a bulk write.
+type BulkWriteError struct {
+	result.WriteError
+	Model WriteModel
+}
+
+// BulkWriteException is a collection of errors returned by a bulk write operation.
+type BulkWriteException struct {
+	WriteConcernError *result.WriteConcernError
+	WriteErrors       []BulkWriteError
+}
+
+func (BulkWriteException) Error() string {
+	return ""
+}
+
+type bulkWriteBatch struct {
+	models   []WriteModel
+	canRetry bool
+}
+
+// BulkWrite handles the full dispatch cycle for a bulk write operation.
+func BulkWrite(
+	ctx context.Context,
+	ns command.Namespace,
+	models []WriteModel,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	sess *session.Client,
+	writeConcern *writeconcern.WriteConcern,
+	clock *session.ClusterClock,
+	registry *bsoncodec.Registry,
+	opts ...*options.BulkWriteOptions,
+) (result.BulkWrite, error) {
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.BulkWrite{}, err
+	}
+
+	err = verifyOptions(models, ss)
+	if err != nil {
+		return result.BulkWrite{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if sess == nil && topo.SupportsSessions() {
+		sess, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.BulkWrite{}, err
+		}
+
+		defer sess.EndSession()
+	}
+
+	bwOpts := options.MergeBulkWriteOptions(opts...)
+
+	ordered := *bwOpts.Ordered
+
+	batches := createBatches(models, ordered)
+	bwRes := result.BulkWrite{
+		UpsertedIDs: make(map[int64]interface{}),
+	}
+	bwErr := BulkWriteException{
+		WriteErrors: make([]BulkWriteError, 0),
+	}
+
+	var opIndex int64 // the operation index for the upsertedIDs map
+	continueOnError := !ordered
+	for _, batch := range batches {
+		if len(batch.models) == 0 {
+			continue
+		}
+
+		batchRes, batchErr, err := runBatch(ctx, ns, topo, selector, ss, sess, clock, writeConcern, retryWrite,
+			bwOpts.BypassDocumentValidation, continueOnError, batch, registry)
+
+		mergeResults(&bwRes, batchRes, opIndex)
+		bwErr.WriteConcernError = batchErr.WriteConcernError
+		for i := range batchErr.WriteErrors {
+			batchErr.WriteErrors[i].Index = batchErr.WriteErrors[i].Index + int(opIndex)
+		}
+		bwErr.WriteErrors = append(bwErr.WriteErrors, batchErr.WriteErrors...)
+
+		if !continueOnError && (err != nil || len(batchErr.WriteErrors) > 0 || batchErr.WriteConcernError != nil) {
+			if err != nil {
+				return result.BulkWrite{}, err
+			}
+
+			return result.BulkWrite{}, bwErr
+		}
+
+		opIndex += int64(len(batch.models))
+	}
+
+	bwRes.MatchedCount -= bwRes.UpsertedCount
+	return bwRes, nil
+}
+
+func runBatch(
+	ctx context.Context,
+	ns command.Namespace,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	ss *topology.SelectedServer,
+	sess *session.Client,
+	clock *session.ClusterClock,
+	wc *writeconcern.WriteConcern,
+	retryWrite bool,
+	bypassDocValidation *bool,
+	continueOnError bool,
+	batch bulkWriteBatch,
+	registry *bsoncodec.Registry,
+) (result.BulkWrite, BulkWriteException, error) {
+	batchRes := result.BulkWrite{
+		UpsertedIDs: make(map[int64]interface{}),
+	}
+	batchErr := BulkWriteException{}
+
+	var writeErrors []result.WriteError
+	switch batch.models[0].(type) {
+	case InsertOneModel:
+		res, err := runInsert(ctx, ns, topo, selector, ss, sess, clock, wc, retryWrite, batch, bypassDocValidation,
+			continueOnError, registry)
+		if err != nil {
+			return result.BulkWrite{}, BulkWriteException{}, err
+		}
+
+		batchRes.InsertedCount = int64(res.N)
+		writeErrors = res.WriteErrors
+	case DeleteOneModel, DeleteManyModel:
+		res, err := runDelete(ctx, ns, topo, selector, ss, sess, clock, wc, retryWrite, batch, continueOnError, registry)
+		if err != nil {
+			return result.BulkWrite{}, BulkWriteException{}, err
+		}
+
+		batchRes.DeletedCount = int64(res.N)
+		writeErrors = res.WriteErrors
+	case ReplaceOneModel, UpdateOneModel, UpdateManyModel:
+		res, err := runUpdate(ctx, ns, topo, selector, ss, sess, clock, wc, retryWrite, batch, bypassDocValidation,
+			continueOnError, registry)
+		if err != nil {
+			return result.BulkWrite{}, BulkWriteException{}, err
+		}
+
+		batchRes.MatchedCount = res.MatchedCount
+		batchRes.ModifiedCount = res.ModifiedCount
+		batchRes.UpsertedCount = int64(len(res.Upserted))
+		writeErrors = res.WriteErrors
+		for _, upsert := range res.Upserted {
+			batchRes.UpsertedIDs[upsert.Index] = upsert.ID
+		}
+	}
+
+	batchErr.WriteErrors = make([]BulkWriteError, 0, len(writeErrors))
+	for _, we := range writeErrors {
+		batchErr.WriteErrors = append(batchErr.WriteErrors, BulkWriteError{
+			WriteError: we,
+			Model:      batch.models[0],
+		})
+	}
+
+	return batchRes, batchErr, nil
+}
+
+func runInsert(
+	ctx context.Context,
+	ns command.Namespace,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	ss *topology.SelectedServer,
+	sess *session.Client,
+	clock *session.ClusterClock,
+	wc *writeconcern.WriteConcern,
+	retryWrite bool,
+	batch bulkWriteBatch,
+	bypassDocValidation *bool,
+	continueOnError bool,
+	registry *bsoncodec.Registry,
+) (result.Insert, error) {
+	docs := make([]bsonx.Doc, len(batch.models))
+	var i int
+	for _, model := range batch.models {
+		converted := model.(InsertOneModel)
+		doc, err := interfaceToDocument(converted.Document, registry)
+		if err != nil {
+			return result.Insert{}, err
+		}
+
+		docs[i] = doc
+		i++
+	}
+
+	cmd := command.Insert{
+		ContinueOnError: continueOnError,
+		NS:              ns,
+		Docs:            docs,
+		Session:         sess,
+		Clock:           clock,
+		WriteConcern:    wc,
+	}
+
+	if bypassDocValidation != nil {
+		cmd.Opts = []bsonx.Elem{{"bypassDocumentValidation", bsonx.Boolean(*bypassDocValidation)}}
+	}
+
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite || !batch.canRetry {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false
+		}
+		return insert(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, origErr := insert(ctx, cmd, ss, nil)
+	if shouldRetry(origErr, res.WriteConcernError) {
+		newServer, err := topo.SelectServer(ctx, selector)
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return res, origErr
+		}
+
+		return insert(ctx, cmd, newServer, origErr)
+	}
+
+	return res, origErr
+}
+
+func runDelete(
+	ctx context.Context,
+	ns command.Namespace,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	ss *topology.SelectedServer,
+	sess *session.Client,
+	clock *session.ClusterClock,
+	wc *writeconcern.WriteConcern,
+	retryWrite bool,
+	batch bulkWriteBatch,
+	continueOnError bool,
+	registry *bsoncodec.Registry,
+) (result.Delete, error) {
+	docs := make([]bsonx.Doc, len(batch.models))
+	var i int
+
+	for _, model := range batch.models {
+		var doc bsonx.Doc
+		var err error
+
+		if dom, ok := model.(DeleteOneModel); ok {
+			doc, err = createDeleteDoc(dom.Filter, dom.Collation, false, registry)
+		} else if dmm, ok := model.(DeleteManyModel); ok {
+			doc, err = createDeleteDoc(dmm.Filter, dmm.Collation, true, registry)
+		}
+
+		if err != nil {
+			return result.Delete{}, err
+		}
+
+		docs[i] = doc
+		i++
+	}
+
+	cmd := command.Delete{
+		ContinueOnError: continueOnError,
+		NS:              ns,
+		Deletes:         docs,
+		Session:         sess,
+		Clock:           clock,
+		WriteConcern:    wc,
+	}
+
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite || !batch.canRetry {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false
+		}
+		return delete(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, origErr := delete(ctx, cmd, ss, nil)
+	if shouldRetry(origErr, res.WriteConcernError) {
+		newServer, err := topo.SelectServer(ctx, selector)
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return res, origErr
+		}
+
+		return delete(ctx, cmd, newServer, origErr)
+	}
+
+	return res, origErr
+}
+
+func runUpdate(
+	ctx context.Context,
+	ns command.Namespace,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	ss *topology.SelectedServer,
+	sess *session.Client,
+	clock *session.ClusterClock,
+	wc *writeconcern.WriteConcern,
+	retryWrite bool,
+	batch bulkWriteBatch,
+	bypassDocValidation *bool,
+	continueOnError bool,
+	registry *bsoncodec.Registry,
+) (result.Update, error) {
+	docs := make([]bsonx.Doc, len(batch.models))
+
+	for i, model := range batch.models {
+		var doc bsonx.Doc
+		var err error
+
+		if rom, ok := model.(ReplaceOneModel); ok {
+			doc, err = createUpdateDoc(rom.Filter, rom.Replacement, options.ArrayFilters{}, false, rom.UpdateModel, false,
+				registry)
+		} else if uom, ok := model.(UpdateOneModel); ok {
+			doc, err = createUpdateDoc(uom.Filter, uom.Update, uom.ArrayFilters, uom.ArrayFiltersSet, uom.UpdateModel, false,
+				registry)
+		} else if umm, ok := model.(UpdateManyModel); ok {
+			doc, err = createUpdateDoc(umm.Filter, umm.Update, umm.ArrayFilters, umm.ArrayFiltersSet, umm.UpdateModel, true,
+				registry)
+		}
+
+		if err != nil {
+			return result.Update{}, err
+		}
+
+		docs[i] = doc
+	}
+
+	cmd := command.Update{
+		ContinueOnError: continueOnError,
+		NS:              ns,
+		Docs:            docs,
+		Session:         sess,
+		Clock:           clock,
+		WriteConcern:    wc,
+	}
+	if bypassDocValidation != nil {
+		// TODO this is temporary!
+		cmd.Opts = []bsonx.Elem{{"bypassDocumentValidation", bsonx.Boolean(*bypassDocValidation)}}
+		//cmd.Opts = []option.UpdateOptioner{option.OptBypassDocumentValidation(bypassDocValidation)}
+	}
+
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite || !batch.canRetry {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false
+		}
+		return update(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, origErr := update(ctx, cmd, ss, nil)
+	if shouldRetry(origErr, res.WriteConcernError) {
+		newServer, err := topo.SelectServer(ctx, selector)
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return res, origErr
+		}
+
+		return update(ctx, cmd, newServer, origErr)
+	}
+
+	return res, origErr
+}
+
+func verifyOptions(models []WriteModel, ss *topology.SelectedServer) error {
+	maxVersion := ss.Description().WireVersion.Max
+	// 3.4 is wire version 5
+	// 3.6 is wire version 6
+
+	for _, model := range models {
+		var collationSet bool
+		var afSet bool // arrayFilters
+
+		switch converted := model.(type) {
+		case DeleteOneModel:
+			collationSet = converted.Collation != nil
+		case DeleteManyModel:
+			collationSet = converted.Collation != nil
+		case ReplaceOneModel:
+			collationSet = converted.Collation != nil
+		case UpdateOneModel:
+			afSet = converted.ArrayFiltersSet
+			collationSet = converted.Collation != nil
+		case UpdateManyModel:
+			afSet = converted.ArrayFiltersSet
+			collationSet = converted.Collation != nil
+		}
+
+		if afSet && maxVersion < 6 {
+			return ErrArrayFilters
+		}
+
+		if collationSet && maxVersion < 5 {
+			return ErrCollation
+		}
+	}
+
+	return nil
+}
+
+func createBatches(models []WriteModel, ordered bool) []bulkWriteBatch {
+	if ordered {
+		return createOrderedBatches(models)
+	}
+
+	batches := make([]bulkWriteBatch, 3)
+	for i := range batches {
+		batches[i].canRetry = true
+	}
+
+	var numBatches int // number of batches actually used; len(batches) is fixed at 3
+	insertInd := -1
+	updateInd := -1
+	deleteInd := -1
+
+	for _, model := range models {
+		switch converted := model.(type) {
+		case InsertOneModel:
+			if insertInd == -1 {
+				// this is the first InsertOneModel
+				insertInd = numBatches
+				numBatches++
+			}
+
+			batches[insertInd].models = append(batches[insertInd].models, model)
+		case DeleteOneModel, DeleteManyModel:
+			if deleteInd == -1 {
+				deleteInd = numBatches
+				numBatches++
+			}
+
+			batches[deleteInd].models = append(batches[deleteInd].models, model)
+			if _, ok := converted.(DeleteManyModel); ok {
+				batches[deleteInd].canRetry = false
+			}
+		case ReplaceOneModel, UpdateOneModel, UpdateManyModel:
+			if updateInd == -1 {
+				updateInd = numBatches
+				numBatches++
+			}
+
+			batches[updateInd].models = append(batches[updateInd].models, model)
+			if _, ok := converted.(UpdateManyModel); ok {
+				batches[updateInd].canRetry = false
+			}
+		}
+	}
+
+	return batches
+}
+
+func createOrderedBatches(models []WriteModel) []bulkWriteBatch {
+	var batches []bulkWriteBatch
+	var prevKind command.WriteCommandKind = -1
+	i := -1 // batch index
+
+	for _, model := range models {
+		var createNewBatch bool
+		var canRetry bool
+		var newKind command.WriteCommandKind
+
+		switch model.(type) {
+		case InsertOneModel:
+			createNewBatch = prevKind != command.InsertCommand
+			canRetry = true
+			newKind = command.InsertCommand
+		case DeleteOneModel:
+			createNewBatch = prevKind != command.DeleteCommand
+			canRetry = true
+			newKind = command.DeleteCommand
+		case DeleteManyModel:
+			createNewBatch = prevKind != command.DeleteCommand
+			newKind = command.DeleteCommand
+		case ReplaceOneModel, UpdateOneModel:
+			createNewBatch = prevKind != command.UpdateCommand
+			canRetry = true
+			newKind = command.UpdateCommand
+		case UpdateManyModel:
+			createNewBatch = prevKind != command.UpdateCommand
+			newKind = command.UpdateCommand
+		}
+
+		if createNewBatch {
+			batches = append(batches, bulkWriteBatch{
+				models:   []WriteModel{model},
+				canRetry: canRetry,
+			})
+			i++
+		} else {
+			batches[i].models = append(batches[i].models, model)
+			if !canRetry {
+				batches[i].canRetry = false // don't make it true if it was already false
+			}
+		}
+
+		prevKind = newKind
+	}
+
+	return batches
+}
+
+func shouldRetry(cmdErr error, wcErr *result.WriteConcernError) bool {
+	if cerr, ok := cmdErr.(command.Error); ok && cerr.Retryable() ||
+		wcErr != nil && command.IsWriteConcernErrorRetryable(wcErr) {
+		return true
+	}
+
+	return false
+}
+
+func createUpdateDoc(
+	filter interface{},
+	update interface{},
+	arrayFilters options.ArrayFilters,
+	arrayFiltersSet bool,
+	updateModel UpdateModel,
+	multi bool,
+	registry *bsoncodec.Registry,
+) (bsonx.Doc, error) {
+	f, err := interfaceToDocument(filter, registry)
+	if err != nil {
+		return nil, err
+	}
+
+	u, err := interfaceToDocument(update, registry)
+	if err != nil {
+		return nil, err
+	}
+
+	doc := bsonx.Doc{
+		{"q", bsonx.Document(f)},
+		{"u", bsonx.Document(u)},
+		{"multi", bsonx.Boolean(multi)},
+	}
+
+	if arrayFiltersSet {
+		arr, err := arrayFilters.ToArray()
+		if err != nil {
+			return nil, err
+		}
+		doc = append(doc, bsonx.Elem{"arrayFilters", bsonx.Array(arr)})
+	}
+
+	if updateModel.Collation != nil {
+		doc = append(doc, bsonx.Elem{"collation", bsonx.Document(updateModel.Collation.ToDocument())})
+	}
+
+	if updateModel.UpsertSet {
+		doc = append(doc, bsonx.Elem{"upsert", bsonx.Boolean(updateModel.Upsert)})
+	}
+
+	return doc, nil
+}
+
+func createDeleteDoc(
+	filter interface{},
+	collation *options.Collation,
+	many bool,
+	registry *bsoncodec.Registry,
+) (bsonx.Doc, error) {
+	f, err := interfaceToDocument(filter, registry)
+	if err != nil {
+		return nil, err
+	}
+
+	var limit int32 = 1
+	if many {
+		limit = 0
+	}
+
+	doc := bsonx.Doc{
+		{"q", bsonx.Document(f)},
+		{"limit", bsonx.Int32(limit)},
+	}
+
+	if collation != nil {
+		doc = append(doc, bsonx.Elem{"collation", bsonx.Document(collation.ToDocument())})
+	}
+
+	return doc, nil
+}
+
+func mergeResults(aggResult *result.BulkWrite, newResult result.BulkWrite, opIndex int64) {
+	aggResult.InsertedCount += newResult.InsertedCount
+	aggResult.MatchedCount += newResult.MatchedCount
+	aggResult.ModifiedCount += newResult.ModifiedCount
+	aggResult.DeletedCount += newResult.DeletedCount
+	aggResult.UpsertedCount += newResult.UpsertedCount
+
+	for index, upsertID := range newResult.UpsertedIDs {
+		aggResult.UpsertedIDs[index+opIndex] = upsertID
+	}
+}
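
The batching rules above reduce to: unordered bulk writes are grouped into at most three batches (one each for inserts, deletes, and updates) regardless of input order, while ordered bulk writes start a new batch whenever the operation kind changes; any DeleteMany or UpdateMany disables retrying for its whole batch. A minimal, self-contained sketch of the unordered grouping, using simplified stand-in types rather than the driver's WriteModel hierarchy:

```go
package main

import "fmt"

// Simplified stand-ins for the driver's write model types (illustrative only).
type writeKind int

const (
	insertKind writeKind = iota
	deleteKind
	updateKind
)

type model struct {
	kind writeKind
	many bool // DeleteMany / UpdateMany disable retrying for the batch
}

type batch struct {
	models   []model
	canRetry bool
}

// groupUnordered mirrors the createBatches idea: one batch per operation kind.
func groupUnordered(models []model) []batch {
	byKind := map[writeKind]*batch{}
	var order []writeKind
	for _, m := range models {
		b, ok := byKind[m.kind]
		if !ok {
			b = &batch{canRetry: true}
			byKind[m.kind] = b
			order = append(order, m.kind)
		}
		b.models = append(b.models, m)
		if m.many {
			b.canRetry = false // a *Many op poisons retryability for the batch
		}
	}
	out := make([]batch, 0, len(order))
	for _, k := range order {
		out = append(out, *byKind[k])
	}
	return out
}

func main() {
	in := []model{{insertKind, false}, {deleteKind, true}, {insertKind, false}}
	for i, b := range groupUnordered(in) {
		fmt.Printf("batch %d: %d ops, canRetry=%v\n", i, len(b.models), b.canRetry)
	}
	// batch 0: 2 ops, canRetry=true
	// batch 1: 1 ops, canRetry=false
}
```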
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/commit_transaction.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/commit_transaction.go
new file mode 100644
index 0000000..a3b3439
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/commit_transaction.go
@@ -0,0 +1,106 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// CommitTransaction handles the full cycle dispatch and execution of committing a transaction
+// against the provided topology.
+func CommitTransaction(
+	ctx context.Context,
+	cmd command.CommitTransaction,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+) (result.TransactionResult, error) {
+	res, err := commitTransaction(ctx, cmd, topo, selector, nil)
+	if cerr, ok := err.(command.Error); ok && err != nil {
+		// Retry if appropriate
+		if cerr.Retryable() {
+			res, err = commitTransaction(ctx, cmd, topo, selector, cerr)
+			if cerr2, ok := err.(command.Error); ok && err != nil {
+				// Retry failures also get label
+				cerr2.Labels = append(cerr2.Labels, command.UnknownTransactionCommitResult)
+			} else if err != nil {
+				err = command.Error{Message: err.Error(), Labels: []string{command.UnknownTransactionCommitResult}}
+			}
+		}
+	}
+	return res, err
+}
+
+func commitTransaction(
+	ctx context.Context,
+	cmd command.CommitTransaction,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	oldErr error,
+) (result.TransactionResult, error) {
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		// If this is a retry and server selection fails, return the original error
+		if oldErr != nil {
+			return result.TransactionResult{}, oldErr
+		}
+		return result.TransactionResult{}, err
+	}
+
+	desc := ss.Description()
+
+	if oldErr != nil && (!topo.SupportsSessions() || !description.SessionsSupported(desc.WireVersion)) {
+		// We are retrying (oldErr != nil); if the server doesn't support
+		// retryable writes, return the original error.
+		// The conditions for retryable write support are the same as those for sessions.
+		return result.TransactionResult{}, oldErr
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.TransactionResult{}, oldErr
+		}
+		return result.TransactionResult{}, err
+	}
+	defer conn.Close()
+
+	res, err := cmd.RoundTrip(ctx, desc, conn)
+
+	// Add the UnknownTransactionCommitResult error label where appropriate
+	if err != nil {
+		var newLabels []string
+		if cerr, ok := err.(command.Error); ok {
+			// Replace the TransientTransactionError label with UnknownTransactionCommitResult
+			// for network errors, retryable errors, and write concern failed/timeout (code 64) errors
+			hasUnknownCommitErr := false
+			for _, label := range cerr.Labels {
+				if label == command.NetworkError {
+					hasUnknownCommitErr = true
+					break
+				}
+			}
+
+			// network error, retryable error, or write concern fail/timeout (64) get the unknown label
+			if hasUnknownCommitErr || cerr.Retryable() || cerr.Code == 64 {
+				for _, label := range cerr.Labels {
+					if label != command.TransientTransactionError {
+						newLabels = append(newLabels, label)
+					}
+				}
+				newLabels = append(newLabels, command.UnknownTransactionCommitResult)
+				cerr.Labels = newLabels
+			}
+			err = cerr
+		}
+	}
+	return res, err
+}
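
The label rewriting here is easy to misread: a commit error that carries a network-error label, is retryable, or is a write concern failure/timeout (code 64) has TransientTransactionError removed and UnknownTransactionCommitResult appended. A standalone sketch of just that transformation, with the label strings hard-coded for illustration:

```go
package main

import "fmt"

const (
	transientTransactionError      = "TransientTransactionError"
	unknownTransactionCommitResult = "UnknownTransactionCommitResult"
	networkError                   = "NetworkError"
)

// relabel mimics the commitTransaction post-processing: when the error is a
// network error, retryable, or a write concern failure (code 64), drop
// TransientTransactionError and append UnknownTransactionCommitResult.
func relabel(labels []string, retryable bool, code int32) []string {
	hasNetworkErr := false
	for _, l := range labels {
		if l == networkError {
			hasNetworkErr = true
			break
		}
	}
	if !hasNetworkErr && !retryable && code != 64 {
		return labels // leave other errors untouched
	}
	out := make([]string, 0, len(labels)+1)
	for _, l := range labels {
		if l != transientTransactionError {
			out = append(out, l)
		}
	}
	return append(out, unknownTransactionCommitResult)
}

func main() {
	fmt.Println(relabel([]string{networkError, transientTransactionError}, false, 0))
	// [NetworkError UnknownTransactionCommitResult]
}
```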
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/count.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/count.go
new file mode 100644
index 0000000..7b7fba4
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/count.go
@@ -0,0 +1,93 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Count handles the full cycle dispatch and execution of a count command against the provided
+// topology.
+func Count(
+	ctx context.Context,
+	cmd command.Count,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	registry *bsoncodec.Registry,
+	opts ...*options.CountOptions,
+) (int64, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return 0, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return 0, err
+	}
+	defer conn.Close()
+
+	rp, err := getReadPrefBasedOnTransaction(cmd.ReadPref, cmd.Session)
+	if err != nil {
+		return 0, err
+	}
+	cmd.ReadPref = rp
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return 0, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	countOpts := options.MergeCountOptions(opts...)
+
+	if countOpts.Limit != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"limit", bsonx.Int64(*countOpts.Limit)})
+	}
+	if countOpts.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{
+			"maxTimeMS", bsonx.Int64(int64(*countOpts.MaxTime / time.Millisecond)),
+		})
+	}
+	if countOpts.Skip != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"skip", bsonx.Int64(*countOpts.Skip)})
+	}
+	if countOpts.Collation != nil {
+		if desc.WireVersion.Max < 5 {
+			return 0, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(countOpts.Collation.ToDocument())})
+	}
+	if countOpts.Hint != nil {
+		hintElem, err := interfaceToElement("hint", countOpts.Hint, registry)
+		if err != nil {
+			return 0, err
+		}
+
+		cmd.Opts = append(cmd.Opts, hintElem)
+	}
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
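
The MaxTime option is converted from a time.Duration into whole milliseconds before being appended as maxTimeMS; the same conversion recurs in the other dispatchers below. A quick illustration of the arithmetic (integer division truncates sub-millisecond remainders):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	maxTime := 2500 * time.Millisecond
	// Same conversion used when building the maxTimeMS command option.
	ms := int64(maxTime / time.Millisecond)
	fmt.Println(ms) // 2500
}
```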
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/count_documents.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/count_documents.go
new file mode 100644
index 0000000..b727d53
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/count_documents.go
@@ -0,0 +1,87 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"time"
+)
+
+// CountDocuments handles the full cycle dispatch and execution of a countDocuments command against the provided
+// topology.
+func CountDocuments(
+	ctx context.Context,
+	cmd command.CountDocuments,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	registry *bsoncodec.Registry,
+	opts ...*options.CountOptions,
+) (int64, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return 0, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return 0, err
+	}
+	defer conn.Close()
+
+	rp, err := getReadPrefBasedOnTransaction(cmd.ReadPref, cmd.Session)
+	if err != nil {
+		return 0, err
+	}
+	cmd.ReadPref = rp
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return 0, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	countOpts := options.MergeCountOptions(opts...)
+
+	// ignore Skip and Limit because we already have these options in the pipeline
+	if countOpts.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{
+			"maxTimeMS", bsonx.Int64(int64(*countOpts.MaxTime / time.Millisecond)),
+		})
+	}
+	if countOpts.Collation != nil {
+		if desc.WireVersion.Max < 5 {
+			return 0, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(countOpts.Collation.ToDocument())})
+	}
+	if countOpts.Hint != nil {
+		hintElem, err := interfaceToElement("hint", countOpts.Hint, registry)
+		if err != nil {
+			return 0, err
+		}
+
+		cmd.Opts = append(cmd.Opts, hintElem)
+	}
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/create_indexes.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/create_indexes.go
new file mode 100644
index 0000000..48b277e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/create_indexes.go
@@ -0,0 +1,77 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// CreateIndexes handles the full cycle dispatch and execution of a createIndexes
+// command against the provided topology.
+func CreateIndexes(
+	ctx context.Context,
+	cmd command.CreateIndexes,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	opts ...*options.CreateIndexesOptions,
+) (result.CreateIndexes, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.CreateIndexes{}, err
+	}
+
+	desc := ss.Description()
+	if desc.WireVersion.Max < 5 && hasCollation(cmd) {
+		return result.CreateIndexes{}, ErrCollation
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return result.CreateIndexes{}, err
+	}
+	defer conn.Close()
+
+	cio := options.MergeCreateIndexesOptions(opts...)
+	if cio.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*cio.MaxTime / time.Millisecond))})
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.CreateIndexes{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	return cmd.RoundTrip(ctx, ss.Description(), conn)
+}
+
+func hasCollation(cmd command.CreateIndexes) bool {
+	for _, ind := range cmd.Indexes {
+		if _, err := ind.Document().LookupErr("collation"); err == nil {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/delete.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/delete.go
new file mode 100644
index 0000000..c622f9b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/delete.go
@@ -0,0 +1,116 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// Delete handles the full cycle dispatch and execution of a delete command against the provided
+// topology.
+func Delete(
+	ctx context.Context,
+	cmd command.Delete,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	opts ...*options.DeleteOptions,
+) (result.Delete, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.Delete{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() && writeconcern.AckWrite(cmd.WriteConcern) {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.Delete{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	deleteOpts := options.MergeDeleteOptions(opts...)
+	if deleteOpts.Collation != nil {
+		if ss.Description().WireVersion.Max < 5 {
+			return result.Delete{}, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(deleteOpts.Collation.ToDocument())})
+	}
+
+	// Execute in a single trip if retry writes not supported, or retry not enabled
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false // explicitly set to false to prevent encoding transaction number
+		}
+		return delete(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, originalErr := delete(ctx, cmd, ss, nil)
+
+	// Retry if appropriate
+	if cerr, ok := originalErr.(command.Error); ok && cerr.Retryable() ||
+		res.WriteConcernError != nil && command.IsWriteConcernErrorRetryable(res.WriteConcernError) {
+		ss, err := topo.SelectServer(ctx, selector)
+
+		// Return original error if server selection fails or new server does not support retryable writes
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return res, originalErr
+		}
+
+		return delete(ctx, cmd, ss, cerr)
+	}
+	return res, originalErr
+}
+
+func delete(
+	ctx context.Context,
+	cmd command.Delete,
+	ss *topology.SelectedServer,
+	oldErr error,
+) (result.Delete, error) {
+	desc := ss.Description()
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.Delete{}, oldErr
+		}
+		return result.Delete{}, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return result.Delete{}, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
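
For unacknowledged writes, delete runs the round trip on a background goroutine, swallows any panic, closes the connection when done, and immediately hands the caller ErrUnacknowledgedWrite. A generic sketch of that fire-and-forget pattern; conn and roundTrip here are placeholders, not driver APIs:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

type conn struct{}

func (c *conn) Close() error { return nil }

// roundTrip is a placeholder for the real network round trip.
func roundTrip(ctx context.Context, c *conn) error {
	time.Sleep(10 * time.Millisecond)
	return nil
}

var errUnacknowledgedWrite = errors.New("unacknowledged write")

func fireAndForget(ctx context.Context, c *conn) error {
	go func() {
		// A panic on this goroutine must not crash the process.
		defer func() { _ = recover() }()
		defer c.Close()
		_, _ = struct{}{}, roundTrip(ctx, c) // result intentionally discarded
	}()
	// The caller gets a sentinel error instead of a server result.
	return errUnacknowledgedWrite
}

func main() {
	fmt.Println(fireAndForget(context.Background(), &conn{}))
	time.Sleep(20 * time.Millisecond) // let the goroutine finish in this demo
}
```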
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/delete_indexes.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/delete_indexes.go
new file mode 100644
index 0000000..669c797
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/delete_indexes.go
@@ -0,0 +1,62 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// DropIndexes handles the full cycle dispatch and execution of a dropIndexes
+// command against the provided topology.
+func DropIndexes(
+	ctx context.Context,
+	cmd command.DropIndexes,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	opts ...*options.DropIndexesOptions,
+) (bson.Raw, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	dio := options.MergeDropIndexesOptions(opts...)
+	if dio.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*dio.MaxTime / time.Millisecond))})
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	return cmd.RoundTrip(ctx, ss.Description(), conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/dispatch.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/dispatch.go
new file mode 100644
index 0000000..2ec642a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/dispatch.go
@@ -0,0 +1,67 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+)
+
+// ErrCollation is caused if a collation is given for an invalid server version.
+var ErrCollation = errors.New("collation cannot be set for server versions < 3.4")
+
+// ErrArrayFilters is caused if array filters are given for an invalid server version.
+var ErrArrayFilters = errors.New("array filters cannot be set for server versions < 3.6")
+
+func interfaceToDocument(val interface{}, registry *bsoncodec.Registry) (bsonx.Doc, error) {
+	if val == nil {
+		return bsonx.Doc{}, nil
+	}
+
+	if registry == nil {
+		registry = bson.DefaultRegistry
+	}
+
+	if bs, ok := val.([]byte); ok {
+		// Slight optimization so we'll just use MarshalBSON and not go through the codec machinery.
+		val = bson.Raw(bs)
+	}
+
+	// TODO(skriptble): Use a pool of these instead.
+	buf := make([]byte, 0, 256)
+	b, err := bson.MarshalAppendWithRegistry(registry, buf, val)
+	if err != nil {
+		return nil, err
+	}
+	return bsonx.ReadDoc(b)
+}
+
+func interfaceToElement(key string, i interface{}, registry *bsoncodec.Registry) (bsonx.Elem, error) {
+	switch conv := i.(type) {
+	case string:
+		return bsonx.Elem{key, bsonx.String(conv)}, nil
+	case bsonx.Doc:
+		return bsonx.Elem{key, bsonx.Document(conv)}, nil
+	default:
+		doc, err := interfaceToDocument(i, registry)
+		if err != nil {
+			return bsonx.Elem{}, err
+		}
+
+		return bsonx.Elem{key, bsonx.Document(doc)}, nil
+	}
+}
+
+func closeImplicitSession(sess *session.Client) {
+	if sess != nil && sess.SessionType == session.Implicit {
+		sess.EndSession()
+	}
+}
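
interfaceToDocument accepts any Go value, short-circuits []byte as already-marshaled BSON, and otherwise marshals through the registry. A hedged sketch of the same idea against the public bson package from this vendor tree (assuming bson.Marshal, which this driver version exposes):

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/bson"
)

// toBSON converts an arbitrary value to raw BSON bytes, skipping the codec
// round trip for values that are already BSON, in the spirit of
// interfaceToDocument.
func toBSON(val interface{}) ([]byte, error) {
	if bs, ok := val.([]byte); ok {
		return bs, nil // already marshaled; use as-is
	}
	return bson.Marshal(val)
}

func main() {
	raw, err := toBSON(bson.D{{"x", 1}})
	if err != nil {
		panic(err)
	}
	fmt.Println(len(raw), "bytes")
}
```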
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/distinct.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/distinct.go
new file mode 100644
index 0000000..eed7390
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/distinct.go
@@ -0,0 +1,77 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"time"
+)
+
+// Distinct handles the full cycle dispatch and execution of a distinct command against the provided
+// topology.
+func Distinct(
+	ctx context.Context,
+	cmd command.Distinct,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	opts ...*options.DistinctOptions,
+) (result.Distinct, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.Distinct{}, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return result.Distinct{}, err
+	}
+	defer conn.Close()
+
+	rp, err := getReadPrefBasedOnTransaction(cmd.ReadPref, cmd.Session)
+	if err != nil {
+		return result.Distinct{}, err
+	}
+	cmd.ReadPref = rp
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.Distinct{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	distinctOpts := options.MergeDistinctOptions(opts...)
+
+	if distinctOpts.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{
+			"maxTimeMS", bsonx.Int64(int64(*distinctOpts.MaxTime / time.Millisecond)),
+		})
+	}
+	if distinctOpts.Collation != nil {
+		if desc.WireVersion.Max < 5 {
+			return result.Distinct{}, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(distinctOpts.Collation.ToDocument())})
+	}
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/drop_collection.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/drop_collection.go
new file mode 100644
index 0000000..657ba3f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/drop_collection.go
@@ -0,0 +1,52 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// DropCollection handles the full cycle dispatch and execution of a dropCollection
+// command against the provided topology.
+func DropCollection(
+	ctx context.Context,
+	cmd command.DropCollection,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+) (bson.Raw, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	return cmd.RoundTrip(ctx, ss.Description(), conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/drop_database.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/drop_database.go
new file mode 100644
index 0000000..7c9422f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/drop_database.go
@@ -0,0 +1,52 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// DropDatabase handles the full cycle dispatch and execution of a dropDatabase
+// command against the provided topology.
+func DropDatabase(
+	ctx context.Context,
+	cmd command.DropDatabase,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+) (bson.Raw, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	return cmd.RoundTrip(ctx, ss.Description(), conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/end_sessions.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/end_sessions.go
new file mode 100644
index 0000000..46bec84
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/end_sessions.go
@@ -0,0 +1,40 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// EndSessions handles the full cycle dispatch and execution of an endSessions command against the provided
+// topology.
+func EndSessions(
+	ctx context.Context,
+	cmd command.EndSessions,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+) ([]result.EndSessions, []error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, []error{err}
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, []error{err}
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find.go
new file mode 100644
index 0000000..0287ca7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find.go
@@ -0,0 +1,517 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Find handles the full cycle dispatch and execution of a find command against the provided
+// topology.
+func Find(
+	ctx context.Context,
+	cmd command.Find,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	registry *bsoncodec.Registry,
+	opts ...*options.FindOptions,
+) (*BatchCursor, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	if desc.WireVersion.Max < 4 {
+		return legacyFind(ctx, cmd, registry, ss, conn, opts...)
+	}
+
+	rp, err := getReadPrefBasedOnTransaction(cmd.ReadPref, cmd.Session)
+	if err != nil {
+		return nil, err
+	}
+	cmd.ReadPref = rp
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	fo := options.MergeFindOptions(opts...)
+	if fo.AllowPartialResults != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"allowPartialResults", bsonx.Boolean(*fo.AllowPartialResults)})
+	}
+	if fo.BatchSize != nil {
+		elem := bsonx.Elem{"batchSize", bsonx.Int32(*fo.BatchSize)}
+		cmd.Opts = append(cmd.Opts, elem)
+		cmd.CursorOpts = append(cmd.CursorOpts, elem)
+
+		if fo.Limit != nil && *fo.BatchSize != 0 && *fo.Limit <= int64(*fo.BatchSize) {
+			cmd.Opts = append(cmd.Opts, bsonx.Elem{"singleBatch", bsonx.Boolean(true)})
+		}
+	}
+	if fo.Collation != nil {
+		if desc.WireVersion.Max < 5 {
+			return nil, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(fo.Collation.ToDocument())})
+	}
+	if fo.Comment != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"comment", bsonx.String(*fo.Comment)})
+	}
+	if fo.CursorType != nil {
+		switch *fo.CursorType {
+		case options.Tailable:
+			cmd.Opts = append(cmd.Opts, bsonx.Elem{"tailable", bsonx.Boolean(true)})
+		case options.TailableAwait:
+			cmd.Opts = append(cmd.Opts, bsonx.Elem{"tailable", bsonx.Boolean(true)}, bsonx.Elem{"awaitData", bsonx.Boolean(true)})
+		}
+	}
+	if fo.Hint != nil {
+		hintElem, err := interfaceToElement("hint", fo.Hint, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.Opts = append(cmd.Opts, hintElem)
+	}
+	if fo.Limit != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"limit", bsonx.Int64(*fo.Limit)})
+	}
+	if fo.Max != nil {
+		maxElem, err := interfaceToElement("max", fo.Max, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.Opts = append(cmd.Opts, maxElem)
+	}
+	if fo.MaxAwaitTime != nil {
+		// Specified as maxTimeMS in the getMore command rather than in the initial find command.
+		cmd.CursorOpts = append(cmd.CursorOpts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*fo.MaxAwaitTime / time.Millisecond))})
+	}
+	if fo.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*fo.MaxTime / time.Millisecond))})
+	}
+	if fo.Min != nil {
+		minElem, err := interfaceToElement("min", fo.Min, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.Opts = append(cmd.Opts, minElem)
+	}
+	if fo.NoCursorTimeout != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"noCursorTimeout", bsonx.Boolean(*fo.NoCursorTimeout)})
+	}
+	if fo.OplogReplay != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"oplogReplay", bsonx.Boolean(*fo.OplogReplay)})
+	}
+	if fo.Projection != nil {
+		projElem, err := interfaceToElement("projection", fo.Projection, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.Opts = append(cmd.Opts, projElem)
+	}
+	if fo.ReturnKey != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"returnKey", bsonx.Boolean(*fo.ReturnKey)})
+	}
+	if fo.ShowRecordID != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"showRecordId", bsonx.Boolean(*fo.ShowRecordID)})
+	}
+	if fo.Skip != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"skip", bsonx.Int64(*fo.Skip)})
+	}
+	if fo.Snapshot != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"snapshot", bsonx.Boolean(*fo.Snapshot)})
+	}
+	if fo.Sort != nil {
+		sortElem, err := interfaceToElement("sort", fo.Sort, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.Opts = append(cmd.Opts, sortElem)
+	}
+
+	res, err := cmd.RoundTrip(ctx, desc, conn)
+	if err != nil {
+		closeImplicitSession(cmd.Session)
+		return nil, err
+	}
+
+	return NewBatchCursor(bsoncore.Document(res), cmd.Session, cmd.Clock, ss.Server, cmd.CursorOpts...)
+}
+
+// legacyFind handles the dispatch and execution of a find operation against a pre-3.2 server.
+func legacyFind(
+	ctx context.Context,
+	cmd command.Find,
+	registry *bsoncodec.Registry,
+	ss *topology.SelectedServer,
+	conn connection.Connection,
+	opts ...*options.FindOptions,
+) (*BatchCursor, error) {
+	query := wiremessage.Query{
+		FullCollectionName: cmd.NS.DB + "." + cmd.NS.Collection,
+	}
+
+	fo := options.MergeFindOptions(opts...)
+	optsDoc, err := createLegacyOptionsDoc(fo, registry)
+	if err != nil {
+		return nil, err
+	}
+	if fo.Projection != nil {
+		projDoc, err := interfaceToDocument(fo.Projection, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		projRaw, err := projDoc.MarshalBSON()
+		if err != nil {
+			return nil, err
+		}
+		query.ReturnFieldsSelector = projRaw
+	}
+	if fo.Skip != nil {
+		query.NumberToSkip = int32(*fo.Skip)
+		query.SkipSet = true
+	}
+	// batch size of 1 not possible with OP_QUERY because the cursor will be closed immediately
+	if fo.BatchSize != nil && *fo.BatchSize == 1 {
+		query.NumberToReturn = 2
+	} else {
+		query.NumberToReturn = calculateNumberToReturn(fo)
+	}
+	query.Flags = calculateLegacyFlags(fo)
+
+	query.BatchSize = fo.BatchSize
+	if fo.Limit != nil {
+		i := int32(*fo.Limit)
+		query.Limit = &i
+	}
+
+	// set read preference and/or slaveOK flag
+	desc := ss.SelectedDescription()
+	if slaveOkNeeded(cmd.ReadPref, desc) {
+		query.Flags |= wiremessage.SlaveOK
+	}
+	optsDoc = addReadPref(cmd.ReadPref, desc.Server.Kind, optsDoc)
+
+	if cmd.Filter == nil {
+		cmd.Filter = bsonx.Doc{}
+	}
+
+	// filter must be wrapped in $query if other $modifiers are used
+	var queryDoc bsonx.Doc
+	if len(optsDoc) == 0 {
+		queryDoc = cmd.Filter
+	} else {
+		filterDoc := bsonx.Doc{
+			{"$query", bsonx.Document(cmd.Filter)},
+		}
+		// $query should go first
+		queryDoc = append(filterDoc, optsDoc...)
+	}
+
+	queryRaw, err := queryDoc.MarshalBSON()
+	if err != nil {
+		return nil, err
+	}
+	query.Query = queryRaw
+
+	reply, err := roundTripQuery(ctx, query, conn)
+	if err != nil {
+		return nil, err
+	}
+
+	var cursorLimit int32
+	var cursorBatchSize int32
+	if query.Limit != nil {
+		cursorLimit = int32(*query.Limit)
+		if cursorLimit < 0 {
+			cursorLimit *= -1
+		}
+	}
+	if query.BatchSize != nil {
+		cursorBatchSize = int32(*query.BatchSize)
+	}
+
+	return NewLegacyBatchCursor(cmd.NS, reply.CursorID, reply.Documents, cursorLimit, cursorBatchSize, ss.Server)
+}
+
+func createLegacyOptionsDoc(fo *options.FindOptions, registry *bsoncodec.Registry) (bsonx.Doc, error) {
+	var optsDoc bsonx.Doc
+
+	if fo.Collation != nil {
+		return nil, ErrCollation
+	}
+	if fo.Comment != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"$comment", bsonx.String(*fo.Comment)})
+	}
+	if fo.Hint != nil {
+		hintElem, err := interfaceToElement("$hint", fo.Hint, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, hintElem)
+	}
+	if fo.Max != nil {
+		maxElem, err := interfaceToElement("$max", fo.Max, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, maxElem)
+	}
+	if fo.MaxTime != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"$maxTimeMS", bsonx.Int64(int64(*fo.MaxTime / time.Millisecond))})
+	}
+	if fo.Min != nil {
+		minElem, err := interfaceToElement("$min", fo.Min, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, minElem)
+	}
+	if fo.ReturnKey != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"$returnKey", bsonx.Boolean(*fo.ReturnKey)})
+	}
+	if fo.ShowRecordID != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"$showDiskLoc", bsonx.Boolean(*fo.ShowRecordID)})
+	}
+	if fo.Snapshot != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"$snapshot", bsonx.Boolean(*fo.Snapshot)})
+	}
+	if fo.Sort != nil {
+		sortElem, err := interfaceToElement("$orderby", fo.Sort, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, sortElem)
+	}
+
+	return optsDoc, nil
+}
+
+func calculateLegacyFlags(fo *options.FindOptions) wiremessage.QueryFlag {
+	var flags wiremessage.QueryFlag
+
+	if fo.AllowPartialResults != nil {
+		flags |= wiremessage.Partial
+	}
+	if fo.CursorType != nil {
+		switch *fo.CursorType {
+		case options.Tailable:
+			flags |= wiremessage.TailableCursor
+		case options.TailableAwait:
+			flags |= wiremessage.TailableCursor
+			flags |= wiremessage.AwaitData
+		}
+	}
+	if fo.NoCursorTimeout != nil {
+		flags |= wiremessage.NoCursorTimeout
+	}
+	if fo.OplogReplay != nil {
+		flags |= wiremessage.OplogReplay
+	}
+
+	return flags
+}
+
+// calculate the number to return for the first find query
+func calculateNumberToReturn(opts *options.FindOptions) int32 {
+	var numReturn int32
+	var limit int32
+	var batchSize int32
+
+	if opts.Limit != nil {
+		limit = int32(*opts.Limit)
+	}
+	if opts.BatchSize != nil {
+		batchSize = int32(*opts.BatchSize)
+	}
+
+	if limit < 0 {
+		numReturn = limit
+	} else if limit == 0 {
+		numReturn = batchSize
+	} else if limit < batchSize {
+		numReturn = limit
+	} else {
+		numReturn = batchSize
+	}
+
+	return numReturn
+}
+
+func slaveOkNeeded(rp *readpref.ReadPref, desc description.SelectedServer) bool {
+	if desc.Kind == description.Single && desc.Server.Kind != description.Mongos {
+		return true
+	}
+	if rp == nil {
+		// assume primary
+		return false
+	}
+
+	return rp.Mode() != readpref.PrimaryMode
+}
+
+func addReadPref(rp *readpref.ReadPref, kind description.ServerKind, query bsonx.Doc) bsonx.Doc {
+	if !readPrefNeeded(rp, kind) {
+		return query
+	}
+
+	doc := createReadPref(rp)
+	if doc == nil {
+		return query
+	}
+
+	return query.Append("$readPreference", bsonx.Document(doc))
+}
+
+func readPrefNeeded(rp *readpref.ReadPref, kind description.ServerKind) bool {
+	if kind != description.Mongos || rp == nil {
+		return false
+	}
+
+	// simple Primary or SecondaryPreferred is communicated via slaveOk to Mongos.
+	if rp.Mode() == readpref.PrimaryMode || rp.Mode() == readpref.SecondaryPreferredMode {
+		if _, ok := rp.MaxStaleness(); !ok && len(rp.TagSets()) == 0 {
+			return false
+		}
+	}
+
+	return true
+}
+
+func createReadPref(rp *readpref.ReadPref) bsonx.Doc {
+	if rp == nil {
+		return nil
+	}
+
+	doc := bsonx.Doc{}
+
+	switch rp.Mode() {
+	case readpref.PrimaryMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("primary")})
+	case readpref.PrimaryPreferredMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("primaryPreferred")})
+	case readpref.SecondaryPreferredMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("secondaryPreferred")})
+	case readpref.SecondaryMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("secondary")})
+	case readpref.NearestMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("nearest")})
+	}
+
+	sets := make([]bsonx.Val, 0, len(rp.TagSets()))
+	for _, ts := range rp.TagSets() {
+		if len(ts) == 0 {
+			continue
+		}
+		set := bsonx.Doc{}
+		for _, t := range ts {
+			set = append(set, bsonx.Elem{t.Name, bsonx.String(t.Value)})
+		}
+		sets = append(sets, bsonx.Document(set))
+	}
+	if len(sets) > 0 {
+		doc = append(doc, bsonx.Elem{"tags", bsonx.Array(sets)})
+	}
+	if d, ok := rp.MaxStaleness(); ok {
+		doc = append(doc, bsonx.Elem{"maxStalenessSeconds", bsonx.Int32(int32(d.Seconds()))})
+	}
+
+	return doc
+}
+
+func roundTripQuery(ctx context.Context, query wiremessage.Query, conn connection.Connection) (wiremessage.Reply, error) {
+	err := conn.WriteWireMessage(ctx, query)
+	if err != nil {
+		if _, ok := err.(command.Error); ok {
+			return wiremessage.Reply{}, err
+		}
+		return wiremessage.Reply{}, command.Error{
+			Message: err.Error(),
+			Labels:  []string{command.NetworkError},
+		}
+	}
+
+	wm, err := conn.ReadWireMessage(ctx)
+	if err != nil {
+		if _, ok := err.(command.Error); ok {
+			return wiremessage.Reply{}, err
+		}
+		// Connection errors are transient
+		return wiremessage.Reply{}, command.Error{
+			Message: err.Error(),
+			Labels:  []string{command.NetworkError},
+		}
+	}
+
+	reply, ok := wm.(wiremessage.Reply)
+	if !ok {
+		return wiremessage.Reply{}, errors.New("did not receive OP_REPLY response")
+	}
+
+	err = validateOpReply(reply)
+	if err != nil {
+		return wiremessage.Reply{}, err
+	}
+
+	return reply, nil
+}
+
+func validateOpReply(reply wiremessage.Reply) error {
+	if int(reply.NumberReturned) != len(reply.Documents) {
+		return command.NewCommandResponseError(command.ReplyDocumentMismatch, nil)
+	}
+
+	if reply.ResponseFlags&wiremessage.QueryFailure == wiremessage.QueryFailure {
+		return command.QueryFailureError{
+			Message:  "query failure",
+			Response: reply.Documents[0],
+		}
+	}
+
+	return nil
+}
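
calculateNumberToReturn encodes the legacy OP_QUERY rules: a negative limit passes through unchanged (the server returns one batch and closes the cursor), a zero limit defers to batchSize, and otherwise the smaller of the two wins; batchSize 1 was already special-cased to 2 above because numberToReturn=1 also closes the cursor. A standalone restatement with sample values:

```go
package main

import "fmt"

// numberToReturn restates calculateNumberToReturn for the legacy find path.
func numberToReturn(limit, batchSize int32) int32 {
	switch {
	case limit < 0:
		return limit // negative limit: single batch, server closes the cursor
	case limit == 0:
		return batchSize
	case limit < batchSize:
		return limit
	default:
		return batchSize
	}
}

func main() {
	fmt.Println(numberToReturn(-5, 10)) // -5
	fmt.Println(numberToReturn(0, 10))  // 10
	fmt.Println(numberToReturn(3, 10))  // 3
}
```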
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_delete.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_delete.go
new file mode 100644
index 0000000..8e45e88
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_delete.go
@@ -0,0 +1,137 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// FindOneAndDelete handles the full cycle dispatch and execution of a FindOneAndDelete command against the provided
+// topology.
+func FindOneAndDelete(
+	ctx context.Context,
+	cmd command.FindOneAndDelete,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	registry *bsoncodec.Registry,
+	opts ...*options.FindOneAndDeleteOptions,
+) (result.FindAndModify, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	do := options.MergeFindOneAndDeleteOptions(opts...)
+	if do.Collation != nil {
+		if ss.Description().WireVersion.Max < 5 {
+			return result.FindAndModify{}, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(do.Collation.ToDocument())})
+	}
+	if do.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMs", bsonx.Int64(int64(*do.MaxTime / time.Millisecond))})
+	}
+	if do.Projection != nil {
+		projElem, err := interfaceToElement("fields", do.Projection, registry)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, projElem)
+	}
+	if do.Sort != nil {
+		sortElem, err := interfaceToElement("sort", do.Sort, registry)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, sortElem)
+	}
+
+	// Execute in a single trip if retry writes not supported, or retry not enabled
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false // explicitly set to false to prevent encoding transaction number
+		}
+		return findOneAndDelete(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, originalErr := findOneAndDelete(ctx, cmd, ss, nil)
+
+	// Retry if appropriate
+	if cerr, ok := originalErr.(command.Error); ok && cerr.Retryable() {
+		ss, err := topo.SelectServer(ctx, selector)
+
+		// Return original error if server selection fails or new server does not support retryable writes
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return result.FindAndModify{}, originalErr
+		}
+
+		return findOneAndDelete(ctx, cmd, ss, cerr)
+	}
+
+	return res, originalErr
+}
+
+func findOneAndDelete(
+	ctx context.Context,
+	cmd command.FindOneAndDelete,
+	ss *topology.SelectedServer,
+	oldErr error,
+) (result.FindAndModify, error) {
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.FindAndModify{}, oldErr
+		}
+		return result.FindAndModify{}, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return result.FindAndModify{}, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
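
FindOneAndDelete follows the same retryable-write choreography as Delete: disable retry when unsupported, otherwise increment the transaction number, attempt once, and on a retryable error reselect a server for at most one more attempt, falling back to the original error if the retry cannot proceed. A schematic of that control flow with placeholder helpers:

```go
package main

import (
	"errors"
	"fmt"
)

var errTransient = errors.New("transient network error")

func retryable(err error) bool { return err == errTransient }

// attemptOnceOrTwice models the retryable-write pattern: one attempt, and at
// most one retry when the first error is retryable and a retry is possible.
func attemptOnceOrTwice(attempt func() error, canRetry func() bool) error {
	origErr := attempt()
	if origErr != nil && retryable(origErr) {
		if !canRetry() {
			return origErr // e.g. server reselection failed; surface the original error
		}
		return attempt()
	}
	return origErr
}

func main() {
	tries := 0
	err := attemptOnceOrTwice(func() error {
		tries++
		if tries == 1 {
			return errTransient
		}
		return nil
	}, func() bool { return true })
	fmt.Println(tries, err) // 2 <nil>
}
```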
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_replace.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_replace.go
new file mode 100644
index 0000000..cde5bf5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_replace.go
@@ -0,0 +1,146 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// FindOneAndReplace handles the full cycle dispatch and execution of a FindOneAndReplace command against the provided
+// topology.
+func FindOneAndReplace(
+	ctx context.Context,
+	cmd command.FindOneAndReplace,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	registry *bsoncodec.Registry,
+	opts ...*options.FindOneAndReplaceOptions,
+) (result.FindAndModify, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	ro := options.MergeFindOneAndReplaceOptions(opts...)
+	if ro.BypassDocumentValidation != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"byapssDocumentValidation", bsonx.Boolean(*ro.BypassDocumentValidation)})
+	}
+	if ro.Collation != nil {
+		if ss.Description().WireVersion.Max < 5 {
+			return result.FindAndModify{}, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(ro.Collation.ToDocument())})
+	}
+	if ro.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*ro.MaxTime / time.Millisecond))})
+	}
+	if ro.Projection != nil {
+		projElem, err := interfaceToElement("fields", ro.Projection, registry)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, projElem)
+	}
+	if ro.ReturnDocument != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"new", bsonx.Boolean(*ro.ReturnDocument == options.After)})
+	}
+	if ro.Sort != nil {
+		sortElem, err := interfaceToElement("sort", ro.Sort, registry)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, sortElem)
+	}
+	if ro.Upsert != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"upsert", bsonx.Boolean(*ro.Upsert)})
+	}
+
+	// Execute in a single trip if retry writes not supported, or retry not enabled
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false // explicitly set to false to prevent encoding transaction number
+		}
+		return findOneAndReplace(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, originalErr := findOneAndReplace(ctx, cmd, ss, nil)
+
+	// Retry if appropriate
+	if cerr, ok := originalErr.(command.Error); ok && cerr.Retryable() {
+		ss, err := topo.SelectServer(ctx, selector)
+
+		// Return original error if server selection fails or new server does not support retryable writes
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return result.FindAndModify{}, originalErr
+		}
+
+		return findOneAndReplace(ctx, cmd, ss, cerr)
+	}
+
+	return res, originalErr
+}
+
+func findOneAndReplace(
+	ctx context.Context,
+	cmd command.FindOneAndReplace,
+	ss *topology.SelectedServer,
+	oldErr error,
+) (result.FindAndModify, error) {
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.FindAndModify{}, oldErr
+		}
+		return result.FindAndModify{}, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return result.FindAndModify{}, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_update.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_update.go
new file mode 100644
index 0000000..d6c8e67
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_update.go
@@ -0,0 +1,154 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// FindOneAndUpdate handles the full cycle dispatch and execution of a FindOneAndUpdate command against the provided
+// topology.
+func FindOneAndUpdate(
+	ctx context.Context,
+	cmd command.FindOneAndUpdate,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	registry *bsoncodec.Registry,
+	opts ...*options.FindOneAndUpdateOptions,
+) (result.FindAndModify, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	uo := options.MergeFindOneAndUpdateOptions(opts...)
+	if uo.ArrayFilters != nil {
+		arr, err := uo.ArrayFilters.ToArray()
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"arrayFilters", bsonx.Array(arr)})
+	}
+	if uo.BypassDocumentValidation != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"bypassDocumentValidation", bsonx.Boolean(*uo.BypassDocumentValidation)})
+	}
+	if uo.Collation != nil {
+		if ss.Description().WireVersion.Max < 5 {
+			return result.FindAndModify{}, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(uo.Collation.ToDocument())})
+	}
+	if uo.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*uo.MaxTime / time.Millisecond))})
+	}
+	if uo.Projection != nil {
+		projElem, err := interfaceToElement("fields", uo.Projection, registry)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, projElem)
+	}
+	if uo.ReturnDocument != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"new", bsonx.Boolean(*uo.ReturnDocument == options.After)})
+	}
+	if uo.Sort != nil {
+		sortElem, err := interfaceToElement("sort", uo.Sort, registry)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, sortElem)
+	}
+	if uo.Upsert != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"upsert", bsonx.Boolean(*uo.Upsert)})
+	}
+
+	// Execute in a single trip if retry writes not supported, or retry not enabled
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false // explicitly set to false to prevent encoding transaction number
+		}
+		return findOneAndUpdate(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, originalErr := findOneAndUpdate(ctx, cmd, ss, nil)
+
+	// Retry if appropriate
+	if cerr, ok := originalErr.(command.Error); ok && cerr.Retryable() {
+		ss, err := topo.SelectServer(ctx, selector)
+
+		// Return original error if server selection fails or new server does not support retryable writes
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return result.FindAndModify{}, originalErr
+		}
+
+		return findOneAndUpdate(ctx, cmd, ss, cerr)
+	}
+
+	return res, originalErr
+}
+
+func findOneAndUpdate(
+	ctx context.Context,
+	cmd command.FindOneAndUpdate,
+	ss *topology.SelectedServer,
+	oldErr error,
+) (result.FindAndModify, error) {
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.FindAndModify{}, oldErr
+		}
+		return result.FindAndModify{}, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return result.FindAndModify{}, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/insert.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/insert.go
new file mode 100644
index 0000000..214774c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/insert.go
@@ -0,0 +1,118 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// Insert handles the full cycle dispatch and execution of an insert command against the provided
+// topology.
+func Insert(
+	ctx context.Context,
+	cmd command.Insert,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	opts ...*options.InsertManyOptions,
+) (result.Insert, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.Insert{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.Insert{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	insertOpts := options.MergeInsertManyOptions(opts...)
+
+	if insertOpts.BypassDocumentValidation != nil && ss.Description().WireVersion.Includes(4) {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"bypassDocumentValidation", bsonx.Boolean(*insertOpts.BypassDocumentValidation)})
+	}
+	if insertOpts.Ordered != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"ordered", bsonx.Boolean(*insertOpts.Ordered)})
+	}
+
+	// Execute in a single trip if retry writes not supported, or retry not enabled
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false // explicitly set to false to prevent encoding transaction number
+		}
+		return insert(ctx, cmd, ss, nil)
+	}
+
+	// TODO figure out best place to put retry write.  Command shouldn't have to know about this field.
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, originalErr := insert(ctx, cmd, ss, nil)
+
+	// Retry if appropriate
+	if cerr, ok := originalErr.(command.Error); ok && cerr.Retryable() ||
+		res.WriteConcernError != nil && command.IsWriteConcernErrorRetryable(res.WriteConcernError) {
+		ss, err := topo.SelectServer(ctx, selector)
+
+		// Return original error if server selection fails or new server does not support retryable writes
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return res, originalErr
+		}
+
+		return insert(ctx, cmd, ss, cerr)
+	}
+
+	return res, originalErr
+}
+
+func insert(
+	ctx context.Context,
+	cmd command.Insert,
+	ss *topology.SelectedServer,
+	oldErr error,
+) (result.Insert, error) {
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.Insert{}, oldErr
+		}
+		return result.Insert{}, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return result.Insert{}, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/kill_cursors.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/kill_cursors.go
new file mode 100644
index 0000000..574d027
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/kill_cursors.go
@@ -0,0 +1,37 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// KillCursors handles the full cycle dispatch and execution of a killCursors command against the provided
+// topology.
+func KillCursors(
+	ctx context.Context,
+	cmd command.KillCursors,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+) (result.KillCursors, error) {
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.KillCursors{}, err
+	}
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return result.KillCursors{}, err
+	}
+	defer conn.Close()
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_collections.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_collections.go
new file mode 100644
index 0000000..c52df2d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_collections.go
@@ -0,0 +1,133 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// ErrFilterType is returned when the value of the "name" filter field is not a string.
+var ErrFilterType = errors.New("filter must be a string")
+
+// ListCollections handles the full cycle dispatch and execution of a listCollections command against the provided
+// topology.
+func ListCollections(
+	ctx context.Context,
+	cmd command.ListCollections,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	opts ...*options.ListCollectionsOptions,
+) (*ListCollectionsBatchCursor, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	if ss.Description().WireVersion.Max < 3 {
+		return legacyListCollections(ctx, cmd, ss, conn)
+	}
+
+	rp, err := getReadPrefBasedOnTransaction(cmd.ReadPref, cmd.Session)
+	if err != nil {
+		return nil, err
+	}
+	cmd.ReadPref = rp
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	lc := options.MergeListCollectionsOptions(opts...)
+	if lc.NameOnly != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"nameOnly", bsonx.Boolean(*lc.NameOnly)})
+	}
+
+	res, err := cmd.RoundTrip(ctx, ss.Description(), conn)
+	if err != nil {
+		closeImplicitSession(cmd.Session)
+		return nil, err
+	}
+
+	batchCursor, err := NewBatchCursor(bsoncore.Document(res), cmd.Session, cmd.Clock, ss.Server, cmd.CursorOpts...)
+	if err != nil {
+		closeImplicitSession(cmd.Session)
+		return nil, err
+	}
+
+	return NewListCollectionsBatchCursor(batchCursor)
+}
+
+func legacyListCollections(
+	ctx context.Context,
+	cmd command.ListCollections,
+	ss *topology.SelectedServer,
+	conn connection.Connection,
+) (*ListCollectionsBatchCursor, error) {
+	filter, err := transformFilter(cmd.Filter, cmd.DB)
+	if err != nil {
+		return nil, err
+	}
+
+	findCmd := command.Find{
+		NS:       command.NewNamespace(cmd.DB, "system.namespaces"),
+		ReadPref: cmd.ReadPref,
+		Filter:   filter,
+	}
+
+	// The registry isn't needed here: it is only used to build BSON documents for find options, none of which apply in this case.
+	batchCursor, err := legacyFind(ctx, findCmd, nil, ss, conn)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewLegacyListCollectionsBatchCursor(batchCursor)
+}
+
+// transformFilter modifies the user-supplied filter to prefix the "name" field with the database name.
+// It returns the original filter if the name field is not present, or a copy with the modified name field if it is.
+func transformFilter(filter bsonx.Doc, dbName string) (bsonx.Doc, error) {
+	if filter == nil {
+		return filter, nil
+	}
+
+	if nameVal, err := filter.LookupErr("name"); err == nil {
+		name, ok := nameVal.StringValueOK()
+		if !ok {
+			return nil, ErrFilterType
+		}
+
+		filterCopy := filter.Copy()
+		filterCopy.Set("name", bsonx.String(dbName+"."+name))
+		return filterCopy, nil
+	}
+	return filter, nil
+}
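+
+// For example, assuming a database named "db" (illustrative values only), a
+// filter of
+//
+//	bsonx.Doc{{"name", bsonx.String("coll")}}
+//
+// is rewritten to
+//
+//	bsonx.Doc{{"name", bsonx.String("db.coll")}}
+//
+// so that it matches the fully qualified names stored in system.namespaces.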
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_collections_batch_cursor.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_collections_batch_cursor.go
new file mode 100644
index 0000000..11fa77d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_collections_batch_cursor.go
@@ -0,0 +1,121 @@
+package driver
+
+import (
+	"context"
+	"errors"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// ListCollectionsBatchCursor is a special batch cursor returned from ListCollections that properly
+// handles current and legacy ListCollections operations.
+type ListCollectionsBatchCursor struct {
+	legacy       bool
+	bc           *BatchCursor
+	currentBatch []byte
+	err          error
+}
+
+// NewListCollectionsBatchCursor creates a new non-legacy ListCollectionsBatchCursor.
+func NewListCollectionsBatchCursor(bc *BatchCursor) (*ListCollectionsBatchCursor, error) {
+	if bc == nil {
+		return nil, errors.New("batch cursor must not be nil")
+	}
+	return &ListCollectionsBatchCursor{bc: bc}, nil
+}
+
+// NewLegacyListCollectionsBatchCursor creates a new legacy ListCollectionsBatchCursor.
+func NewLegacyListCollectionsBatchCursor(bc *BatchCursor) (*ListCollectionsBatchCursor, error) {
+	if bc == nil {
+		return nil, errors.New("batch cursor must not be nil")
+	}
+	return &ListCollectionsBatchCursor{legacy: true, bc: bc}, nil
+}
+
+// ID returns the cursor ID for this batch cursor.
+func (lcbc *ListCollectionsBatchCursor) ID() int64 {
+	return lcbc.bc.ID()
+}
+
+// Next indicates if there is another batch available. Returning false does not necessarily indicate
+// that the cursor is closed. This method will return false when an empty batch is returned.
+//
+// If Next returns true, there is a valid batch of documents available. If Next returns false, there
+// is not a valid batch of documents available.
+func (lcbc *ListCollectionsBatchCursor) Next(ctx context.Context) bool {
+	if !lcbc.bc.Next(ctx) {
+		return false
+	}
+
+	if !lcbc.legacy {
+		lcbc.currentBatch = lcbc.bc.currentBatch
+		return true
+	}
+
+	batch := lcbc.bc.currentBatch
+	lcbc.currentBatch = lcbc.currentBatch[:0]
+	var doc bsoncore.Document
+	var ok bool
+	for {
+		doc, batch, ok = bsoncore.ReadDocument(batch)
+		if !ok {
+			break
+		}
+
+		doc, lcbc.err = lcbc.projectNameElement(doc)
+		if lcbc.err != nil {
+			return false
+		}
+		lcbc.currentBatch = append(lcbc.currentBatch, doc...)
+	}
+
+	return true
+}
+
+// Batch will append the current batch of documents to dst. RequiredBytes can be called to determine
+// the length of the current batch of documents.
+//
+// If there is no batch available, this method does nothing.
+func (lcbc *ListCollectionsBatchCursor) Batch(dst []byte) []byte {
+	return append(dst, lcbc.currentBatch...)
+}
+
+// RequiredBytes returns the number of bytes required for the current batch.
+func (lcbc *ListCollectionsBatchCursor) RequiredBytes() int { return len(lcbc.currentBatch) }
+
+// Err returns the latest error encountered.
+func (lcbc *ListCollectionsBatchCursor) Err() error {
+	if lcbc.err != nil {
+		return lcbc.err
+	}
+	return lcbc.bc.Err()
+}
+
+// Close closes this batch cursor.
+func (lcbc *ListCollectionsBatchCursor) Close(ctx context.Context) error { return lcbc.bc.Close(ctx) }
+
+// projectNameElement strips the database prefix from the "name" field of a document returned by a legacy server.
+func (*ListCollectionsBatchCursor) projectNameElement(rawDoc bsoncore.Document) (bsoncore.Document, error) {
+	elems, err := rawDoc.Elements()
+	if err != nil {
+		return nil, err
+	}
+
+	var filteredElems []byte
+	for _, elem := range elems {
+		key := elem.Key()
+		if key != "name" {
+			filteredElems = append(filteredElems, elem...)
+			continue
+		}
+
+		name := elem.Value().StringValue()
+		collName := name[strings.Index(name, ".")+1:]
+		filteredElems = bsoncore.AppendStringElement(filteredElems, "name", collName)
+	}
+
+	var filteredDoc []byte
+	filteredDoc = bsoncore.BuildDocument(filteredDoc, filteredElems)
+	return filteredDoc, nil
+}
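+
+// For example (illustrative values), a legacy server returns documents such
+// as {"name": "db.coll"}; projectNameElement rewrites them to
+// {"name": "coll"} so callers see the same shape as the modern
+// listCollections response.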
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_databases.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_databases.go
new file mode 100644
index 0000000..608269b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_databases.go
@@ -0,0 +1,60 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// ListDatabases handles the full cycle dispatch and execution of a listDatabases command against the provided
+// topology.
+func ListDatabases(
+	ctx context.Context,
+	cmd command.ListDatabases,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	opts ...*options.ListDatabasesOptions,
+) (result.ListDatabases, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.ListDatabases{}, err
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return result.ListDatabases{}, err
+	}
+	defer conn.Close()
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.ListDatabases{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	ld := options.MergeListDatabasesOptions(opts...)
+	if ld.NameOnly != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"nameOnly", bsonx.Boolean(*ld.NameOnly)})
+	}
+
+	return cmd.RoundTrip(ctx, ss.Description(), conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_indexes.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_indexes.go
new file mode 100644
index 0000000..d40ef96
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_indexes.go
@@ -0,0 +1,105 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// ListIndexes handles the full cycle dispatch and execution of a listIndexes command against the provided
+// topology.
+func ListIndexes(
+	ctx context.Context,
+	cmd command.ListIndexes,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	opts ...*options.ListIndexesOptions,
+) (*BatchCursor, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	if ss.Description().WireVersion.Max < 3 {
+		return legacyListIndexes(ctx, cmd, ss, conn, opts...)
+	}
+
+	lio := options.MergeListIndexesOptions(opts...)
+	if lio.BatchSize != nil {
+		elem := bsonx.Elem{"batchSize", bsonx.Int32(*lio.BatchSize)}
+		cmd.Opts = append(cmd.Opts, elem)
+		cmd.CursorOpts = append(cmd.CursorOpts, elem)
+	}
+	if lio.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*lio.MaxTime / time.Millisecond))})
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	res, err := cmd.RoundTrip(ctx, ss.Description(), conn)
+	if err != nil {
+		closeImplicitSession(cmd.Session)
+		return nil, err
+	}
+
+	return NewBatchCursor(bsoncore.Document(res), cmd.Session, cmd.Clock, ss.Server, cmd.CursorOpts...)
+}
+
+func legacyListIndexes(
+	ctx context.Context,
+	cmd command.ListIndexes,
+	ss *topology.SelectedServer,
+	conn connection.Connection,
+	opts ...*options.ListIndexesOptions,
+) (*BatchCursor, error) {
+	lio := options.MergeListIndexesOptions(opts...)
+	ns := cmd.NS.DB + "." + cmd.NS.Collection
+
+	findCmd := command.Find{
+		NS: command.NewNamespace(cmd.NS.DB, "system.indexes"),
+		Filter: bsonx.Doc{
+			{"ns", bsonx.String(ns)},
+		},
+	}
+
+	findOpts := options.Find()
+	if lio.BatchSize != nil {
+		findOpts.SetBatchSize(*lio.BatchSize)
+	}
+	if lio.MaxTime != nil {
+		findOpts.SetMaxTime(*lio.MaxTime)
+	}
+
+	return legacyFind(ctx, findCmd, nil, ss, conn, findOpts)
+}
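+
+// On servers that predate the listIndexes command (wire version < 3,
+// roughly MongoDB older than 3.0), the lookup above is equivalent to the
+// shell query (database name illustrative):
+//
+//	db.system.indexes.find({ns: "db.coll"})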
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/models.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/models.go
new file mode 100644
index 0000000..17ceb60
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/models.go
@@ -0,0 +1,81 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+)
+
+// WriteModel is the interface satisfied by all models for bulk writes.
+type WriteModel interface {
+	writeModel()
+}
+
+// InsertOneModel is the write model for insert operations.
+type InsertOneModel struct {
+	Document interface{}
+}
+
+func (InsertOneModel) writeModel() {}
+
+// DeleteOneModel is the write model for delete operations.
+type DeleteOneModel struct {
+	Filter    interface{}
+	Collation *options.Collation
+}
+
+func (DeleteOneModel) writeModel() {}
+
+// DeleteManyModel is the write model for deleteMany operations.
+type DeleteManyModel struct {
+	Filter    interface{}
+	Collation *options.Collation
+}
+
+func (DeleteManyModel) writeModel() {}
+
+// UpdateModel contains the fields that are shared between the ReplaceOneModel, UpdateOneModel, and UpdateManyModel types
+type UpdateModel struct {
+	Collation *options.Collation
+	Upsert    bool
+	UpsertSet bool
+}
+
+// ReplaceOneModel is the write model for replace operations.
+type ReplaceOneModel struct {
+	Filter      interface{}
+	Replacement interface{}
+	UpdateModel
+}
+
+func (ReplaceOneModel) writeModel() {}
+
+// UpdateOneModel is the write model for update operations.
+type UpdateOneModel struct {
+	Filter interface{}
+	Update interface{}
+	// The default is to not send a value. For servers < 3.6, an error is raised if a value is
+	// given; the same applies to unacknowledged writes using opcodes.
+	ArrayFilters    options.ArrayFilters
+	ArrayFiltersSet bool
+	UpdateModel
+}
+
+func (UpdateOneModel) writeModel() {}
+
+// UpdateManyModel is the write model for updateMany operations.
+type UpdateManyModel struct {
+	Filter interface{}
+	Update interface{}
+	// The default is to not send a value. For servers < 3.6, an error is raised if a value is
+	// given; the same applies to unacknowledged writes using opcodes.
+	ArrayFilters    options.ArrayFilters
+	ArrayFiltersSet bool
+	UpdateModel
+}
+
+func (UpdateManyModel) writeModel() {}
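+
+// A caller might describe a bulk write as a slice of these models, for
+// example (doc and filter are illustrative placeholders):
+//
+//	models := []WriteModel{
+//		InsertOneModel{Document: doc},
+//		DeleteOneModel{Filter: filter},
+//	}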
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/read.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/read.go
new file mode 100644
index 0000000..e31877c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/read.go
@@ -0,0 +1,85 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Read handles the full cycle dispatch and execution of a read command against the provided
+// topology.
+func Read(
+	ctx context.Context,
+	cmd command.Read,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+) (bson.Raw, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	if cmd.Session != nil && cmd.Session.TransactionRunning() {
+		// When command.Read is used directly, this implies an operation-level
+		// read preference, so we do not override it with the transaction read pref.
+		err = checkTransactionReadPref(cmd.ReadPref)
+
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	return cmd.RoundTrip(ctx, ss.Description(), conn)
+}
+
+func getReadPrefBasedOnTransaction(current *readpref.ReadPref, sess *session.Client) (*readpref.ReadPref, error) {
+	if sess != nil && sess.TransactionRunning() {
+		// Transaction's read preference always takes priority
+		current = sess.CurrentRp
+		err := checkTransactionReadPref(current)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return current, nil
+}
+
+func checkTransactionReadPref(pref *readpref.ReadPref) error {
+	if pref != nil && (pref.Mode() == readpref.SecondaryMode ||
+		pref.Mode() == readpref.SecondaryPreferredMode ||
+		pref.Mode() == readpref.NearestMode ||
+		pref.Mode() == readpref.PrimaryPreferredMode) {
+		return command.ErrNonPrimaryRP
+	}
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/read_cursor.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/read_cursor.go
new file mode 100644
index 0000000..fdc792c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/read_cursor.go
@@ -0,0 +1,69 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// ReadCursor handles the full cycle dispatch and execution of a read command against the provided topology and
+// returns a BatchCursor over the resulting BSON reader.
+func ReadCursor(
+	ctx context.Context,
+	cmd command.Read,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	cursorOpts ...bsonx.Elem,
+) (*BatchCursor, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, conn)
+	if err != nil {
+		if cmd.Session != nil && cmd.Session.SessionType == session.Implicit {
+			cmd.Session.EndSession()
+		}
+		return nil, err
+	}
+
+	cursor, err := NewBatchCursor(bsoncore.Document(rdr), cmd.Session, cmd.Clock, ss.Server, cursorOpts...)
+	if err != nil {
+		if cmd.Session != nil && cmd.Session.SessionType == session.Implicit {
+			cmd.Session.EndSession()
+		}
+		return nil, err
+	}
+
+	return cursor, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/client_session.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/client_session.go
new file mode 100644
index 0000000..405d507
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/client_session.go
@@ -0,0 +1,347 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+)
+
+// ErrSessionEnded is returned when a client session is used after a call to endSession().
+var ErrSessionEnded = errors.New("ended session was used")
+
+// ErrNoTransactStarted is returned if a transaction operation is called when no transaction has started.
+var ErrNoTransactStarted = errors.New("no transaction started")
+
+// ErrTransactInProgress is returned if startTransaction() is called when a transaction is in progress.
+var ErrTransactInProgress = errors.New("transaction already in progress")
+
+// ErrAbortAfterCommit is returned when abort is called after a commit.
+var ErrAbortAfterCommit = errors.New("cannot call abortTransaction after calling commitTransaction")
+
+// ErrAbortTwice is returned if abort is called after transaction is already aborted.
+var ErrAbortTwice = errors.New("cannot call abortTransaction twice")
+
+// ErrCommitAfterAbort is returned if commit is called after an abort.
+var ErrCommitAfterAbort = errors.New("cannot call commitTransaction after calling abortTransaction")
+
+// ErrUnackWCUnsupported is returned if an unacknowledged write concern is used for a transaction.
+var ErrUnackWCUnsupported = errors.New("transactions do not support unacknowledged write concerns")
+
+// Type describes the type of the session.
+type Type uint8
+
+// These constants are the valid types for a client session.
+const (
+	Explicit Type = iota
+	Implicit
+)
+
+// state indicates the state of the client session FSM.
+type state uint8
+
+// Client Session states
+const (
+	None state = iota
+	Starting
+	InProgress
+	Committed
+	Aborted
+)
+
+// Client is a session for clients to run commands.
+type Client struct {
+	*Server
+	ClientID       uuid.UUID
+	ClusterTime    bson.Raw
+	Consistent     bool // causal consistency
+	OperationTime  *primitive.Timestamp
+	SessionType    Type
+	Terminated     bool
+	RetryingCommit bool
+	Committing     bool
+	Aborting       bool
+	RetryWrite     bool
+
+	// options for the current transaction
+	// most recently set by transactionopt
+	CurrentRc *readconcern.ReadConcern
+	CurrentRp *readpref.ReadPref
+	CurrentWc *writeconcern.WriteConcern
+
+	// default transaction options
+	transactionRc *readconcern.ReadConcern
+	transactionRp *readpref.ReadPref
+	transactionWc *writeconcern.WriteConcern
+
+	pool  *Pool
+	state state
+}
+
+func getClusterTime(clusterTime bson.Raw) (uint32, uint32) {
+	if clusterTime == nil {
+		return 0, 0
+	}
+
+	clusterTimeVal, err := clusterTime.LookupErr("$clusterTime")
+	if err != nil {
+		return 0, 0
+	}
+
+	timestampVal, err := bson.Raw(clusterTimeVal.Value).LookupErr("clusterTime")
+	if err != nil {
+		return 0, 0
+	}
+
+	return timestampVal.Timestamp()
+}
+
+// MaxClusterTime compares 2 clusterTime documents and returns the document representing the highest cluster time.
+func MaxClusterTime(ct1, ct2 bson.Raw) bson.Raw {
+	epoch1, ord1 := getClusterTime(ct1)
+	epoch2, ord2 := getClusterTime(ct2)
+
+	if epoch1 > epoch2 {
+		return ct1
+	} else if epoch1 < epoch2 {
+		return ct2
+	} else if ord1 > ord2 {
+		return ct1
+	} else if ord1 < ord2 {
+		return ct2
+	}
+
+	return ct1
+}
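+
+// For example (illustrative values), given the cluster time documents
+//
+//	ct1: {"$clusterTime": {"clusterTime": Timestamp(5, 1)}}
+//	ct2: {"$clusterTime": {"clusterTime": Timestamp(5, 2)}}
+//
+// MaxClusterTime returns ct2: the epochs are equal, so the higher ordinal
+// wins.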
+
+// NewClientSession creates a Client.
+func NewClientSession(pool *Pool, clientID uuid.UUID, sessionType Type, opts ...*ClientOptions) (*Client, error) {
+	c := &Client{
+		Consistent:  true, // set default
+		ClientID:    clientID,
+		SessionType: sessionType,
+		pool:        pool,
+	}
+
+	mergedOpts := mergeClientOptions(opts...)
+	if mergedOpts.CausalConsistency != nil {
+		c.Consistent = *mergedOpts.CausalConsistency
+	}
+	if mergedOpts.DefaultReadPreference != nil {
+		c.transactionRp = mergedOpts.DefaultReadPreference
+	}
+	if mergedOpts.DefaultReadConcern != nil {
+		c.transactionRc = mergedOpts.DefaultReadConcern
+	}
+	if mergedOpts.DefaultWriteConcern != nil {
+		c.transactionWc = mergedOpts.DefaultWriteConcern
+	}
+
+	servSess, err := pool.GetSession()
+	if err != nil {
+		return nil, err
+	}
+
+	c.Server = servSess
+
+	return c, nil
+}
+
+// AdvanceClusterTime updates the session's cluster time.
+func (c *Client) AdvanceClusterTime(clusterTime bson.Raw) error {
+	if c.Terminated {
+		return ErrSessionEnded
+	}
+	c.ClusterTime = MaxClusterTime(c.ClusterTime, clusterTime)
+	return nil
+}
+
+// AdvanceOperationTime updates the session's operation time.
+func (c *Client) AdvanceOperationTime(opTime *primitive.Timestamp) error {
+	if c.Terminated {
+		return ErrSessionEnded
+	}
+
+	if c.OperationTime == nil {
+		c.OperationTime = opTime
+		return nil
+	}
+
+	if opTime.T > c.OperationTime.T {
+		c.OperationTime = opTime
+	} else if (opTime.T == c.OperationTime.T) && (opTime.I > c.OperationTime.I) {
+		c.OperationTime = opTime
+	}
+
+	return nil
+}
+
+// UpdateUseTime updates the session's last used time.
+// It must be called whenever this session is used to send a command to the server.
+func (c *Client) UpdateUseTime() error {
+	if c.Terminated {
+		return ErrSessionEnded
+	}
+	c.updateUseTime()
+	return nil
+}
+
+// EndSession ends the session.
+func (c *Client) EndSession() {
+	if c.Terminated {
+		return
+	}
+
+	c.Terminated = true
+	c.pool.ReturnSession(c.Server)
+}
+
+// TransactionInProgress returns true if the client session is in an active transaction.
+func (c *Client) TransactionInProgress() bool {
+	return c.state == InProgress
+}
+
+// TransactionStarting returns true if the client session is starting a transaction.
+func (c *Client) TransactionStarting() bool {
+	return c.state == Starting
+}
+
+// TransactionRunning returns true if the client session has started a transaction
+// and it hasn't been committed or aborted.
+func (c *Client) TransactionRunning() bool {
+	return c.state == Starting || c.state == InProgress
+}
+
+// TransactionCommitted returns true if the client session just committed a transaction.
+func (c *Client) TransactionCommitted() bool {
+	return c.state == Committed
+}
+
+// CheckStartTransaction checks to see if starting a transaction is allowed and returns
+// an error if it is not.
+func (c *Client) CheckStartTransaction() error {
+	if c.state == InProgress || c.state == Starting {
+		return ErrTransactInProgress
+	}
+	return nil
+}
+
+// StartTransaction initializes the transaction options and advances the state machine.
+// It does not contact the server to start the transaction.
+func (c *Client) StartTransaction(opts *TransactionOptions) error {
+	err := c.CheckStartTransaction()
+	if err != nil {
+		return err
+	}
+
+	c.IncrementTxnNumber()
+	c.RetryingCommit = false
+
+	if opts != nil {
+		c.CurrentRc = opts.ReadConcern
+		c.CurrentRp = opts.ReadPreference
+		c.CurrentWc = opts.WriteConcern
+	}
+
+	if c.CurrentRc == nil {
+		c.CurrentRc = c.transactionRc
+	}
+
+	if c.CurrentRp == nil {
+		c.CurrentRp = c.transactionRp
+	}
+
+	if c.CurrentWc == nil {
+		c.CurrentWc = c.transactionWc
+	}
+
+	if !writeconcern.AckWrite(c.CurrentWc) {
+		c.clearTransactionOpts()
+		return ErrUnackWCUnsupported
+	}
+
+	c.state = Starting
+	return nil
+}
+
+// CheckCommitTransaction checks to see if allowed to commit transaction and returns
+// an error if not allowed.
+func (c *Client) CheckCommitTransaction() error {
+	if c.state == None {
+		return ErrNoTransactStarted
+	} else if c.state == Aborted {
+		return ErrCommitAfterAbort
+	}
+	return nil
+}
+
+// CommitTransaction updates the state for a successfully committed transaction and returns
+// an error if not permissible.  It does not actually perform the commit.
+func (c *Client) CommitTransaction() error {
+	err := c.CheckCommitTransaction()
+	if err != nil {
+		return err
+	}
+	c.state = Committed
+	return nil
+}
+
+// CheckAbortTransaction checks to see if allowed to abort transaction and returns
+// an error if not allowed.
+func (c *Client) CheckAbortTransaction() error {
+	if c.state == None {
+		return ErrNoTransactStarted
+	} else if c.state == Committed {
+		return ErrAbortAfterCommit
+	} else if c.state == Aborted {
+		return ErrAbortTwice
+	}
+	return nil
+}
+
+// AbortTransaction updates the state for a successfully aborted transaction and returns
+// an error if not permissible.  It does not actually perform the abort.
+func (c *Client) AbortTransaction() error {
+	err := c.CheckAbortTransaction()
+	if err != nil {
+		return err
+	}
+	c.state = Aborted
+	c.clearTransactionOpts()
+	return nil
+}
+
+// ApplyCommand advances the state machine upon command execution.
+func (c *Client) ApplyCommand() {
+	if c.Committing {
+		// Do not change state if committing after already committed
+		return
+	}
+	if c.state == Starting {
+		c.state = InProgress
+	} else if c.state == Committed || c.state == Aborted {
+		c.clearTransactionOpts()
+		c.state = None
+	}
+}
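+
+// The resulting transaction state machine, summarizing the methods above:
+//
+//	None --StartTransaction--> Starting --ApplyCommand--> InProgress
+//	Starting/InProgress --CommitTransaction--> Committed
+//	Starting/InProgress --AbortTransaction--> Aborted
+//	Committed/Aborted --ApplyCommand--> None (transaction options cleared)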
+
+func (c *Client) clearTransactionOpts() {
+	c.RetryingCommit = false
+	c.Aborting = false
+	c.Committing = false
+	c.CurrentWc = nil
+	c.CurrentRp = nil
+	c.CurrentRc = nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/cluster_clock.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/cluster_clock.go
new file mode 100644
index 0000000..85376bf
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/cluster_clock.go
@@ -0,0 +1,36 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+)
+
+// ClusterClock represents a logical clock for keeping track of cluster time.
+type ClusterClock struct {
+	clusterTime bson.Raw
+	lock        sync.Mutex
+}
+
+// GetClusterTime returns the cluster's current time.
+func (cc *ClusterClock) GetClusterTime() bson.Raw {
+	var ct bson.Raw
+	cc.lock.Lock()
+	ct = cc.clusterTime
+	cc.lock.Unlock()
+
+	return ct
+}
+
+// AdvanceClusterTime updates the cluster's current time.
+func (cc *ClusterClock) AdvanceClusterTime(clusterTime bson.Raw) {
+	cc.lock.Lock()
+	cc.clusterTime = MaxClusterTime(cc.clusterTime, clusterTime)
+	cc.lock.Unlock()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/options.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/options.go
new file mode 100644
index 0000000..ac10518
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/options.go
@@ -0,0 +1,51 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+)
+
+// ClientOptions represents all possible options for creating a client session.
+type ClientOptions struct {
+	CausalConsistency     *bool
+	DefaultReadConcern    *readconcern.ReadConcern
+	DefaultWriteConcern   *writeconcern.WriteConcern
+	DefaultReadPreference *readpref.ReadPref
+}
+
+// TransactionOptions represents all possible options for starting a transaction in a session.
+type TransactionOptions struct {
+	ReadConcern    *readconcern.ReadConcern
+	WriteConcern   *writeconcern.WriteConcern
+	ReadPreference *readpref.ReadPref
+}
+
+func mergeClientOptions(opts ...*ClientOptions) *ClientOptions {
+	c := &ClientOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.CausalConsistency != nil {
+			c.CausalConsistency = opt.CausalConsistency
+		}
+		if opt.DefaultReadConcern != nil {
+			c.DefaultReadConcern = opt.DefaultReadConcern
+		}
+		if opt.DefaultReadPreference != nil {
+			c.DefaultReadPreference = opt.DefaultReadPreference
+		}
+		if opt.DefaultWriteConcern != nil {
+			c.DefaultWriteConcern = opt.DefaultWriteConcern
+		}
+	}
+
+	return c
+}
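+
+// Merging is last-non-nil-wins per field. For example (values illustrative):
+//
+//	cc := true
+//	a := &ClientOptions{CausalConsistency: &cc}
+//	b := &ClientOptions{DefaultReadConcern: readconcern.Majority()}
+//	merged := mergeClientOptions(a, b)
+//	// merged.CausalConsistency == &cc and merged.DefaultReadConcern is majority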
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/server_session.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/server_session.go
new file mode 100644
index 0000000..06aa51f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/server_session.go
@@ -0,0 +1,63 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"time"
+
+	"crypto/rand"
+
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+)
+
+var rander = rand.Reader
+
+// Server is an open session with the server.
+type Server struct {
+	SessionID bsonx.Doc
+	TxnNumber int64
+	LastUsed  time.Time
+}
+
+// expired returns whether or not a session has expired given a timeout in minutes.
+// A session is considered expired if it has less than 1 minute left before becoming stale.
+func (ss *Server) expired(timeoutMinutes uint32) bool {
+	if timeoutMinutes == 0 {
+		return true
+	}
+	timeUnused := time.Since(ss.LastUsed).Minutes()
+	return timeUnused > float64(timeoutMinutes-1)
+}
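+
+// For example, with a 30 minute timeout a session last used 29.5 minutes ago
+// is already considered expired, because it has less than the 1 minute
+// safety margin left. (Values are illustrative.)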
+
+// updateUseTime updates the last used time for this session.
+// It must be called whenever this server session is used to send a command to the server.
+func (ss *Server) updateUseTime() {
+	ss.LastUsed = time.Now()
+}
+
+func newServerSession() (*Server, error) {
+	id, err := uuid.New()
+	if err != nil {
+		return nil, err
+	}
+
+	idDoc := bsonx.Doc{{"id", bsonx.Binary(UUIDSubtype, id[:])}}
+
+	return &Server{
+		SessionID: idDoc,
+		LastUsed:  time.Now(),
+	}, nil
+}
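+
+// The resulting session ID document has the shape
+// {"id": Binary(subtype 0x04, <16 UUID bytes>)}, i.e. a UUID encoded as
+// BSON binary subtype 4.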
+
+// IncrementTxnNumber increments the transaction number.
+func (ss *Server) IncrementTxnNumber() {
+	ss.TxnNumber++
+}
+
+// UUIDSubtype is the BSON binary subtype that a UUID should be encoded as.
+const UUIDSubtype byte = 4
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/session_pool.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/session_pool.go
new file mode 100644
index 0000000..af749da
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/session_pool.go
@@ -0,0 +1,175 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Node represents a server session in a linked list
+type Node struct {
+	*Server
+	next *Node
+	prev *Node
+}
+
+// Pool is a pool of server sessions that can be reused.
+type Pool struct {
+	descChan <-chan description.Topology
+	head     *Node
+	tail     *Node
+	timeout  uint32
+	mutex    sync.Mutex // mutex to protect list and sessionTimeout
+
+	checkedOut int // number of sessions checked out of pool
+}
+
+func (p *Pool) createServerSession() (*Server, error) {
+	s, err := newServerSession()
+	if err != nil {
+		return nil, err
+	}
+
+	p.checkedOut++
+	return s, nil
+}
+
+// NewPool creates a new server session pool
+func NewPool(descChan <-chan description.Topology) *Pool {
+	p := &Pool{
+		descChan: descChan,
+	}
+
+	return p
+}
+
+// assumes caller has mutex to protect the pool
+func (p *Pool) updateTimeout() {
+	select {
+	case newDesc := <-p.descChan:
+		p.timeout = newDesc.SessionTimeoutMinutes
+	default:
+		// no new description waiting
+	}
+}
+
+// GetSession retrieves an unexpired session from the pool.
+func (p *Pool) GetSession() (*Server, error) {
+	p.mutex.Lock() // prevent changing the linked list while seeing if sessions have expired
+	defer p.mutex.Unlock()
+
+	// empty pool
+	if p.head == nil && p.tail == nil {
+		return p.createServerSession()
+	}
+
+	p.updateTimeout()
+	for p.head != nil {
+		// pull session from head of queue and return if it is valid for at least 1 more minute
+		if p.head.expired(p.timeout) {
+			p.head = p.head.next
+			continue
+		}
+
+		// found unexpired session
+		session := p.head.Server
+		if p.head.next != nil {
+			p.head.next.prev = nil
+		}
+		if p.tail == p.head {
+			p.tail = nil
+			p.head = nil
+		} else {
+			p.head = p.head.next
+		}
+
+		p.checkedOut++
+		return session, nil
+	}
+
+	// no valid session found
+	p.tail = nil // empty list
+	return p.createServerSession()
+}
+
+// ReturnSession returns a session to the pool if it has not expired.
+func (p *Pool) ReturnSession(ss *Server) {
+	if ss == nil {
+		return
+	}
+
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	p.checkedOut--
+	p.updateTimeout()
+	// check sessions at end of queue for expired
+	// stop checking after hitting the first valid session
+	for p.tail != nil && p.tail.expired(p.timeout) {
+		if p.tail.prev != nil {
+			p.tail.prev.next = nil
+		}
+		p.tail = p.tail.prev
+	}
+
+	// session expired
+	if ss.expired(p.timeout) {
+		return
+	}
+
+	newNode := &Node{
+		Server: ss,
+		next:   nil,
+		prev:   nil,
+	}
+
+	// empty list
+	if p.tail == nil {
+		p.head = newNode
+		p.tail = newNode
+		return
+	}
+
+	// at least 1 valid session in list
+	newNode.next = p.head
+	p.head.prev = newNode
+	p.head = newNode
+}
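+
+// The pool behaves as a LIFO deque: sessions are checked out from and
+// returned to the head, while expired sessions are pruned from the tail,
+// which holds the sessions that have been idle the longest.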
+
+// IDSlice returns a slice of session IDs for each session in the pool
+func (p *Pool) IDSlice() []bsonx.Doc {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	ids := []bsonx.Doc{}
+	for node := p.head; node != nil; node = node.next {
+		ids = append(ids, node.SessionID)
+	}
+
+	return ids
+}
+
+// String implements the Stringer interface
+func (p *Pool) String() string {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	s := ""
+	for head := p.head; head != nil; head = head.next {
+		s += head.SessionID.String() + "\n"
+	}
+
+	return s
+}
+
+// CheckedOut returns number of sessions checked out from pool.
+func (p *Pool) CheckedOut() int {
+	return p.checkedOut
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/connection.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/connection.go
new file mode 100644
index 0000000..d59f5b5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/connection.go
@@ -0,0 +1,96 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"context"
+	"net"
+
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// sconn is a wrapper around a connection.Connection. This type is returned by
+// a Server so that it can track network errors; when a non-timeout network
+// error is returned, the pool on the server can be cleared.
+type sconn struct {
+	connection.Connection
+	s  *Server
+	id uint64
+}
+
+var notMasterCodes = []int32{10107, 13435}
+var recoveringCodes = []int32{11600, 11602, 13436, 189, 91}
+
+func (sc *sconn) ReadWireMessage(ctx context.Context) (wiremessage.WireMessage, error) {
+	wm, err := sc.Connection.ReadWireMessage(ctx)
+	if err != nil {
+		sc.processErr(err)
+	} else {
+		e := command.DecodeError(wm)
+		sc.processErr(e)
+	}
+	return wm, err
+}
+
+func (sc *sconn) WriteWireMessage(ctx context.Context, wm wiremessage.WireMessage) error {
+	err := sc.Connection.WriteWireMessage(ctx, wm)
+	sc.processErr(err)
+	return err
+}
+
+func (sc *sconn) processErr(err error) {
+	// TODO(GODRIVER-524) handle the rest of sdam error handling
+	// Invalidate server description if not master or node recovering error occurs
+	if cerr, ok := err.(command.Error); ok && (isRecoveringError(cerr) || isNotMasterError(cerr)) {
+		desc := sc.s.Description()
+		desc.Kind = description.Unknown
+		desc.LastError = err
+		// updates description to unknown
+		sc.s.updateDescription(desc, false)
+	}
+
+	ne, ok := err.(connection.NetworkError)
+	if !ok {
+		return
+	}
+
+	if netErr, ok := ne.Wrapped.(net.Error); ok && netErr.Timeout() {
+		return
+	}
+	if ne.Wrapped == context.Canceled || ne.Wrapped == context.DeadlineExceeded {
+		return
+	}
+
+	desc := sc.s.Description()
+	desc.Kind = description.Unknown
+	desc.LastError = err
+	// updates description to unknown
+	sc.s.updateDescription(desc, false)
+}
+
+func isRecoveringError(err command.Error) bool {
+	for _, c := range recoveringCodes {
+		if c == err.Code {
+			return true
+		}
+	}
+	return strings.Contains(err.Error(), "node is recovering")
+}
+
+func isNotMasterError(err command.Error) bool {
+	for _, c := range notMasterCodes {
+		if c == err.Code {
+			return true
+		}
+	}
+	return strings.Contains(err.Error(), "not master")
+}
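+
+// For example, a command.Error with code 10107 is classified as a "not
+// master" error and one with code 11600 as a "node is recovering" error;
+// either causes processErr to reset the server description to Unknown.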
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/fsm.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/fsm.go
new file mode 100644
index 0000000..3682b57
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/fsm.go
@@ -0,0 +1,350 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+var supportedWireVersions = description.NewVersionRange(2, 6)
+var minSupportedMongoDBVersion = "2.6"
+
+type fsm struct {
+	description.Topology
+	SetName       string
+	maxElectionID primitive.ObjectID
+	maxSetVersion uint32
+}
+
+func newFSM() *fsm {
+	return new(fsm)
+}
+
+// apply should operate on immutable TopologyDescriptions and Descriptions. This way we don't have to
+// lock for the entire time we're applying a server description.
+func (f *fsm) apply(s description.Server) (description.Topology, error) {
+
+	newServers := make([]description.Server, len(f.Servers))
+	copy(newServers, f.Servers)
+
+	oldMinutes := f.SessionTimeoutMinutes
+	f.Topology = description.Topology{
+		Kind:    f.Kind,
+		Servers: newServers,
+	}
+
+	// For data bearing servers, set SessionTimeoutMinutes to the lowest among them
+	if oldMinutes == 0 {
+		// If timeout currently 0, check all servers to see if any still don't have a timeout
+		// If they all have timeout, pick the lowest.
+		timeout := s.SessionTimeoutMinutes
+		for _, server := range f.Servers {
+			if server.DataBearing() && server.SessionTimeoutMinutes < timeout {
+				timeout = server.SessionTimeoutMinutes
+			}
+		}
+		f.SessionTimeoutMinutes = timeout
+	} else {
+		if s.DataBearing() && oldMinutes > s.SessionTimeoutMinutes {
+			f.SessionTimeoutMinutes = s.SessionTimeoutMinutes
+		} else {
+			f.SessionTimeoutMinutes = oldMinutes
+		}
+	}
+
+	if _, ok := f.findServer(s.Addr); !ok {
+		return f.Topology, nil
+	}
+
+	if s.WireVersion != nil {
+		if s.WireVersion.Max < supportedWireVersions.Min {
+			return description.Topology{}, fmt.Errorf(
+				"server at %s reports wire version %d, but this version of the Go driver requires "+
+					"at least %d (MongoDB %s)",
+				s.Addr.String(),
+				s.WireVersion.Max,
+				supportedWireVersions.Min,
+				minSupportedMongoDBVersion,
+			)
+		}
+
+		if s.WireVersion.Min > supportedWireVersions.Max {
+			return description.Topology{}, fmt.Errorf(
+				"server at %s requires wire version %d, but this version of the Go driver only "+
+					"supports up to %d",
+				s.Addr.String(),
+				s.WireVersion.Min,
+				supportedWireVersions.Max,
+			)
+		}
+	}
+
+	switch f.Kind {
+	case description.Unknown:
+		f.applyToUnknown(s)
+	case description.Sharded:
+		f.applyToSharded(s)
+	case description.ReplicaSetNoPrimary:
+		f.applyToReplicaSetNoPrimary(s)
+	case description.ReplicaSetWithPrimary:
+		f.applyToReplicaSetWithPrimary(s)
+	case description.Single:
+		f.applyToSingle(s)
+	}
+
+	return f.Topology, nil
+}
+
+func (f *fsm) applyToReplicaSetNoPrimary(s description.Server) {
+	switch s.Kind {
+	case description.Standalone, description.Mongos:
+		f.removeServerByAddr(s.Addr)
+	case description.RSPrimary:
+		f.updateRSFromPrimary(s)
+	case description.RSSecondary, description.RSArbiter, description.RSMember:
+		f.updateRSWithoutPrimary(s)
+	case description.Unknown, description.RSGhost:
+		f.replaceServer(s)
+	}
+}
+
+func (f *fsm) applyToReplicaSetWithPrimary(s description.Server) {
+	switch s.Kind {
+	case description.Standalone, description.Mongos:
+		f.removeServerByAddr(s.Addr)
+		f.checkIfHasPrimary()
+	case description.RSPrimary:
+		f.updateRSFromPrimary(s)
+	case description.RSSecondary, description.RSArbiter, description.RSMember:
+		f.updateRSWithPrimaryFromMember(s)
+	case description.Unknown, description.RSGhost:
+		f.replaceServer(s)
+		f.checkIfHasPrimary()
+	}
+}
+
+func (f *fsm) applyToSharded(s description.Server) {
+	switch s.Kind {
+	case description.Mongos, description.Unknown:
+		f.replaceServer(s)
+	case description.Standalone, description.RSPrimary, description.RSSecondary, description.RSArbiter, description.RSMember, description.RSGhost:
+		f.removeServerByAddr(s.Addr)
+	}
+}
+
+func (f *fsm) applyToSingle(s description.Server) {
+	switch s.Kind {
+	case description.Unknown:
+		f.replaceServer(s)
+	case description.Standalone, description.Mongos:
+		if f.SetName != "" {
+			f.removeServerByAddr(s.Addr)
+			return
+		}
+
+		f.replaceServer(s)
+	case description.RSPrimary, description.RSSecondary, description.RSArbiter, description.RSMember, description.RSGhost:
+		if f.SetName != "" && f.SetName != s.SetName {
+			f.removeServerByAddr(s.Addr)
+			return
+		}
+
+		f.replaceServer(s)
+	}
+}
+
+func (f *fsm) applyToUnknown(s description.Server) {
+	switch s.Kind {
+	case description.Mongos:
+		f.setKind(description.Sharded)
+		f.replaceServer(s)
+	case description.RSPrimary:
+		f.updateRSFromPrimary(s)
+	case description.RSSecondary, description.RSArbiter, description.RSMember:
+		f.setKind(description.ReplicaSetNoPrimary)
+		f.updateRSWithoutPrimary(s)
+	case description.Standalone:
+		f.updateUnknownWithStandalone(s)
+	case description.Unknown, description.RSGhost:
+		f.replaceServer(s)
+	}
+}
+
+func (f *fsm) checkIfHasPrimary() {
+	if _, ok := f.findPrimary(); ok {
+		f.setKind(description.ReplicaSetWithPrimary)
+	} else {
+		f.setKind(description.ReplicaSetNoPrimary)
+	}
+}
+
+func (f *fsm) updateRSFromPrimary(s description.Server) {
+	if f.SetName == "" {
+		f.SetName = s.SetName
+	} else if f.SetName != s.SetName {
+		f.removeServerByAddr(s.Addr)
+		f.checkIfHasPrimary()
+		return
+	}
+
+	if s.SetVersion != 0 && !bytes.Equal(s.ElectionID[:], primitive.NilObjectID[:]) {
+		if f.maxSetVersion > s.SetVersion || bytes.Compare(f.maxElectionID[:], s.ElectionID[:]) == 1 {
+			f.replaceServer(description.Server{
+				Addr:      s.Addr,
+				LastError: fmt.Errorf("was a primary, but its set version or election id is stale"),
+			})
+			f.checkIfHasPrimary()
+			return
+		}
+
+		f.maxElectionID = s.ElectionID
+	}
+
+	if s.SetVersion > f.maxSetVersion {
+		f.maxSetVersion = s.SetVersion
+	}
+
+	if j, ok := f.findPrimary(); ok {
+		f.setServer(j, description.Server{
+			Addr:      f.Servers[j].Addr,
+			LastError: fmt.Errorf("was a primary, but a new primary was discovered"),
+		})
+	}
+
+	f.replaceServer(s)
+
+	for j := len(f.Servers) - 1; j >= 0; j-- {
+		found := false
+		for _, member := range s.Members {
+			if member == f.Servers[j].Addr {
+				found = true
+				break
+			}
+		}
+		if !found {
+			f.removeServer(j)
+		}
+	}
+
+	for _, member := range s.Members {
+		if _, ok := f.findServer(member); !ok {
+			f.addServer(member)
+		}
+	}
+
+	f.checkIfHasPrimary()
+}
+
+func (f *fsm) updateRSWithPrimaryFromMember(s description.Server) {
+	if f.SetName != s.SetName {
+		f.removeServerByAddr(s.Addr)
+		f.checkIfHasPrimary()
+		return
+	}
+
+	if s.Addr != s.CanonicalAddr {
+		f.removeServerByAddr(s.Addr)
+		f.checkIfHasPrimary()
+		return
+	}
+
+	f.replaceServer(s)
+
+	if _, ok := f.findPrimary(); !ok {
+		f.setKind(description.ReplicaSetNoPrimary)
+	}
+}
+
+func (f *fsm) updateRSWithoutPrimary(s description.Server) {
+	if f.SetName == "" {
+		f.SetName = s.SetName
+	} else if f.SetName != s.SetName {
+		f.removeServerByAddr(s.Addr)
+		return
+	}
+
+	for _, member := range s.Members {
+		if _, ok := f.findServer(member); !ok {
+			f.addServer(member)
+		}
+	}
+
+	if s.Addr != s.CanonicalAddr {
+		f.removeServerByAddr(s.Addr)
+		return
+	}
+
+	f.replaceServer(s)
+}
+
+func (f *fsm) updateUnknownWithStandalone(s description.Server) {
+	if len(f.Servers) > 1 {
+		f.removeServerByAddr(s.Addr)
+		return
+	}
+
+	f.setKind(description.Single)
+	f.replaceServer(s)
+}
+
+func (f *fsm) addServer(addr address.Address) {
+	f.Servers = append(f.Servers, description.Server{
+		Addr: addr.Canonicalize(),
+	})
+}
+
+func (f *fsm) findPrimary() (int, bool) {
+	for i, s := range f.Servers {
+		if s.Kind == description.RSPrimary {
+			return i, true
+		}
+	}
+
+	return 0, false
+}
+
+func (f *fsm) findServer(addr address.Address) (int, bool) {
+	canon := addr.Canonicalize()
+	for i, s := range f.Servers {
+		if canon == s.Addr {
+			return i, true
+		}
+	}
+
+	return 0, false
+}
+
+func (f *fsm) removeServer(i int) {
+	f.Servers = append(f.Servers[:i], f.Servers[i+1:]...)
+}
+
+func (f *fsm) removeServerByAddr(addr address.Address) {
+	if i, ok := f.findServer(addr); ok {
+		f.removeServer(i)
+	}
+}
+
+func (f *fsm) replaceServer(s description.Server) bool {
+	if i, ok := f.findServer(s.Addr); ok {
+		f.setServer(i, s)
+		return true
+	}
+	return false
+}
+
+func (f *fsm) setServer(i int, s description.Server) {
+	f.Servers[i] = s
+}
+
+func (f *fsm) setKind(k description.TopologyKind) {
+	f.Kind = k
+}
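
The FSM above drives SDAM-style transitions: each observed server description can change both the server list and the topology kind (Unknown, Sharded, ReplicaSetNoPrimary, ReplicaSetWithPrimary, Single). A minimal standalone sketch of that transition idea follows; it uses hypothetical stand-in types rather than the driver's unexported fsm, so it illustrates the shape only.

```go
package main

import "fmt"

// Stand-in enums for the transition sketch; the real driver uses
// description.TopologyKind and description.ServerKind.
type topoKind int

const (
	unknown topoKind = iota
	sharded
	rsNoPrimary
	rsWithPrimary
)

type serverKind int

const (
	mongos serverKind = iota
	rsPrimary
	rsSecondary
)

// next mirrors the shape of fsm.apply: the new topology kind is a function
// of the current kind and the kind of the server just observed.
func next(cur topoKind, observed serverKind) topoKind {
	switch cur {
	case unknown:
		switch observed {
		case mongos:
			return sharded
		case rsPrimary:
			return rsWithPrimary
		case rsSecondary:
			return rsNoPrimary
		}
	case rsNoPrimary:
		if observed == rsPrimary {
			return rsWithPrimary
		}
	}
	return cur
}

func main() {
	k := unknown
	k = next(k, rsSecondary)        // member seen first: ReplicaSetNoPrimary
	k = next(k, rsPrimary)          // primary discovered: ReplicaSetWithPrimary
	fmt.Println(k == rsWithPrimary) // true
}
```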
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/server.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/server.go
new file mode 100644
index 0000000..3a7ace2
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/server.go
@@ -0,0 +1,506 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/event"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/auth"
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+const minHeartbeatInterval = 500 * time.Millisecond
+const connectionSemaphoreSize = math.MaxInt64
+
+// ErrServerClosed occurs when an attempt to get a connection is made after
+// the server has been closed.
+var ErrServerClosed = errors.New("server is closed")
+
+// ErrServerConnected occurs when an attempt to connect is made after a server
+// has already been connected.
+var ErrServerConnected = errors.New("server is connected")
+
+// SelectedServer represents a specific server that was selected during server selection.
+// It contains the kind of the topology it was selected from.
+type SelectedServer struct {
+	*Server
+
+	Kind description.TopologyKind
+}
+
+// Description returns a description of the server as of the last heartbeat.
+func (ss *SelectedServer) Description() description.SelectedServer {
+	sdesc := ss.Server.Description()
+	return description.SelectedServer{
+		Server: sdesc,
+		Kind:   ss.Kind,
+	}
+}
+
+// These constants represent the connection states of a server.
+const (
+	disconnected int32 = iota
+	disconnecting
+	connected
+	connecting
+)
+
+func connectionStateString(state int32) string {
+	switch state {
+	case disconnected:
+		return "Disconnected"
+	case disconnecting:
+		return "Disconnecting"
+	case connected:
+		return "Connected"
+	case connecting:
+		return "Connecting"
+	}
+
+	return ""
+}
+
+// Server is a single server within a topology.
+type Server struct {
+	cfg     *serverConfig
+	address address.Address
+
+	connectionstate int32
+	done            chan struct{}
+	checkNow        chan struct{}
+	closewg         sync.WaitGroup
+	pool            connection.Pool
+
+	desc atomic.Value // holds a description.Server
+
+	averageRTTSet bool
+	averageRTT    time.Duration
+
+	subLock             sync.Mutex
+	subscribers         map[uint64]chan description.Server
+	currentSubscriberID uint64
+
+	subscriptionsClosed bool
+}
+
+// ConnectServer creates a new Server and then initializes it using the
+// Connect method.
+func ConnectServer(ctx context.Context, addr address.Address, opts ...ServerOption) (*Server, error) {
+	srvr, err := NewServer(addr, opts...)
+	if err != nil {
+		return nil, err
+	}
+	err = srvr.Connect(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return srvr, nil
+}
+
+// NewServer creates a new server. The MongoDB server at the address will be monitored
+// by an internal monitoring goroutine.
+func NewServer(addr address.Address, opts ...ServerOption) (*Server, error) {
+	cfg, err := newServerConfig(opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	s := &Server{
+		cfg:     cfg,
+		address: addr,
+
+		done:     make(chan struct{}),
+		checkNow: make(chan struct{}, 1),
+
+		subscribers: make(map[uint64]chan description.Server),
+	}
+	s.desc.Store(description.Server{Addr: addr})
+
+	var maxConns uint64
+	if cfg.maxConns == 0 {
+		maxConns = math.MaxInt64
+	} else {
+		maxConns = uint64(cfg.maxConns)
+	}
+
+	s.pool, err = connection.NewPool(addr, uint64(cfg.maxIdleConns), maxConns, cfg.connectionOpts...)
+	if err != nil {
+		return nil, err
+	}
+
+	return s, nil
+}
+
+// Connect initializes the Server by starting background monitoring goroutines.
+// This method must be called before a Server can be used.
+func (s *Server) Connect(ctx context.Context) error {
+	if !atomic.CompareAndSwapInt32(&s.connectionstate, disconnected, connected) {
+		return ErrServerConnected
+	}
+	s.desc.Store(description.Server{Addr: s.address})
+	go s.update()
+	s.closewg.Add(1)
+	return s.pool.Connect(ctx)
+}
+
+// Disconnect closes sockets to the server referenced by this Server.
+// Subscriptions to this Server will be closed. Disconnect will shutdown
+// any monitoring goroutines, close the idle connection pool, and will
+// wait until all the in use connections have been returned to the connection
+// pool and are closed before returning. If the context expires via
+// cancellation, deadline, or timeout before the in use connections have been
+// returned, the in use connections will be closed, resulting in the failure of
+// any in flight read or write operations. If this method returns with no
+// errors, all connections associated with this Server have been closed.
+func (s *Server) Disconnect(ctx context.Context) error {
+	if !atomic.CompareAndSwapInt32(&s.connectionstate, connected, disconnecting) {
+		return ErrServerClosed
+	}
+
+	// For every call to Connect there must be at least 1 goroutine that is
+	// waiting on the done channel.
+	s.done <- struct{}{}
+	err := s.pool.Disconnect(ctx)
+	if err != nil {
+		return err
+	}
+
+	s.closewg.Wait()
+	atomic.StoreInt32(&s.connectionstate, disconnected)
+
+	return nil
+}
+
+// Connection gets a connection to the server.
+func (s *Server) Connection(ctx context.Context) (connection.Connection, error) {
+	if atomic.LoadInt32(&s.connectionstate) != connected {
+		return nil, ErrServerClosed
+	}
+	conn, desc, err := s.pool.Get(ctx)
+	if err != nil {
+		if _, ok := err.(*auth.Error); ok {
+			// authentication error --> drain connection
+			_ = s.pool.Drain()
+		}
+		if _, ok := err.(*connection.NetworkError); ok {
+			// update the description to Unknown and clear the connection pool
+			if desc != nil {
+				desc.Kind = description.Unknown
+				desc.LastError = err
+				s.updateDescription(*desc, false)
+			} else {
+				_ = s.pool.Drain()
+			}
+		}
+		return nil, err
+	}
+	if desc != nil {
+		go s.updateDescription(*desc, false)
+	}
+	sc := &sconn{Connection: conn, s: s}
+	return sc, nil
+}
+
+// Description returns a description of the server as of the last heartbeat.
+func (s *Server) Description() description.Server {
+	return s.desc.Load().(description.Server)
+}
+
+// SelectedDescription returns a description.SelectedServer with a Kind of
+// Single. This can be used when performing tasks like monitoring a batch
+// of servers where you want to run one-off commands against those servers.
+func (s *Server) SelectedDescription() description.SelectedServer {
+	sdesc := s.Description()
+	return description.SelectedServer{
+		Server: sdesc,
+		Kind:   description.Single,
+	}
+}
+
+// Subscribe returns a ServerSubscription which has a channel on which all
+// updated server descriptions will be sent. The channel will have a buffer
+// size of one, and will be pre-populated with the current description.
+func (s *Server) Subscribe() (*ServerSubscription, error) {
+	if atomic.LoadInt32(&s.connectionstate) != connected {
+		return nil, ErrSubscribeAfterClosed
+	}
+	ch := make(chan description.Server, 1)
+	ch <- s.desc.Load().(description.Server)
+
+	s.subLock.Lock()
+	defer s.subLock.Unlock()
+	if s.subscriptionsClosed {
+		return nil, ErrSubscribeAfterClosed
+	}
+	id := s.currentSubscriberID
+	s.subscribers[id] = ch
+	s.currentSubscriberID++
+
+	ss := &ServerSubscription{
+		C:  ch,
+		s:  s,
+		id: id,
+	}
+
+	return ss, nil
+}
+
+// RequestImmediateCheck will cause the server to send a heartbeat immediately
+// instead of waiting for the heartbeat timeout.
+func (s *Server) RequestImmediateCheck() {
+	select {
+	case s.checkNow <- struct{}{}:
+	default:
+	}
+}
+
+// update handles performing heartbeats and updating any subscribers of the
+// newest description.Server retrieved.
+func (s *Server) update() {
+	defer s.closewg.Done()
+	heartbeatTicker := time.NewTicker(s.cfg.heartbeatInterval)
+	rateLimiter := time.NewTicker(minHeartbeatInterval)
+	defer heartbeatTicker.Stop()
+	defer rateLimiter.Stop()
+	checkNow := s.checkNow
+	done := s.done
+
+	var doneOnce bool
+	defer func() {
+		if r := recover(); r != nil {
+			if doneOnce {
+				return
+			}
+			// We keep this goroutine alive attempting to read from the done channel.
+			<-done
+		}
+	}()
+
+	var conn connection.Connection
+	var desc description.Server
+
+	desc, conn = s.heartbeat(nil)
+	s.updateDescription(desc, true)
+
+	closeServer := func() {
+		doneOnce = true
+		s.subLock.Lock()
+		for id, c := range s.subscribers {
+			close(c)
+			delete(s.subscribers, id)
+		}
+		s.subscriptionsClosed = true
+		s.subLock.Unlock()
+		if conn == nil {
+			return
+		}
+		conn.Close()
+	}
+	for {
+		select {
+		case <-heartbeatTicker.C:
+		case <-checkNow:
+		case <-done:
+			closeServer()
+			return
+		}
+
+		select {
+		case <-rateLimiter.C:
+		case <-done:
+			closeServer()
+			return
+		}
+
+		desc, conn = s.heartbeat(conn)
+		s.updateDescription(desc, false)
+	}
+}
+
+// updateDescription handles updating the description on the Server, notifying
+// subscribers, and potentially draining the connection pool. The initial
+// parameter is used to determine if this is the first description from the
+// server.
+func (s *Server) updateDescription(desc description.Server, initial bool) {
+	defer func() {
+		//  ¯\_(ツ)_/¯
+		_ = recover()
+	}()
+	s.desc.Store(desc)
+
+	s.subLock.Lock()
+	for _, c := range s.subscribers {
+		select {
+		// drain the channel if it isn't empty
+		case <-c:
+		default:
+		}
+		c <- desc
+	}
+	s.subLock.Unlock()
+
+	if initial {
+		// We don't clear the pool on the first update of the description.
+		return
+	}
+
+	switch desc.Kind {
+	case description.Unknown:
+		_ = s.pool.Drain()
+	}
+}
+
+// heartbeat sends a heartbeat to the server using the given connection. The connection can be nil.
+func (s *Server) heartbeat(conn connection.Connection) (description.Server, connection.Connection) {
+	const maxRetry = 2
+	var saved error
+	var desc description.Server
+	var set bool
+	var err error
+	ctx := context.Background()
+
+	for i := 1; i <= maxRetry; i++ {
+		if conn != nil && conn.Expired() {
+			conn.Close()
+			conn = nil
+		}
+
+		if conn == nil {
+			opts := []connection.Option{
+				connection.WithConnectTimeout(func(time.Duration) time.Duration { return s.cfg.heartbeatTimeout }),
+				connection.WithReadTimeout(func(time.Duration) time.Duration { return s.cfg.heartbeatTimeout }),
+				connection.WithWriteTimeout(func(time.Duration) time.Duration { return s.cfg.heartbeatTimeout }),
+			}
+			opts = append(opts, s.cfg.connectionOpts...)
+			// We override whatever handshaker is currently attached to the options with an empty
+			// one because we need to make sure we don't do auth.
+			opts = append(opts, connection.WithHandshaker(func(h connection.Handshaker) connection.Handshaker {
+				return nil
+			}))
+
+			// Override any command monitors specified in options with nil to avoid monitoring heartbeats.
+			opts = append(opts, connection.WithMonitor(func(*event.CommandMonitor) *event.CommandMonitor {
+				return nil
+			}))
+			conn, _, err = connection.New(ctx, s.address, opts...)
+			if err != nil {
+				saved = err
+				if conn != nil {
+					conn.Close()
+				}
+				conn = nil
+				continue
+			}
+		}
+
+		now := time.Now()
+
+		isMasterCmd := &command.IsMaster{Compressors: s.cfg.compressionOpts}
+		isMaster, err := isMasterCmd.RoundTrip(ctx, conn)
+		if err != nil {
+			saved = err
+			conn.Close()
+			conn = nil
+			continue
+		}
+
+		clusterTime := isMaster.ClusterTime
+		if s.cfg.clock != nil {
+			s.cfg.clock.AdvanceClusterTime(clusterTime)
+		}
+
+		delay := time.Since(now)
+		desc = description.NewServer(s.address, isMaster).SetAverageRTT(s.updateAverageRTT(delay))
+		desc.HeartbeatInterval = s.cfg.heartbeatInterval
+		set = true
+
+		break
+	}
+
+	if !set {
+		desc = description.Server{
+			Addr:      s.address,
+			LastError: saved,
+		}
+	}
+
+	return desc, conn
+}
+
+func (s *Server) updateAverageRTT(delay time.Duration) time.Duration {
+	if !s.averageRTTSet {
+		s.averageRTT = delay
+	} else {
+		alpha := 0.2
+		s.averageRTT = time.Duration(alpha*float64(delay) + (1-alpha)*float64(s.averageRTT))
+	}
+	return s.averageRTT
+}
+
+// Drain will drain the connection pool of this server. This is mainly here so the
+// pool for the server doesn't need to be directly exposed and so that when an error
+// is returned from reading or writing, a client can drain the pool for this server.
+// This is exposed here so we don't have to wrap the Connection type and sniff responses
+// for errors that would cause the pool to be drained, which in turn centralizes the
+// logic for handling errors in the Client type.
+func (s *Server) Drain() error { return s.pool.Drain() }
+
+// String implements the Stringer interface.
+func (s *Server) String() string {
+	desc := s.Description()
+	str := fmt.Sprintf("Addr: %s, Type: %s, State: %s",
+		s.address, desc.Kind, connectionStateString(s.connectionstate))
+	if len(desc.Tags) != 0 {
+		str += fmt.Sprintf(", Tag sets: %s", desc.Tags)
+	}
+	if s.connectionstate == connected {
+		str += fmt.Sprintf(", Avergage RTT: %d", s.averageRTT)
+	}
+	if desc.LastError != nil {
+		str += fmt.Sprintf(", Last error: %s", desc.LastError)
+	}
+
+	return str
+}
+
+// ServerSubscription represents a subscription to the description.Server updates for
+// a specific server.
+type ServerSubscription struct {
+	C  <-chan description.Server
+	s  *Server
+	id uint64
+}
+
+// Unsubscribe unsubscribes this ServerSubscription from updates and closes the
+// subscription channel.
+func (ss *ServerSubscription) Unsubscribe() error {
+	ss.s.subLock.Lock()
+	defer ss.s.subLock.Unlock()
+	if ss.s.subscriptionsClosed {
+		return nil
+	}
+
+	ch, ok := ss.s.subscribers[ss.id]
+	if !ok {
+		return nil
+	}
+
+	close(ch)
+	delete(ss.s.subscribers, ss.id)
+
+	return nil
+}
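
The exported Server surface above (ConnectServer, Subscribe, RequestImmediateCheck, Disconnect) is enough to watch a single server's heartbeats; Description carries the RTT that updateAverageRTT smooths with an EWMA (alpha = 0.2, so each new sample contributes 20%). A usage sketch, assuming a mongod is reachable at localhost:27017 and default options are acceptable:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
	"github.com/mongodb/mongo-go-driver/x/network/address"
)

func main() {
	ctx := context.Background()

	// ConnectServer builds the Server and starts its monitoring goroutine.
	s, err := topology.ConnectServer(ctx, address.Address("localhost:27017"))
	if err != nil {
		panic(err)
	}
	defer s.Disconnect(ctx)

	// Subscribe delivers description.Server updates; the channel is
	// pre-populated with the current description.
	sub, err := s.Subscribe()
	if err != nil {
		panic(err)
	}
	defer sub.Unsubscribe()

	// Ask for a heartbeat now rather than waiting out the interval.
	s.RequestImmediateCheck()

	select {
	case desc := <-sub.C:
		fmt.Println("kind:", desc.Kind, "rtt:", desc.AverageRTT)
	case <-time.After(5 * time.Second):
		fmt.Println("no update within 5s")
	}
}
```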
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/server_options.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/server_options.go
new file mode 100644
index 0000000..0ebbecf
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/server_options.go
@@ -0,0 +1,121 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+)
+
+var defaultRegistry = bson.NewRegistryBuilder().Build()
+
+type serverConfig struct {
+	clock             *session.ClusterClock
+	compressionOpts   []string
+	connectionOpts    []connection.Option
+	appname           string
+	heartbeatInterval time.Duration
+	heartbeatTimeout  time.Duration
+	maxConns          uint16
+	maxIdleConns      uint16
+	registry          *bsoncodec.Registry
+}
+
+func newServerConfig(opts ...ServerOption) (*serverConfig, error) {
+	cfg := &serverConfig{
+		heartbeatInterval: 10 * time.Second,
+		heartbeatTimeout:  10 * time.Second,
+		maxConns:          100,
+		maxIdleConns:      100,
+		registry:          defaultRegistry,
+	}
+
+	for _, opt := range opts {
+		err := opt(cfg)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return cfg, nil
+}
+
+// ServerOption configures a server.
+type ServerOption func(*serverConfig) error
+
+// WithConnectionOptions configures the server's connections.
+func WithConnectionOptions(fn func(...connection.Option) []connection.Option) ServerOption {
+	return func(cfg *serverConfig) error {
+		cfg.connectionOpts = fn(cfg.connectionOpts...)
+		return nil
+	}
+}
+
+// WithCompressionOptions configures the server's compressors.
+func WithCompressionOptions(fn func(...string) []string) ServerOption {
+	return func(cfg *serverConfig) error {
+		cfg.compressionOpts = fn(cfg.compressionOpts...)
+		return nil
+	}
+}
+
+// WithHeartbeatInterval configures a server's heartbeat interval.
+func WithHeartbeatInterval(fn func(time.Duration) time.Duration) ServerOption {
+	return func(cfg *serverConfig) error {
+		cfg.heartbeatInterval = fn(cfg.heartbeatInterval)
+		return nil
+	}
+}
+
+// WithHeartbeatTimeout configures how long to wait for a heartbeat socket to
+// connect.
+func WithHeartbeatTimeout(fn func(time.Duration) time.Duration) ServerOption {
+	return func(cfg *serverConfig) error {
+		cfg.heartbeatTimeout = fn(cfg.heartbeatTimeout)
+		return nil
+	}
+}
+
+// WithMaxConnections configures the maximum number of connections to allow for
+// a given server. If max is 0, then there is no upper limit to the number of
+// connections.
+func WithMaxConnections(fn func(uint16) uint16) ServerOption {
+	return func(cfg *serverConfig) error {
+		cfg.maxConns = fn(cfg.maxConns)
+		return nil
+	}
+}
+
+// WithMaxIdleConnections configures the maximum number of idle connections
+// allowed for the server.
+func WithMaxIdleConnections(fn func(uint16) uint16) ServerOption {
+	return func(cfg *serverConfig) error {
+		cfg.maxIdleConns = fn(cfg.maxIdleConns)
+		return nil
+	}
+}
+
+// WithClock configures the ClusterClock for the server to use.
+func WithClock(fn func(clock *session.ClusterClock) *session.ClusterClock) ServerOption {
+	return func(cfg *serverConfig) error {
+		cfg.clock = fn(cfg.clock)
+		return nil
+	}
+}
+
+// WithRegistry configures the registry for the server to use when creating
+// cursors.
+func WithRegistry(fn func(*bsoncodec.Registry) *bsoncodec.Registry) ServerOption {
+	return func(cfg *serverConfig) error {
+		cfg.registry = fn(cfg.registry)
+		return nil
+	}
+}
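
Because every ServerOption closure receives the current configuration value, options can derive from the defaults set in newServerConfig instead of overwriting them blindly. A short sketch; NewServer does not dial, so this runs without a live server:

```go
package main

import (
	"fmt"
	"time"

	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
	"github.com/mongodb/mongo-go-driver/x/network/address"
)

func main() {
	// Each option closure is handed the current value (the defaults set in
	// newServerConfig), so it can derive the new value rather than replace it.
	s, err := topology.NewServer(
		address.Address("localhost:27017"),
		topology.WithHeartbeatInterval(func(d time.Duration) time.Duration { return d / 2 }),
		topology.WithMaxConnections(func(uint16) uint16 { return 50 }),
		topology.WithMaxIdleConnections(func(uint16) uint16 { return 10 }),
	)
	fmt.Println(s != nil, err) // server is created but not yet connected
}
```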
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/topology.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/topology.go
new file mode 100644
index 0000000..09a319c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/topology.go
@@ -0,0 +1,471 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package topology contains types that handle the discovery, monitoring, and selection
+// of servers. This package is designed to expose enough inner workings of service discovery
+// and monitoring to allow low-level applications to have fine-grained control, while hiding
+// most of the detailed implementation of the algorithms.
+package topology
+
+import (
+	"context"
+	"errors"
+	"math/rand"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// ErrSubscribeAfterClosed is returned when a user attempts to subscribe to a
+// closed Server or Topology.
+var ErrSubscribeAfterClosed = errors.New("cannot subscribe after close")
+
+// ErrTopologyClosed is returned when a user attempts to call a method on a
+// closed Topology.
+var ErrTopologyClosed = errors.New("topology is closed")
+
+// ErrTopologyConnected is returned when a user attempts to connect to an
+// already connected Topology.
+var ErrTopologyConnected = errors.New("topology is connected or connecting")
+
+// ErrServerSelectionTimeout is returned from server selection when the server
+// selection process took longer than allowed by the timeout.
+var ErrServerSelectionTimeout = errors.New("server selection timeout")
+
+// MonitorMode represents the way in which a server is monitored.
+type MonitorMode uint8
+
+// These constants are the available monitoring modes.
+const (
+	AutomaticMode MonitorMode = iota
+	SingleMode
+)
+
+// Topology represents a MongoDB deployment.
+type Topology struct {
+	registry *bsoncodec.Registry
+
+	connectionstate int32
+
+	cfg *config
+
+	desc atomic.Value // holds a description.Topology
+
+	done chan struct{}
+
+	fsm       *fsm
+	changes   chan description.Server
+	changeswg sync.WaitGroup
+
+	SessionPool *session.Pool
+
+	// This should really be encapsulated into its own type. This will likely
+	// require a redesign so we can share a minimum of data between the
+	// subscribers and the topology.
+	subscribers         map[uint64]chan description.Topology
+	currentSubscriberID uint64
+	subscriptionsClosed bool
+	subLock             sync.Mutex
+
+	// We should redesign how we connect and handle individual servers. This is
+	// too difficult to maintain and it's rather easy to accidentally access
+	// the servers without acquiring the lock or checking if the servers are
+	// closed. This lock should also be an RWMutex.
+	serversLock   sync.Mutex
+	serversClosed bool
+	servers       map[address.Address]*Server
+
+	wg sync.WaitGroup
+}
+
+// New creates a new topology.
+func New(opts ...Option) (*Topology, error) {
+	cfg, err := newConfig(opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	t := &Topology{
+		cfg:         cfg,
+		done:        make(chan struct{}),
+		fsm:         newFSM(),
+		changes:     make(chan description.Server),
+		subscribers: make(map[uint64]chan description.Topology),
+		servers:     make(map[address.Address]*Server),
+	}
+	t.desc.Store(description.Topology{})
+
+	if cfg.replicaSetName != "" {
+		t.fsm.SetName = cfg.replicaSetName
+		t.fsm.Kind = description.ReplicaSetNoPrimary
+	}
+
+	if cfg.mode == SingleMode {
+		t.fsm.Kind = description.Single
+	}
+
+	return t, nil
+}
+
+// Connect initializes a Topology and starts the monitoring process. This function
+// must be called to properly monitor the topology.
+func (t *Topology) Connect(ctx context.Context) error {
+	if !atomic.CompareAndSwapInt32(&t.connectionstate, disconnected, connecting) {
+		return ErrTopologyConnected
+	}
+
+	t.desc.Store(description.Topology{})
+	var err error
+	t.serversLock.Lock()
+	for _, a := range t.cfg.seedList {
+		addr := address.Address(a).Canonicalize()
+		t.fsm.Servers = append(t.fsm.Servers, description.Server{Addr: addr})
+		err = t.addServer(ctx, addr)
+	}
+	t.serversLock.Unlock()
+
+	go t.update()
+	t.changeswg.Add(1)
+
+	t.subscriptionsClosed = false // explicitly set in case topology was disconnected and then reconnected
+
+	atomic.StoreInt32(&t.connectionstate, connected)
+
+	// After connection, make a subscription to keep the pool updated
+	sub, err := t.Subscribe()
+	t.SessionPool = session.NewPool(sub.C)
+	return err
+}
+
+// Disconnect closes the topology. It stops the monitoring thread and
+// closes all open subscriptions.
+func (t *Topology) Disconnect(ctx context.Context) error {
+	if !atomic.CompareAndSwapInt32(&t.connectionstate, connected, disconnecting) {
+		return ErrTopologyClosed
+	}
+
+	t.serversLock.Lock()
+	t.serversClosed = true
+	for addr, server := range t.servers {
+		t.removeServer(ctx, addr, server)
+	}
+	t.serversLock.Unlock()
+
+	t.wg.Wait()
+	t.done <- struct{}{}
+	t.changeswg.Wait()
+
+	t.desc.Store(description.Topology{})
+
+	atomic.StoreInt32(&t.connectionstate, disconnected)
+	return nil
+}
+
+// Description returns a description of the topology.
+func (t *Topology) Description() description.Topology {
+	td, ok := t.desc.Load().(description.Topology)
+	if !ok {
+		td = description.Topology{}
+	}
+	return td
+}
+
+// Subscribe returns a Subscription on which all updated description.Topologys
+// will be sent. The channel of the subscription will have a buffer size of one,
+// and will be pre-populated with the current description.Topology.
+func (t *Topology) Subscribe() (*Subscription, error) {
+	if atomic.LoadInt32(&t.connectionstate) != connected {
+		return nil, errors.New("cannot subscribe to Topology that is not connected")
+	}
+	ch := make(chan description.Topology, 1)
+	td, ok := t.desc.Load().(description.Topology)
+	if !ok {
+		td = description.Topology{}
+	}
+	ch <- td
+
+	t.subLock.Lock()
+	defer t.subLock.Unlock()
+	if t.subscriptionsClosed {
+		return nil, ErrSubscribeAfterClosed
+	}
+	id := t.currentSubscriberID
+	t.subscribers[id] = ch
+	t.currentSubscriberID++
+
+	return &Subscription{
+		C:  ch,
+		t:  t,
+		id: id,
+	}, nil
+}
+
+// RequestImmediateCheck will send heartbeats to all the servers in the
+// topology right away, instead of waiting for the heartbeat timeout.
+func (t *Topology) RequestImmediateCheck() {
+	if atomic.LoadInt32(&t.connectionstate) != connected {
+		return
+	}
+	t.serversLock.Lock()
+	for _, server := range t.servers {
+		server.RequestImmediateCheck()
+	}
+	t.serversLock.Unlock()
+}
+
+// SupportsSessions returns true if the topology supports sessions.
+func (t *Topology) SupportsSessions() bool {
+	return t.Description().SessionTimeoutMinutes != 0 && t.Description().Kind != description.Single
+}
+
+// SelectServer selects a server given a selector. SelectServer complies with the
+// server selection spec, and will time out after serverSelectionTimeout or when the
+// parent context is done.
+func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelector) (*SelectedServer, error) {
+	if atomic.LoadInt32(&t.connectionstate) != connected {
+		return nil, ErrTopologyClosed
+	}
+	var ssTimeoutCh <-chan time.Time
+
+	if t.cfg.serverSelectionTimeout > 0 {
+		ssTimeout := time.NewTimer(t.cfg.serverSelectionTimeout)
+		ssTimeoutCh = ssTimeout.C
+		defer ssTimeout.Stop()
+	}
+
+	sub, err := t.Subscribe()
+	if err != nil {
+		return nil, err
+	}
+	defer sub.Unsubscribe()
+
+	for {
+		suitable, err := t.selectServer(ctx, sub.C, ss, ssTimeoutCh)
+		if err != nil {
+			return nil, err
+		}
+
+		selected := suitable[rand.Intn(len(suitable))]
+		selectedS, err := t.FindServer(selected)
+		switch {
+		case err != nil:
+			return nil, err
+		case selectedS != nil:
+			return selectedS, nil
+		default:
+			// We don't have an actual server for the provided description.
+			// This could happen for a number of reasons, including that the
+			// server has since stopped being a part of this topology, or that
+			// the server selector returned no suitable servers.
+		}
+	}
+}
+
+// FindServer will attempt to find a server that fits the given server description.
+// This method will return nil, nil if a matching server could not be found.
+func (t *Topology) FindServer(selected description.Server) (*SelectedServer, error) {
+	if atomic.LoadInt32(&t.connectionstate) != connected {
+		return nil, ErrTopologyClosed
+	}
+	t.serversLock.Lock()
+	defer t.serversLock.Unlock()
+	server, ok := t.servers[selected.Addr]
+	if !ok {
+		return nil, nil
+	}
+
+	desc := t.Description()
+	return &SelectedServer{
+		Server: server,
+		Kind:   desc.Kind,
+	}, nil
+}
+
+func wrapServerSelectionError(err error, t *Topology) error {
+	return fmt.Errorf("server selection error: %v\ncurrent topology: %s", err, t.String())
+}
+
+// selectServer is the core piece of server selection. It handles getting
+// topology descriptions and running server selection on those descriptions.
+func (t *Topology) selectServer(ctx context.Context, subscriptionCh <-chan description.Topology, ss description.ServerSelector, timeoutCh <-chan time.Time) ([]description.Server, error) {
+	var current description.Topology
+	for {
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		case <-timeoutCh:
+			return nil, wrapServerSelectionError(ErrServerSelectionTimeout, t)
+		case current = <-subscriptionCh:
+		}
+
+		var allowed []description.Server
+		for _, s := range current.Servers {
+			if s.Kind != description.Unknown {
+				allowed = append(allowed, s)
+			}
+		}
+
+		suitable, err := ss.SelectServer(current, allowed)
+		if err != nil {
+			return nil, wrapServerSelectionError(err, t)
+		}
+
+		if len(suitable) > 0 {
+			return suitable, nil
+		}
+
+		t.RequestImmediateCheck()
+	}
+}
+
+func (t *Topology) update() {
+	defer t.changeswg.Done()
+	defer func() {
+		//  ¯\_(ツ)_/¯
+		if r := recover(); r != nil {
+			<-t.done
+		}
+	}()
+
+	for {
+		select {
+		case change := <-t.changes:
+			current, err := t.apply(context.TODO(), change)
+			if err != nil {
+				continue
+			}
+
+			t.desc.Store(current)
+			t.subLock.Lock()
+			for _, ch := range t.subscribers {
+				// We drain the description if there's one in the channel
+				select {
+				case <-ch:
+				default:
+				}
+				ch <- current
+			}
+			t.subLock.Unlock()
+		case <-t.done:
+			t.subLock.Lock()
+			for id, ch := range t.subscribers {
+				close(ch)
+				delete(t.subscribers, id)
+			}
+			t.subscriptionsClosed = true
+			t.subLock.Unlock()
+			return
+		}
+	}
+}
+
+func (t *Topology) apply(ctx context.Context, desc description.Server) (description.Topology, error) {
+	var err error
+	prev := t.fsm.Topology
+
+	current, err := t.fsm.apply(desc)
+	if err != nil {
+		return description.Topology{}, err
+	}
+
+	diff := description.DiffTopology(prev, current)
+	t.serversLock.Lock()
+	if t.serversClosed {
+		t.serversLock.Unlock()
+		return description.Topology{}, nil
+	}
+
+	for _, removed := range diff.Removed {
+		if s, ok := t.servers[removed.Addr]; ok {
+			t.removeServer(ctx, removed.Addr, s)
+		}
+	}
+
+	for _, added := range diff.Added {
+		_ = t.addServer(ctx, added.Addr)
+	}
+	t.serversLock.Unlock()
+	return current, nil
+}
+
+func (t *Topology) addServer(ctx context.Context, addr address.Address) error {
+	if _, ok := t.servers[addr]; ok {
+		return nil
+	}
+
+	svr, err := ConnectServer(ctx, addr, t.cfg.serverOpts...)
+	if err != nil {
+		return err
+	}
+
+	t.servers[addr] = svr
+	var sub *ServerSubscription
+	sub, err = svr.Subscribe()
+	if err != nil {
+		return err
+	}
+
+	t.wg.Add(1)
+	go func() {
+		for c := range sub.C {
+			t.changes <- c
+		}
+
+		t.wg.Done()
+	}()
+
+	return nil
+}
+
+func (t *Topology) removeServer(ctx context.Context, addr address.Address, server *Server) {
+	_ = server.Disconnect(ctx)
+	delete(t.servers, addr)
+}
+
+// String implements the Stringer interface.
+func (t *Topology) String() string {
+	desc := t.Description()
+	str := fmt.Sprintf("Type: %s\nServers:\n", desc.Kind)
+	for _, s := range t.servers {
+		str += s.String() + "\n"
+	}
+	return str
+}
+
+// Subscription is a subscription to updates to the description of the Topology that created this
+// Subscription.
+type Subscription struct {
+	C  <-chan description.Topology
+	t  *Topology
+	id uint64
+}
+
+// Unsubscribe unsubscribes this Subscription from updates and closes the
+// subscription channel.
+func (s *Subscription) Unsubscribe() error {
+	s.t.subLock.Lock()
+	defer s.t.subLock.Unlock()
+	if s.t.subscriptionsClosed {
+		return nil
+	}
+
+	ch, ok := s.t.subscribers[s.id]
+	if !ok {
+		return nil
+	}
+
+	close(ch)
+	delete(s.t.subscribers, s.id)
+
+	return nil
+}
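
Putting the pieces together: New builds the Topology, Connect seeds it and starts monitoring, and Subscribe yields description.Topology updates, pre-populated with the current one. A sketch assuming a mongod at localhost:27017; without one, monitoring still runs and the servers simply report as Unknown:

```go
package main

import (
	"context"
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
)

func main() {
	ctx := context.Background()

	// New only builds the Topology; Connect starts monitoring the seed list.
	t, err := topology.New(topology.WithSeedList(func(...string) []string {
		return []string{"localhost:27017"}
	}))
	if err != nil {
		panic(err)
	}
	if err := t.Connect(ctx); err != nil {
		panic(err)
	}
	defer t.Disconnect(ctx)

	// The subscription channel is pre-populated with the current description.
	sub, err := t.Subscribe()
	if err != nil {
		panic(err)
	}
	defer sub.Unsubscribe()

	desc := <-sub.C
	fmt.Println("topology kind:", desc.Kind, "servers:", len(desc.Servers))
}
```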
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/topology_options.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/topology_options.go
new file mode 100644
index 0000000..9fa98e6
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/topology_options.go
@@ -0,0 +1,269 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"bytes"
+	"strings"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/auth"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/compressor"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/connstring"
+)
+
+// Option is a configuration option for a topology.
+type Option func(*config) error
+
+type config struct {
+	mode                   MonitorMode
+	replicaSetName         string
+	seedList               []string
+	serverOpts             []ServerOption
+	cs                     connstring.ConnString
+	serverSelectionTimeout time.Duration
+}
+
+func newConfig(opts ...Option) (*config, error) {
+	cfg := &config{
+		seedList:               []string{"localhost:27017"},
+		serverSelectionTimeout: 30 * time.Second,
+	}
+
+	for _, opt := range opts {
+		err := opt(cfg)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return cfg, nil
+}
+
+// WithConnString configures the topology using the connection string.
+func WithConnString(fn func(connstring.ConnString) connstring.ConnString) Option {
+	return func(c *config) error {
+		cs := fn(c.cs)
+		c.cs = cs
+
+		if cs.ServerSelectionTimeoutSet {
+			c.serverSelectionTimeout = cs.ServerSelectionTimeout
+		}
+
+		var connOpts []connection.Option
+
+		if cs.AppName != "" {
+			connOpts = append(connOpts, connection.WithAppName(func(string) string { return cs.AppName }))
+		}
+
+		switch cs.Connect {
+		case connstring.SingleConnect:
+			c.mode = SingleMode
+		}
+
+		c.seedList = cs.Hosts
+
+		if cs.ConnectTimeout > 0 {
+			c.serverOpts = append(c.serverOpts, WithHeartbeatTimeout(func(time.Duration) time.Duration { return cs.ConnectTimeout }))
+			connOpts = append(connOpts, connection.WithConnectTimeout(func(time.Duration) time.Duration { return cs.ConnectTimeout }))
+		}
+
+		if cs.SocketTimeoutSet {
+			connOpts = append(
+				connOpts,
+				connection.WithReadTimeout(func(time.Duration) time.Duration { return cs.SocketTimeout }),
+				connection.WithWriteTimeout(func(time.Duration) time.Duration { return cs.SocketTimeout }),
+			)
+		}
+
+		if cs.HeartbeatInterval > 0 {
+			c.serverOpts = append(c.serverOpts, WithHeartbeatInterval(func(time.Duration) time.Duration { return cs.HeartbeatInterval }))
+		}
+
+		if cs.MaxConnIdleTime > 0 {
+			connOpts = append(connOpts, connection.WithIdleTimeout(func(time.Duration) time.Duration { return cs.MaxConnIdleTime }))
+		}
+
+		if cs.MaxPoolSizeSet {
+			c.serverOpts = append(c.serverOpts, WithMaxConnections(func(uint16) uint16 { return cs.MaxPoolSize }))
+			c.serverOpts = append(c.serverOpts, WithMaxIdleConnections(func(uint16) uint16 { return cs.MaxPoolSize }))
+		}
+
+		if cs.ReplicaSet != "" {
+			c.replicaSetName = cs.ReplicaSet
+		}
+
+		var x509Username string
+		if cs.SSL {
+			tlsConfig := connection.NewTLSConfig()
+
+			if cs.SSLCaFileSet {
+				err := tlsConfig.AddCACertFromFile(cs.SSLCaFile)
+				if err != nil {
+					return err
+				}
+			}
+
+			if cs.SSLInsecure {
+				tlsConfig.SetInsecure(true)
+			}
+
+			if cs.SSLClientCertificateKeyFileSet {
+				if cs.SSLClientCertificateKeyPasswordSet && cs.SSLClientCertificateKeyPassword != nil {
+					tlsConfig.SetClientCertDecryptPassword(cs.SSLClientCertificateKeyPassword)
+				}
+				s, err := tlsConfig.AddClientCertFromFile(cs.SSLClientCertificateKeyFile)
+				if err != nil {
+					return err
+				}
+
+				// The Go x509 package gives the subject with the pairs in the reverse order of what we want.
+				pairs := strings.Split(s, ",")
+				b := bytes.NewBufferString("")
+
+				for i := len(pairs) - 1; i >= 0; i-- {
+					b.WriteString(pairs[i])
+
+					if i > 0 {
+						b.WriteString(",")
+					}
+				}
+
+				x509Username = b.String()
+			}
+
+			connOpts = append(connOpts, connection.WithTLSConfig(func(*connection.TLSConfig) *connection.TLSConfig { return tlsConfig }))
+		}
+
+		if cs.Username != "" || cs.AuthMechanism == auth.MongoDBX509 || cs.AuthMechanism == auth.GSSAPI {
+			cred := &auth.Cred{
+				Source:      "admin",
+				Username:    cs.Username,
+				Password:    cs.Password,
+				PasswordSet: cs.PasswordSet,
+				Props:       cs.AuthMechanismProperties,
+			}
+
+			if cs.AuthSource != "" {
+				cred.Source = cs.AuthSource
+			} else {
+				switch cs.AuthMechanism {
+				case auth.MongoDBX509:
+					if cred.Username == "" {
+						cred.Username = x509Username
+					}
+					fallthrough
+				case auth.GSSAPI, auth.PLAIN:
+					cred.Source = "$external"
+				default:
+					cred.Source = cs.Database
+				}
+			}
+
+			authenticator, err := auth.CreateAuthenticator(cs.AuthMechanism, cred)
+			if err != nil {
+				return err
+			}
+
+			connOpts = append(connOpts, connection.WithHandshaker(func(h connection.Handshaker) connection.Handshaker {
+				options := &auth.HandshakeOptions{
+					AppName:       cs.AppName,
+					Authenticator: authenticator,
+					Compressors:   cs.Compressors,
+				}
+				if cs.AuthMechanism == "" {
+					// Required for SASL mechanism negotiation during handshake
+					options.DBUser = cred.Source + "." + cred.Username
+				}
+				return auth.Handshaker(h, options)
+			}))
+		} else {
+			// We need to add a non-auth Handshaker to the connection options
+			connOpts = append(connOpts, connection.WithHandshaker(func(h connection.Handshaker) connection.Handshaker {
+				return &command.Handshake{Client: command.ClientDoc(cs.AppName), Compressors: cs.Compressors}
+			}))
+		}
+
+		if len(cs.Compressors) > 0 {
+			comp := make([]compressor.Compressor, 0, len(cs.Compressors))
+
+			for _, c := range cs.Compressors {
+				switch c {
+				case "snappy":
+					comp = append(comp, compressor.CreateSnappy())
+				case "zlib":
+					zlibComp, err := compressor.CreateZlib(cs.ZlibLevel)
+					if err != nil {
+						return err
+					}
+
+					comp = append(comp, zlibComp)
+				}
+			}
+
+			connOpts = append(connOpts, connection.WithCompressors(func(compressors []compressor.Compressor) []compressor.Compressor {
+				return append(compressors, comp...)
+			}))
+
+			c.serverOpts = append(c.serverOpts, WithCompressionOptions(func(opts ...string) []string {
+				return append(opts, cs.Compressors...)
+			}))
+		}
+
+		if len(connOpts) > 0 {
+			c.serverOpts = append(c.serverOpts, WithConnectionOptions(func(opts ...connection.Option) []connection.Option {
+				return append(opts, connOpts...)
+			}))
+		}
+
+		return nil
+	}
+}
+
+// WithMode configures the topology's monitor mode.
+func WithMode(fn func(MonitorMode) MonitorMode) Option {
+	return func(cfg *config) error {
+		cfg.mode = fn(cfg.mode)
+		return nil
+	}
+}
+
+// WithReplicaSetName configures the topology's default replica set name.
+func WithReplicaSetName(fn func(string) string) Option {
+	return func(cfg *config) error {
+		cfg.replicaSetName = fn(cfg.replicaSetName)
+		return nil
+	}
+}
+
+// WithSeedList configures a topology's seed list.
+func WithSeedList(fn func(...string) []string) Option {
+	return func(cfg *config) error {
+		cfg.seedList = fn(cfg.seedList...)
+		return nil
+	}
+}
+
+// WithServerOptions configures a topology's server options for when a new server
+// needs to be created.
+func WithServerOptions(fn func(...ServerOption) []ServerOption) Option {
+	return func(cfg *config) error {
+		cfg.serverOpts = fn(cfg.serverOpts...)
+		return nil
+	}
+}
+
+// WithServerSelectionTimeout configures a topology's server selection timeout.
+// A server selection timeout of 0 means there is no timeout for server selection.
+func WithServerSelectionTimeout(fn func(time.Duration) time.Duration) Option {
+	return func(cfg *config) error {
+		cfg.serverSelectionTimeout = fn(cfg.serverSelectionTimeout)
+		return nil
+	}
+}
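
WithConnString fans a parsed URI out into the seed list, server options, and connection options. A hedged sketch; it assumes connstring.Parse in the x/network/connstring package accepts a MongoDB URI string and returns the ConnString consumed here:

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
	"github.com/mongodb/mongo-go-driver/x/network/connstring"
)

func main() {
	// connstring.Parse is assumed here to turn a MongoDB URI into the
	// ConnString consumed by WithConnString; the option then maps the URI's
	// settings onto seed list, server, and connection options.
	cs, err := connstring.Parse("mongodb://localhost:27017/?replicaSet=rs0&heartbeatIntervalMS=5000")
	if err != nil {
		panic(err)
	}

	t, err := topology.New(topology.WithConnString(
		func(connstring.ConnString) connstring.ConnString { return cs },
	))
	fmt.Println(t != nil, err)
}
```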
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/update.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/update.go
new file mode 100644
index 0000000..d8d8d16
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/update.go
@@ -0,0 +1,134 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// Update handles the full cycle dispatch and execution of an update command against the provided
+// topology.
+func Update(
+	ctx context.Context,
+	cmd command.Update,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	opts ...*options.UpdateOptions,
+) (result.Update, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.Update{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.Update{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	updateOpts := options.MergeUpdateOptions(opts...)
+
+	if updateOpts.ArrayFilters != nil {
+		if ss.Description().WireVersion.Max < 6 {
+			return result.Update{}, ErrArrayFilters
+		}
+		arr, err := updateOpts.ArrayFilters.ToArray()
+		if err != nil {
+			return result.Update{}, err
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"arrayFilters", bsonx.Array(arr)})
+	}
+	if updateOpts.BypassDocumentValidation != nil && ss.Description().WireVersion.Includes(4) {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"bypassDocumentValidation", bsonx.Boolean(*updateOpts.BypassDocumentValidation)})
+	}
+	if updateOpts.Collation != nil {
+		if ss.Description().WireVersion.Max < 5 {
+			return result.Update{}, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(updateOpts.Collation.ToDocument())})
+	}
+	if updateOpts.Upsert != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"upsert", bsonx.Boolean(*updateOpts.Upsert)})
+	}
+
+	// Execute in a single trip if retry writes not supported, or retry not enabled
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false // explicitly set to false to prevent encoding transaction number
+		}
+		return update(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, originalErr := update(ctx, cmd, ss, nil)
+
+	// Retry if appropriate
+	if cerr, ok := originalErr.(command.Error); ok && cerr.Retryable() ||
+		res.WriteConcernError != nil && command.IsWriteConcernErrorRetryable(res.WriteConcernError) {
+		ss, err := topo.SelectServer(ctx, selector)
+
+		// Return original error if server selection fails or new server does not support retryable writes
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return res, originalErr
+		}
+
+		return update(ctx, cmd, ss, cerr)
+	}
+	return res, originalErr
+
+}
+
+func update(
+	ctx context.Context,
+	cmd command.Update,
+	ss *topology.SelectedServer,
+	oldErr error,
+) (result.Update, error) {
+	desc := ss.Description()
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.Update{}, oldErr
+		}
+		return result.Update{}, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return result.Update{}, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
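
Update's retry path is deliberately narrow: one initial attempt, and on a retryable error exactly one more after re-selecting a server, falling back to the original error if the retry preconditions fail. A standalone sketch of that retry-once shape, using hypothetical stand-ins rather than the driver's command types:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotMaster = errors.New("not master") // stand-in retryable error

// writeOnce simulates one dispatch; the first attempt hits a stale primary.
func writeOnce(attempt int) error {
	if attempt == 1 {
		return errNotMaster
	}
	return nil
}

// writeWithRetry mirrors Update's shape: keep the original error, and make
// at most one retry; if the retry cannot proceed, surface the original error.
func writeWithRetry() error {
	originalErr := writeOnce(1)
	if originalErr == nil || originalErr != errNotMaster {
		return originalErr // success, or a non-retryable failure
	}
	return writeOnce(2) // single retry against a freshly selected server
}

func main() {
	fmt.Println(writeWithRetry()) // <nil>
}
```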
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid/uuid.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid/uuid.go
new file mode 100644
index 0000000..7ad5eea
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid/uuid.go
@@ -0,0 +1,37 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package uuid
+
+import (
+	"bytes"
+	"crypto/rand"
+	"io"
+)
+
+// UUID represents a UUID.
+type UUID [16]byte
+
+var rander = rand.Reader
+
+// New generates a new uuid.
+func New() (UUID, error) {
+	var uuid [16]byte
+
+	_, err := io.ReadFull(rander, uuid[:])
+	if err != nil {
+		return [16]byte{}, err
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+
+	return uuid, nil
+}
+
+// Equal returns true if two UUIDs are equal.
+func Equal(a, b UUID) bool {
+	return bytes.Equal([]byte(a[:]), []byte(b[:]))
+}
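
The uuid package above is small enough to exercise directly: New draws 16 random bytes and stamps the RFC 4122 version/variant bits, and Equal compares byte-wise.

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
)

func main() {
	a, err := uuid.New()
	if err != nil {
		panic(err)
	}
	b, _ := uuid.New()

	// Version and variant bits are fixed by New (RFC 4122 version 4).
	fmt.Printf("version nibble: %x\n", a[6]>>4) // always 4
	fmt.Println("a == a:", uuid.Equal(a, a))    // true
	fmt.Println("a == b:", uuid.Equal(a, b))    // almost certainly false
}
```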
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/write.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/write.go
new file mode 100644
index 0000000..6447a49
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/write.go
@@ -0,0 +1,79 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Write handles the full cycle dispatch and execution of a write command against the provided
+// topology.
+func Write(
+	ctx context.Context,
+	cmd command.Write,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+) (bson.Raw, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return nil, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
+
+// Retryable writes are supported if the server supports sessions, the operation is not
+// within a transaction, and the write is acknowledged.
+func retrySupported(
+	topo *topology.Topology,
+	desc description.SelectedServer,
+	sess *session.Client,
+	wc *writeconcern.WriteConcern,
+) bool {
+	return topo.SupportsSessions() &&
+		description.SessionsSupported(desc.WireVersion) &&
+		!(sess.TransactionInProgress() || sess.TransactionStarting()) &&
+		writeconcern.AckWrite(wc)
+}
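
retrySupported requires, among other things, an acknowledged write concern, and Write itself short-circuits unacknowledged writes into the fire-and-forget goroutine. A small sketch of that acknowledgement check; AckWrite is the function used above, while writeconcern.New, W, and WMajority are assumed from this driver's mongo/writeconcern package:

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
)

func main() {
	// w=0 produces an unacknowledged write, which (per retrySupported above)
	// is never retried and which Write dispatches fire-and-forget.
	unacked := writeconcern.New(writeconcern.W(0))
	acked := writeconcern.New(writeconcern.WMajority())

	fmt.Println(writeconcern.AckWrite(unacked)) // false
	fmt.Println(writeconcern.AckWrite(acked))   // true
	fmt.Println(writeconcern.AckWrite(nil))     // true: the default is acknowledged
}
```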
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/address/addr.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/address/addr.go
new file mode 100644
index 0000000..c2a3bb7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/address/addr.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package address
+
+import (
+	"net"
+	"strings"
+)
+
+const defaultPort = "27017"
+
+// Address is a network address. It can either be an IP address or a DNS name.
+type Address string
+
+// Network is the network protocol for this address. In most cases this will be
+// "tcp" or "unix".
+func (a Address) Network() string {
+	if strings.HasSuffix(string(a), "sock") {
+		return "unix"
+	}
+	return "tcp"
+}
+
+// String is the canonical version of this address, e.g. localhost:27017,
+// 1.2.3.4:27017, example.com:27017.
+func (a Address) String() string {
+	// TODO: unicode case folding?
+	s := strings.ToLower(string(a))
+	if len(s) == 0 {
+		return ""
+	}
+	if a.Network() != "unix" {
+		_, _, err := net.SplitHostPort(s)
+		if err != nil && strings.Contains(err.Error(), "missing port in address") {
+			s += ":" + defaultPort
+		}
+	}
+
+	return s
+}
+
+// Canonicalize creates a canonicalized address.
+func (a Address) Canonicalize() Address {
+	return Address(a.String())
+}
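
Address normalization is pure string logic, so it is easy to exercise directly: String lowercases and appends the default port 27017 when one is missing, and Network detects unix sockets by the "sock" suffix.

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/network/address"
)

func main() {
	fmt.Println(address.Address("LocalHost").String())                // localhost:27017
	fmt.Println(address.Address("example.com:27018").Canonicalize())  // example.com:27018
	fmt.Println(address.Address("/tmp/mongodb-27017.sock").Network()) // unix
}
```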
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/abort_transaction.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/abort_transaction.go
new file mode 100644
index 0000000..54e5cca
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/abort_transaction.go
@@ -0,0 +1,89 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// AbortTransaction represents the abortTransaction() command
+type AbortTransaction struct {
+	Session *session.Client
+	err     error
+	result  result.TransactionResult
+}
+
+// Encode will encode this command into a wiremessage for the given server description.
+func (at *AbortTransaction) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd := at.encode(desc)
+	return cmd.Encode(desc)
+}
+
+func (at *AbortTransaction) encode(desc description.SelectedServer) *Write {
+	cmd := bsonx.Doc{{"abortTransaction", bsonx.Int32(1)}}
+	return &Write{
+		DB:           "admin",
+		Command:      cmd,
+		Session:      at.Session,
+		WriteConcern: at.Session.CurrentWc,
+	}
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding are deferred until
+// either the Result or Err methods are called.
+func (at *AbortTransaction) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *AbortTransaction {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		at.err = err
+		return at
+	}
+
+	return at.decode(desc, rdr)
+}
+
+func (at *AbortTransaction) decode(desc description.SelectedServer, rdr bson.Raw) *AbortTransaction {
+	at.err = bson.Unmarshal(rdr, &at.result)
+	if at.err == nil && at.result.WriteConcernError != nil {
+		at.err = Error{
+			Code:    int32(at.result.WriteConcernError.Code),
+			Message: at.result.WriteConcernError.ErrMsg,
+		}
+	}
+	return at
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (at *AbortTransaction) Result() (result.TransactionResult, error) {
+	if at.err != nil {
+		return result.TransactionResult{}, at.err
+	}
+
+	return at.result, nil
+}
+
+// Err returns the error set on this command
+func (at *AbortTransaction) Err() error {
+	return at.err
+}
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter
+func (at *AbortTransaction) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.TransactionResult, error) {
+	cmd := at.encode(desc)
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.TransactionResult{}, err
+	}
+
+	return at.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/aggregate.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/aggregate.go
new file mode 100644
index 0000000..106f583
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/aggregate.go
@@ -0,0 +1,159 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Aggregate represents the aggregate command.
+//
+// The aggregate command performs an aggregation.
+type Aggregate struct {
+	NS           Namespace
+	Pipeline     bsonx.Arr
+	CursorOpts   []bsonx.Elem
+	Opts         []bsonx.Elem
+	ReadPref     *readpref.ReadPref
+	WriteConcern *writeconcern.WriteConcern
+	ReadConcern  *readconcern.ReadConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (a *Aggregate) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := a.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+func (a *Aggregate) encode(desc description.SelectedServer) (*Read, error) {
+	if err := a.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	command := bsonx.Doc{
+		{"aggregate", bsonx.String(a.NS.Collection)},
+		{"pipeline", bsonx.Array(a.Pipeline)},
+	}
+
+	cursor := bsonx.Doc{}
+	hasOutStage := a.HasDollarOut()
+
+	for _, opt := range a.Opts {
+		switch opt.Key {
+		case "batchSize":
+			if opt.Value.Int32() == 0 && hasOutStage {
+				continue
+			}
+			cursor = append(cursor, opt)
+		default:
+			command = append(command, opt)
+		}
+	}
+	command = append(command, bsonx.Elem{"cursor", bsonx.Document(cursor)})
+
+	// add write concern because it won't be added by the Read command's Encode()
+	if desc.WireVersion.Max >= 5 && hasOutStage && a.WriteConcern != nil {
+		t, data, err := a.WriteConcern.MarshalBSONValue()
+		if err != nil {
+			return nil, err
+		}
+		var xval bsonx.Val
+		err = xval.UnmarshalBSONValue(t, data)
+		if err != nil {
+			return nil, err
+		}
+		command = append(command, bsonx.Elem{Key: "writeConcern", Value: xval})
+	}
+
+	return &Read{
+		DB:          a.NS.DB,
+		Command:     command,
+		ReadPref:    a.ReadPref,
+		ReadConcern: a.ReadConcern,
+		Clock:       a.Clock,
+		Session:     a.Session,
+	}, nil
+}
+
+// HasDollarOut returns true if the Pipeline field contains a $out stage.
+func (a *Aggregate) HasDollarOut() bool {
+	if len(a.Pipeline) == 0 {
+		return false
+	}
+
+	val := a.Pipeline[len(a.Pipeline)-1]
+
+	doc, ok := val.DocumentOK()
+	if !ok || len(doc) != 1 {
+		return false
+	}
+	return doc[0].Key == "$out"
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (a *Aggregate) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Aggregate {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		a.err = err
+		return a
+	}
+
+	return a.decode(desc, rdr)
+}
+
+func (a *Aggregate) decode(desc description.SelectedServer, rdr bson.Raw) *Aggregate {
+	a.result = rdr
+	return a
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (a *Aggregate) Result() (bson.Raw, error) {
+	if a.err != nil {
+		return nil, a.err
+	}
+	return a.result, nil
+}
+
+// Err returns the error set on this command.
+func (a *Aggregate) Err() error { return a.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (a *Aggregate) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := a.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+
+	return a.decode(desc, rdr).Result()
+}
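
A small sketch of how HasDollarOut classifies a pipeline, assuming the bsonx literal forms used elsewhere in this package:

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/bsonx"
	"github.com/mongodb/mongo-go-driver/x/network/command"
)

func main() {
	agg := command.Aggregate{
		NS: command.Namespace{DB: "db", Collection: "coll"},
		Pipeline: bsonx.Arr{
			bsonx.Document(bsonx.Doc{{"$match", bsonx.Document(bsonx.Doc{{"x", bsonx.Int32(1)}})}}),
			bsonx.Document(bsonx.Doc{{"$out", bsonx.String("results")}}),
		},
	}
	// true: the final stage is a single-key document whose key is "$out"
	fmt.Println(agg.HasDollarOut())
}
```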
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/buildinfo.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/buildinfo.go
new file mode 100644
index 0000000..6bd09fa
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/buildinfo.go
@@ -0,0 +1,95 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// BuildInfo represents the buildInfo command.
+//
+// The buildInfo command is used for getting the build information for a
+// MongoDB server.
+type BuildInfo struct {
+	err error
+	res result.BuildInfo
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (bi *BuildInfo) Encode() (wiremessage.WireMessage, error) {
+	// This can probably just be a global variable that we reuse.
+	cmd := bsonx.Doc{{"buildInfo", bsonx.Int32(1)}}
+	rdr, err := cmd.MarshalBSON()
+	if err != nil {
+		return nil, err
+	}
+	query := wiremessage.Query{
+		MsgHeader:          wiremessage.Header{RequestID: wiremessage.NextRequestID()},
+		FullCollectionName: "admin.$cmd",
+		Flags:              wiremessage.SlaveOK,
+		NumberToReturn:     -1,
+		Query:              rdr,
+	}
+	return query, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (bi *BuildInfo) Decode(wm wiremessage.WireMessage) *BuildInfo {
+	reply, ok := wm.(wiremessage.Reply)
+	if !ok {
+		bi.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return bi
+	}
+	rdr, err := decodeCommandOpReply(reply)
+	if err != nil {
+		bi.err = err
+		return bi
+	}
+	err = bson.Unmarshal(rdr, &bi.res)
+	if err != nil {
+		bi.err = err
+		return bi
+	}
+	return bi
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (bi *BuildInfo) Result() (result.BuildInfo, error) {
+	if bi.err != nil {
+		return result.BuildInfo{}, bi.err
+	}
+
+	return bi.res, nil
+}
+
+// Err returns the error set on this command.
+func (bi *BuildInfo) Err() error { return bi.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (bi *BuildInfo) RoundTrip(ctx context.Context, rw wiremessage.ReadWriter) (result.BuildInfo, error) {
+	wm, err := bi.Encode()
+	if err != nil {
+		return result.BuildInfo{}, err
+	}
+
+	err = rw.WriteWireMessage(ctx, wm)
+	if err != nil {
+		return result.BuildInfo{}, err
+	}
+	wm, err = rw.ReadWireMessage(ctx)
+	if err != nil {
+		return result.BuildInfo{}, err
+	}
+	return bi.Decode(wm).Result()
+}
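
A usage sketch for BuildInfo, assuming an already-established wiremessage.ReadWriter (obtaining one is driver-internal plumbing) and that result.BuildInfo exposes a Version field:

```go
package example

import (
	"context"
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// printServerVersion runs buildInfo over an already-established connection.
// Any wiremessage.ReadWriter will do; how one is obtained is assumed here.
func printServerVersion(ctx context.Context, rw wiremessage.ReadWriter) error {
	res, err := (&command.BuildInfo{}).RoundTrip(ctx, rw)
	if err != nil {
		return err
	}
	fmt.Println(res.Version) // server version string, e.g. "4.0.5"
	return nil
}
```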
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/command.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/command.go
new file mode 100644
index 0000000..859f797
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/command.go
@@ -0,0 +1,708 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"errors"
+
+	"context"
+
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// WriteBatch represents a single batch for a write operation.
+type WriteBatch struct {
+	*Write
+	numDocs int
+}
+
+// DecodeError attempts to decode the wiremessage as an error
+func DecodeError(wm wiremessage.WireMessage) error {
+	var rdr bson.Raw
+	switch msg := wm.(type) {
+	case wiremessage.Msg:
+		for _, section := range msg.Sections {
+			switch converted := section.(type) {
+			case wiremessage.SectionBody:
+				rdr = converted.Document
+			}
+		}
+	case wiremessage.Reply:
+		if msg.ResponseFlags&wiremessage.QueryFailure != wiremessage.QueryFailure {
+			return nil
+		}
+		rdr = msg.Documents[0]
+	}
+
+	err := rdr.Validate()
+	if err != nil {
+		return nil
+	}
+
+	extractedError := extractError(rdr)
+
+	// If parsed successfully return the error
+	if _, ok := extractedError.(Error); ok {
+		return err
+	}
+
+	return nil
+}
+
+// extractError extracts a command error from a reader, if there is one. It returns the
+// server-reported error, a parsing error, or nil when the response indicates success.
+func extractError(rdr bson.Raw) error {
+	var errmsg, codeName string
+	var code int32
+	var labels []string
+	elems, err := rdr.Elements()
+	if err != nil {
+		return err
+	}
+
+	for _, elem := range elems {
+		switch elem.Key() {
+		case "ok":
+			switch elem.Value().Type {
+			case bson.TypeInt32:
+				if elem.Value().Int32() == 1 {
+					return nil
+				}
+			case bson.TypeInt64:
+				if elem.Value().Int64() == 1 {
+					return nil
+				}
+			case bson.TypeDouble:
+				if elem.Value().Double() == 1 {
+					return nil
+				}
+			}
+		case "errmsg":
+			if str, okay := elem.Value().StringValueOK(); okay {
+				errmsg = str
+			}
+		case "codeName":
+			if str, okay := elem.Value().StringValueOK(); okay {
+				codeName = str
+			}
+		case "code":
+			if c, okay := elem.Value().Int32OK(); okay {
+				code = c
+			}
+		case "errorLabels":
+			if arr, okay := elem.Value().ArrayOK(); okay {
+				elems, err := arr.Elements()
+				if err != nil {
+					continue
+				}
+				for _, elem := range elems {
+					if str, ok := elem.Value().StringValueOK(); ok {
+						labels = append(labels, str)
+					}
+				}
+
+			}
+		}
+	}
+
+	if errmsg == "" {
+		errmsg = "command failed"
+	}
+
+	return Error{
+		Code:    code,
+		Message: errmsg,
+		Name:    codeName,
+		Labels:  labels,
+	}
+}
+
+func responseClusterTime(response bson.Raw) bson.Raw {
+	clusterTime, err := response.LookupErr("$clusterTime")
+	if err != nil {
+		// $clusterTime not included by the server
+		return nil
+	}
+	idx, doc := bsoncore.AppendDocumentStart(nil)
+	doc = bsoncore.AppendHeader(doc, clusterTime.Type, "$clusterTime")
+	doc = append(doc, clusterTime.Value...)
+	doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
+	return doc
+}
+
+func updateClusterTimes(sess *session.Client, clock *session.ClusterClock, response bson.Raw) error {
+	clusterTime := responseClusterTime(response)
+	if clusterTime == nil {
+		return nil
+	}
+
+	if sess != nil {
+		err := sess.AdvanceClusterTime(clusterTime)
+		if err != nil {
+			return err
+		}
+	}
+
+	if clock != nil {
+		clock.AdvanceClusterTime(clusterTime)
+	}
+
+	return nil
+}
+
+func updateOperationTime(sess *session.Client, response bson.Raw) error {
+	if sess == nil {
+		return nil
+	}
+
+	opTimeElem, err := response.LookupErr("operationTime")
+	if err != nil {
+		// operationTime not included by the server
+		return nil
+	}
+
+	t, i := opTimeElem.Timestamp()
+	return sess.AdvanceOperationTime(&primitive.Timestamp{
+		T: t,
+		I: i,
+	})
+}
+
+func marshalCommand(cmd bsonx.Doc) (bson.Raw, error) {
+	if cmd == nil {
+		return bson.Raw{5, 0, 0, 0, 0}, nil
+	}
+
+	return cmd.MarshalBSON()
+}
+
+// addSessionFields adds session-related fields to a BSON doc representing a command.
+func addSessionFields(cmd bsonx.Doc, desc description.SelectedServer, client *session.Client) (bsonx.Doc, error) {
+	if client == nil || !description.SessionsSupported(desc.WireVersion) || desc.SessionTimeoutMinutes == 0 {
+		return cmd, nil
+	}
+
+	if client.Terminated {
+		return cmd, session.ErrSessionEnded
+	}
+
+	// delete any existing lsid element before appending this session's ID
+	if _, err := cmd.LookupElementErr("lsid"); err == nil {
+		cmd = cmd.Delete("lsid")
+	}
+
+	cmd = append(cmd, bsonx.Elem{"lsid", bsonx.Document(client.SessionID)})
+
+	if client.TransactionRunning() ||
+		client.RetryingCommit {
+		cmd = addTransaction(cmd, client)
+	}
+
+	client.ApplyCommand() // advance the state machine based on a command executing
+
+	return cmd, nil
+}
+
+// if in a transaction, add the transaction fields
+func addTransaction(cmd bsonx.Doc, client *session.Client) bsonx.Doc {
+	cmd = append(cmd, bsonx.Elem{"txnNumber", bsonx.Int64(client.TxnNumber)})
+	if client.TransactionStarting() {
+		// When starting transaction, always transition to the next state, even on error
+		cmd = append(cmd, bsonx.Elem{"startTransaction", bsonx.Boolean(true)})
+	}
+	return append(cmd, bsonx.Elem{"autocommit", bsonx.Boolean(false)})
+}
+
+func addClusterTime(cmd bsonx.Doc, desc description.SelectedServer, sess *session.Client, clock *session.ClusterClock) bsonx.Doc {
+	if (clock == nil && sess == nil) || !description.SessionsSupported(desc.WireVersion) {
+		return cmd
+	}
+
+	var clusterTime bson.Raw
+	if clock != nil {
+		clusterTime = clock.GetClusterTime()
+	}
+
+	if sess != nil {
+		if clusterTime == nil {
+			clusterTime = sess.ClusterTime
+		} else {
+			clusterTime = session.MaxClusterTime(clusterTime, sess.ClusterTime)
+		}
+	}
+
+	if clusterTime == nil {
+		return cmd
+	}
+
+	d, err := bsonx.ReadDoc(clusterTime)
+	if err != nil {
+		return cmd // broken clusterTime
+	}
+
+	cmd = cmd.Delete("$clusterTime")
+
+	return append(cmd, d...)
+}
+
+// add a read concern to a BSON doc representing a command
+func addReadConcern(cmd bsonx.Doc, desc description.SelectedServer, rc *readconcern.ReadConcern, sess *session.Client) (bsonx.Doc, error) {
+	// Starting transaction's read concern overrides all others
+	if sess != nil && sess.TransactionStarting() && sess.CurrentRc != nil {
+		rc = sess.CurrentRc
+	}
+
+	// start transaction must append afterclustertime IF causally consistent and operation time exists
+	if rc == nil && sess != nil && sess.TransactionStarting() && sess.Consistent && sess.OperationTime != nil {
+		rc = readconcern.New()
+	}
+
+	if rc == nil {
+		return cmd, nil
+	}
+
+	t, data, err := rc.MarshalBSONValue()
+	if err != nil {
+		return cmd, err
+	}
+
+	var rcDoc bsonx.Doc
+	err = rcDoc.UnmarshalBSONValue(t, data)
+	if err != nil {
+		return cmd, err
+	}
+	if description.SessionsSupported(desc.WireVersion) && sess != nil && sess.Consistent && sess.OperationTime != nil {
+		rcDoc = append(rcDoc, bsonx.Elem{"afterClusterTime", bsonx.Timestamp(sess.OperationTime.T, sess.OperationTime.I)})
+	}
+
+	cmd = cmd.Delete("readConcern")
+
+	if len(rcDoc) != 0 {
+		cmd = append(cmd, bsonx.Elem{"readConcern", bsonx.Document(rcDoc)})
+	}
+	return cmd, nil
+}
+
+// add a write concern to a BSON doc representing a command
+func addWriteConcern(cmd bsonx.Doc, wc *writeconcern.WriteConcern) (bsonx.Doc, error) {
+	if wc == nil {
+		return cmd, nil
+	}
+
+	t, data, err := wc.MarshalBSONValue()
+	if err != nil {
+		if err == writeconcern.ErrEmptyWriteConcern {
+			return cmd, nil
+		}
+		return cmd, err
+	}
+
+	var xval bsonx.Val
+	err = xval.UnmarshalBSONValue(t, data)
+	if err != nil {
+		return cmd, err
+	}
+
+	// delete if doc already has write concern
+	cmd = cmd.Delete("writeConcern")
+
+	return append(cmd, bsonx.Elem{Key: "writeConcern", Value: xval}), nil
+}
+
+// Get the error labels from a command response
+func getErrorLabels(rdr *bson.Raw) ([]string, error) {
+	var labels []string
+	labelsElem, err := rdr.LookupErr("errorLabels")
+	if err == bsoncore.ErrElementNotFound {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	if labelsElem.Type == bsontype.Array {
+		labelsIt, err := labelsElem.Array().Elements()
+		if err != nil {
+			return nil, err
+		}
+		for _, elem := range labelsIt {
+			labels = append(labels, elem.Value().StringValue())
+		}
+	}
+	return labels, nil
+}
+
+// Remove command arguments for insert, update, and delete commands from the BSON document so they can be encoded
+// as a Section 1 payload in OP_MSG
+func opmsgRemoveArray(cmd bsonx.Doc) (bsonx.Doc, bsonx.Arr, string) {
+	var array bsonx.Arr
+	var id string
+
+	keys := []string{"documents", "updates", "deletes"}
+
+	for _, key := range keys {
+		val, err := cmd.LookupErr(key)
+		if err != nil {
+			continue
+		}
+
+		array = val.Array()
+		cmd = cmd.Delete(key)
+		id = key
+		break
+	}
+
+	return cmd, array, id
+}
+
+// Add the $db and $readPreference keys to the command
+// If the command has no read preference, pass nil for rpDoc
+func opmsgAddGlobals(cmd bsonx.Doc, dbName string, rpDoc bsonx.Doc) (bson.Raw, error) {
+	cmd = append(cmd, bsonx.Elem{"$db", bsonx.String(dbName)})
+	if rpDoc != nil {
+		cmd = append(cmd, bsonx.Elem{"$readPreference", bsonx.Document(rpDoc)})
+	}
+
+	return cmd.MarshalBSON() // bsonx.Doc.MarshalBSON never returns an error.
+}
+
+func opmsgCreateDocSequence(arr bsonx.Arr, identifier string) (wiremessage.SectionDocumentSequence, error) {
+	docSequence := wiremessage.SectionDocumentSequence{
+		PayloadType: wiremessage.DocumentSequence,
+		Identifier:  identifier,
+		Documents:   make([]bson.Raw, 0, len(arr)),
+	}
+
+	for _, val := range arr {
+		d, _ := val.Document().MarshalBSON()
+		docSequence.Documents = append(docSequence.Documents, d)
+	}
+
+	docSequence.Size = int32(docSequence.PayloadLen())
+	return docSequence, nil
+}
+
+func splitBatches(docs []bsonx.Doc, maxCount, targetBatchSize int) ([][]bsonx.Doc, error) {
+	batches := [][]bsonx.Doc{}
+
+	if targetBatchSize > reservedCommandBufferBytes {
+		targetBatchSize -= reservedCommandBufferBytes
+	}
+
+	if maxCount <= 0 {
+		maxCount = 1
+	}
+
+	startAt := 0
+splitInserts:
+	for {
+		size := 0
+		batch := []bsonx.Doc{}
+	assembleBatch:
+		for idx := startAt; idx < len(docs); idx++ {
+			raw, _ := docs[idx].MarshalBSON()
+
+			if len(raw) > targetBatchSize {
+				return nil, ErrDocumentTooLarge
+			}
+			if size+len(raw) > targetBatchSize {
+				break assembleBatch
+			}
+
+			size += len(raw)
+			batch = append(batch, docs[idx])
+			startAt++
+			if len(batch) == maxCount {
+				break assembleBatch
+			}
+		}
+		batches = append(batches, batch)
+		if startAt == len(docs) {
+			break splitInserts
+		}
+	}
+
+	return batches, nil
+}
+
+func encodeBatch(
+	docs []bsonx.Doc,
+	opts []bsonx.Elem,
+	cmdKind WriteCommandKind,
+	collName string,
+) (bsonx.Doc, error) {
+	var cmdName string
+	var docString string
+
+	switch cmdKind {
+	case InsertCommand:
+		cmdName = "insert"
+		docString = "documents"
+	case UpdateCommand:
+		cmdName = "update"
+		docString = "updates"
+	case DeleteCommand:
+		cmdName = "delete"
+		docString = "deletes"
+	}
+
+	cmd := bsonx.Doc{{cmdName, bsonx.String(collName)}}
+
+	vals := make(bsonx.Arr, 0, len(docs))
+	for _, doc := range docs {
+		vals = append(vals, bsonx.Document(doc))
+	}
+	cmd = append(cmd, bsonx.Elem{docString, bsonx.Array(vals)})
+	cmd = append(cmd, opts...)
+
+	return cmd, nil
+}
+
+// converts batches of Write Commands to wire messages
+func batchesToWireMessage(batches []*WriteBatch, desc description.SelectedServer) ([]wiremessage.WireMessage, error) {
+	wms := make([]wiremessage.WireMessage, 0, len(batches))
+	for _, cmd := range batches {
+		wm, err := cmd.Encode(desc)
+		if err != nil {
+			return nil, err
+		}
+
+		wms = append(wms, wm)
+	}
+
+	return wms, nil
+}
+
+// roundTripBatches round-trips the write batches, returning the accumulated result struct
+// (as an interface), the batches that were not round-tripped, and any error.
+func roundTripBatches(
+	ctx context.Context,
+	desc description.SelectedServer,
+	rw wiremessage.ReadWriter,
+	batches []*WriteBatch,
+	continueOnError bool,
+	sess *session.Client,
+	cmdKind WriteCommandKind,
+) (interface{}, []*WriteBatch, error) {
+	var res interface{}
+	var upsertIndex int64 // the operation index for the upserted IDs map
+
+	// hold onto txnNumber, reset it when loop exits to ensure reuse of same
+	// transaction number if retry is needed
+	var txnNumber int64
+	if sess != nil && sess.RetryWrite {
+		txnNumber = sess.TxnNumber
+	}
+	for j, cmd := range batches {
+		rdr, err := cmd.RoundTrip(ctx, desc, rw)
+		if err != nil {
+			if sess != nil && sess.RetryWrite {
+				sess.TxnNumber = txnNumber + int64(j)
+			}
+			return res, batches, err
+		}
+
+		// TODO can probably DRY up this code
+		switch cmdKind {
+		case InsertCommand:
+			if res == nil {
+				res = result.Insert{}
+			}
+
+			conv, _ := res.(result.Insert)
+			insertCmd := &Insert{}
+			r, err := insertCmd.decode(desc, rdr).Result()
+			if err != nil {
+				return res, batches, err
+			}
+
+			conv.WriteErrors = append(conv.WriteErrors, r.WriteErrors...)
+
+			if r.WriteConcernError != nil {
+				conv.WriteConcernError = r.WriteConcernError
+				if sess != nil && sess.RetryWrite {
+					sess.TxnNumber = txnNumber
+					return conv, batches, nil // report writeconcernerror for retry
+				}
+			}
+
+			conv.N += r.N
+
+			if !continueOnError && len(conv.WriteErrors) > 0 {
+				return conv, batches, nil
+			}
+
+			res = conv
+		case UpdateCommand:
+			if res == nil {
+				res = result.Update{}
+			}
+
+			conv, _ := res.(result.Update)
+			updateCmd := &Update{}
+			r, err := updateCmd.decode(desc, rdr).Result()
+			if err != nil {
+				return conv, batches, err
+			}
+
+			conv.WriteErrors = append(conv.WriteErrors, r.WriteErrors...)
+
+			if r.WriteConcernError != nil {
+				conv.WriteConcernError = r.WriteConcernError
+				if sess != nil && sess.RetryWrite {
+					sess.TxnNumber = txnNumber
+					return conv, batches, nil // report writeconcernerror for retry
+				}
+			}
+
+			conv.MatchedCount += r.MatchedCount
+			conv.ModifiedCount += r.ModifiedCount
+			for _, upsert := range r.Upserted {
+				conv.Upserted = append(conv.Upserted, result.Upsert{
+					Index: upsert.Index + upsertIndex,
+					ID:    upsert.ID,
+				})
+			}
+
+			if !continueOnError && len(conv.WriteErrors) > 0 {
+				return conv, batches, nil
+			}
+
+			res = conv
+			upsertIndex += int64(cmd.numDocs)
+		case DeleteCommand:
+			if res == nil {
+				res = result.Delete{}
+			}
+
+			conv, _ := res.(result.Delete)
+			deleteCmd := &Delete{}
+			r, err := deleteCmd.decode(desc, rdr).Result()
+			if err != nil {
+				return conv, batches, err
+			}
+
+			conv.WriteErrors = append(conv.WriteErrors, r.WriteErrors...)
+
+			if r.WriteConcernError != nil {
+				conv.WriteConcernError = r.WriteConcernError
+				if sess != nil && sess.RetryWrite {
+					sess.TxnNumber = txnNumber
+					return conv, batches, nil // report writeconcernerror for retry
+				}
+			}
+
+			conv.N += r.N
+
+			if !continueOnError && len(conv.WriteErrors) > 0 {
+				return conv, batches, nil
+			}
+
+			res = conv
+		}
+
+		// Increment txnNumber for each batch
+		if sess != nil && sess.RetryWrite {
+			sess.IncrementTxnNumber()
+			batches = batches[1:] // the batch round-tripped successfully, so remove it from the slice
+		}
+	}
+
+	if sess != nil && sess.RetryWrite {
+		// if retryable write succeeded, transaction number will be incremented one extra time,
+		// so we decrement it here
+		sess.TxnNumber--
+	}
+
+	return res, batches, nil
+}
+
+// get the firstBatch, cursor ID, and namespace from a bson.Raw
+func getCursorValues(result bson.Raw) ([]bson.RawValue, Namespace, int64, error) {
+	cur, err := result.LookupErr("cursor")
+	if err != nil {
+		return nil, Namespace{}, 0, err
+	}
+	if cur.Type != bson.TypeEmbeddedDocument {
+		return nil, Namespace{}, 0, fmt.Errorf("cursor should be an embedded document but it is a BSON %s", cur.Type)
+	}
+
+	elems, err := cur.Document().Elements()
+	if err != nil {
+		return nil, Namespace{}, 0, err
+	}
+
+	var ok bool
+	var arr bson.Raw
+	var namespace Namespace
+	var cursorID int64
+
+	for _, elem := range elems {
+		switch elem.Key() {
+		case "firstBatch":
+			arr, ok = elem.Value().ArrayOK()
+			if !ok {
+				return nil, Namespace{}, 0, fmt.Errorf("firstBatch should be an array but it is a BSON %s", elem.Value().Type)
+			}
+		case "ns":
+			if elem.Value().Type != bson.TypeString {
+				return nil, Namespace{}, 0, fmt.Errorf("namespace should be a string but it is a BSON %s", elem.Value().Type)
+			}
+			namespace = ParseNamespace(elem.Value().StringValue())
+			err = namespace.Validate()
+			if err != nil {
+				return nil, Namespace{}, 0, err
+			}
+		case "id":
+			cursorID, ok = elem.Value().Int64OK()
+			if !ok {
+				return nil, Namespace{}, 0, fmt.Errorf("id should be an int64 but it is a BSON %s", elem.Value().Type)
+			}
+		}
+	}
+
+	vals, err := arr.Values()
+	if err != nil {
+		return nil, Namespace{}, 0, err
+	}
+
+	return vals, namespace, cursorID, nil
+}
+
+func getBatchSize(opts []bsonx.Elem) int32 {
+	for _, opt := range opts {
+		if opt.Key == "batchSize" {
+			return opt.Value.Int32()
+		}
+	}
+
+	return 0
+}
+
+// ErrUnacknowledgedWrite is returned from functions that have an unacknowledged
+// write concern.
+var ErrUnacknowledgedWrite = errors.New("unacknowledged write")
+
+// WriteCommandKind is the type of command represented by a Write
+type WriteCommandKind int8
+
+// These constants represent the valid types of write commands.
+const (
+	InsertCommand WriteCommandKind = iota
+	UpdateCommand
+	DeleteCommand
+)
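
Because extractError is unexported, the following illustrative test would have to live inside this package (it is not part of the vendored code); it shows how an `{ok: 0}` response is mapped onto the Error type, assuming this driver version's bson.D literal form:

```go
package command

import (
	"testing"

	"github.com/mongodb/mongo-go-driver/bson"
)

// TestExtractErrorSketch demonstrates that a failed server response carrying
// code, codeName, and errmsg is decoded into a command.Error.
func TestExtractErrorSketch(t *testing.T) {
	raw, err := bson.Marshal(bson.D{
		{"ok", int32(0)},
		{"errmsg", "duplicate key"},
		{"code", int32(11000)},
		{"codeName", "DuplicateKey"},
	})
	if err != nil {
		t.Fatal(err)
	}

	cmdErr, ok := extractError(bson.Raw(raw)).(Error)
	if !ok || cmdErr.Code != 11000 || cmdErr.Name != "DuplicateKey" {
		t.Fatalf("unexpected result: %v", cmdErr)
	}
}
```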
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/commit_transaction.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/commit_transaction.go
new file mode 100644
index 0000000..e04bd6f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/commit_transaction.go
@@ -0,0 +1,89 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// CommitTransaction represents the commitTransaction() command
+type CommitTransaction struct {
+	Session *session.Client
+	err     error
+	result  result.TransactionResult
+}
+
+// Encode will encode this command into a wiremessage for the given server description.
+func (ct *CommitTransaction) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd := ct.encode(desc)
+	return cmd.Encode(desc)
+}
+
+func (ct *CommitTransaction) encode(desc description.SelectedServer) *Write {
+	cmd := bsonx.Doc{{"commitTransaction", bsonx.Int32(1)}}
+	return &Write{
+		DB:           "admin",
+		Command:      cmd,
+		Session:      ct.Session,
+		WriteConcern: ct.Session.CurrentWc,
+	}
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding are deferred until
+// either the Result or Err methods are called.
+func (ct *CommitTransaction) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *CommitTransaction {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		ct.err = err
+		return ct
+	}
+
+	return ct.decode(desc, rdr)
+}
+
+func (ct *CommitTransaction) decode(desc description.SelectedServer, rdr bson.Raw) *CommitTransaction {
+	ct.err = bson.Unmarshal(rdr, &ct.result)
+	if ct.err == nil && ct.result.WriteConcernError != nil {
+		ct.err = Error{
+			Code:    int32(ct.result.WriteConcernError.Code),
+			Message: ct.result.WriteConcernError.ErrMsg,
+		}
+	}
+	return ct
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (ct *CommitTransaction) Result() (result.TransactionResult, error) {
+	if ct.err != nil {
+		return result.TransactionResult{}, ct.err
+	}
+
+	return ct.result, nil
+}
+
+// Err returns the error set on this command
+func (ct *CommitTransaction) Err() error {
+	return ct.err
+}
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter
+func (ct *CommitTransaction) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.TransactionResult, error) {
+	cmd := ct.encode(desc)
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.TransactionResult{}, err
+	}
+
+	return ct.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/count.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/count.go
new file mode 100644
index 0000000..419a78b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/count.go
@@ -0,0 +1,128 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Count represents the count command.
+//
+// The count command counts how many documents in a collection match the given query.
+type Count struct {
+	NS          Namespace
+	Query       bsonx.Doc
+	Opts        []bsonx.Elem
+	ReadPref    *readpref.ReadPref
+	ReadConcern *readconcern.ReadConcern
+	Clock       *session.ClusterClock
+	Session     *session.Client
+
+	result int64
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (c *Count) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := c.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+func (c *Count) encode(desc description.SelectedServer) (*Read, error) {
+	if err := c.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	command := bsonx.Doc{{"count", bsonx.String(c.NS.Collection)}, {"query", bsonx.Document(c.Query)}}
+	command = append(command, c.Opts...)
+
+	return &Read{
+		Clock:       c.Clock,
+		DB:          c.NS.DB,
+		ReadPref:    c.ReadPref,
+		Command:     command,
+		ReadConcern: c.ReadConcern,
+		Session:     c.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (c *Count) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Count {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		c.err = err
+		return c
+	}
+
+	return c.decode(desc, rdr)
+}
+
+func (c *Count) decode(desc description.SelectedServer, rdr bson.Raw) *Count {
+	val, err := rdr.LookupErr("n")
+	switch {
+	case err == bsoncore.ErrElementNotFound:
+		c.err = errors.New("invalid response from server, no 'n' field")
+		return c
+	case err != nil:
+		c.err = err
+		return c
+	}
+
+	switch val.Type {
+	case bson.TypeInt32:
+		c.result = int64(val.Int32())
+	case bson.TypeInt64:
+		c.result = val.Int64()
+	case bson.TypeDouble:
+		c.result = int64(val.Double())
+	default:
+		c.err = errors.New("invalid response from server, value field is not a number")
+	}
+
+	return c
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (c *Count) Result() (int64, error) {
+	if c.err != nil {
+		return 0, c.err
+	}
+	return c.result, nil
+}
+
+// Err returns the error set on this command.
+func (c *Count) Err() error { return c.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (c *Count) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (int64, error) {
+	cmd, err := c.encode(desc)
+	if err != nil {
+		return 0, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return 0, err
+	}
+
+	return c.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/count_documents.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/count_documents.go
new file mode 100644
index 0000000..a9a27f1
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/count_documents.go
@@ -0,0 +1,122 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// CountDocuments represents the countDocuments command.
+//
+// The countDocuments command counts how many documents in a collection match the given query.
+type CountDocuments struct {
+	NS          Namespace
+	Pipeline    bsonx.Arr
+	Opts        []bsonx.Elem
+	ReadPref    *readpref.ReadPref
+	ReadConcern *readconcern.ReadConcern
+	Clock       *session.ClusterClock
+	Session     *session.Client
+
+	result int64
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (c *CountDocuments) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	if err := c.NS.Validate(); err != nil {
+		return nil, err
+	}
+	command := bsonx.Doc{{"aggregate", bsonx.String(c.NS.Collection)}, {"pipeline", bsonx.Array(c.Pipeline)}}
+
+	command = append(command, bsonx.Elem{"cursor", bsonx.Document(bsonx.Doc{})})
+	command = append(command, c.Opts...)
+
+	return (&Read{DB: c.NS.DB, ReadPref: c.ReadPref, Command: command}).Encode(desc)
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (c *CountDocuments) Decode(ctx context.Context, desc description.SelectedServer, wm wiremessage.WireMessage) *CountDocuments {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		c.err = err
+		return c
+	}
+
+	cursor, err := rdr.LookupErr("cursor")
+	if err != nil || cursor.Type != bsontype.EmbeddedDocument {
+		c.err = errors.New("Invalid response from server, no 'cursor' field")
+		return c
+	}
+	batch, err := cursor.Document().LookupErr("firstBatch")
+	if err != nil || batch.Type != bsontype.Array {
+		c.err = errors.New("Invalid response from server, no 'firstBatch' field")
+		return c
+	}
+
+	elem, err := batch.Array().IndexErr(0)
+	if err != nil || elem.Value().Type != bsontype.EmbeddedDocument {
+		c.result = 0
+		return c
+	}
+
+	val, err := elem.Value().Document().LookupErr("n")
+	if err != nil {
+		c.err = errors.New("Invalid response from server, no 'n' field")
+		return c
+	}
+
+	switch val.Type {
+	case bsontype.Int32:
+		c.result = int64(val.Int32())
+	case bsontype.Int64:
+		c.result = val.Int64()
+	default:
+		c.err = errors.New("Invalid response from server, value field is not a number")
+	}
+
+	return c
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (c *CountDocuments) Result() (int64, error) {
+	if c.err != nil {
+		return 0, c.err
+	}
+	return c.result, nil
+}
+
+// Err returns the error set on this command.
+func (c *CountDocuments) Err() error { return c.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (c *CountDocuments) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (int64, error) {
+	wm, err := c.Encode(desc)
+	if err != nil {
+		return 0, err
+	}
+
+	err = rw.WriteWireMessage(ctx, wm)
+	if err != nil {
+		return 0, err
+	}
+	wm, err = rw.ReadWireMessage(ctx)
+	if err != nil {
+		return 0, err
+	}
+	return c.Decode(ctx, desc, wm).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/create_indexes.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/create_indexes.go
new file mode 100644
index 0000000..e2b33c3
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/create_indexes.go
@@ -0,0 +1,106 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// CreateIndexes represents the createIndexes command.
+//
+// The createIndexes command creates indexes for a namespace.
+type CreateIndexes struct {
+	NS           Namespace
+	Indexes      bsonx.Arr
+	Opts         []bsonx.Elem
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result result.CreateIndexes
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (ci *CreateIndexes) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := ci.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+func (ci *CreateIndexes) encode(desc description.SelectedServer) (*Write, error) {
+	cmd := bsonx.Doc{
+		{"createIndexes", bsonx.String(ci.NS.Collection)},
+		{"indexes", bsonx.Array(ci.Indexes)},
+	}
+	cmd = append(cmd, ci.Opts...)
+
+	write := &Write{
+		Clock:   ci.Clock,
+		DB:      ci.NS.DB,
+		Command: cmd,
+		Session: ci.Session,
+	}
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 5 {
+		write.WriteConcern = ci.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (ci *CreateIndexes) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *CreateIndexes {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		ci.err = err
+		return ci
+	}
+
+	return ci.decode(desc, rdr)
+}
+
+func (ci *CreateIndexes) decode(desc description.SelectedServer, rdr bson.Raw) *CreateIndexes {
+	ci.err = bson.Unmarshal(rdr, &ci.result)
+	return ci
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (ci *CreateIndexes) Result() (result.CreateIndexes, error) {
+	if ci.err != nil {
+		return result.CreateIndexes{}, ci.err
+	}
+	return ci.result, nil
+}
+
+// Err returns the error set on this command.
+func (ci *CreateIndexes) Err() error { return ci.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (ci *CreateIndexes) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.CreateIndexes, error) {
+	cmd, err := ci.encode(desc)
+	if err != nil {
+		return result.CreateIndexes{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.CreateIndexes{}, err
+	}
+
+	return ci.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/delete.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/delete.go
new file mode 100644
index 0000000..de20dd7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/delete.go
@@ -0,0 +1,154 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Delete represents the delete command.
+//
+// The delete command executes a delete with a given set of delete documents
+// and options.
+type Delete struct {
+	ContinueOnError bool
+	NS              Namespace
+	Deletes         []bsonx.Doc
+	Opts            []bsonx.Elem
+	WriteConcern    *writeconcern.WriteConcern
+	Clock           *session.ClusterClock
+	Session         *session.Client
+
+	batches []*WriteBatch
+	result  result.Delete
+	err     error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (d *Delete) Encode(desc description.SelectedServer) ([]wiremessage.WireMessage, error) {
+	err := d.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return batchesToWireMessage(d.batches, desc)
+}
+
+func (d *Delete) encode(desc description.SelectedServer) error {
+	batches, err := splitBatches(d.Deletes, int(desc.MaxBatchCount), int(desc.MaxDocumentSize))
+	if err != nil {
+		return err
+	}
+
+	for _, docs := range batches {
+		cmd, err := d.encodeBatch(docs, desc)
+		if err != nil {
+			return err
+		}
+
+		d.batches = append(d.batches, cmd)
+	}
+
+	return nil
+}
+
+func (d *Delete) encodeBatch(docs []bsonx.Doc, desc description.SelectedServer) (*WriteBatch, error) {
+	copyDocs := make([]bsonx.Doc, 0, len(docs))
+	for _, doc := range docs {
+		copyDocs = append(copyDocs, doc.Copy())
+	}
+
+	var options []bsonx.Elem
+	for _, opt := range d.Opts {
+		if opt.Key == "collation" {
+			for idx := range copyDocs {
+				copyDocs[idx] = append(copyDocs[idx], opt)
+			}
+		} else {
+			options = append(options, opt)
+		}
+	}
+
+	command, err := encodeBatch(copyDocs, options, DeleteCommand, d.NS.Collection)
+	if err != nil {
+		return nil, err
+	}
+
+	return &WriteBatch{
+		&Write{
+			Clock:        d.Clock,
+			DB:           d.NS.DB,
+			Command:      command,
+			WriteConcern: d.WriteConcern,
+			Session:      d.Session,
+		},
+		len(docs),
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (d *Delete) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Delete {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		d.err = err
+		return d
+	}
+
+	return d.decode(desc, rdr)
+}
+
+func (d *Delete) decode(desc description.SelectedServer, rdr bson.Raw) *Delete {
+	d.err = bson.Unmarshal(rdr, &d.result)
+	return d
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (d *Delete) Result() (result.Delete, error) {
+	if d.err != nil {
+		return result.Delete{}, d.err
+	}
+	return d.result, nil
+}
+
+// Err returns the error set on this command.
+func (d *Delete) Err() error { return d.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (d *Delete) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.Delete, error) {
+	if d.batches == nil {
+		if err := d.encode(desc); err != nil {
+			return result.Delete{}, err
+		}
+	}
+
+	r, batches, err := roundTripBatches(
+		ctx, desc, rw,
+		d.batches,
+		d.ContinueOnError,
+		d.Session,
+		DeleteCommand,
+	)
+
+	if batches != nil {
+		d.batches = batches
+	}
+
+	if err != nil {
+		return result.Delete{}, err
+	}
+
+	return r.(result.Delete), nil
+}
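
A sketch of issuing a delete through this command type; each element of Deletes is a wire-format delete statement of the shape `{q: <filter>, limit: <n>}`, and desc/rw are assumed to come from server selection:

```go
package example

import (
	"context"

	"github.com/mongodb/mongo-go-driver/x/bsonx"
	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/result"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// deleteStale issues a single delete statement through the command layer.
func deleteStale(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.Delete, error) {
	del := &command.Delete{
		NS: command.Namespace{DB: "db", Collection: "coll"},
		Deletes: []bsonx.Doc{{
			{"q", bsonx.Document(bsonx.Doc{{"status", bsonx.String("stale")}})},
			{"limit", bsonx.Int32(0)}, // 0 means delete all matching documents
		}},
	}
	return del.RoundTrip(ctx, desc, rw)
}
```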
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/distinct.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/distinct.go
new file mode 100644
index 0000000..ba793e1
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/distinct.go
@@ -0,0 +1,115 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Distinct represents the distinct command.
+//
+// The distinct command returns the distinct values for a specified field
+// across a single collection.
+type Distinct struct {
+	NS          Namespace
+	Field       string
+	Query       bsonx.Doc
+	Opts        []bsonx.Elem
+	ReadPref    *readpref.ReadPref
+	ReadConcern *readconcern.ReadConcern
+	Clock       *session.ClusterClock
+	Session     *session.Client
+
+	result result.Distinct
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (d *Distinct) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := d.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (d *Distinct) encode(desc description.SelectedServer) (*Read, error) {
+	if err := d.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	command := bsonx.Doc{{"distinct", bsonx.String(d.NS.Collection)}, {"key", bsonx.String(d.Field)}}
+
+	if d.Query != nil {
+		command = append(command, bsonx.Elem{"query", bsonx.Document(d.Query)})
+	}
+
+	command = append(command, d.Opts...)
+
+	return &Read{
+		Clock:       d.Clock,
+		DB:          d.NS.DB,
+		ReadPref:    d.ReadPref,
+		Command:     command,
+		ReadConcern: d.ReadConcern,
+		Session:     d.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (d *Distinct) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Distinct {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		d.err = err
+		return d
+	}
+
+	return d.decode(desc, rdr)
+}
+
+func (d *Distinct) decode(desc description.SelectedServer, rdr bson.Raw) *Distinct {
+	d.err = bson.Unmarshal(rdr, &d.result)
+	return d
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (d *Distinct) Result() (result.Distinct, error) {
+	if d.err != nil {
+		return result.Distinct{}, d.err
+	}
+	return d.result, nil
+}
+
+// Err returns the error set on this command.
+func (d *Distinct) Err() error { return d.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (d *Distinct) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.Distinct, error) {
+	cmd, err := d.encode(desc)
+	if err != nil {
+		return result.Distinct{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.Distinct{}, err
+	}
+
+	return d.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/doc.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/doc.go
new file mode 100644
index 0000000..ea7a308
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/doc.go
@@ -0,0 +1,16 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package command contains abstractions for operations that can be performed against a MongoDB
+// deployment. The types in this package are meant to provide a general set of commands that a
+// user can run against a MongoDB database without knowing the version of the database.
+//
+// Each type consists of two levels of interaction. The lowest level are the Encode and Decode
+// methods. These are meant to be symmetric eventually, but currently only support the driver
+// side of commands. The higher level is the RoundTrip method. This only makes sense from the
+// driver side of commands and this method handles the encoding of the request and decoding of
+// the response using the given wiremessage.ReadWriter.
+package command
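
A sketch of the two levels of interaction described above, using the Count command; desc and rw are assumed to come from the driver's server-selection and connection layers:

```go
package example

import (
	"context"

	"github.com/mongodb/mongo-go-driver/x/bsonx"
	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// countTwoWays runs the same count at both levels of the API.
func countTwoWays(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (int64, error) {
	cmd := &command.Count{
		NS:    command.Namespace{DB: "db", Collection: "coll"},
		Query: bsonx.Doc{},
	}

	// Higher level: RoundTrip encodes, writes, reads, and decodes in one call.
	if _, err := cmd.RoundTrip(ctx, desc, rw); err != nil {
		return 0, err
	}

	// Lower level: the same exchange spelled out with Encode and Decode.
	wm, err := cmd.Encode(desc)
	if err != nil {
		return 0, err
	}
	if err = rw.WriteWireMessage(ctx, wm); err != nil {
		return 0, err
	}
	if wm, err = rw.ReadWireMessage(ctx); err != nil {
		return 0, err
	}
	return cmd.Decode(desc, wm).Result()
}
```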
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_collection.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_collection.go
new file mode 100644
index 0000000..c067ccc
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_collection.go
@@ -0,0 +1,101 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// DropCollection represents the drop command.
+//
+// The drop command removes a collection from a database.
+type DropCollection struct {
+	DB           string
+	Collection   string
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (dc *DropCollection) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := dc.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+func (dc *DropCollection) encode(desc description.SelectedServer) (*Write, error) {
+	cmd := bsonx.Doc{{"drop", bsonx.String(dc.Collection)}}
+
+	write := &Write{
+		Clock:   dc.Clock,
+		DB:      dc.DB,
+		Command: cmd,
+		Session: dc.Session,
+	}
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 5 {
+		write.WriteConcern = dc.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (dc *DropCollection) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *DropCollection {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		dc.err = err
+		return dc
+	}
+
+	return dc.decode(desc, rdr)
+}
+
+func (dc *DropCollection) decode(desc description.SelectedServer, rdr bson.Raw) *DropCollection {
+	dc.result = rdr
+	return dc
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (dc *DropCollection) Result() (bson.Raw, error) {
+	if dc.err != nil {
+		return nil, dc.err
+	}
+
+	return dc.result, nil
+}
+
+// Err returns the error set on this command.
+func (dc *DropCollection) Err() error { return dc.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (dc *DropCollection) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := dc.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+
+	return dc.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_database.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_database.go
new file mode 100644
index 0000000..e3b3f7c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_database.go
@@ -0,0 +1,100 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// DropDatabase represents the DropDatabase command.
+//
+// The dropDatabase command drops a database.
+type DropDatabase struct {
+	DB           string
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (dd *DropDatabase) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := dd.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+func (dd *DropDatabase) encode(desc description.SelectedServer) (*Write, error) {
+	cmd := bsonx.Doc{{"dropDatabase", bsonx.Int32(1)}}
+
+	write := &Write{
+		Clock:   dd.Clock,
+		DB:      dd.DB,
+		Command: cmd,
+		Session: dd.Session,
+	}
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 5 {
+		write.WriteConcern = dd.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (dd *DropDatabase) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *DropDatabase {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		dd.err = err
+		return dd
+	}
+
+	return dd.decode(desc, rdr)
+}
+
+func (dd *DropDatabase) decode(desc description.SelectedServer, rdr bson.Raw) *DropDatabase {
+	dd.result = rdr
+	return dd
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (dd *DropDatabase) Result() (bson.Raw, error) {
+	if dd.err != nil {
+		return nil, dd.err
+	}
+
+	return dd.result, nil
+}
+
+// Err returns the error set on this command.
+func (dd *DropDatabase) Err() error { return dd.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (dd *DropDatabase) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := dd.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+
+	return dd.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_indexes.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_indexes.go
new file mode 100644
index 0000000..bf6a7d6
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_indexes.go
@@ -0,0 +1,106 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// DropIndexes represents the dropIndexes command.
+//
+// The dropIndexes command drops indexes for a namespace.
+type DropIndexes struct {
+	NS           Namespace
+	Index        string
+	Opts         []bsonx.Elem
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (di *DropIndexes) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := di.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+func (di *DropIndexes) encode(desc description.SelectedServer) (*Write, error) {
+	cmd := bsonx.Doc{
+		{"dropIndexes", bsonx.String(di.NS.Collection)},
+		{"index", bsonx.String(di.Index)},
+	}
+	cmd = append(cmd, di.Opts...)
+
+	write := &Write{
+		Clock:   di.Clock,
+		DB:      di.NS.DB,
+		Command: cmd,
+		Session: di.Session,
+	}
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 5 {
+		write.WriteConcern = di.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (di *DropIndexes) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *DropIndexes {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		di.err = err
+		return di
+	}
+
+	return di.decode(desc, rdr)
+}
+
+func (di *DropIndexes) decode(desc description.SelectedServer, rdr bson.Raw) *DropIndexes {
+	di.result = rdr
+	return di
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (di *DropIndexes) Result() (bson.Raw, error) {
+	if di.err != nil {
+		return nil, di.err
+	}
+
+	return di.result, nil
+}
+
+// Err returns the error set on this command.
+func (di *DropIndexes) Err() error { return di.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (di *DropIndexes) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := di.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	di.result, err = cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+
+	return di.Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/end_sessions.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/end_sessions.go
new file mode 100644
index 0000000..11ac140
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/end_sessions.go
@@ -0,0 +1,138 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// must be sent to admin db
+// { endSessions: [ {id: uuid}, ... ], $clusterTime: ... }
+// only send $clusterTime when gossiping the cluster time
+// send 10k sessions at a time
+
+// EndSessions represents an endSessions command.
+type EndSessions struct {
+	Clock      *session.ClusterClock
+	SessionIDs []bsonx.Doc
+
+	results []result.EndSessions
+	errors  []error
+}
+
+// BatchSize is the max number of sessions to be included in 1 endSessions command.
+const BatchSize = 10000
+
+func (es *EndSessions) split() [][]bsonx.Doc {
+	batches := [][]bsonx.Doc{}
+	docIndex := 0
+	totalNumDocs := len(es.SessionIDs)
+
+createBatches:
+	for {
+		batch := []bsonx.Doc{}
+
+		for i := 0; i < BatchSize; i++ {
+			if docIndex == totalNumDocs {
+				break createBatches
+			}
+
+			batch = append(batch, es.SessionIDs[docIndex])
+			docIndex++
+		}
+
+		batches = append(batches, batch)
+	}
+
+	return batches
+}
+
+func (es *EndSessions) encodeBatch(batch []bsonx.Doc, desc description.SelectedServer) *Write {
+	vals := make(bsonx.Arr, 0, len(batch))
+	for _, doc := range batch {
+		vals = append(vals, bsonx.Document(doc))
+	}
+
+	cmd := bsonx.Doc{{"endSessions", bsonx.Array(vals)}}
+
+	return &Write{
+		Clock:   es.Clock,
+		DB:      "admin",
+		Command: cmd,
+	}
+}
+
+// Encode will encode this command into a series of wire messages for the given server description.
+func (es *EndSessions) Encode(desc description.SelectedServer) ([]wiremessage.WireMessage, error) {
+	cmds := es.encode(desc)
+	wms := make([]wiremessage.WireMessage, 0, len(cmds))
+
+	for _, cmd := range cmds {
+		wm, err := cmd.Encode(desc)
+		if err != nil {
+			return nil, err
+		}
+
+		wms = append(wms, wm)
+	}
+
+	return wms, nil
+}
+
+func (es *EndSessions) encode(desc description.SelectedServer) []*Write {
+	out := []*Write{}
+	batches := es.split()
+
+	for _, batch := range batches {
+		out = append(out, es.encodeBatch(batch, desc))
+	}
+
+	return out
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (es *EndSessions) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *EndSessions {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		es.errors = append(es.errors, err)
+		return es
+	}
+
+	return es.decode(desc, rdr)
+}
+
+func (es *EndSessions) decode(desc description.SelectedServer, rdr bson.Raw) *EndSessions {
+	var res result.EndSessions
+	es.errors = append(es.errors, bson.Unmarshal(rdr, &res))
+	es.results = append(es.results, res)
+	return es
+}
+
+// Result returns the results of the decoded wire messages.
+func (es *EndSessions) Result() ([]result.EndSessions, []error) {
+	return es.results, es.errors
+}
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter
+func (es *EndSessions) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) ([]result.EndSessions, []error) {
+	cmds := es.encode(desc)
+
+	for _, cmd := range cmds {
+		rdr, _ := cmd.RoundTrip(ctx, desc, rw) // ignore any errors returned by the command
+		es.decode(desc, rdr)
+	}
+
+	return es.Result()
+}
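
A hedged sketch of the intended call pattern: hand every session ID to one EndSessions value and let split carve the list into BatchSize (10,000) chunks, with per-batch errors collected rather than short-circuiting. The helper name is an assumption.

```go
package example

import (
	"context"

	"github.com/mongodb/mongo-go-driver/x/bsonx"
	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// endAllSessions ends every session ID in ids; batching into groups of
// command.BatchSize happens inside the command, and errors are collected
// per batch, mirroring RoundTrip above.
func endAllSessions(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter, clock *session.ClusterClock, ids []bsonx.Doc) []error {
	es := &command.EndSessions{Clock: clock, SessionIDs: ids}
	_, errs := es.RoundTrip(ctx, desc, rw)
	return errs
}
```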
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/errors.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/errors.go
new file mode 100644
index 0000000..5ecb48c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/errors.go
@@ -0,0 +1,141 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"errors"
+	"fmt"
+
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+var (
+	// ErrUnknownCommandFailure occurs when a command fails for an unknown reason.
+	ErrUnknownCommandFailure = errors.New("unknown command failure")
+	// ErrNoCommandResponse occurs when the server sent no response document to a command.
+	ErrNoCommandResponse = errors.New("no command response document")
+	// ErrMultiDocCommandResponse occurs when the server sent multiple documents in response to a command.
+	ErrMultiDocCommandResponse = errors.New("command returned multiple documents")
+	// ErrNoDocCommandResponse occurs when the server indicated a response existed, but none was found.
+	ErrNoDocCommandResponse = errors.New("command returned no documents")
+	// ErrDocumentTooLarge occurs when a document that is larger than the maximum size accepted by a
+	// server is passed to an insert command.
+	ErrDocumentTooLarge = errors.New("an inserted document is too large")
+	// ErrNonPrimaryRP occurs when a nonprimary read preference is used with a transaction.
+	ErrNonPrimaryRP = errors.New("read preference in a transaction must be primary")
+	// UnknownTransactionCommitResult is an error label for unknown transaction commit results.
+	UnknownTransactionCommitResult = "UnknownTransactionCommitResult"
+	// TransientTransactionError is an error label for transient errors with transactions.
+	TransientTransactionError = "TransientTransactionError"
+	// NetworkError is an error label for network errors.
+	NetworkError = "NetworkError"
+	// ReplyDocumentMismatch is an error label for OP_QUERY field mismatch errors.
+	// ReplyDocumentMismatch is the error message for an OP_REPLY whose NumberReturned field does not match the number of documents returned.
+)
+
+var retryableCodes = []int32{11600, 11602, 10107, 13435, 13436, 189, 91, 7, 6, 89, 9001}
+
+// QueryFailureError is an error representing a command failure as a document.
+type QueryFailureError struct {
+	Message  string
+	Response bson.Raw
+}
+
+// Error implements the error interface.
+func (e QueryFailureError) Error() string {
+	return fmt.Sprintf("%s: %v", e.Message, e.Response)
+}
+
+// ResponseError is an error parsing the response to a command.
+type ResponseError struct {
+	Message string
+	Wrapped error
+}
+
+// NewCommandResponseError creates a ResponseError.
+func NewCommandResponseError(msg string, err error) ResponseError {
+	return ResponseError{Message: msg, Wrapped: err}
+}
+
+// Error implements the error interface.
+func (e ResponseError) Error() string {
+	if e.Wrapped != nil {
+		return fmt.Sprintf("%s: %s", e.Message, e.Wrapped)
+	}
+	return e.Message
+}
+
+// Error is a command execution error from the database.
+type Error struct {
+	Code    int32
+	Message string
+	Labels  []string
+	Name    string
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+	if e.Name != "" {
+		return fmt.Sprintf("(%v) %v", e.Name, e.Message)
+	}
+	return e.Message
+}
+
+// HasErrorLabel returns true if the error contains the specified label.
+func (e Error) HasErrorLabel(label string) bool {
+	if e.Labels != nil {
+		for _, l := range e.Labels {
+			if l == label {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// Retryable returns true if the error is retryable.
+func (e Error) Retryable() bool {
+	for _, label := range e.Labels {
+		if label == NetworkError {
+			return true
+		}
+	}
+	for _, code := range retryableCodes {
+		if e.Code == code {
+			return true
+		}
+	}
+	if strings.Contains(e.Message, "not master") || strings.Contains(e.Message, "node is recovering") {
+		return true
+	}
+
+	return false
+}
+
+// IsWriteConcernErrorRetryable returns true if the write concern error is retryable.
+func IsWriteConcernErrorRetryable(wce *result.WriteConcernError) bool {
+	for _, code := range retryableCodes {
+		if int32(wce.Code) == code {
+			return true
+		}
+	}
+	if strings.Contains(wce.ErrMsg, "not master") || strings.Contains(wce.ErrMsg, "node is recovering") {
+		return true
+	}
+
+	return false
+}
+
+// IsNotFound indicates if the error is from a namespace not being found.
+func IsNotFound(err error) bool {
+	e, ok := err.(Error)
+	// need message check because legacy servers don't include the error code
+	return ok && (e.Code == 26 || e.Message == "ns not found")
+}
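
The classification helpers above compose naturally; a small sketch (the function name is an assumption) of a retry predicate built only from APIs defined in this file:

```go
package example

import "github.com/mongodb/mongo-go-driver/x/network/command"

// shouldRetry reports whether a failed command is worth retrying: either the
// server attached a transient-transaction label, or the code or message
// matches the retryable set checked by Retryable.
func shouldRetry(err error) bool {
	cmdErr, ok := err.(command.Error)
	if !ok {
		return false
	}
	return cmdErr.Retryable() || cmdErr.HasErrorLabel(command.TransientTransactionError)
}
```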
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find.go
new file mode 100644
index 0000000..e9d135e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find.go
@@ -0,0 +1,113 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Find represents the find command.
+//
+// The find command finds documents within a collection that match a filter.
+type Find struct {
+	NS          Namespace
+	Filter      bsonx.Doc
+	CursorOpts  []bsonx.Elem
+	Opts        []bsonx.Elem
+	ReadPref    *readpref.ReadPref
+	ReadConcern *readconcern.ReadConcern
+	Clock       *session.ClusterClock
+	Session     *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (f *Find) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+func (f *Find) encode(desc description.SelectedServer) (*Read, error) {
+	if err := f.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	command := bsonx.Doc{{"find", bsonx.String(f.NS.Collection)}}
+
+	if f.Filter != nil {
+		command = append(command, bsonx.Elem{"filter", bsonx.Document(f.Filter)})
+	}
+
+	command = append(command, f.Opts...)
+
+	return &Read{
+		Clock:       f.Clock,
+		DB:          f.NS.DB,
+		ReadPref:    f.ReadPref,
+		Command:     command,
+		ReadConcern: f.ReadConcern,
+		Session:     f.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (f *Find) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Find {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		f.err = err
+		return f
+	}
+
+	return f.decode(desc, rdr)
+}
+
+func (f *Find) decode(desc description.SelectedServer, rdr bson.Raw) *Find {
+	f.result = rdr
+	return f
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (f *Find) Result() (bson.Raw, error) {
+	if f.err != nil {
+		return nil, f.err
+	}
+
+	return f.result, nil
+}
+
+// Err returns the error set on this command.
+func (f *Find) Err() error { return f.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (f *Find) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+
+	return f.decode(desc, rdr).Result()
+}
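
A minimal sketch of issuing a filtered, limited find; the helper name and the status filter are illustrative. The raw reply holds the first cursor batch, which real callers would feed into the driver's cursor machinery.

```go
package example

import (
	"context"

	"github.com/mongodb/mongo-go-driver/bson"
	"github.com/mongodb/mongo-go-driver/x/bsonx"
	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// findByStatus issues {find: coll, filter: {status: ...}, limit: 10} and
// returns the raw server reply containing the first batch.
func findByStatus(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter, db, coll, status string) (bson.Raw, error) {
	f := &command.Find{
		NS:     command.Namespace{DB: db, Collection: coll},
		Filter: bsonx.Doc{{"status", bsonx.String(status)}},
		Opts:   []bsonx.Elem{{"limit", bsonx.Int64(10)}},
	}
	return f.RoundTrip(ctx, desc, rw)
}
```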
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_and_modify.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_and_modify.go
new file mode 100644
index 0000000..260ee37
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_and_modify.go
@@ -0,0 +1,51 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// unmarshalFindAndModifyResult turns the provided bson.Raw into a findAndModify result.
+func unmarshalFindAndModifyResult(rdr bson.Raw) (result.FindAndModify, error) {
+	var res result.FindAndModify
+
+	val, err := rdr.LookupErr("value")
+	switch {
+	case err == bsoncore.ErrElementNotFound:
+		return result.FindAndModify{}, errors.New("invalid response from server, no value field")
+	case err != nil:
+		return result.FindAndModify{}, err
+	}
+
+	switch val.Type {
+	case bson.TypeNull:
+	case bson.TypeEmbeddedDocument:
+		res.Value = val.Document()
+	default:
+		return result.FindAndModify{}, errors.New("invalid response from server, 'value' field is not a document")
+	}
+
+	if val, err := rdr.LookupErr("lastErrorObject", "updatedExisting"); err == nil {
+		b, ok := val.BooleanOK()
+		if ok {
+			res.LastErrorObject.UpdatedExisting = b
+		}
+	}
+
+	if val, err := rdr.LookupErr("lastErrorObject", "upserted"); err == nil {
+		oid, ok := val.ObjectIDOK()
+		if ok {
+			res.LastErrorObject.Upserted = oid
+		}
+	}
+	return res, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_delete.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_delete.go
new file mode 100644
index 0000000..f5c36d2
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_delete.go
@@ -0,0 +1,111 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// FindOneAndDelete represents the findOneAndDelete operation.
+//
+// The findOneAndDelete command deletes a single document that matches a query and returns it.
+type FindOneAndDelete struct {
+	NS           Namespace
+	Query        bsonx.Doc
+	Opts         []bsonx.Elem
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result result.FindAndModify
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (f *FindOneAndDelete) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+func (f *FindOneAndDelete) encode(desc description.SelectedServer) (*Write, error) {
+	if err := f.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	command := bsonx.Doc{
+		{"findAndModify", bsonx.String(f.NS.Collection)},
+		{"query", bsonx.Document(f.Query)},
+		{"remove", bsonx.Boolean(true)},
+	}
+	command = append(command, f.Opts...)
+
+	write := &Write{
+		Clock:   f.Clock,
+		DB:      f.NS.DB,
+		Command: command,
+		Session: f.Session,
+	}
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 4 {
+		write.WriteConcern = f.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (f *FindOneAndDelete) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *FindOneAndDelete {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		f.err = err
+		return f
+	}
+
+	return f.decode(desc, rdr)
+}
+
+func (f *FindOneAndDelete) decode(desc description.SelectedServer, rdr bson.Raw) *FindOneAndDelete {
+	f.result, f.err = unmarshalFindAndModifyResult(rdr)
+	return f
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (f *FindOneAndDelete) Result() (result.FindAndModify, error) {
+	if f.err != nil {
+		return result.FindAndModify{}, f.err
+	}
+	return f.result, nil
+}
+
+// Err returns the error set on this command.
+func (f *FindOneAndDelete) Err() error { return f.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (f *FindOneAndDelete) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.FindAndModify, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	return f.decode(desc, rdr).Result()
+}
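
Because the server reports a null `value` field when nothing matches, findOneAndDelete doubles as an atomic take operation. A sketch under that assumption; the queue semantics and names are illustrative.

```go
package example

import (
	"context"

	"github.com/mongodb/mongo-go-driver/bson"
	"github.com/mongodb/mongo-go-driver/x/bsonx"
	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// popJob atomically removes one queued job and returns it; a nil Raw means
// no document matched (the server sent value: null).
func popJob(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter, db, coll string) (bson.Raw, error) {
	f := &command.FindOneAndDelete{
		NS:    command.Namespace{DB: db, Collection: coll},
		Query: bsonx.Doc{{"state", bsonx.String("queued")}},
	}
	res, err := f.RoundTrip(ctx, desc, rw)
	if err != nil {
		return nil, err
	}
	return res.Value, nil
}
```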
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_replace.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_replace.go
new file mode 100644
index 0000000..b3139f8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_replace.go
@@ -0,0 +1,112 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// FindOneAndReplace represents the findOneAndReplace operation.
+//
+// The findOneAndReplace command replaces a single document and returns it.
+type FindOneAndReplace struct {
+	NS           Namespace
+	Query        bsonx.Doc
+	Replacement  bsonx.Doc
+	Opts         []bsonx.Elem
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result result.FindAndModify
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (f *FindOneAndReplace) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+func (f *FindOneAndReplace) encode(desc description.SelectedServer) (*Write, error) {
+	if err := f.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	command := bsonx.Doc{
+		{"findAndModify", bsonx.String(f.NS.Collection)},
+		{"query", bsonx.Document(f.Query)},
+		{"update", bsonx.Document(f.Replacement)},
+	}
+	command = append(command, f.Opts...)
+
+	write := &Write{
+		Clock:   f.Clock,
+		DB:      f.NS.DB,
+		Command: command,
+		Session: f.Session,
+	}
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 4 {
+		write.WriteConcern = f.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (f *FindOneAndReplace) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *FindOneAndReplace {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		f.err = err
+		return f
+	}
+
+	return f.decode(desc, rdr)
+}
+
+func (f *FindOneAndReplace) decode(desc description.SelectedServer, rdr bson.Raw) *FindOneAndReplace {
+	f.result, f.err = unmarshalFindAndModifyResult(rdr)
+	return f
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (f *FindOneAndReplace) Result() (result.FindAndModify, error) {
+	if f.err != nil {
+		return result.FindAndModify{}, f.err
+	}
+	return f.result, nil
+}
+
+// Err returns the error set on this command.
+func (f *FindOneAndReplace) Err() error { return f.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (f *FindOneAndReplace) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.FindAndModify, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	return f.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_update.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_update.go
new file mode 100644
index 0000000..b90c7d0
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_update.go
@@ -0,0 +1,112 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// FindOneAndUpdate represents the findOneAndUpdate operation.
+//
+// The findOneAndUpdate command modifies and returns a single document.
+type FindOneAndUpdate struct {
+	NS           Namespace
+	Query        bsonx.Doc
+	Update       bsonx.Doc
+	Opts         []bsonx.Elem
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result result.FindAndModify
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (f *FindOneAndUpdate) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+func (f *FindOneAndUpdate) encode(desc description.SelectedServer) (*Write, error) {
+	if err := f.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	command := bsonx.Doc{
+		{"findAndModify", bsonx.String(f.NS.Collection)},
+		{"query", bsonx.Document(f.Query)},
+		{"update", bsonx.Document(f.Update)},
+	}
+	command = append(command, f.Opts...)
+
+	write := &Write{
+		Clock:   f.Clock,
+		DB:      f.NS.DB,
+		Command: command,
+		Session: f.Session,
+	}
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 4 {
+		write.WriteConcern = f.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (f *FindOneAndUpdate) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *FindOneAndUpdate {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		f.err = err
+		return f
+	}
+
+	return f.decode(desc, rdr)
+}
+
+func (f *FindOneAndUpdate) decode(desc description.SelectedServer, rdr bson.Raw) *FindOneAndUpdate {
+	f.result, f.err = unmarshalFindAndModifyResult(rdr)
+	return f
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (f *FindOneAndUpdate) Result() (result.FindAndModify, error) {
+	if f.err != nil {
+		return result.FindAndModify{}, f.err
+	}
+	return f.result, nil
+}
+
+// Err returns the error set on this command.
+func (f *FindOneAndUpdate) Err() error { return f.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (f *FindOneAndUpdate) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.FindAndModify, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	return f.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/get_more.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/get_more.go
new file mode 100644
index 0000000..ce016b1
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/get_more.go
@@ -0,0 +1,108 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// GetMore represents the getMore command.
+//
+// The getMore command retrieves additional documents from a cursor.
+type GetMore struct {
+	ID      int64
+	NS      Namespace
+	Opts    []bsonx.Elem
+	Clock   *session.ClusterClock
+	Session *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (gm *GetMore) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := gm.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+func (gm *GetMore) encode(desc description.SelectedServer) (*Read, error) {
+	cmd := bsonx.Doc{
+		{"getMore", bsonx.Int64(gm.ID)},
+		{"collection", bsonx.String(gm.NS.Collection)},
+	}
+
+	for _, opt := range gm.Opts {
+		switch opt.Key {
+		case "maxAwaitTimeMS":
+			cmd = append(cmd, bsonx.Elem{"maxTimeMS", opt.Value})
+		default:
+			cmd = append(cmd, opt)
+		}
+	}
+
+	return &Read{
+		Clock:   gm.Clock,
+		DB:      gm.NS.DB,
+		Command: cmd,
+		Session: gm.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (gm *GetMore) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *GetMore {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		gm.err = err
+		return gm
+	}
+
+	return gm.decode(desc, rdr)
+}
+
+func (gm *GetMore) decode(desc description.SelectedServer, rdr bson.Raw) *GetMore {
+	gm.result = rdr
+	return gm
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (gm *GetMore) Result() (bson.Raw, error) {
+	if gm.err != nil {
+		return nil, gm.err
+	}
+
+	return gm.result, nil
+}
+
+// Err returns the error set on this command.
+func (gm *GetMore) Err() error { return gm.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (gm *GetMore) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := gm.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+
+	return gm.decode(desc, rdr).Result()
+}
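
Note the remap in encode above: callers express maxAwaitTimeMS, and it is sent as maxTimeMS on the wire. A sketch of fetching the next batch for a tailable cursor; the helper name and the 500ms wait are illustrative.

```go
package example

import (
	"context"

	"github.com/mongodb/mongo-go-driver/bson"
	"github.com/mongodb/mongo-go-driver/x/bsonx"
	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// nextBatch pulls the next batch for cursorID; maxAwaitTimeMS is remapped
// to the wire-level maxTimeMS field by GetMore's encode.
func nextBatch(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter, ns command.Namespace, cursorID int64) (bson.Raw, error) {
	gm := &command.GetMore{
		ID:   cursorID,
		NS:   ns,
		Opts: []bsonx.Elem{{"maxAwaitTimeMS", bsonx.Int64(500)}},
	}
	return gm.RoundTrip(ctx, desc, rw)
}
```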
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/getlasterror.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/getlasterror.go
new file mode 100644
index 0000000..4f68c3c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/getlasterror.go
@@ -0,0 +1,111 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// GetLastError represents the getLastError command.
+//
+// The getLastError command is used for getting the last
+// error from the last command on a connection.
+//
+// Since GetLastError only makes sense in the context of
+// a single connection, there is no Dispatch method.
+type GetLastError struct {
+	Clock   *session.ClusterClock
+	Session *session.Client
+
+	err error
+	res result.GetLastError
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (gle *GetLastError) Encode() (wiremessage.WireMessage, error) {
+	encoded, err := gle.encode()
+	if err != nil {
+		return nil, err
+	}
+	return encoded.Encode(description.SelectedServer{})
+}
+
+func (gle *GetLastError) encode() (*Read, error) {
+	// This can probably just be a global variable that we reuse.
+	cmd := bsonx.Doc{{"getLastError", bsonx.Int32(1)}}
+
+	return &Read{
+		Clock:    gle.Clock,
+		DB:       "admin",
+		ReadPref: readpref.Secondary(),
+		Session:  gle.Session,
+		Command:  cmd,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (gle *GetLastError) Decode(wm wiremessage.WireMessage) *GetLastError {
+	reply, ok := wm.(wiremessage.Reply)
+	if !ok {
+		gle.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return gle
+	}
+	rdr, err := decodeCommandOpReply(reply)
+	if err != nil {
+		gle.err = err
+		return gle
+	}
+	return gle.decode(rdr)
+}
+
+func (gle *GetLastError) decode(rdr bson.Raw) *GetLastError {
+	err := bson.Unmarshal(rdr, &gle.res)
+	if err != nil {
+		gle.err = err
+		return gle
+	}
+
+	return gle
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (gle *GetLastError) Result() (result.GetLastError, error) {
+	if gle.err != nil {
+		return result.GetLastError{}, gle.err
+	}
+
+	return gle.res, nil
+}
+
+// Err returns the error set on this command.
+func (gle *GetLastError) Err() error { return gle.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (gle *GetLastError) RoundTrip(ctx context.Context, rw wiremessage.ReadWriter) (result.GetLastError, error) {
+	cmd, err := gle.encode()
+	if err != nil {
+		return result.GetLastError{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, description.SelectedServer{}, rw)
+	if err != nil {
+		return result.GetLastError{}, err
+	}
+
+	return gle.decode(rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/handshake.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/handshake.go
new file mode 100644
index 0000000..29eb103
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/handshake.go
@@ -0,0 +1,117 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"runtime"
+
+	"github.com/mongodb/mongo-go-driver/version"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Handshake represents a generic MongoDB handshake. It runs the isMaster
+// command against the server.
+//
+// The isMaster reply is used to build a server description.
+type Handshake struct {
+	Client             bsonx.Doc
+	Compressors        []string
+	SaslSupportedMechs string
+
+	ismstr result.IsMaster
+	err    error
+}
+
+// Encode will encode the handshake commands into a wire message containing isMaster
+func (h *Handshake) Encode() (wiremessage.WireMessage, error) {
+	var wm wiremessage.WireMessage
+	ismstr, err := (&IsMaster{
+		Client:             h.Client,
+		Compressors:        h.Compressors,
+		SaslSupportedMechs: h.SaslSupportedMechs,
+	}).Encode()
+	if err != nil {
+		return wm, err
+	}
+
+	wm = ismstr
+	return wm, nil
+}
+
+// Decode will decode the wire messages.
+// Errors during decoding are deferred until either the Result or Err methods
+// are called.
+func (h *Handshake) Decode(wm wiremessage.WireMessage) *Handshake {
+	h.ismstr, h.err = (&IsMaster{}).Decode(wm).Result()
+	return h
+}
+
+// Result returns the result of decoded wire messages.
+func (h *Handshake) Result(addr address.Address) (description.Server, error) {
+	if h.err != nil {
+		return description.Server{}, h.err
+	}
+	return description.NewServer(addr, h.ismstr), nil
+}
+
+// Err returns the error set on this Handshake.
+func (h *Handshake) Err() error { return h.err }
+
+// Handshake implements the connection.Handshaker interface. It is identical
+// to the RoundTrip methods on other types in this package. It will execute
+// the isMaster command.
+func (h *Handshake) Handshake(ctx context.Context, addr address.Address, rw wiremessage.ReadWriter) (description.Server, error) {
+	wm, err := h.Encode()
+	if err != nil {
+		return description.Server{}, err
+	}
+
+	err = rw.WriteWireMessage(ctx, wm)
+	if err != nil {
+		return description.Server{}, err
+	}
+
+	wm, err = rw.ReadWireMessage(ctx)
+	if err != nil {
+		return description.Server{}, err
+	}
+	return h.Decode(wm).Result(addr)
+}
+
+// ClientDoc creates a client information document for use in an isMaster
+// command.
+func ClientDoc(app string) bsonx.Doc {
+	doc := bsonx.Doc{
+		{"driver",
+			bsonx.Document(bsonx.Doc{
+				{"name", bsonx.String("mongo-go-driver")},
+				{"version", bsonx.String(version.Driver)},
+			}),
+		},
+		{"os",
+			bsonx.Document(bsonx.Doc{
+				{"type", bsonx.String(runtime.GOOS)},
+				{"architecture", bsonx.String(runtime.GOARCH)},
+			}),
+		},
+		{"platform", bsonx.String(runtime.Version())},
+	}
+
+	if app != "" {
+		doc = append(doc, bsonx.Elem{"application", bsonx.Document(bsonx.Doc{{"name", bsonx.String(app)}})})
+	}
+
+	return doc
+}
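
To see what ClientDoc actually produces, the document can be marshalled and printed; the application name below is an illustrative assumption.

```go
package example

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/bson"
	"github.com/mongodb/mongo-go-driver/x/network/command"
)

// printHandshakeMetadata renders the client-metadata document embedded in
// isMaster: driver name/version, OS type/architecture, the Go runtime
// version, and an application name when one is configured.
func printHandshakeMetadata() error {
	doc := command.ClientDoc("inventory-service") // illustrative app name
	raw, err := doc.MarshalBSON()
	if err != nil {
		return err
	}
	fmt.Println(bson.Raw(raw))
	return nil
}
```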
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/insert.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/insert.go
new file mode 100644
index 0000000..5059630
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/insert.go
@@ -0,0 +1,158 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// reservedCommandBufferBytes is the amount of buffer space in a message
+// that the driver reserves for command overhead.
+const reservedCommandBufferBytes = 16 * 10 * 10 * 10
+
+// Insert represents the insert command.
+//
+// The insert command inserts a set of documents into the database.
+type Insert struct {
+	ContinueOnError bool
+	Clock           *session.ClusterClock
+	NS              Namespace
+	Docs            []bsonx.Doc
+	Opts            []bsonx.Elem
+	WriteConcern    *writeconcern.WriteConcern
+	Session         *session.Client
+
+	batches []*WriteBatch
+	result  result.Insert
+	err     error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (i *Insert) Encode(desc description.SelectedServer) ([]wiremessage.WireMessage, error) {
+	err := i.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return batchesToWireMessage(i.batches, desc)
+}
+
+func (i *Insert) encodeBatch(docs []bsonx.Doc, desc description.SelectedServer) (*WriteBatch, error) {
+	command, err := encodeBatch(docs, i.Opts, InsertCommand, i.NS.Collection)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, opt := range i.Opts {
+		if opt.Key == "ordered" && !opt.Value.Boolean() {
+			i.ContinueOnError = true
+			break
+		}
+	}
+
+	return &WriteBatch{
+		&Write{
+			Clock:        i.Clock,
+			DB:           i.NS.DB,
+			Command:      command,
+			WriteConcern: i.WriteConcern,
+			Session:      i.Session,
+		},
+		len(docs),
+	}, nil
+}
+
+func (i *Insert) encode(desc description.SelectedServer) error {
+	batches, err := splitBatches(i.Docs, int(desc.MaxBatchCount), int(desc.MaxDocumentSize))
+	if err != nil {
+		return err
+	}
+
+	for _, docs := range batches {
+		cmd, err := i.encodeBatch(docs, desc)
+		if err != nil {
+			return err
+		}
+
+		i.batches = append(i.batches, cmd)
+	}
+	return nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (i *Insert) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Insert {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		i.err = err
+		return i
+	}
+
+	return i.decode(desc, rdr)
+}
+
+func (i *Insert) decode(desc description.SelectedServer, rdr bson.Raw) *Insert {
+	i.err = bson.Unmarshal(rdr, &i.result)
+	return i
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (i *Insert) Result() (result.Insert, error) {
+	if i.err != nil {
+		return result.Insert{}, i.err
+	}
+	return i.result, nil
+}
+
+// Err returns the error set on this command.
+func (i *Insert) Err() error { return i.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (i *Insert) RoundTrip(
+	ctx context.Context,
+	desc description.SelectedServer,
+	rw wiremessage.ReadWriter,
+) (result.Insert, error) {
+	if i.batches == nil {
+		err := i.encode(desc)
+		if err != nil {
+			return result.Insert{}, err
+		}
+	}
+
+	r, batches, err := roundTripBatches(
+		ctx, desc, rw,
+		i.batches,
+		i.ContinueOnError,
+		i.Session,
+		InsertCommand,
+	)
+
+	// if there are leftover batches, save them for retry
+	if batches != nil {
+		i.batches = batches
+	}
+
+	if err != nil {
+		return result.Insert{}, err
+	}
+
+	res := r.(result.Insert)
+	return res, nil
+}
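
A sketch tying the pieces together: passing {ordered: false} flips ContinueOnError in encodeBatch, so later batches are still attempted after a failure, and splitting against MaxBatchCount/MaxDocumentSize happens inside encode. The helper name is an assumption.

```go
package example

import (
	"context"

	"github.com/mongodb/mongo-go-driver/x/bsonx"
	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/result"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// insertUnordered inserts docs with {ordered: false}; encodeBatch turns that
// option into ContinueOnError, and encode splits docs into wire-sized batches.
func insertUnordered(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter, ns command.Namespace, docs []bsonx.Doc) (result.Insert, error) {
	i := &command.Insert{
		NS:   ns,
		Docs: docs,
		Opts: []bsonx.Elem{{"ordered", bsonx.Boolean(false)}},
	}
	return i.RoundTrip(ctx, desc, rw)
}
```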
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/ismaster.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/ismaster.go
new file mode 100644
index 0000000..6bd8d09
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/ismaster.go
@@ -0,0 +1,121 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// IsMaster represents the isMaster command.
+//
+// The isMaster command is used for setting up a connection to MongoDB and
+// for monitoring a MongoDB server.
+//
+// Since IsMaster can only be run on a connection, there is no Dispatch method.
+type IsMaster struct {
+	Client             bsonx.Doc
+	Compressors        []string
+	SaslSupportedMechs string
+
+	err error
+	res result.IsMaster
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (im *IsMaster) Encode() (wiremessage.WireMessage, error) {
+	cmd := bsonx.Doc{{"isMaster", bsonx.Int32(1)}}
+	if im.Client != nil {
+		cmd = append(cmd, bsonx.Elem{"client", bsonx.Document(im.Client)})
+	}
+	if im.SaslSupportedMechs != "" {
+		cmd = append(cmd, bsonx.Elem{"saslSupportedMechs", bsonx.String(im.SaslSupportedMechs)})
+	}
+
+	// always send compressors even if empty slice
+	array := bsonx.Arr{}
+	for _, compressor := range im.Compressors {
+		array = append(array, bsonx.String(compressor))
+	}
+
+	cmd = append(cmd, bsonx.Elem{"compression", bsonx.Array(array)})
+
+	rdr, err := cmd.MarshalBSON()
+	if err != nil {
+		return nil, err
+	}
+	query := wiremessage.Query{
+		MsgHeader:          wiremessage.Header{RequestID: wiremessage.NextRequestID()},
+		FullCollectionName: "admin.$cmd",
+		Flags:              wiremessage.SlaveOK,
+		NumberToReturn:     -1,
+		Query:              rdr,
+	}
+	return query, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (im *IsMaster) Decode(wm wiremessage.WireMessage) *IsMaster {
+	reply, ok := wm.(wiremessage.Reply)
+	if !ok {
+		im.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return im
+	}
+	rdr, err := decodeCommandOpReply(reply)
+	if err != nil {
+		im.err = err
+		return im
+	}
+	err = bson.Unmarshal(rdr, &im.res)
+	if err != nil {
+		im.err = err
+		return im
+	}
+
+	// Reconstructs the $clusterTime doc after decode
+	if im.res.ClusterTime != nil {
+		im.res.ClusterTime = bsoncore.BuildDocument(nil, bsoncore.AppendDocumentElement(nil, "$clusterTime", im.res.ClusterTime))
+	}
+	return im
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (im *IsMaster) Result() (result.IsMaster, error) {
+	if im.err != nil {
+		return result.IsMaster{}, im.err
+	}
+
+	return im.res, nil
+}
+
+// Err returns the error set on this command.
+func (im *IsMaster) Err() error { return im.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (im *IsMaster) RoundTrip(ctx context.Context, rw wiremessage.ReadWriter) (result.IsMaster, error) {
+	wm, err := im.Encode()
+	if err != nil {
+		return result.IsMaster{}, err
+	}
+
+	err = rw.WriteWireMessage(ctx, wm)
+	if err != nil {
+		return result.IsMaster{}, err
+	}
+	wm, err = rw.ReadWireMessage(ctx)
+	if err != nil {
+		return result.IsMaster{}, err
+	}
+	return im.Decode(wm).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/kill_cursors.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/kill_cursors.go
new file mode 100644
index 0000000..37b1f89
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/kill_cursors.go
@@ -0,0 +1,103 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// KillCursors represents the killCursors command.
+//
+// The killCursors command kills a set of cursors.
+type KillCursors struct {
+	Clock *session.ClusterClock
+	NS    Namespace
+	IDs   []int64
+
+	result result.KillCursors
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (kc *KillCursors) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	encoded, err := kc.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	return encoded.Encode(desc)
+}
+
+func (kc *KillCursors) encode(desc description.SelectedServer) (*Read, error) {
+	idVals := make([]bsonx.Val, 0, len(kc.IDs))
+	for _, id := range kc.IDs {
+		idVals = append(idVals, bsonx.Int64(id))
+	}
+	cmd := bsonx.Doc{
+		{"killCursors", bsonx.String(kc.NS.Collection)},
+		{"cursors", bsonx.Array(idVals)},
+	}
+
+	return &Read{
+		Clock:   kc.Clock,
+		DB:      kc.NS.DB,
+		Command: cmd,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (kc *KillCursors) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *KillCursors {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		kc.err = err
+		return kc
+	}
+	return kc.decode(desc, rdr)
+}
+
+func (kc *KillCursors) decode(desc description.SelectedServer, rdr bson.Raw) *KillCursors {
+	err := bson.Unmarshal(rdr, &kc.result)
+	if err != nil {
+		kc.err = err
+		return kc
+	}
+	return kc
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (kc *KillCursors) Result() (result.KillCursors, error) {
+	if kc.err != nil {
+		return result.KillCursors{}, kc.err
+	}
+
+	return kc.result, nil
+}
+
+// Err returns the error set on this command.
+func (kc *KillCursors) Err() error { return kc.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (kc *KillCursors) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.KillCursors, error) {
+	cmd, err := kc.encode(desc)
+	if err != nil {
+		return result.KillCursors{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.KillCursors{}, err
+	}
+
+	return kc.decode(desc, rdr).Result()
+}
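
KillCursors also illustrates the split API that RoundTrip wraps. A sketch of driving it manually with the Encode/Decode pair, showing how decode errors stay deferred until Result; only calls that appear in this package are used, and the helper name is an assumption.

```go
package example

import (
	"context"

	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/result"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// killCursor spells out the Encode -> write -> read -> Decode cycle that
// RoundTrip performs in one call.
func killCursor(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter, ns command.Namespace, id int64) (result.KillCursors, error) {
	kc := &command.KillCursors{NS: ns, IDs: []int64{id}}

	wm, err := kc.Encode(desc)
	if err != nil {
		return result.KillCursors{}, err
	}
	if err = rw.WriteWireMessage(ctx, wm); err != nil {
		return result.KillCursors{}, err
	}
	if wm, err = rw.ReadWireMessage(ctx); err != nil {
		return result.KillCursors{}, err
	}
	return kc.Decode(desc, wm).Result()
}
```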
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_collections.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_collections.go
new file mode 100644
index 0000000..0c3e76e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_collections.go
@@ -0,0 +1,102 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// ListCollections represents the listCollections command.
+//
+// The listCollections command lists the collections in a database.
+type ListCollections struct {
+	Clock      *session.ClusterClock
+	DB         string
+	Filter     bsonx.Doc
+	CursorOpts []bsonx.Elem
+	Opts       []bsonx.Elem
+	ReadPref   *readpref.ReadPref
+	Session    *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (lc *ListCollections) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	encoded, err := lc.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	return encoded.Encode(desc)
+}
+
+func (lc *ListCollections) encode(desc description.SelectedServer) (*Read, error) {
+	cmd := bsonx.Doc{{"listCollections", bsonx.Int32(1)}}
+
+	if lc.Filter != nil {
+		cmd = append(cmd, bsonx.Elem{"filter", bsonx.Document(lc.Filter)})
+	}
+	cmd = append(cmd, lc.Opts...)
+
+	return &Read{
+		Clock:    lc.Clock,
+		DB:       lc.DB,
+		Command:  cmd,
+		ReadPref: lc.ReadPref,
+		Session:  lc.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (lc *ListCollections) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *ListCollections {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		lc.err = err
+		return lc
+	}
+	return lc.decode(desc, rdr)
+}
+
+func (lc *ListCollections) decode(desc description.SelectedServer, rdr bson.Raw) *ListCollections {
+	lc.result = rdr
+	return lc
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (lc *ListCollections) Result() (bson.Raw, error) {
+	if lc.err != nil {
+		return nil, lc.err
+	}
+	return lc.result, nil
+}
+
+// Err returns the error set on this command.
+func (lc *ListCollections) Err() error { return lc.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (lc *ListCollections) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := lc.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+
+	return lc.decode(desc, rdr).Result()
+}
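A hypothetical invocation of the command above, filtering by collection name. The filter shape follows the server's listCollections semantics; `ctx`, `desc`, and `rw` are assumed to come from the driver's topology layer:

```go
package main

import (
	"context"
	"log"

	"github.com/mongodb/mongo-go-driver/x/bsonx"
	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// listCollections asks one database for collections matching a name filter.
func listCollections(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) {
	lc := &command.ListCollections{
		DB:     "test",
		Filter: bsonx.Doc{{"name", bsonx.String("users")}},
	}
	rdr, err := lc.RoundTrip(ctx, desc, rw)
	if err != nil {
		log.Fatal(err)
	}
	_ = rdr // raw BSON reply containing the first cursor batch
}
```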
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_databases.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_databases.go
new file mode 100644
index 0000000..d4fd843
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_databases.go
@@ -0,0 +1,98 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// ListDatabases represents the listDatabases command.
+//
+// The listDatabases command lists the databases in a MongoDB deployment.
+type ListDatabases struct {
+	Clock   *session.ClusterClock
+	Filter  bsonx.Doc
+	Opts    []bsonx.Elem
+	Session *session.Client
+
+	result result.ListDatabases
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (ld *ListDatabases) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	encoded, err := ld.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	return encoded.Encode(desc)
+}
+
+func (ld *ListDatabases) encode(desc description.SelectedServer) (*Read, error) {
+	cmd := bsonx.Doc{{"listDatabases", bsonx.Int32(1)}}
+
+	if ld.Filter != nil {
+		cmd = append(cmd, bsonx.Elem{"filter", bsonx.Document(ld.Filter)})
+	}
+	cmd = append(cmd, ld.Opts...)
+
+	return &Read{
+		Clock:   ld.Clock,
+		DB:      "admin",
+		Command: cmd,
+		Session: ld.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (ld *ListDatabases) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *ListDatabases {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		ld.err = err
+		return ld
+	}
+	return ld.decode(desc, rdr)
+}
+
+func (ld *ListDatabases) decode(desc description.SelectedServer, rdr bson.Raw) *ListDatabases {
+	ld.err = bson.Unmarshal(rdr, &ld.result)
+	return ld
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (ld *ListDatabases) Result() (result.ListDatabases, error) {
+	if ld.err != nil {
+		return result.ListDatabases{}, ld.err
+	}
+	return ld.result, nil
+}
+
+// Err returns the error set on this command.
+func (ld *ListDatabases) Err() error { return ld.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (ld *ListDatabases) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.ListDatabases, error) {
+	cmd, err := ld.encode(desc)
+	if err != nil {
+		return result.ListDatabases{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.ListDatabases{}, err
+	}
+
+	return ld.decode(desc, rdr).Result()
+}
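A short hypothetical caller for the command above. Note that ListDatabases has no DB field: encode hard-codes the "admin" database. As before, `ctx`, `desc`, and `rw` are assumed:

```go
package main

import (
	"context"
	"log"

	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// listDatabases runs against the admin database implicitly.
func listDatabases(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) {
	ld := &command.ListDatabases{}
	res, err := ld.RoundTrip(ctx, desc, rw)
	if err != nil {
		log.Fatal(err)
	}
	_ = res // result.ListDatabases unmarshalled from the reply
}
```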
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_indexes.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_indexes.go
new file mode 100644
index 0000000..48730ed
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_indexes.go
@@ -0,0 +1,106 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// ErrEmptyCursor is a signaling error returned when a cursor for list indexes is empty.
+var ErrEmptyCursor = errors.New("empty cursor")
+
+// ListIndexes represents the listIndexes command.
+//
+// The listIndexes command lists the indexes for a namespace.
+type ListIndexes struct {
+	Clock      *session.ClusterClock
+	NS         Namespace
+	CursorOpts []bsonx.Elem
+	Opts       []bsonx.Elem
+	Session    *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (li *ListIndexes) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	encoded, err := li.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	return encoded.Encode(desc)
+}
+
+func (li *ListIndexes) encode(desc description.SelectedServer) (*Read, error) {
+	cmd := bsonx.Doc{{"listIndexes", bsonx.String(li.NS.Collection)}}
+	cmd = append(cmd, li.Opts...)
+
+	return &Read{
+		Clock:   li.Clock,
+		DB:      li.NS.DB,
+		Command: cmd,
+		Session: li.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (li *ListIndexes) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *ListIndexes {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		if IsNotFound(err) {
+			li.err = ErrEmptyCursor
+			return li
+		}
+		li.err = err
+		return li
+	}
+
+	return li.decode(desc, rdr)
+}
+
+func (li *ListIndexes) decode(desc description.SelectedServer, rdr bson.Raw) *ListIndexes {
+	li.result = rdr
+	return li
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (li *ListIndexes) Result() (bson.Raw, error) {
+	if li.err != nil {
+		return nil, li.err
+	}
+	return li.result, nil
+}
+
+// Err returns the error set on this command.
+func (li *ListIndexes) Err() error { return li.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (li *ListIndexes) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := li.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		if IsNotFound(err) {
+			return nil, ErrEmptyCursor
+		}
+		return nil, err
+	}
+
+	return li.decode(desc, rdr).Result()
+}
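A sketch of how a caller might treat the ErrEmptyCursor sentinel defined above as "no indexes" rather than a hard failure, mirroring the IsNotFound translation in Decode and RoundTrip. The namespace and the `ctx`/`desc`/`rw` values are hypothetical:

```go
package main

import (
	"context"
	"log"

	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// listIndexes distinguishes "no indexes" from genuine command failures.
func listIndexes(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) {
	li := &command.ListIndexes{NS: command.NewNamespace("test", "users")}
	rdr, err := li.RoundTrip(ctx, desc, rw)
	if err == command.ErrEmptyCursor {
		return // collection has no indexes, or does not exist
	}
	if err != nil {
		log.Fatal(err)
	}
	_ = rdr
}
```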
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/namespace.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/namespace.go
new file mode 100644
index 0000000..2a4f413
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/namespace.go
@@ -0,0 +1,79 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"errors"
+	"strings"
+)
+
+// Namespace encapsulates a database and collection name, which together
+// uniquely identify a collection within a MongoDB cluster.
+type Namespace struct {
+	DB         string
+	Collection string
+}
+
+// NewNamespace returns a new Namespace for the
+// given database and collection.
+func NewNamespace(db, collection string) Namespace { return Namespace{DB: db, Collection: collection} }
+
+// ParseNamespace parses a namespace string into a Namespace.
+//
+// The namespace string must contain at least one ".", the first of which is the separator
+// between the database and collection names. If not, the default (invalid) Namespace is returned.
+func ParseNamespace(name string) Namespace {
+	index := strings.Index(name, ".")
+	if index == -1 {
+		return Namespace{}
+	}
+
+	return Namespace{
+		DB:         name[:index],
+		Collection: name[index+1:],
+	}
+}
+
+// FullName returns the full namespace string, which is the result of joining the database
+// name and the collection name with a "." character.
+func (ns *Namespace) FullName() string {
+	return strings.Join([]string{ns.DB, ns.Collection}, ".")
+}
+
+// Validate validates the namespace.
+func (ns *Namespace) Validate() error {
+	if err := ns.validateDB(); err != nil {
+		return err
+	}
+
+	return ns.validateCollection()
+}
+
+// validateDB ensures the database name is not an empty string, contain a ".",
+// or contain a " ".
+func (ns *Namespace) validateDB() error {
+	if ns.DB == "" {
+		return errors.New("database name cannot be empty")
+	}
+	if strings.Contains(ns.DB, " ") {
+		return errors.New("database name cannot contain ' '")
+	}
+	if strings.Contains(ns.DB, ".") {
+		return errors.New("database name cannot contain '.'")
+	}
+
+	return nil
+}
+
+// validateCollection ensures the collection name is not an empty string.
+func (ns *Namespace) validateCollection() error {
+	if ns.Collection == "" {
+		return errors.New("collection name cannot be empty")
+	}
+
+	return nil
+}
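A minimal round trip between the string and struct forms of a namespace, using only the exported API above. Since ParseNamespace splits on the first ".", collection names that themselves contain dots survive intact. The names used here are arbitrary:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mongodb/mongo-go-driver/x/network/command"
)

func main() {
	ns := command.ParseNamespace("shop.orders")
	if err := ns.Validate(); err != nil {
		log.Fatal(err)
	}
	fmt.Println(ns.DB)         // "shop"
	fmt.Println(ns.Collection) // "orders"
	fmt.Println(ns.FullName()) // "shop.orders"
}
```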
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/opmsg.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/opmsg.go
new file mode 100644
index 0000000..c2d5952
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/opmsg.go
@@ -0,0 +1,57 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+func decodeCommandOpMsg(msg wiremessage.Msg) (bson.Raw, error) {
+	var mainDoc bsonx.Doc
+
+	for _, section := range msg.Sections {
+		switch converted := section.(type) {
+		case wiremessage.SectionBody:
+			err := mainDoc.UnmarshalBSON(converted.Document)
+			if err != nil {
+				return nil, err
+			}
+		case wiremessage.SectionDocumentSequence:
+			arr := bsonx.Arr{}
+			for _, doc := range converted.Documents {
+				newDoc := bsonx.Doc{}
+				err := newDoc.UnmarshalBSON(doc)
+				if err != nil {
+					return nil, err
+				}
+
+				arr = append(arr, bsonx.Document(newDoc))
+			}
+
+			mainDoc = append(mainDoc, bsonx.Elem{converted.Identifier, bsonx.Array(arr)})
+		}
+	}
+
+	byteArray, err := mainDoc.MarshalBSON()
+	if err != nil {
+		return nil, err
+	}
+
+	rdr := bson.Raw(byteArray)
+	err = rdr.Validate()
+	if err != nil {
+		return nil, NewCommandResponseError("malformed OP_MSG: invalid document", err)
+	}
+
+	err = extractError(rdr)
+	if err != nil {
+		return nil, err
+	}
+	return rdr, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/opreply.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/opreply.go
new file mode 100644
index 0000000..68c15ed
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/opreply.go
@@ -0,0 +1,43 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// decodeCommandOpReply handles decoding the OP_REPLY response to an OP_QUERY
+// command.
+func decodeCommandOpReply(reply wiremessage.Reply) (bson.Raw, error) {
+	if reply.NumberReturned == 0 {
+		return nil, ErrNoDocCommandResponse
+	}
+	if reply.NumberReturned > 1 {
+		return nil, ErrMultiDocCommandResponse
+	}
+	if len(reply.Documents) != 1 {
+		return nil, NewCommandResponseError("malformed OP_REPLY: NumberReturned does not match number of documents returned", nil)
+	}
+	rdr := reply.Documents[0]
+	err := rdr.Validate()
+	if err != nil {
+		return nil, NewCommandResponseError("malformed OP_REPLY: invalid document", err)
+	}
+	if reply.ResponseFlags&wiremessage.QueryFailure == wiremessage.QueryFailure {
+		return nil, QueryFailureError{
+			Message:  "command failure",
+			Response: reply.Documents[0],
+		}
+	}
+
+	err = extractError(rdr)
+	if err != nil {
+		return nil, err
+	}
+	return rdr, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/read.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/read.go
new file mode 100644
index 0000000..d7b6547
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/read.go
@@ -0,0 +1,287 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Read represents a generic database read command.
+type Read struct {
+	DB          string
+	Command     bsonx.Doc
+	ReadPref    *readpref.ReadPref
+	ReadConcern *readconcern.ReadConcern
+	Clock       *session.ClusterClock
+	Session     *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+func (r *Read) createReadPref(serverKind description.ServerKind, topologyKind description.TopologyKind, isOpQuery bool) bsonx.Doc {
+	doc := bsonx.Doc{}
+	rp := r.ReadPref
+
+	if rp == nil {
+		if topologyKind == description.Single && serverKind != description.Mongos {
+			return append(doc, bsonx.Elem{"mode", bsonx.String("primaryPreferred")})
+		}
+		return nil
+	}
+
+	switch rp.Mode() {
+	case readpref.PrimaryMode:
+		if serverKind == description.Mongos {
+			return nil
+		}
+		if topologyKind == description.Single {
+			return append(doc, bsonx.Elem{"mode", bsonx.String("primaryPreferred")})
+		}
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("primary")})
+	case readpref.PrimaryPreferredMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("primaryPreferred")})
+	case readpref.SecondaryPreferredMode:
+		_, ok := r.ReadPref.MaxStaleness()
+		if serverKind == description.Mongos && isOpQuery && !ok && len(r.ReadPref.TagSets()) == 0 {
+			return nil
+		}
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("secondaryPreferred")})
+	case readpref.SecondaryMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("secondary")})
+	case readpref.NearestMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("nearest")})
+	}
+
+	sets := make([]bsonx.Val, 0, len(r.ReadPref.TagSets()))
+	for _, ts := range r.ReadPref.TagSets() {
+		if len(ts) == 0 {
+			continue
+		}
+		set := bsonx.Doc{}
+		for _, t := range ts {
+			set = append(set, bsonx.Elem{t.Name, bsonx.String(t.Value)})
+		}
+		sets = append(sets, bsonx.Document(set))
+	}
+	if len(sets) > 0 {
+		doc = append(doc, bsonx.Elem{"tags", bsonx.Array(sets)})
+	}
+
+	if d, ok := r.ReadPref.MaxStaleness(); ok {
+		doc = append(doc, bsonx.Elem{"maxStalenessSeconds", bsonx.Int32(int32(d.Seconds()))})
+	}
+
+	return doc
+}
+
+// addReadPref will add a read preference to the query document.
+//
+// NOTE: This method must always return either a valid bson.Raw or an error.
+func (r *Read) addReadPref(rp *readpref.ReadPref, serverKind description.ServerKind, topologyKind description.TopologyKind, query bson.Raw) (bson.Raw, error) {
+	doc := r.createReadPref(serverKind, topologyKind, true)
+	if doc == nil {
+		return query, nil
+	}
+
+	qdoc := bsonx.Doc{}
+	err := bson.Unmarshal(query, &qdoc)
+	if err != nil {
+		return query, err
+	}
+	return bsonx.Doc{
+		{"$query", bsonx.Document(qdoc)},
+		{"$readPreference", bsonx.Document(doc)},
+	}.MarshalBSON()
+}
+
+// Encode r as OP_MSG
+func (r *Read) encodeOpMsg(desc description.SelectedServer, cmd bsonx.Doc) (wiremessage.WireMessage, error) {
+	msg := wiremessage.Msg{
+		MsgHeader: wiremessage.Header{RequestID: wiremessage.NextRequestID()},
+		Sections:  make([]wiremessage.Section, 0),
+	}
+
+	readPrefDoc := r.createReadPref(desc.Server.Kind, desc.Kind, false)
+	fullDocRdr, err := opmsgAddGlobals(cmd, r.DB, readPrefDoc)
+	if err != nil {
+		return nil, err
+	}
+
+	// type 0 doc
+	msg.Sections = append(msg.Sections, wiremessage.SectionBody{
+		PayloadType: wiremessage.SingleDocument,
+		Document:    fullDocRdr,
+	})
+
+	// no flags to add
+
+	return msg, nil
+}
+
+func (r *Read) slaveOK(desc description.SelectedServer) wiremessage.QueryFlag {
+	if desc.Kind == description.Single && desc.Server.Kind != description.Mongos {
+		return wiremessage.SlaveOK
+	}
+
+	if r.ReadPref == nil {
+		// assume primary
+		return 0
+	}
+
+	if r.ReadPref.Mode() != readpref.PrimaryMode {
+		return wiremessage.SlaveOK
+	}
+
+	return 0
+}
+
+// Encode r as OP_QUERY
+func (r *Read) encodeOpQuery(desc description.SelectedServer, cmd bsonx.Doc) (wiremessage.WireMessage, error) {
+	rdr, err := marshalCommand(cmd)
+	if err != nil {
+		return nil, err
+	}
+
+	if desc.Server.Kind == description.Mongos {
+		rdr, err = r.addReadPref(r.ReadPref, desc.Server.Kind, desc.Kind, rdr)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	query := wiremessage.Query{
+		MsgHeader:          wiremessage.Header{RequestID: wiremessage.NextRequestID()},
+		FullCollectionName: r.DB + ".$cmd",
+		Flags:              r.slaveOK(desc),
+		NumberToReturn:     -1,
+		Query:              rdr,
+	}
+
+	return query, nil
+}
+
+func (r *Read) decodeOpMsg(wm wiremessage.WireMessage) {
+	msg, ok := wm.(wiremessage.Msg)
+	if !ok {
+		r.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return
+	}
+
+	r.result, r.err = decodeCommandOpMsg(msg)
+}
+
+func (r *Read) decodeOpReply(wm wiremessage.WireMessage) {
+	reply, ok := wm.(wiremessage.Reply)
+	if !ok {
+		r.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return
+	}
+	r.result, r.err = decodeCommandOpReply(reply)
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (r *Read) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd := r.Command.Copy()
+	cmd, err := addReadConcern(cmd, desc, r.ReadConcern, r.Session)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd, err = addSessionFields(cmd, desc, r.Session)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd = addClusterTime(cmd, desc, r.Session, r.Clock)
+
+	if desc.WireVersion == nil || desc.WireVersion.Max < wiremessage.OpmsgWireVersion {
+		return r.encodeOpQuery(desc, cmd)
+	}
+
+	return r.encodeOpMsg(desc, cmd)
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (r *Read) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Read {
+	switch wm.(type) {
+	case wiremessage.Reply:
+		r.decodeOpReply(wm)
+	default:
+		r.decodeOpMsg(wm)
+	}
+
+	if r.err != nil {
+		// The decode functions set the error if an invalid response document was returned or if the ok field
+		// in the response was 0. If ok was 0, the error is of type Error; otherwise it is a special type.
+		if _, ok := r.err.(Error); !ok {
+			return r // for missing/invalid response docs, don't update cluster times
+		}
+	}
+
+	_ = updateClusterTimes(r.Session, r.Clock, r.result)
+	_ = updateOperationTime(r.Session, r.result)
+	return r
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (r *Read) Result() (bson.Raw, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+
+	return r.result, nil
+}
+
+// Err returns the error set on this command.
+func (r *Read) Err() error {
+	return r.err
+}
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (r *Read) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	wm, err := r.Encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	err = rw.WriteWireMessage(ctx, wm)
+	if err != nil {
+		if _, ok := err.(Error); ok {
+			return nil, err
+		}
+		// Connection errors are transient
+		return nil, Error{Message: err.Error(), Labels: []string{TransientTransactionError, NetworkError}}
+	}
+	wm, err = rw.ReadWireMessage(ctx)
+	if err != nil {
+		if _, ok := err.(Error); ok {
+			return nil, err
+		}
+		// Connection errors are transient
+		return nil, Error{Message: err.Error(), Labels: []string{TransientTransactionError, NetworkError}}
+	}
+
+	if r.Session != nil {
+		err = r.Session.UpdateUseTime()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return r.Decode(desc, wm).Result()
+}
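Since Read carries an arbitrary command document, any read command can be issued through it; Encode picks OP_MSG or OP_QUERY from the server's max wire version, so the caller never chooses. A hypothetical ping, with `ctx`, `desc`, and `rw` assumed as before:

```go
package main

import (
	"context"
	"log"

	"github.com/mongodb/mongo-go-driver/x/bsonx"
	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// ping issues an arbitrary read command against the admin database.
func ping(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) {
	cmd := &command.Read{
		DB:      "admin",
		Command: bsonx.Doc{{"ping", bsonx.Int32(1)}},
	}
	rdr, err := cmd.RoundTrip(ctx, desc, rw)
	if err != nil {
		log.Fatal(err)
	}
	_ = rdr
}
```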
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/start_session.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/start_session.go
new file mode 100644
index 0000000..69758b8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/start_session.go
@@ -0,0 +1,82 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// StartSession represents a startSession command
+type StartSession struct {
+	Clock  *session.ClusterClock
+	result result.StartSession
+	err    error
+}
+
+// Encode will encode this command into a wiremessage for the given server description.
+func (ss *StartSession) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd := ss.encode(desc)
+	return cmd.Encode(desc)
+}
+
+func (ss *StartSession) encode(desc description.SelectedServer) *Write {
+	cmd := bsonx.Doc{{"startSession", bsonx.Int32(1)}}
+	return &Write{
+		Clock:   ss.Clock,
+		DB:      "admin",
+		Command: cmd,
+	}
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding are deferred until
+// either the Result or Err methods are called.
+func (ss *StartSession) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *StartSession {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		ss.err = err
+		return ss
+	}
+
+	return ss.decode(desc, rdr)
+}
+
+func (ss *StartSession) decode(desc description.SelectedServer, rdr bson.Raw) *StartSession {
+	ss.err = bson.Unmarshal(rdr, &ss.result)
+	return ss
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (ss *StartSession) Result() (result.StartSession, error) {
+	if ss.err != nil {
+		return result.StartSession{}, ss.err
+	}
+
+	return ss.result, nil
+}
+
+// Err returns the error set on this command
+func (ss *StartSession) Err() error {
+	return ss.err
+}
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter
+func (ss *StartSession) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.StartSession, error) {
+	cmd := ss.encode(desc)
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.StartSession{}, err
+	}
+
+	return ss.decode(desc, rdr).Result()
+}
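Because the unexported encode above cannot fail, StartSession.RoundTrip has no encode error branch, unlike the other commands in this package. A minimal hypothetical caller, with `ctx`, `desc`, and `rw` assumed:

```go
package main

import (
	"context"
	"log"

	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// startSession obtains a server session document for later reuse.
func startSession(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) {
	ss := &command.StartSession{}
	res, err := ss.RoundTrip(ctx, desc, rw)
	if err != nil {
		log.Fatal(err)
	}
	_ = res // result.StartSession unmarshalled from the reply
}
```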
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/update.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/update.go
new file mode 100644
index 0000000..29470ae
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/update.go
@@ -0,0 +1,161 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Update represents the update command.
+//
+// The update command updates a set of documents in the database.
+type Update struct {
+	ContinueOnError bool
+	Clock           *session.ClusterClock
+	NS              Namespace
+	Docs            []bsonx.Doc
+	Opts            []bsonx.Elem
+	WriteConcern    *writeconcern.WriteConcern
+	Session         *session.Client
+
+	batches []*WriteBatch
+	result  result.Update
+	err     error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (u *Update) Encode(desc description.SelectedServer) ([]wiremessage.WireMessage, error) {
+	err := u.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return batchesToWireMessage(u.batches, desc)
+}
+
+func (u *Update) encode(desc description.SelectedServer) error {
+	batches, err := splitBatches(u.Docs, int(desc.MaxBatchCount), int(desc.MaxDocumentSize))
+	if err != nil {
+		return err
+	}
+
+	for _, docs := range batches {
+		cmd, err := u.encodeBatch(docs, desc)
+		if err != nil {
+			return err
+		}
+
+		u.batches = append(u.batches, cmd)
+	}
+
+	return nil
+}
+
+func (u *Update) encodeBatch(docs []bsonx.Doc, desc description.SelectedServer) (*WriteBatch, error) {
+	copyDocs := make([]bsonx.Doc, 0, len(docs)) // copy of all the documents
+	for _, doc := range docs {
+		newDoc := doc.Copy()
+		copyDocs = append(copyDocs, newDoc)
+	}
+
+	var options []bsonx.Elem
+	for _, opt := range u.Opts {
+		switch opt.Key {
+		case "upsert", "collation", "arrayFilters":
+			// options that are encoded on each individual document
+			for idx := range copyDocs {
+				copyDocs[idx] = append(copyDocs[idx], opt)
+			}
+		default:
+			options = append(options, opt)
+		}
+	}
+
+	command, err := encodeBatch(copyDocs, options, UpdateCommand, u.NS.Collection)
+	if err != nil {
+		return nil, err
+	}
+
+	return &WriteBatch{
+		&Write{
+			Clock:        u.Clock,
+			DB:           u.NS.DB,
+			Command:      command,
+			WriteConcern: u.WriteConcern,
+			Session:      u.Session,
+		},
+		len(docs),
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (u *Update) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Update {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		u.err = err
+		return u
+	}
+	return u.decode(desc, rdr)
+}
+
+func (u *Update) decode(desc description.SelectedServer, rdr bson.Raw) *Update {
+	u.err = bson.Unmarshal(rdr, &u.result)
+	return u
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (u *Update) Result() (result.Update, error) {
+	if u.err != nil {
+		return result.Update{}, u.err
+	}
+	return u.result, nil
+}
+
+// Err returns the error set on this command.
+func (u *Update) Err() error { return u.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (u *Update) RoundTrip(
+	ctx context.Context,
+	desc description.SelectedServer,
+	rw wiremessage.ReadWriter,
+) (result.Update, error) {
+	if u.batches == nil {
+		err := u.encode(desc)
+		if err != nil {
+			return result.Update{}, err
+		}
+	}
+
+	r, batches, err := roundTripBatches(
+		ctx, desc, rw,
+		u.batches,
+		u.ContinueOnError,
+		u.Session,
+		UpdateCommand,
+	)
+
+	// if there are leftover batches, save them for retry
+	if batches != nil {
+		u.batches = batches
+	}
+
+	if err != nil {
+		return result.Update{}, err
+	}
+
+	return r.(result.Update), nil
+}
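A sketch of one update statement going through the command above. As encodeBatch shows, "upsert", "collation", and "arrayFilters" are copied onto each statement document rather than staying at the top level, so the caller can still pass them via Opts. The namespace, query, and update documents here are hypothetical:

```go
package main

import (
	"context"
	"log"

	"github.com/mongodb/mongo-go-driver/x/bsonx"
	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// upsertOrder sends one update statement; encodeBatch moves the "upsert"
// option onto the statement document itself.
func upsertOrder(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) {
	u := &command.Update{
		NS: command.NewNamespace("shop", "orders"),
		Docs: []bsonx.Doc{{
			{"q", bsonx.Document(bsonx.Doc{{"_id", bsonx.Int32(1)}})},
			{"u", bsonx.Document(bsonx.Doc{{"$set", bsonx.Document(bsonx.Doc{{"status", bsonx.String("paid")}})}})},
		}},
		Opts: []bsonx.Elem{{"upsert", bsonx.Boolean(true)}},
	}
	res, err := u.RoundTrip(ctx, desc, rw)
	if err != nil {
		log.Fatal(err)
	}
	_ = res // result.Update with matched/modified counts
}
```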
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/write.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/write.go
new file mode 100644
index 0000000..3787fae
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/write.go
@@ -0,0 +1,245 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"fmt"
+
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Write represents a generic database write command.
+// This can be used to send arbitrary write commands to the database.
+type Write struct {
+	DB           string
+	Command      bsonx.Doc
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode w as OP_MSG
+func (w *Write) encodeOpMsg(desc description.SelectedServer, cmd bsonx.Doc) (wiremessage.WireMessage, error) {
+	var arr bsonx.Arr
+	var identifier string
+
+	cmd, arr, identifier = opmsgRemoveArray(cmd)
+
+	msg := wiremessage.Msg{
+		MsgHeader: wiremessage.Header{RequestID: wiremessage.NextRequestID()},
+		Sections:  make([]wiremessage.Section, 0),
+	}
+
+	fullDocRdr, err := opmsgAddGlobals(cmd, w.DB, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// type 0 doc
+	msg.Sections = append(msg.Sections, wiremessage.SectionBody{
+		PayloadType: wiremessage.SingleDocument,
+		Document:    fullDocRdr,
+	})
+
+	// type 1 doc
+	if identifier != "" {
+		docSequence, err := opmsgCreateDocSequence(arr, identifier)
+		if err != nil {
+			return nil, err
+		}
+
+		msg.Sections = append(msg.Sections, docSequence)
+	}
+
+	// flags
+	if !writeconcern.AckWrite(w.WriteConcern) {
+		msg.FlagBits |= wiremessage.MoreToCome
+	}
+
+	return msg, nil
+}
+
+// Encode w as OP_QUERY
+func (w *Write) encodeOpQuery(desc description.SelectedServer, cmd bsonx.Doc) (wiremessage.WireMessage, error) {
+	rdr, err := marshalCommand(cmd)
+	if err != nil {
+		return nil, err
+	}
+
+	query := wiremessage.Query{
+		MsgHeader:          wiremessage.Header{RequestID: wiremessage.NextRequestID()},
+		FullCollectionName: w.DB + ".$cmd",
+		Flags:              w.slaveOK(desc),
+		NumberToReturn:     -1,
+		Query:              rdr,
+	}
+
+	return query, nil
+}
+
+func (w *Write) slaveOK(desc description.SelectedServer) wiremessage.QueryFlag {
+	if desc.Kind == description.Single && desc.Server.Kind != description.Mongos {
+		return wiremessage.SlaveOK
+	}
+
+	return 0
+}
+
+func (w *Write) decodeOpReply(wm wiremessage.WireMessage) {
+	reply, ok := wm.(wiremessage.Reply)
+	if !ok {
+		w.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return
+	}
+	w.result, w.err = decodeCommandOpReply(reply)
+}
+
+func (w *Write) decodeOpMsg(wm wiremessage.WireMessage) {
+	msg, ok := wm.(wiremessage.Msg)
+	if !ok {
+		w.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return
+	}
+
+	w.result, w.err = decodeCommandOpMsg(msg)
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (w *Write) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd := w.Command.Copy()
+	var err error
+	if w.Session != nil && w.Session.TransactionStarting() {
+		// Starting transactions have a read concern, even in writes.
+		cmd, err = addReadConcern(cmd, desc, nil, w.Session)
+		if err != nil {
+			return nil, err
+		}
+	}
+	cmd, err = addWriteConcern(cmd, w.WriteConcern)
+	if err != nil {
+		return nil, err
+	}
+
+	if !writeconcern.AckWrite(w.WriteConcern) {
+		// unack write with explicit session --> raise an error
+		// unack write with implicit session --> do not send session ID (implicit session shouldn't have been created
+		// in the first place)
+
+		if w.Session != nil && w.Session.SessionType == session.Explicit {
+			return nil, errors.New("explicit sessions cannot be used with unacknowledged writes")
+		}
+	} else {
+		// only encode session ID for acknowledged writes
+		cmd, err = addSessionFields(cmd, desc, w.Session)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if w.Session != nil && w.Session.RetryWrite {
+		cmd = append(cmd, bsonx.Elem{"txnNumber", bsonx.Int64(w.Session.TxnNumber)})
+	}
+
+	cmd = addClusterTime(cmd, desc, w.Session, w.Clock)
+
+	if desc.WireVersion == nil || desc.WireVersion.Max < wiremessage.OpmsgWireVersion {
+		return w.encodeOpQuery(desc, cmd)
+	}
+
+	return w.encodeOpMsg(desc, cmd)
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (w *Write) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Write {
+	switch wm.(type) {
+	case wiremessage.Reply:
+		w.decodeOpReply(wm)
+	default:
+		w.decodeOpMsg(wm)
+	}
+
+	if w.err != nil {
+		if _, ok := w.err.(Error); !ok {
+			return w
+		}
+	}
+
+	_ = updateClusterTimes(w.Session, w.Clock, w.result)
+
+	if writeconcern.AckWrite(w.WriteConcern) {
+		// don't update session operation time for unacknowledged write
+		_ = updateOperationTime(w.Session, w.result)
+	}
+	return w
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (w *Write) Result() (bson.Raw, error) {
+	if w.err != nil {
+		return nil, w.err
+	}
+
+	return w.result, nil
+}
+
+// Err returns the error set on this command.
+func (w *Write) Err() error {
+	return w.err
+}
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (w *Write) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	wm, err := w.Encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	err = rw.WriteWireMessage(ctx, wm)
+	if err != nil {
+		if _, ok := err.(Error); ok {
+			return nil, err
+		}
+		// Connection errors are transient
+		return nil, Error{Message: err.Error(), Labels: []string{TransientTransactionError, NetworkError}}
+	}
+
+	if msg, ok := wm.(wiremessage.Msg); ok {
+		// don't expect response if using OP_MSG for an unacknowledged write
+		if msg.FlagBits&wiremessage.MoreToCome > 0 {
+			return nil, ErrUnacknowledgedWrite
+		}
+	}
+
+	wm, err = rw.ReadWireMessage(ctx)
+	if err != nil {
+		if _, ok := err.(Error); ok {
+			return nil, err
+		}
+		// Connection errors are transient
+		return nil, Error{Message: err.Error(), Labels: []string{TransientTransactionError, NetworkError}}
+	}
+
+	if w.Session != nil {
+		err = w.Session.UpdateUseTime()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return w.Decode(desc, wm).Result()
+}
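A sketch of the unacknowledged-write path described above: with a w:0 write concern and an OP_MSG-capable server, Encode sets MoreToCome, the server sends no reply, and RoundTrip returns ErrUnacknowledgedWrite. The database, collection, and document are hypothetical:

```go
package main

import (
	"context"

	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
	"github.com/mongodb/mongo-go-driver/x/bsonx"
	"github.com/mongodb/mongo-go-driver/x/network/command"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

// fireAndForget issues a w:0 insert and treats the missing reply as success.
func fireAndForget(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) error {
	w := &command.Write{
		DB: "shop",
		Command: bsonx.Doc{
			{"insert", bsonx.String("orders")},
			{"documents", bsonx.Array(bsonx.Arr{bsonx.Document(bsonx.Doc{{"_id", bsonx.Int32(1)}})})},
		},
		WriteConcern: writeconcern.New(writeconcern.W(0)),
	}
	_, err := w.RoundTrip(ctx, desc, rw)
	if err == command.ErrUnacknowledgedWrite {
		return nil // expected for unacknowledged writes over OP_MSG
	}
	return err
}
```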
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/compressor/compression.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/compressor/compression.go
new file mode 100644
index 0000000..5ec2ea0
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/compressor/compression.go
@@ -0,0 +1,157 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package compressor
+
+import (
+	"bytes"
+	"compress/zlib"
+
+	"io"
+
+	"github.com/golang/snappy"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Compressor is the interface implemented by types that can compress and decompress wire messages. This is used
+// when sending and receiving messages to and from the server.
+type Compressor interface {
+	CompressBytes(src, dest []byte) ([]byte, error)
+	UncompressBytes(src, dest []byte) ([]byte, error)
+	CompressorID() wiremessage.CompressorID
+	Name() string
+}
+
+type writer struct {
+	buf []byte
+}
+
+// Write appends bytes to the writer
+func (w *writer) Write(p []byte) (n int, err error) {
+	index := len(w.buf)
+	if len(p) > cap(w.buf)-index {
+		buf := make([]byte, 2*cap(w.buf)+len(p))
+		copy(buf, w.buf)
+		w.buf = buf
+	}
+
+	w.buf = w.buf[:index+len(p)]
+	copy(w.buf[index:], p)
+	return len(p), nil
+}
+
+// SnappyCompressor uses the snappy method to compress data
+type SnappyCompressor struct{}
+
+// ZlibCompressor uses the zlib method to compress data
+type ZlibCompressor struct {
+	level      int
+	zlibWriter *zlib.Writer
+}
+
+// CompressBytes uses snappy to compress a slice of bytes.
+func (s *SnappyCompressor) CompressBytes(src, dest []byte) ([]byte, error) {
+	dest = dest[:0]
+	dest = snappy.Encode(dest, src)
+	return dest, nil
+}
+
+// UncompressBytes uses snappy to uncompress a slice of bytes.
+func (s *SnappyCompressor) UncompressBytes(src, dest []byte) ([]byte, error) {
+	var err error
+	dest, err = snappy.Decode(dest, src)
+	if err != nil {
+		return dest, err
+	}
+
+	return dest, nil
+}
+
+// CompressorID returns the ID for the snappy compressor.
+func (s *SnappyCompressor) CompressorID() wiremessage.CompressorID {
+	return wiremessage.CompressorSnappy
+}
+
+// Name returns the string name for the snappy compressor.
+func (s *SnappyCompressor) Name() string {
+	return "snappy"
+}
+
+// CompressBytes uses zlib to compress a slice of bytes.
+func (z *ZlibCompressor) CompressBytes(src, dest []byte) ([]byte, error) {
+	dest = dest[:0]
+	z.zlibWriter.Reset(&writer{
+		buf: dest,
+	})
+
+	_, err := z.zlibWriter.Write(src)
+	if err != nil {
+		_ = z.zlibWriter.Close()
+		return dest, err
+	}
+
+	err = z.zlibWriter.Close()
+	if err != nil {
+		return dest, err
+	}
+	return dest, nil
+}
+
+// UncompressBytes uses zlib to uncompress a slice of bytes. It assumes dest already has a length equal
+// to the exact size of the uncompressed data, since it fills dest with io.ReadFull.
+func (z *ZlibCompressor) UncompressBytes(src, dest []byte) ([]byte, error) {
+	reader := bytes.NewReader(src)
+	zlibReader, err := zlib.NewReader(reader)
+
+	if err != nil {
+		return dest, err
+	}
+	defer func() {
+		_ = zlibReader.Close()
+	}()
+
+	_, err = io.ReadFull(zlibReader, dest)
+	if err != nil {
+		return dest, err
+	}
+
+	return dest, nil
+}
+
+// CompressorID returns the ID for the zlib compressor.
+func (z *ZlibCompressor) CompressorID() wiremessage.CompressorID {
+	return wiremessage.CompressorZLib
+}
+
+// Name returns the name for the zlib compressor.
+func (z *ZlibCompressor) Name() string {
+	return "zlib"
+}
+
+// CreateSnappy creates a snappy compressor
+func CreateSnappy() Compressor {
+	return &SnappyCompressor{}
+}
+
+// CreateZlib creates a zlib compressor
+func CreateZlib(level int) (Compressor, error) {
+	if level < 0 {
+		level = wiremessage.DefaultZlibLevel
+	}
+
+	var compressBuf bytes.Buffer
+	zlibWriter, err := zlib.NewWriterLevel(&compressBuf, level)
+
+	if err != nil {
+		return &ZlibCompressor{}, err
+	}
+
+	return &ZlibCompressor{
+		level:      level,
+		zlibWriter: zlibWriter,
+	}, nil
+}
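A minimal round trip through the exported Compressor interface, using the snappy implementation. Passing nil buffers is fine here because the snappy library allocates when the destination lacks capacity; the payload string is arbitrary:

```go
package main

import (
	"bytes"
	"log"

	"github.com/mongodb/mongo-go-driver/x/network/compressor"
)

func main() {
	c := compressor.CreateSnappy()

	src := []byte("wire message payload")
	compressed, err := c.CompressBytes(src, nil)
	if err != nil {
		log.Fatal(err)
	}

	out, err := c.UncompressBytes(compressed, nil)
	if err != nil {
		log.Fatal(err)
	}
	if !bytes.Equal(src, out) {
		log.Fatal("round trip mismatch")
	}
}
```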
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/addr.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/addr.go
new file mode 100644
index 0000000..4d8976d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/addr.go
@@ -0,0 +1,21 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+const defaultPort = "27017"
+
+// Addr is a network address. It can be either an IP address or a DNS name.
+type Addr string
+
+// Network is the network protocol for this address. In most cases this will be "tcp" or "unix".
+func (Addr) Network() string { return "" }
+
+// String is the canonical version of this address, e.g. localhost:27017, 1.2.3.4:27017, example.com:27017
+func (Addr) String() string { return "" }
+
+// Canonicalize creates a canonicalized address.
+func (Addr) Canonicalize() Addr { return Addr("") }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/command_metadata.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/command_metadata.go
new file mode 100644
index 0000000..222f6f7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/command_metadata.go
@@ -0,0 +1,28 @@
+package connection
+
+import "time"
+
+// commandMetadata contains metadata about a command sent to the server.
+type commandMetadata struct {
+	Name               string
+	Time               time.Time
+	Legacy             bool
+	FullCollectionName string
+}
+
+// createMetadata creates metadata for a command.
+func createMetadata(name string, legacy bool, fullCollName string) *commandMetadata {
+	return &commandMetadata{
+		Name:               name,
+		Time:               time.Now(),
+		Legacy:             legacy,
+		FullCollectionName: fullCollName,
+	}
+}
+
+// TimeDifference returns the difference between now and the time a command was sent in nanoseconds.
+func (cm *commandMetadata) TimeDifference() int64 {
+	t := time.Now()
+	duration := t.Sub(cm.Time)
+	return duration.Nanoseconds()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/connection.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/connection.go
new file mode 100644
index 0000000..5e64037
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/connection.go
@@ -0,0 +1,851 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package connection contains the types for building and pooling connections that can speak the
+// MongoDB Wire Protocol. Since this low-level library is meant to be used in the context of either
+// a driver or a server, there are some extra identifiers on a connection so one can keep track of
+// what a connection is. This package purposefully hides the underlying network and abstracts the
+// writing to and reading from a connection to wiremessage.WireMessage values. This package also provides types for
+// listening for and accepting Connections, as well as some types for handling connections and
+// proxying connections to another server.
+package connection
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/event"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/compressor"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+var globalClientConnectionID uint64
+var emptyDoc bson.Raw
+
+func nextClientConnectionID() uint64 {
+	return atomic.AddUint64(&globalClientConnectionID, 1)
+}
+
+// Connection is used to read and write wire protocol messages to a network.
+type Connection interface {
+	WriteWireMessage(context.Context, wiremessage.WireMessage) error
+	ReadWireMessage(context.Context) (wiremessage.WireMessage, error)
+	Close() error
+	Expired() bool
+	Alive() bool
+	ID() string
+}
+
+// Dialer is used to make network connections.
+type Dialer interface {
+	DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// DialerFunc is a type implemented by functions that can be used as a Dialer.
+type DialerFunc func(ctx context.Context, network, address string) (net.Conn, error)
+
+// DialContext implements the Dialer interface.
+func (df DialerFunc) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	return df(ctx, network, address)
+}
+
+// DefaultDialer is the Dialer implementation that is used by this package. It should
+// only be changed when all of the connections being made need to use a different
+// Dialer. Most of the time, using a
+// WithDialer option is more appropriate than changing this variable.
+var DefaultDialer Dialer = &net.Dialer{}
+
+// Handshaker is the interface implemented by types that can perform a MongoDB
+// handshake over a provided ReadWriter. This is used during connection
+// initialization.
+type Handshaker interface {
+	Handshake(context.Context, address.Address, wiremessage.ReadWriter) (description.Server, error)
+}
+
+// HandshakerFunc is an adapter to allow the use of ordinary functions as
+// connection handshakers.
+type HandshakerFunc func(context.Context, address.Address, wiremessage.ReadWriter) (description.Server, error)
+
+// Handshake implements the Handshaker interface.
+func (hf HandshakerFunc) Handshake(ctx context.Context, addr address.Address, rw wiremessage.ReadWriter) (description.Server, error) {
+	return hf(ctx, addr, rw)
+}
+
+type connection struct {
+	addr        address.Address
+	id          string
+	conn        net.Conn
+	compressBuf []byte                // buffer to compress messages
+	compressor  compressor.Compressor // use for compressing messages
+	// server can compress response with any compressor supported by driver
+	compressorMap    map[wiremessage.CompressorID]compressor.Compressor
+	commandMap       map[int64]*commandMetadata // map for monitoring commands sent to server
+	dead             bool
+	idleTimeout      time.Duration
+	idleDeadline     time.Time
+	lifetimeDeadline time.Time
+	cmdMonitor       *event.CommandMonitor
+	readTimeout      time.Duration
+	uncompressBuf    []byte // buffer to uncompress messages
+	writeTimeout     time.Duration
+	readBuf          []byte
+	writeBuf         []byte
+	wireMessageBuf   []byte // buffer to store uncompressed wire message before compressing
+}
+
+// New opens a connection to a given Addr
+//
+// The server description returned is nil if there was no handshaker provided.
+func New(ctx context.Context, addr address.Address, opts ...Option) (Connection, *description.Server, error) {
+	cfg, err := newConfig(opts...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	nc, err := cfg.dialer.DialContext(ctx, addr.Network(), addr.String())
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if cfg.tlsConfig != nil {
+		tlsConfig := cfg.tlsConfig.Clone()
+		nc, err = configureTLS(ctx, nc, addr, tlsConfig)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	var lifetimeDeadline time.Time
+	if cfg.lifeTimeout > 0 {
+		lifetimeDeadline = time.Now().Add(cfg.lifeTimeout)
+	}
+
+	id := fmt.Sprintf("%s[-%d]", addr, nextClientConnectionID())
+	compressorMap := make(map[wiremessage.CompressorID]compressor.Compressor)
+
+	for _, comp := range cfg.compressors {
+		compressorMap[comp.CompressorID()] = comp
+	}
+
+	c := &connection{
+		id:               id,
+		conn:             nc,
+		compressBuf:      make([]byte, 256),
+		compressorMap:    compressorMap,
+		commandMap:       make(map[int64]*commandMetadata),
+		addr:             addr,
+		idleTimeout:      cfg.idleTimeout,
+		lifetimeDeadline: lifetimeDeadline,
+		readTimeout:      cfg.readTimeout,
+		writeTimeout:     cfg.writeTimeout,
+		readBuf:          make([]byte, 256),
+		uncompressBuf:    make([]byte, 256),
+		writeBuf:         make([]byte, 0, 256),
+		wireMessageBuf:   make([]byte, 256),
+	}
+
+	c.bumpIdleDeadline()
+
+	var desc *description.Server
+	if cfg.handshaker != nil {
+		d, err := cfg.handshaker.Handshake(ctx, c.addr, c)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		if len(d.Compression) > 0 {
+		clientMethodLoop:
+			for _, comp := range cfg.compressors {
+				method := comp.Name()
+
+				for _, serverMethod := range d.Compression {
+					if method != serverMethod {
+						continue
+					}
+
+					c.compressor = comp // found matching compressor
+					break clientMethodLoop
+				}
+			}
+
+		}
+
+		desc = &d
+	}
+
+	c.cmdMonitor = cfg.cmdMonitor // attach the command monitor later to avoid monitoring auth
+	return c, desc, nil
+}
+
+func configureTLS(ctx context.Context, nc net.Conn, addr address.Address, config *TLSConfig) (net.Conn, error) {
+	if !config.InsecureSkipVerify {
+		hostname := addr.String()
+		colonPos := strings.LastIndex(hostname, ":")
+		if colonPos == -1 {
+			colonPos = len(hostname)
+		}
+
+		hostname = hostname[:colonPos]
+		config.ServerName = hostname
+	}
+
+	client := tls.Client(nc, config.Config)
+
+	errChan := make(chan error, 1)
+	go func() {
+		errChan <- client.Handshake()
+	}()
+
+	select {
+	case err := <-errChan:
+		if err != nil {
+			return nil, err
+		}
+	case <-ctx.Done():
+		return nil, errors.New("server connection cancelled/timeout during TLS handshake")
+	}
+	return client, nil
+}
+
+func (c *connection) Alive() bool {
+	return !c.dead
+}
+
+func (c *connection) Expired() bool {
+	now := time.Now()
+	if !c.idleDeadline.IsZero() && now.After(c.idleDeadline) {
+		return true
+	}
+
+	if !c.lifetimeDeadline.IsZero() && now.After(c.lifetimeDeadline) {
+		return true
+	}
+
+	return c.dead
+}
+
+func canCompress(cmd string) bool {
+	if cmd == "isMaster" || cmd == "saslStart" || cmd == "saslContinue" || cmd == "getnonce" || cmd == "authenticate" ||
+		cmd == "createUser" || cmd == "updateUser" || cmd == "copydbSaslStart" || cmd == "copydbgetnonce" || cmd == "copydb" {
+		return false
+	}
+	return true
+}
+
+func (c *connection) compressMessage(wm wiremessage.WireMessage) (wiremessage.WireMessage, error) {
+	var requestID int32
+	var responseTo int32
+	var origOpcode wiremessage.OpCode
+
+	switch converted := wm.(type) {
+	case wiremessage.Query:
+		firstElem, err := converted.Query.IndexErr(0)
+		if err != nil {
+			return wiremessage.Compressed{}, err
+		}
+
+		key := firstElem.Key()
+		if !canCompress(key) {
+			return wm, nil // return original message because this command can't be compressed
+		}
+		requestID = converted.MsgHeader.RequestID
+		origOpcode = wiremessage.OpQuery
+		responseTo = converted.MsgHeader.ResponseTo
+	case wiremessage.Msg:
+		firstElem, err := converted.Sections[0].(wiremessage.SectionBody).Document.IndexErr(0)
+		if err != nil {
+			return wiremessage.Compressed{}, err
+		}
+
+		key := firstElem.Key()
+		if !canCompress(key) {
+			return wm, nil
+		}
+
+		requestID = converted.MsgHeader.RequestID
+		origOpcode = wiremessage.OpMsg
+		responseTo = converted.MsgHeader.ResponseTo
+	}
+
+	// can compress
+	c.wireMessageBuf = c.wireMessageBuf[:0] // truncate
+	var err error
+	c.wireMessageBuf, err = wm.AppendWireMessage(c.wireMessageBuf)
+	if err != nil {
+		return wiremessage.Compressed{}, err
+	}
+
+	c.wireMessageBuf = c.wireMessageBuf[16:] // strip header
+	c.compressBuf = c.compressBuf[:0]
+	compressedBytes, err := c.compressor.CompressBytes(c.wireMessageBuf, c.compressBuf)
+	if err != nil {
+		return wiremessage.Compressed{}, err
+	}
+
+	compressedMessage := wiremessage.Compressed{
+		MsgHeader: wiremessage.Header{
+			// MessageLength and OpCode will be set when marshalling wire message by SetDefaults()
+			RequestID:  requestID,
+			ResponseTo: responseTo,
+		},
+		OriginalOpCode:    origOpcode,
+		UncompressedSize:  int32(len(c.wireMessageBuf)), // length of uncompressed message excluding MsgHeader
+		CompressorID:      wiremessage.CompressorID(c.compressor.CompressorID()),
+		CompressedMessage: compressedBytes,
+	}
+
+	return compressedMessage, nil
+}
+
+// returns []byte of uncompressed message with reconstructed header, original opcode, error
+func (c *connection) uncompressMessage(compressed wiremessage.Compressed) ([]byte, wiremessage.OpCode, error) {
+	// server doesn't guarantee the same compression method will be used each time so the CompressorID field must be
+	// used to find the correct method for uncompressing data
+	uncompressor := c.compressorMap[compressed.CompressorID]
+
+	// reset uncompressBuf
+	c.uncompressBuf = c.uncompressBuf[:0]
+	if int(compressed.UncompressedSize) > cap(c.uncompressBuf) {
+		c.uncompressBuf = make([]byte, 0, compressed.UncompressedSize)
+	}
+
+	uncompressedMessage, err := uncompressor.UncompressBytes(compressed.CompressedMessage, c.uncompressBuf)
+
+	if err != nil {
+		return nil, 0, err
+	}
+
+	origHeader := wiremessage.Header{
+		MessageLength: int32(len(uncompressedMessage)) + 16, // add 16 for original header
+		RequestID:     compressed.MsgHeader.RequestID,
+		ResponseTo:    compressed.MsgHeader.ResponseTo,
+	}
+
+	switch compressed.OriginalOpCode {
+	case wiremessage.OpReply:
+		origHeader.OpCode = wiremessage.OpReply
+	case wiremessage.OpMsg:
+		origHeader.OpCode = wiremessage.OpMsg
+	default:
+		return nil, 0, fmt.Errorf("opcode %s not implemented", compressed.OriginalOpCode)
+	}
+
+	var fullMessage []byte
+	fullMessage = origHeader.AppendHeader(fullMessage)
+	fullMessage = append(fullMessage, uncompressedMessage...)
+	return fullMessage, origHeader.OpCode, nil
+}
+
+func canMonitor(cmd string) bool {
+	if cmd == "authenticate" || cmd == "saslStart" || cmd == "saslContinue" || cmd == "getnonce" || cmd == "createUser" ||
+		cmd == "updateUser" || cmd == "copydbgetnonce" || cmd == "copydbsaslstart" || cmd == "copydb" {
+		return false
+	}
+
+	return true
+}
+
+func (c *connection) commandStartedEvent(ctx context.Context, wm wiremessage.WireMessage) error {
+	if c.cmdMonitor == nil || c.cmdMonitor.Started == nil {
+		return nil
+	}
+
+	startedEvent := &event.CommandStartedEvent{
+		ConnectionID: c.id,
+	}
+
+	var cmd bsonx.Doc
+	var err error
+	var legacy bool
+	var fullCollName string
+
+	var acknowledged bool
+	switch converted := wm.(type) {
+	case wiremessage.Query:
+		cmd, err = converted.CommandDocument()
+		if err != nil {
+			return err
+		}
+
+		acknowledged = converted.AcknowledgedWrite()
+		startedEvent.DatabaseName = converted.DatabaseName()
+		startedEvent.RequestID = int64(converted.MsgHeader.RequestID)
+		legacy = converted.Legacy()
+		fullCollName = converted.FullCollectionName
+	case wiremessage.Msg:
+		cmd, err = converted.GetMainDocument()
+		if err != nil {
+			return err
+		}
+
+		acknowledged = converted.AcknowledgedWrite()
+		arr, identifier, err := converted.GetSequenceArray()
+		if err != nil {
+			return err
+		}
+		if arr != nil {
+			cmd = cmd.Copy() // make copy to avoid changing original command
+			cmd = append(cmd, bsonx.Elem{identifier, bsonx.Array(arr)})
+		}
+
+		dbVal, err := cmd.LookupErr("$db")
+		if err != nil {
+			return err
+		}
+
+		startedEvent.DatabaseName = dbVal.StringValue()
+		startedEvent.RequestID = int64(converted.MsgHeader.RequestID)
+	case wiremessage.GetMore:
+		cmd = converted.CommandDocument()
+		startedEvent.DatabaseName = converted.DatabaseName()
+		startedEvent.RequestID = int64(converted.MsgHeader.RequestID)
+		acknowledged = true
+		legacy = true
+		fullCollName = converted.FullCollectionName
+	case wiremessage.KillCursors:
+		cmd = converted.CommandDocument()
+		startedEvent.DatabaseName = converted.DatabaseName
+		startedEvent.RequestID = int64(converted.MsgHeader.RequestID)
+		legacy = true
+	}
+
+	rawcmd, _ := cmd.MarshalBSON()
+	startedEvent.Command = rawcmd
+	startedEvent.CommandName = cmd[0].Key
+	if !canMonitor(startedEvent.CommandName) {
+		startedEvent.Command = emptyDoc
+	}
+
+	c.cmdMonitor.Started(ctx, startedEvent)
+
+	if !acknowledged {
+		if c.cmdMonitor.Succeeded == nil {
+			return nil
+		}
+
+		// unacknowledged writes must provide a CommandSucceededEvent with an { ok: 1 } reply
+		finishedEvent := event.CommandFinishedEvent{
+			DurationNanos: 0,
+			CommandName:   startedEvent.CommandName,
+			RequestID:     startedEvent.RequestID,
+			ConnectionID:  c.id,
+		}
+
+		c.cmdMonitor.Succeeded(ctx, &event.CommandSucceededEvent{
+			CommandFinishedEvent: finishedEvent,
+			Reply:                bsoncore.BuildDocument(nil, bsoncore.AppendInt32Element(nil, "ok", 1)),
+		})
+
+		return nil
+	}
+
+	c.commandMap[startedEvent.RequestID] = createMetadata(startedEvent.CommandName, legacy, fullCollName)
+	return nil
+}
+
+func processReply(reply bsonx.Doc) (bool, string) {
+	var success bool
+	var errmsg string
+	var errCode int32
+
+	for _, elem := range reply {
+		switch elem.Key {
+		case "ok":
+			switch elem.Value.Type() {
+			case bsontype.Int32:
+				if elem.Value.Int32() == 1 {
+					success = true
+				}
+			case bsontype.Int64:
+				if elem.Value.Int64() == 1 {
+					success = true
+				}
+			case bsontype.Double:
+				if elem.Value.Double() == 1 {
+					success = true
+				}
+			}
+		case "errmsg":
+			if str, ok := elem.Value.StringValueOK(); ok {
+				errmsg = str
+			}
+		case "code":
+			if c, ok := elem.Value.Int32OK(); ok {
+				errCode = c
+			}
+		}
+	}
+
+	if success {
+		return true, ""
+	}
+
+	fullErrMsg := fmt.Sprintf("Error code %d: %s", errCode, errmsg)
+	return false, fullErrMsg
+}
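+
+// exampleProcessReply is an illustrative sketch, not part of the upstream driver: it shows how
+// processReply folds a server reply into a (success, error message) pair. The field values below
+// are hypothetical.
+func exampleProcessReply() {
+	ok, msg := processReply(bsonx.Doc{
+		{"ok", bsonx.Int32(0)},
+		{"code", bsonx.Int32(11601)},
+		{"errmsg", bsonx.String("operation was interrupted")},
+	})
+	fmt.Println(ok, msg) // false Error code 11601: operation was interrupted
+}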
+
+func (c *connection) commandFinishedEvent(ctx context.Context, wm wiremessage.WireMessage) error {
+	if c.cmdMonitor == nil {
+		return nil
+	}
+
+	var reply bsonx.Doc
+	var requestID int64
+	var err error
+
+	switch converted := wm.(type) {
+	case wiremessage.Reply:
+		requestID = int64(converted.MsgHeader.ResponseTo)
+	case wiremessage.Msg:
+		requestID = int64(converted.MsgHeader.ResponseTo)
+	}
+	cmdMetadata := c.commandMap[requestID]
+	delete(c.commandMap, requestID)
+
+	switch converted := wm.(type) {
+	case wiremessage.Reply:
+		if cmdMetadata.Legacy {
+			reply, err = converted.GetMainLegacyDocument(cmdMetadata.FullCollectionName)
+		} else {
+			reply, err = converted.GetMainDocument()
+		}
+	case wiremessage.Msg:
+		reply, err = converted.GetMainDocument()
+	}
+	if err != nil {
+		return err
+	}
+
+	success, errmsg := processReply(reply)
+
+	if (success && c.cmdMonitor.Succeeded == nil) || (!success && c.cmdMonitor.Failed == nil) {
+		return nil
+	}
+
+	finishedEvent := event.CommandFinishedEvent{
+		DurationNanos: cmdMetadata.TimeDifference(),
+		CommandName:   cmdMetadata.Name,
+		RequestID:     requestID,
+		ConnectionID:  c.id,
+	}
+
+	if success {
+		if !canMonitor(finishedEvent.CommandName) {
+			successEvent := &event.CommandSucceededEvent{
+				Reply:                emptyDoc,
+				CommandFinishedEvent: finishedEvent,
+			}
+			c.cmdMonitor.Succeeded(ctx, successEvent)
+			return nil
+		}
+
+		// If the response has a type 1 document sequence, the sequence must be included as a BSON array in the event's reply.
+		if opmsg, ok := wm.(wiremessage.Msg); ok {
+			arr, identifier, err := opmsg.GetSequenceArray()
+			if err != nil {
+				return err
+			}
+			if arr != nil {
+				reply = reply.Copy() // make copy to avoid changing original command
+				reply = append(reply, bsonx.Elem{identifier, bsonx.Array(arr)})
+			}
+		}
+
+		replyraw, _ := reply.MarshalBSON()
+		successEvent := &event.CommandSucceededEvent{
+			Reply:                replyraw,
+			CommandFinishedEvent: finishedEvent,
+		}
+
+		c.cmdMonitor.Succeeded(ctx, successEvent)
+		return nil
+	}
+
+	failureEvent := &event.CommandFailedEvent{
+		Failure:              errmsg,
+		CommandFinishedEvent: finishedEvent,
+	}
+
+	c.cmdMonitor.Failed(ctx, failureEvent)
+	return nil
+}
+
+func (c *connection) WriteWireMessage(ctx context.Context, wm wiremessage.WireMessage) error {
+	var err error
+	if c.dead {
+		return Error{
+			ConnectionID: c.id,
+			message:      "connection is dead",
+		}
+	}
+
+	select {
+	case <-ctx.Done():
+		return Error{
+			ConnectionID: c.id,
+			Wrapped:      ctx.Err(),
+			message:      "failed to write",
+		}
+	default:
+	}
+
+	deadline := time.Time{}
+	if c.writeTimeout != 0 {
+		deadline = time.Now().Add(c.writeTimeout)
+	}
+
+	if dl, ok := ctx.Deadline(); ok && (deadline.IsZero() || dl.Before(deadline)) {
+		deadline = dl
+	}
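+	// At this point deadline is the earlier of the connection's write timeout and the
+	// context deadline (zero means no deadline).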
+
+	if err := c.conn.SetWriteDeadline(deadline); err != nil {
+		return Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "failed to set write deadline",
+		}
+	}
+
+	// Truncate the write buffer
+	c.writeBuf = c.writeBuf[:0]
+
+	messageToWrite := wm
+	// Compress if possible
+	if c.compressor != nil {
+		compressed, err := c.compressMessage(wm)
+		if err != nil {
+			return Error{
+				ConnectionID: c.id,
+				Wrapped:      err,
+				message:      "unable to compress wire message",
+			}
+		}
+		messageToWrite = compressed
+	}
+
+	c.writeBuf, err = messageToWrite.AppendWireMessage(c.writeBuf)
+	if err != nil {
+		return Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "unable to encode wire message",
+		}
+	}
+
+	_, err = c.conn.Write(c.writeBuf)
+	if err != nil {
+		c.Close()
+		return Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "unable to write wire message to network",
+		}
+	}
+
+	c.bumpIdleDeadline()
+	err = c.commandStartedEvent(ctx, wm)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *connection) ReadWireMessage(ctx context.Context) (wiremessage.WireMessage, error) {
+	if c.dead {
+		return nil, Error{
+			ConnectionID: c.id,
+			message:      "connection is dead",
+		}
+	}
+
+	select {
+	case <-ctx.Done():
+		// We close the connection because we don't know if there
+		// is an unread message on the wire.
+		c.Close()
+		return nil, Error{
+			ConnectionID: c.id,
+			Wrapped:      ctx.Err(),
+			message:      "failed to read",
+		}
+	default:
+	}
+
+	deadline := time.Time{}
+	if c.readTimeout != 0 {
+		deadline = time.Now().Add(c.readTimeout)
+	}
+
+	if ctxDL, ok := ctx.Deadline(); ok && (deadline.IsZero() || ctxDL.Before(deadline)) {
+		deadline = ctxDL
+	}
+
+	if err := c.conn.SetReadDeadline(deadline); err != nil {
+		return nil, Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "failed to set read deadline",
+		}
+	}
+
+	var sizeBuf [4]byte
+	_, err := io.ReadFull(c.conn, sizeBuf[:])
+	if err != nil {
+		c.Close()
+		return nil, Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "unable to decode message length",
+		}
+	}
+
+	size := readInt32(sizeBuf[:], 0)
+
+	// This isn't the best buffer reuse, but resizing a []byte to be
+	// larger is difficult.
+	if cap(c.readBuf) > int(size) {
+		c.readBuf = c.readBuf[:size]
+	} else {
+		c.readBuf = make([]byte, size)
+	}
+
+	c.readBuf[0], c.readBuf[1], c.readBuf[2], c.readBuf[3] = sizeBuf[0], sizeBuf[1], sizeBuf[2], sizeBuf[3]
+
+	_, err = io.ReadFull(c.conn, c.readBuf[4:])
+	if err != nil {
+		c.Close()
+		return nil, Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "unable to read full message",
+		}
+	}
+
+	hdr, err := wiremessage.ReadHeader(c.readBuf, 0)
+	if err != nil {
+		c.Close()
+		return nil, Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "unable to decode header",
+		}
+	}
+
+	messageToDecode := c.readBuf
+	opcodeToCheck := hdr.OpCode
+
+	if hdr.OpCode == wiremessage.OpCompressed {
+		var compressed wiremessage.Compressed
+		err := compressed.UnmarshalWireMessage(c.readBuf)
+		if err != nil {
+			defer c.Close()
+			return nil, Error{
+				ConnectionID: c.id,
+				Wrapped:      err,
+				message:      "unable to decode OP_COMPRESSED",
+			}
+		}
+
+		uncompressed, origOpcode, err := c.uncompressMessage(compressed)
+		if err != nil {
+			defer c.Close()
+			return nil, Error{
+				ConnectionID: c.id,
+				Wrapped:      err,
+				message:      "unable to uncompress message",
+			}
+		}
+		messageToDecode = uncompressed
+		opcodeToCheck = origOpcode
+	}
+
+	var wm wiremessage.WireMessage
+	switch opcodeToCheck {
+	case wiremessage.OpReply:
+		var r wiremessage.Reply
+		err := r.UnmarshalWireMessage(messageToDecode)
+		if err != nil {
+			c.Close()
+			return nil, Error{
+				ConnectionID: c.id,
+				Wrapped:      err,
+				message:      "unable to decode OP_REPLY",
+			}
+		}
+		wm = r
+	case wiremessage.OpMsg:
+		var reply wiremessage.Msg
+		err := reply.UnmarshalWireMessage(messageToDecode)
+		if err != nil {
+			c.Close()
+			return nil, Error{
+				ConnectionID: c.id,
+				Wrapped:      err,
+				message:      "unable to decode OP_MSG",
+			}
+		}
+		wm = reply
+	default:
+		c.Close()
+		return nil, Error{
+			ConnectionID: c.id,
+			message:      fmt.Sprintf("opcode %s not implemented", hdr.OpCode),
+		}
+	}
+
+	c.bumpIdleDeadline()
+	err = c.commandFinishedEvent(ctx, wm)
+	if err != nil {
+		return nil, err // TODO: do we care if monitoring fails?
+	}
+
+	return wm, nil
+}
+
+func (c *connection) bumpIdleDeadline() {
+	if c.idleTimeout > 0 {
+		c.idleDeadline = time.Now().Add(c.idleTimeout)
+	}
+}
+
+func (c *connection) Close() error {
+	c.dead = true
+	err := c.conn.Close()
+	if err != nil {
+		return Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "failed to close net.Conn",
+		}
+	}
+
+	return nil
+}
+
+func (c *connection) ID() string {
+	return c.id
+}
+
+func (c *connection) initialize(ctx context.Context, appName string) error {
+	return nil
+}
+
+func readInt32(b []byte, pos int32) int32 {
+	return (int32(b[pos+0])) | (int32(b[pos+1]) << 8) | (int32(b[pos+2]) << 16) | (int32(b[pos+3]) << 24)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/error.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/error.go
new file mode 100644
index 0000000..eebca94
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/error.go
@@ -0,0 +1,41 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import "fmt"
+
+// Error represents a connection error.
+type Error struct {
+	ConnectionID string
+	Wrapped      error
+
+	message string
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+	if e.Wrapped != nil {
+		return fmt.Sprintf("connection(%s) %s: %s", e.ConnectionID, e.message, e.Wrapped.Error())
+	}
+	return fmt.Sprintf("connection(%s) %s", e.ConnectionID, e.message)
+}
+
+// NetworkError represents an error that occurred while reading from or writing
+// to a network socket.
+type NetworkError struct {
+	ConnectionID string
+	Wrapped      error
+}
+
+func (ne NetworkError) Error() string {
+	return fmt.Sprintf("connection(%s): %s", ne.ConnectionID, ne.Wrapped.Error())
+}
+
+// PoolError is an error returned from a Pool method.
+type PoolError string
+
+func (pe PoolError) Error() string { return string(pe) }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/keepalive_300.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/keepalive_300.go
new file mode 100644
index 0000000..554e38d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/keepalive_300.go
@@ -0,0 +1,13 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// +build !go1.12
+
+package connection
+
+import "time"
+
+const tcpKeepalive = 300 * time.Second
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/keepalive_default.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/keepalive_default.go
new file mode 100644
index 0000000..9eeeb69
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/keepalive_default.go
@@ -0,0 +1,11 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// +build go1.12
+
+package connection
+
+const tcpKeepalive = 0 // will be set by default on Go 1.12 and higher
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/listener.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/listener.go
new file mode 100644
index 0000000..76f4f7d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/listener.go
@@ -0,0 +1,27 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+// Listener is a generic mongodb network protocol listener. It can return connections
+// that speak the mongodb wire protocol.
+//
+// Multiple goroutines may invoke methods on a Listener simultaneously.
+//
+// TODO(GODRIVER-270): Implement this.
+type Listener interface {
+	// Accept waits for and returns the next Connection to the listener.
+	Accept() (Connection, error)
+
+	// Close closes the listener.
+	Close() error
+
+	// Addr returns the listener's network address.
+	Addr() Addr
+}
+
+// Listen creates a new listener on the provided network and address.
+func Listen(network, address string) (Listener, error) { return nil, nil }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/options.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/options.go
new file mode 100644
index 0000000..cd19931
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/options.go
@@ -0,0 +1,148 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import (
+	"net"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/event"
+	"github.com/mongodb/mongo-go-driver/x/network/compressor"
+)
+
+type config struct {
+	appName        string
+	connectTimeout time.Duration
+	dialer         Dialer
+	handshaker     Handshaker
+	idleTimeout    time.Duration
+	lifeTimeout    time.Duration
+	cmdMonitor     *event.CommandMonitor
+	readTimeout    time.Duration
+	writeTimeout   time.Duration
+	tlsConfig      *TLSConfig
+	compressors    []compressor.Compressor
+}
+
+func newConfig(opts ...Option) (*config, error) {
+	cfg := &config{
+		connectTimeout: 30 * time.Second,
+		dialer:         nil,
+		idleTimeout:    10 * time.Minute,
+		lifeTimeout:    30 * time.Minute,
+	}
+
+	for _, opt := range opts {
+		err := opt(cfg)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if cfg.dialer == nil {
+		cfg.dialer = &net.Dialer{
+			KeepAlive: tcpKeepalive,
+			Timeout:   cfg.connectTimeout,
+		}
+	}
+
+	return cfg, nil
+}
+
+// Option is used to configure a connection.
+type Option func(*config) error
+
+// WithAppName sets the application name which gets sent to MongoDB when it
+// first connects.
+func WithAppName(fn func(string) string) Option {
+	return func(c *config) error {
+		c.appName = fn(c.appName)
+		return nil
+	}
+}
+
+// WithCompressors sets the compressors that can be used for communication.
+func WithCompressors(fn func([]compressor.Compressor) []compressor.Compressor) Option {
+	return func(c *config) error {
+		c.compressors = fn(c.compressors)
+		return nil
+	}
+}
+
+// WithConnectTimeout configures the maximum amount of time a dial will wait for a
+// connect to complete. The default is 30 seconds.
+func WithConnectTimeout(fn func(time.Duration) time.Duration) Option {
+	return func(c *config) error {
+		c.connectTimeout = fn(c.connectTimeout)
+		return nil
+	}
+}
+
+// WithDialer configures the Dialer to use when making a new connection to MongoDB.
+func WithDialer(fn func(Dialer) Dialer) Option {
+	return func(c *config) error {
+		c.dialer = fn(c.dialer)
+		return nil
+	}
+}
+
+// WithHandshaker configures the Handshaker that will be used to initialize newly
+// dialed connections.
+func WithHandshaker(fn func(Handshaker) Handshaker) Option {
+	return func(c *config) error {
+		c.handshaker = fn(c.handshaker)
+		return nil
+	}
+}
+
+// WithIdleTimeout configures the maximum idle time to allow for a connection.
+func WithIdleTimeout(fn func(time.Duration) time.Duration) Option {
+	return func(c *config) error {
+		c.idleTimeout = fn(c.idleTimeout)
+		return nil
+	}
+}
+
+// WithLifeTimeout configures the maximum life of a connection.
+func WithLifeTimeout(fn func(time.Duration) time.Duration) Option {
+	return func(c *config) error {
+		c.lifeTimeout = fn(c.lifeTimeout)
+		return nil
+	}
+}
+
+// WithReadTimeout configures the maximum read time for a connection.
+func WithReadTimeout(fn func(time.Duration) time.Duration) Option {
+	return func(c *config) error {
+		c.readTimeout = fn(c.readTimeout)
+		return nil
+	}
+}
+
+// WithWriteTimeout configures the maximum write time for a connection.
+func WithWriteTimeout(fn func(time.Duration) time.Duration) Option {
+	return func(c *config) error {
+		c.writeTimeout = fn(c.writeTimeout)
+		return nil
+	}
+}
+
+// WithTLSConfig configures the TLS options for a connection.
+func WithTLSConfig(fn func(*TLSConfig) *TLSConfig) Option {
+	return func(c *config) error {
+		c.tlsConfig = fn(c.tlsConfig)
+		return nil
+	}
+}
+
+// WithMonitor configures an event monitor for command monitoring.
+func WithMonitor(fn func(*event.CommandMonitor) *event.CommandMonitor) Option {
+	return func(c *config) error {
+		c.cmdMonitor = fn(c.cmdMonitor)
+		return nil
+	}
+}
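+
+// exampleNewConfig is an illustrative sketch, not part of the upstream driver: it shows how the
+// functional options above compose. Each Option receives the current value and returns the new
+// one, so defaults can be replaced or transformed; the values below are hypothetical.
+func exampleNewConfig() (*config, error) {
+	return newConfig(
+		WithAppName(func(string) string { return "exampleApp" }),
+		WithConnectTimeout(func(time.Duration) time.Duration { return 10 * time.Second }),
+		WithIdleTimeout(func(d time.Duration) time.Duration { return d / 2 }),
+	)
+}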
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/pool.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/pool.go
new file mode 100644
index 0000000..3a25ad3
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/pool.go
@@ -0,0 +1,310 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+	"golang.org/x/sync/semaphore"
+)
+
+// ErrPoolClosed is returned from an attempt to use a closed pool.
+var ErrPoolClosed = PoolError("pool is closed")
+
+// ErrSizeLargerThanCapacity is returned from an attempt to create a pool with a size
+// larger than the capacity.
+var ErrSizeLargerThanCapacity = PoolError("size is larger than capacity")
+
+// ErrPoolConnected is returned from an attempt to connect an already connected pool
+var ErrPoolConnected = PoolError("pool is connected")
+
+// ErrPoolDisconnected is returned from an attempt to disconnect an already disconnected
+// or disconnecting pool.
+var ErrPoolDisconnected = PoolError("pool is disconnected or disconnecting")
+
+// ErrConnectionClosed is returned from an attempt to use an already closed connection.
+var ErrConnectionClosed = Error{ConnectionID: "<closed>", message: "connection is closed"}
+
+// These constants represent the connection states of a pool.
+const (
+	disconnected int32 = iota
+	disconnecting
+	connected
+)
+
+// Pool is used to pool Connections to a server.
+type Pool interface {
+	// Get must return a nil *description.Server if the returned connection is
+	// not a newly dialed connection.
+	Get(context.Context) (Connection, *description.Server, error)
+	// Connect handles the initialization of a Pool and allows Connections to be
+	// retrieved and pooled. Implementations must return an error if Connect is
+	// called more than once before calling Disconnect.
+	Connect(context.Context) error
+	// Disconnect closes the connections managed by this Pool. Implementations must
+	// either wait until all of the connections in use have been returned and
+	// closed or the context expires before returning. If the context expires
+	// via cancellation, deadline, timeout, or some other manner, implementations
+	// must close the in use connections. If this method returns with no errors,
+	// all connections managed by this pool must be closed. Calling Disconnect
+	// multiple times after a single Connect call must result in an error.
+	Disconnect(context.Context) error
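+	// Drain marks all currently idle and in-use connections as expired so they
+	// are closed, rather than reused, when returned or retrieved.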
+	Drain() error
+}
+
+type pool struct {
+	address    address.Address
+	opts       []Option
+	conns      chan *pooledConnection
+	generation uint64
+	sem        *semaphore.Weighted
+	connected  int32
+	nextid     uint64
+	capacity   uint64
+	inflight   map[uint64]*pooledConnection
+
+	sync.Mutex
+}
+
+// NewPool creates a new pool that will hold size number of idle connections
+// and will create a max of capacity connections. It will use the provided
+// options.
+func NewPool(addr address.Address, size, capacity uint64, opts ...Option) (Pool, error) {
+	if size > capacity {
+		return nil, ErrSizeLargerThanCapacity
+	}
+	p := &pool{
+		address:    addr,
+		conns:      make(chan *pooledConnection, size),
+		generation: 0,
+		sem:        semaphore.NewWeighted(int64(capacity)),
+		connected:  disconnected,
+		capacity:   capacity,
+		inflight:   make(map[uint64]*pooledConnection),
+		opts:       opts,
+	}
+	return p, nil
+}
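+
+// examplePoolUsage is an illustrative sketch, not part of the upstream driver: a pool holding up
+// to 4 idle connections with a hard cap of 16 total, from which one connection is checked out and
+// then returned. The size and capacity values are hypothetical.
+func examplePoolUsage(ctx context.Context, addr address.Address) error {
+	p, err := NewPool(addr, 4, 16)
+	if err != nil {
+		return err
+	}
+	if err = p.Connect(ctx); err != nil {
+		return err
+	}
+	conn, _, err := p.Get(ctx)
+	if err != nil {
+		return err
+	}
+	// Close on the acquired wrapper releases the semaphore and returns the connection
+	// to the pool when possible (see acquired.Close and returnConnection below).
+	return conn.Close()
+}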
+
+func (p *pool) Drain() error {
+	atomic.AddUint64(&p.generation, 1)
+	return nil
+}
+
+func (p *pool) Connect(ctx context.Context) error {
+	if !atomic.CompareAndSwapInt32(&p.connected, disconnected, connected) {
+		return ErrPoolConnected
+	}
+	atomic.AddUint64(&p.generation, 1)
+	return nil
+}
+
+func (p *pool) Disconnect(ctx context.Context) error {
+	if !atomic.CompareAndSwapInt32(&p.connected, connected, disconnecting) {
+		return ErrPoolDisconnected
+	}
+
+	// We first clear out the idle connections, then we attempt to acquire the entire capacity
+	// semaphore. If the context is cancelled, its deadline expires, or a timeout occurs, the
+	// semaphore acquire method will return an error; if that happens, we aggressively close the
+	// remaining open connections. If we were able to successfully acquire the semaphore, then all
+	// of the in-flight connections have been closed and we release the semaphore.
+loop:
+	for {
+		select {
+		case pc := <-p.conns:
+			// Any error here would be overwritten by the semaphore acquire below, so it is ignored.
+			_ = p.closeConnection(pc)
+		default:
+			break loop
+		}
+	}
+	err := p.sem.Acquire(ctx, int64(p.capacity))
+	if err != nil {
+		p.Lock()
+		// We copy the remaining connections to close into a slice, then
+		// iterate the slice to do the closing. This allows us to use a single
+		// function to actually clean up and close connections at the expense of
+		// a double iteration in the worst case.
+		toClose := make([]*pooledConnection, 0, len(p.inflight))
+		for _, pc := range p.inflight {
+			toClose = append(toClose, pc)
+		}
+		p.Unlock()
+		for _, pc := range toClose {
+			_ = pc.Close()
+		}
+	} else {
+		p.sem.Release(int64(p.capacity))
+	}
+	atomic.StoreInt32(&p.connected, disconnected)
+	return nil
+}
+
+func (p *pool) Get(ctx context.Context) (Connection, *description.Server, error) {
+	if atomic.LoadInt32(&p.connected) != connected {
+		return nil, nil, ErrPoolClosed
+	}
+
+	err := p.sem.Acquire(ctx, 1)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return p.get(ctx)
+}
+
+func (p *pool) get(ctx context.Context) (Connection, *description.Server, error) {
+	g := atomic.LoadUint64(&p.generation)
+	select {
+	case c := <-p.conns:
+		if c.Expired() {
+			go p.closeConnection(c)
+			return p.get(ctx)
+		}
+
+		return &acquired{Connection: c, sem: p.sem}, nil, nil
+	case <-ctx.Done():
+		p.sem.Release(1)
+		return nil, nil, ctx.Err()
+	default:
+		c, desc, err := New(ctx, p.address, p.opts...)
+		if err != nil {
+			p.sem.Release(1)
+			return nil, nil, err
+		}
+
+		pc := &pooledConnection{
+			Connection: c,
+			p:          p,
+			generation: g,
+			id:         atomic.AddUint64(&p.nextid, 1),
+		}
+		p.Lock()
+		if atomic.LoadInt32(&p.connected) != connected {
+			p.Unlock()
+			p.sem.Release(1)
+			p.closeConnection(pc)
+			return nil, nil, ErrPoolClosed
+		}
+		defer p.Unlock()
+		p.inflight[pc.id] = pc
+		return &acquired{Connection: pc, sem: p.sem}, desc, nil
+	}
+}
+
+func (p *pool) closeConnection(pc *pooledConnection) error {
+	if !atomic.CompareAndSwapInt32(&pc.closed, 0, 1) {
+		return nil
+	}
+	p.Lock()
+	delete(p.inflight, pc.id)
+	p.Unlock()
+	return pc.Connection.Close()
+}
+
+func (p *pool) returnConnection(pc *pooledConnection) error {
+	if atomic.LoadInt32(&p.connected) != connected || pc.Expired() {
+		return p.closeConnection(pc)
+	}
+
+	select {
+	case p.conns <- pc:
+		return nil
+	default:
+		return p.closeConnection(pc)
+	}
+}
+
+func (p *pool) isExpired(generation uint64) bool {
+	return generation < atomic.LoadUint64(&p.generation)
+}
+
+type pooledConnection struct {
+	Connection
+	p          *pool
+	generation uint64
+	id         uint64
+	closed     int32
+}
+
+func (pc *pooledConnection) Close() error {
+	return pc.p.returnConnection(pc)
+}
+
+func (pc *pooledConnection) Expired() bool {
+	return pc.Connection.Expired() || pc.p.isExpired(pc.generation)
+}
+
+type acquired struct {
+	Connection
+
+	sem *semaphore.Weighted
+	sync.Mutex
+}
+
+func (a *acquired) WriteWireMessage(ctx context.Context, wm wiremessage.WireMessage) error {
+	a.Lock()
+	defer a.Unlock()
+	if a.Connection == nil {
+		return ErrConnectionClosed
+	}
+	return a.Connection.WriteWireMessage(ctx, wm)
+}
+
+func (a *acquired) ReadWireMessage(ctx context.Context) (wiremessage.WireMessage, error) {
+	a.Lock()
+	defer a.Unlock()
+	if a.Connection == nil {
+		return nil, ErrConnectionClosed
+	}
+	return a.Connection.ReadWireMessage(ctx)
+}
+
+func (a *acquired) Close() error {
+	a.Lock()
+	defer a.Unlock()
+	if a.Connection == nil {
+		return nil
+	}
+	err := a.Connection.Close()
+	a.sem.Release(1)
+	a.Connection = nil
+	return err
+}
+
+func (a *acquired) Expired() bool {
+	a.Lock()
+	defer a.Unlock()
+	if a.Connection == nil {
+		return true
+	}
+	return a.Connection.Expired()
+}
+
+func (a *acquired) Alive() bool {
+	a.Lock()
+	defer a.Unlock()
+	if a.Connection == nil {
+		return false
+	}
+	return a.Connection.Alive()
+}
+
+func (a *acquired) ID() string {
+	a.Lock()
+	defer a.Unlock()
+	if a.Connection == nil {
+		return "<closed>"
+	}
+	return a.Connection.ID()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/proxy.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/proxy.go
new file mode 100644
index 0000000..62e18d2
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/proxy.go
@@ -0,0 +1,26 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import "github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+
+// Proxy implements a MongoDB proxy. It will use the given pool to connect to a
+// MongoDB server and proxy the traffic between connections it is given and the
+// server. It will pass each of the wireops it reads from the handled connection
+// to a Processor. If an error is returned from the processor, the wireop will
+// not be forwarded on to the server. If there is no error, the returned message
+// will be passed on to the server. If both the returned message and the error are nil,
+// the original wiremessage will be passed on to the server.
+//
+// TODO(GODRIVER-268): Implement this.
+type Proxy struct {
+	Processor wiremessage.Transformer
+	Pool      Pool
+}
+
+// HandleConnection implements the Handler interface.
+func (*Proxy) HandleConnection(Connection) { return }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/server.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/server.go
new file mode 100644
index 0000000..033d0d7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/server.go
@@ -0,0 +1,40 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import "context"
+
+// Server is used to handle incoming Connections. It handles the boilerplate of accepting a
+// Connection and cleaning it up after running a Handler. This also makes it easier to build
+// higher level processors, like proxies, by handling the life cycle of the underlying
+// connection.
+//
+// TODO(GODRIVER-269): Implement this.
+type Server struct {
+	Addr    Addr
+	Handler Handler
+}
+
+// ListenAndServe listens on the network address srv.Addr and calls Serve to
+// handle requests on incoming connections. If srv.Addr is blank, "localhost:27017"
+// is used.
+func (*Server) ListenAndServe() error { return nil }
+
+// Serve accepts incoming connections on the Listener l, creating a new service
+// goroutine for each. The service goroutines call srv.Handler and do no processing
+// beforehand. When srv.Handler returns, the connection is closed.
+func (*Server) Serve(Listener) error { return nil }
+
+// Shutdown gracefully shuts down the server by closing the active listeners. Shutdown
+// does not wait for all open connections to close before returning.
+func (*Server) Shutdown(context.Context) error { return nil }
+
+// Handler handles an individual Connection. Returning signals that the Connection
+// is no longer needed and can be closed.
+type Handler interface {
+	HandleConnection(Connection)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/tlsconfig.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/tlsconfig.go
new file mode 100644
index 0000000..0536418
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/tlsconfig.go
@@ -0,0 +1,237 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import (
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/asn1"
+	"encoding/hex"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"strings"
+)
+
+// TLSConfig contains options for configuring a TLS connection to the server.
+type TLSConfig struct {
+	*tls.Config
+	clientCertPass func() string
+}
+
+// NewTLSConfig creates a new TLSConfig.
+func NewTLSConfig() *TLSConfig {
+	cfg := &TLSConfig{}
+	cfg.Config = new(tls.Config)
+
+	return cfg
+}
+
+// SetClientCertDecryptPassword sets a function to retrieve the decryption password
+// necessary to read a certificate. This is a function instead of a string to
+// provide greater flexibility when deciding how to retrieve and store the password.
+func (c *TLSConfig) SetClientCertDecryptPassword(f func() string) {
+	c.clientCertPass = f
+}
+
+// SetInsecure sets whether the client should skip verifying the server's certificate
+// chain and hostnames.
+func (c *TLSConfig) SetInsecure(allow bool) {
+	c.InsecureSkipVerify = allow
+}
+
+// AddCACertFromFile adds a root CA certificate to the configuration given a path
+// to the containing file.
+func (c *TLSConfig) AddCACertFromFile(file string) error {
+	data, err := ioutil.ReadFile(file)
+	if err != nil {
+		return err
+	}
+
+	certBytes, err := loadCert(data)
+	if err != nil {
+		return err
+	}
+
+	cert, err := x509.ParseCertificate(certBytes)
+	if err != nil {
+		return err
+	}
+
+	if c.RootCAs == nil {
+		c.RootCAs = x509.NewCertPool()
+	}
+
+	c.RootCAs.AddCert(cert)
+
+	return nil
+}
+
+// AddClientCertFromFile adds a client certificate to the configuration given a path to the
+// containing file and returns the certificate's subject name.
+func (c *TLSConfig) AddClientCertFromFile(clientFile string) (string, error) {
+	data, err := ioutil.ReadFile(clientFile)
+	if err != nil {
+		return "", err
+	}
+
+	var currentBlock *pem.Block
+	var certBlock, certDecodedBlock, keyBlock []byte
+
+	remaining := data
+	start := 0
+	for {
+		currentBlock, remaining = pem.Decode(remaining)
+		if currentBlock == nil {
+			break
+		}
+
+		if currentBlock.Type == "CERTIFICATE" {
+			certBlock = data[start : len(data)-len(remaining)]
+			certDecodedBlock = currentBlock.Bytes
+			start += len(certBlock)
+		} else if strings.HasSuffix(currentBlock.Type, "PRIVATE KEY") {
+			if c.clientCertPass != nil && x509.IsEncryptedPEMBlock(currentBlock) {
+				var encoded bytes.Buffer
+				buf, err := x509.DecryptPEMBlock(currentBlock, []byte(c.clientCertPass()))
+				if err != nil {
+					return "", err
+				}
+
+				pem.Encode(&encoded, &pem.Block{Type: currentBlock.Type, Bytes: buf})
+				keyBlock = encoded.Bytes()
+				start = len(data) - len(remaining)
+			} else {
+				keyBlock = data[start : len(data)-len(remaining)]
+				start += len(keyBlock)
+			}
+		}
+	}
+	if len(certBlock) == 0 {
+		return "", fmt.Errorf("failed to find CERTIFICATE")
+	}
+	if len(keyBlock) == 0 {
+		return "", fmt.Errorf("failed to find PRIVATE KEY")
+	}
+
+	cert, err := tls.X509KeyPair(certBlock, keyBlock)
+	if err != nil {
+		return "", err
+	}
+
+	c.Certificates = append(c.Certificates, cert)
+
+	// The documentation for tls.X509KeyPair indicates that the Leaf certificate is not
+	// retained, so the certificate must be parsed again to extract its subject.
+	crt, err := x509.ParseCertificate(certDecodedBlock)
+	if err != nil {
+		return "", err
+	}
+
+	return x509CertSubject(crt), nil
+}
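+
+// exampleTLSSetup is an illustrative sketch, not part of the upstream driver; the file paths and
+// password below are hypothetical.
+func exampleTLSSetup() (*TLSConfig, error) {
+	cfg := NewTLSConfig()
+	cfg.SetInsecure(false)
+	cfg.SetClientCertDecryptPassword(func() string { return "key-password" })
+	if err := cfg.AddCACertFromFile("/etc/ssl/mongo-ca.pem"); err != nil {
+		return nil, err
+	}
+	subject, err := cfg.AddClientCertFromFile("/etc/ssl/mongo-client.pem")
+	if err != nil {
+		return nil, err
+	}
+	fmt.Println(subject) // e.g. "CN=client,O=example"
+	return cfg, nil
+}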
+
+func loadCert(data []byte) ([]byte, error) {
+	var certBlock *pem.Block
+
+	for certBlock == nil {
+		if len(data) == 0 {
+			return nil, errors.New(".pem file must have both a CERTIFICATE and an RSA PRIVATE KEY section")
+		}
+
+		block, rest := pem.Decode(data)
+		if block == nil {
+			return nil, errors.New("invalid .pem file")
+		}
+
+		switch block.Type {
+		case "CERTIFICATE":
+			if certBlock != nil {
+				return nil, errors.New("multiple CERTIFICATE sections in .pem file")
+			}
+
+			certBlock = block
+		}
+
+		data = rest
+	}
+
+	return certBlock.Bytes, nil
+}
+
+// Because the functionality to convert a pkix.Name to a string wasn't added until Go 1.10, we
+// need to copy the implementation (along with the attributeTypeNames map below).
+func x509CertSubject(cert *x509.Certificate) string {
+	r := cert.Subject.ToRDNSequence()
+
+	s := ""
+	for i := 0; i < len(r); i++ {
+		rdn := r[len(r)-1-i]
+		if i > 0 {
+			s += ","
+		}
+		for j, tv := range rdn {
+			if j > 0 {
+				s += "+"
+			}
+
+			oidString := tv.Type.String()
+			typeName, ok := attributeTypeNames[oidString]
+			if !ok {
+				derBytes, err := asn1.Marshal(tv.Value)
+				if err == nil {
+					s += oidString + "=#" + hex.EncodeToString(derBytes)
+					continue // No value escaping necessary.
+				}
+
+				typeName = oidString
+			}
+
+			valueString := fmt.Sprint(tv.Value)
+			escaped := make([]rune, 0, len(valueString))
+
+			for k, c := range valueString {
+				escape := false
+
+				switch c {
+				case ',', '+', '"', '\\', '<', '>', ';':
+					escape = true
+
+				case ' ':
+					escape = k == 0 || k == len(valueString)-1
+
+				case '#':
+					escape = k == 0
+				}
+
+				if escape {
+					escaped = append(escaped, '\\', c)
+				} else {
+					escaped = append(escaped, c)
+				}
+			}
+
+			s += typeName + "=" + string(escaped)
+		}
+	}
+
+	return s
+}
+
+var attributeTypeNames = map[string]string{
+	"2.5.4.6":  "C",
+	"2.5.4.10": "O",
+	"2.5.4.11": "OU",
+	"2.5.4.3":  "CN",
+	"2.5.4.5":  "SERIALNUMBER",
+	"2.5.4.7":  "L",
+	"2.5.4.8":  "ST",
+	"2.5.4.9":  "STREET",
+	"2.5.4.17": "POSTALCODE",
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/tlsconfig_clone_17.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/tlsconfig_clone_17.go
new file mode 100644
index 0000000..a753dfd
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/tlsconfig_clone_17.go
@@ -0,0 +1,42 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import "crypto/tls"
+
+// Clone returns a shallow clone of c. It is safe to clone a Config that is being
+// used concurrently by a TLS client or server.
+func (c *TLSConfig) Clone() *TLSConfig {
+	cfg := cloneconfig(c.Config)
+	return &TLSConfig{cfg, c.clientCertPass}
+}
+
+func cloneconfig(c *tls.Config) *tls.Config {
+	return &tls.Config{
+		Rand:                        c.Rand,
+		Time:                        c.Time,
+		Certificates:                c.Certificates,
+		NameToCertificate:           c.NameToCertificate,
+		GetCertificate:              c.GetCertificate,
+		RootCAs:                     c.RootCAs,
+		NextProtos:                  c.NextProtos,
+		ServerName:                  c.ServerName,
+		ClientAuth:                  c.ClientAuth,
+		ClientCAs:                   c.ClientCAs,
+		InsecureSkipVerify:          c.InsecureSkipVerify,
+		CipherSuites:                c.CipherSuites,
+		PreferServerCipherSuites:    c.PreferServerCipherSuites,
+		SessionTicketsDisabled:      c.SessionTicketsDisabled,
+		SessionTicketKey:            c.SessionTicketKey,
+		ClientSessionCache:          c.ClientSessionCache,
+		MinVersion:                  c.MinVersion,
+		MaxVersion:                  c.MaxVersion,
+		CurvePreferences:            c.CurvePreferences,
+		DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+		Renegotiation:               c.Renegotiation,
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connstring/connstring.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connstring/connstring.go
new file mode 100644
index 0000000..263a84c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connstring/connstring.go
@@ -0,0 +1,773 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connstring
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/internal"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Parse parses the provided uri and returns a URI object.
+func Parse(s string) (ConnString, error) {
+	var p parser
+	err := p.parse(s)
+	if err != nil {
+		err = internal.WrapErrorf(err, "error parsing uri (%s)", s)
+	}
+	return p.ConnString, err
+}
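+
+// exampleParseUsage is an illustrative sketch, not part of the upstream driver, showing how Parse
+// might be used; the URI below is hypothetical.
+func exampleParseUsage() {
+	cs, err := Parse("mongodb://user:pass@localhost:27017/mydb?replicaSet=rs0&ssl=true")
+	if err != nil {
+		return
+	}
+	fmt.Println(cs.Hosts)      // [localhost:27017]
+	fmt.Println(cs.Database)   // mydb
+	fmt.Println(cs.ReplicaSet) // rs0
+	fmt.Println(cs.SSL)        // true
+}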
+
+// ConnString represents a connection string to mongodb.
+type ConnString struct {
+	Original                           string
+	AppName                            string
+	AuthMechanism                      string
+	AuthMechanismProperties            map[string]string
+	AuthSource                         string
+	Compressors                        []string
+	Connect                            ConnectMode
+	ConnectSet                         bool
+	ConnectTimeout                     time.Duration
+	ConnectTimeoutSet                  bool
+	Database                           string
+	HeartbeatInterval                  time.Duration
+	HeartbeatIntervalSet               bool
+	Hosts                              []string
+	J                                  bool
+	JSet                               bool
+	LocalThreshold                     time.Duration
+	LocalThresholdSet                  bool
+	MaxConnIdleTime                    time.Duration
+	MaxConnIdleTimeSet                 bool
+	MaxPoolSize                        uint16
+	MaxPoolSizeSet                     bool
+	Password                           string
+	PasswordSet                        bool
+	ReadConcernLevel                   string
+	ReadPreference                     string
+	ReadPreferenceTagSets              []map[string]string
+	RetryWrites                        bool
+	RetryWritesSet                     bool
+	MaxStaleness                       time.Duration
+	MaxStalenessSet                    bool
+	ReplicaSet                         string
+	ServerSelectionTimeout             time.Duration
+	ServerSelectionTimeoutSet          bool
+	SocketTimeout                      time.Duration
+	SocketTimeoutSet                   bool
+	SSL                                bool
+	SSLSet                             bool
+	SSLClientCertificateKeyFile        string
+	SSLClientCertificateKeyFileSet     bool
+	SSLClientCertificateKeyPassword    func() string
+	SSLClientCertificateKeyPasswordSet bool
+	SSLInsecure                        bool
+	SSLInsecureSet                     bool
+	SSLCaFile                          string
+	SSLCaFileSet                       bool
+	WString                            string
+	WNumber                            int
+	WNumberSet                         bool
+	Username                           string
+	ZlibLevel                          int
+
+	WTimeout              time.Duration
+	WTimeoutSet           bool
+	WTimeoutSetFromOption bool
+
+	Options        map[string][]string
+	UnknownOptions map[string][]string
+}
+
+func (u *ConnString) String() string {
+	return u.Original
+}
+
+// ConnectMode informs the driver on how to connect
+// to the server.
+type ConnectMode uint8
+
+// ConnectMode constants.
+const (
+	AutoConnect ConnectMode = iota
+	SingleConnect
+)
+
+type parser struct {
+	ConnString
+}
+
+func (p *parser) parse(original string) error {
+	p.Original = original
+	uri := original
+
+	var err error
+	var isSRV bool
+	if strings.HasPrefix(uri, "mongodb+srv://") {
+		isSRV = true
+		// remove the scheme
+		uri = uri[14:]
+	} else if strings.HasPrefix(uri, "mongodb://") {
+		// remove the scheme
+		uri = uri[10:]
+	} else {
+		return fmt.Errorf("scheme must be \"mongodb\" or \"mongodb+srv\"")
+	}
+
+	if idx := strings.Index(uri, "@"); idx != -1 {
+		userInfo := uri[:idx]
+		uri = uri[idx+1:]
+
+		username := userInfo
+		var password string
+
+		if idx := strings.Index(userInfo, ":"); idx != -1 {
+			username = userInfo[:idx]
+			password = userInfo[idx+1:]
+			p.PasswordSet = true
+		}
+
+		if len(username) > 1 {
+			if strings.Contains(username, "/") {
+				return fmt.Errorf("unescaped slash in username")
+			}
+		}
+
+		p.Username, err = url.QueryUnescape(username)
+		if err != nil {
+			return internal.WrapErrorf(err, "invalid username")
+		}
+		if len(password) > 1 {
+			if strings.Contains(password, ":") {
+				return fmt.Errorf("unescaped colon in password")
+			}
+			if strings.Contains(password, "/") {
+				return fmt.Errorf("unescaped slash in password")
+			}
+			p.Password, err = url.QueryUnescape(password)
+			if err != nil {
+				return internal.WrapErrorf(err, "invalid password")
+			}
+		}
+	}
+
+	// fetch the hosts field
+	hosts := uri
+	if idx := strings.IndexAny(uri, "/?@"); idx != -1 {
+		if uri[idx] == '@' {
+			return fmt.Errorf("unescaped @ sign in user info")
+		}
+		if uri[idx] == '?' {
+			return fmt.Errorf("must have a / before the query ?")
+		}
+		hosts = uri[:idx]
+	}
+
+	var connectionArgsFromTXT []string
+	parsedHosts := strings.Split(hosts, ",")
+
+	if isSRV {
+		if len(parsedHosts) != 1 {
+			return fmt.Errorf("URI with SRV must include one and only one hostname")
+		}
+		parsedHosts, err = fetchSeedlistFromSRV(parsedHosts[0])
+		if err != nil {
+			return err
+		}
+
+		// The error is ignored because failing to find a TXT record should not be
+		// considered an error.
+		recordsFromTXT, _ := net.LookupTXT(hosts)
+
+		// This is a temporary fix to get around bug https://github.com/golang/go/issues/21472.
+		// It will currently incorrectly concatenate multiple TXT records into one
+		// on Windows.
+		if runtime.GOOS == "windows" {
+			recordsFromTXT = []string{strings.Join(recordsFromTXT, "")}
+		}
+
+		if len(recordsFromTXT) > 1 {
+			return errors.New("multiple records from TXT not supported")
+		}
+		if len(recordsFromTXT) > 0 {
+			connectionArgsFromTXT = strings.FieldsFunc(recordsFromTXT[0], func(r rune) bool { return r == ';' || r == '&' })
+
+			err := validateTXTResult(connectionArgsFromTXT)
+			if err != nil {
+				return err
+			}
+
+		}
+
+		// SSL is enabled by default for SRV, but can be manually disabled with "ssl=false".
+		p.SSL = true
+		p.SSLSet = true
+	}
+
+	for _, host := range parsedHosts {
+		err = p.addHost(host)
+		if err != nil {
+			return internal.WrapErrorf(err, "invalid host \"%s\"", host)
+		}
+	}
+	if len(p.Hosts) == 0 {
+		return fmt.Errorf("must have at least 1 host")
+	}
+
+	uri = uri[len(hosts):]
+
+	extractedDatabase, err := extractDatabaseFromURI(uri)
+	if err != nil {
+		return err
+	}
+
+	uri = extractedDatabase.uri
+	p.Database = extractedDatabase.db
+
+	connectionArgsFromQueryString, err := extractQueryArgsFromURI(uri)
+	if err != nil {
+		return err
+	}
+	connectionArgPairs := append(connectionArgsFromTXT, connectionArgsFromQueryString...)
+
+	for _, pair := range connectionArgPairs {
+		err = p.addOption(pair)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = p.setDefaultAuthParams(extractedDatabase.db)
+	if err != nil {
+		return err
+	}
+
+	err = p.validateAuth()
+	if err != nil {
+		return err
+	}
+
+	// Check for invalid write concern (i.e. w=0 and j=true)
+	if p.WNumberSet && p.WNumber == 0 && p.JSet && p.J {
+		return writeconcern.ErrInconsistent
+	}
+
+	// If WTimeout was set from manual options passed in, set WTimeoutSet to true.
+	if p.WTimeoutSetFromOption {
+		p.WTimeoutSet = true
+	}
+
+	return nil
+}
+
+func (p *parser) setDefaultAuthParams(dbName string) error {
+	switch strings.ToLower(p.AuthMechanism) {
+	case "plain":
+		if p.AuthSource == "" {
+			p.AuthSource = dbName
+			if p.AuthSource == "" {
+				p.AuthSource = "$external"
+			}
+		}
+	case "gssapi":
+		if p.AuthMechanismProperties == nil {
+			p.AuthMechanismProperties = map[string]string{
+				"SERVICE_NAME": "mongodb",
+			}
+		} else if v, ok := p.AuthMechanismProperties["SERVICE_NAME"]; !ok || v == "" {
+			p.AuthMechanismProperties["SERVICE_NAME"] = "mongodb"
+		}
+		fallthrough
+	case "mongodb-x509":
+		if p.AuthSource == "" {
+			p.AuthSource = "$external"
+		} else if p.AuthSource != "$external" {
+			return fmt.Errorf("auth source must be $external")
+		}
+	case "mongodb-cr":
+		fallthrough
+	case "scram-sha-1":
+		fallthrough
+	case "scram-sha-256":
+		if p.AuthSource == "" {
+			p.AuthSource = dbName
+			if p.AuthSource == "" {
+				p.AuthSource = "admin"
+			}
+		}
+	case "":
+		if p.AuthSource == "" {
+			p.AuthSource = dbName
+			if p.AuthSource == "" {
+				p.AuthSource = "admin"
+			}
+		}
+	default:
+		return fmt.Errorf("invalid auth mechanism")
+	}
+	return nil
+}
+
+func (p *parser) validateAuth() error {
+	switch strings.ToLower(p.AuthMechanism) {
+	case "mongodb-cr":
+		if p.Username == "" {
+			return fmt.Errorf("username required for MONGO-CR")
+		}
+		if p.Password == "" {
+			return fmt.Errorf("password required for MONGO-CR")
+		}
+		if p.AuthMechanismProperties != nil {
+			return fmt.Errorf("MONGO-CR cannot have mechanism properties")
+		}
+	case "mongodb-x509":
+		if p.Password != "" {
+			return fmt.Errorf("password cannot be specified for MONGO-X509")
+		}
+		if p.AuthMechanismProperties != nil {
+			return fmt.Errorf("MONGO-X509 cannot have mechanism properties")
+		}
+	case "gssapi":
+		if p.Username == "" {
+			return fmt.Errorf("username required for GSSAPI")
+		}
+		for k := range p.AuthMechanismProperties {
+			if k != "SERVICE_NAME" && k != "CANONICALIZE_HOST_NAME" && k != "SERVICE_REALM" {
+				return fmt.Errorf("invalid auth property for GSSAPI")
+			}
+		}
+	case "plain":
+		if p.Username == "" {
+			return fmt.Errorf("username required for PLAIN")
+		}
+		if p.Password == "" {
+			return fmt.Errorf("password required for PLAIN")
+		}
+		if p.AuthMechanismProperties != nil {
+			return fmt.Errorf("PLAIN cannot have mechanism properties")
+		}
+	case "scram-sha-1":
+		if p.Username == "" {
+			return fmt.Errorf("username required for SCRAM-SHA-1")
+		}
+		if p.Password == "" {
+			return fmt.Errorf("password required for SCRAM-SHA-1")
+		}
+		if p.AuthMechanismProperties != nil {
+			return fmt.Errorf("SCRAM-SHA-1 cannot have mechanism properties")
+		}
+	case "scram-sha-256":
+		if p.Username == "" {
+			return fmt.Errorf("username required for SCRAM-SHA-256")
+		}
+		if p.Password == "" {
+			return fmt.Errorf("password required for SCRAM-SHA-256")
+		}
+		if p.AuthMechanismProperties != nil {
+			return fmt.Errorf("SCRAM-SHA-256 cannot have mechanism properties")
+		}
+	case "":
+	default:
+		return fmt.Errorf("invalid auth mechanism")
+	}
+	return nil
+}
+
+func fetchSeedlistFromSRV(host string) ([]string, error) {
+	var err error
+
+	_, _, err = net.SplitHostPort(host)
+
+	if err == nil {
+		// we were able to successfully extract a port from the host,
+		// but should not be able to when using SRV
+		return nil, fmt.Errorf("URI with srv must not include a port number")
+	}
+
+	_, addresses, err := net.LookupSRV("mongodb", "tcp", host)
+	if err != nil {
+		return nil, err
+	}
+	parsedHosts := make([]string, len(addresses))
+	for i, address := range addresses {
+		trimmedAddressTarget := strings.TrimSuffix(address.Target, ".")
+		err := validateSRVResult(trimmedAddressTarget, host)
+		if err != nil {
+			return nil, err
+		}
+		parsedHosts[i] = fmt.Sprintf("%s:%d", trimmedAddressTarget, address.Port)
+	}
+
+	return parsedHosts, nil
+}
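+
+// exampleSeedlist is an illustrative sketch, not part of the upstream driver: for a hypothetical
+// host "cluster0.example.com", net.LookupSRV above queries the DNS name
+// "_mongodb._tcp.cluster0.example.com", and each returned record becomes a "host:port" seed.
+func exampleSeedlist() {
+	hosts, err := fetchSeedlistFromSRV("cluster0.example.com")
+	if err != nil {
+		return
+	}
+	fmt.Println(hosts) // e.g. [shard0.example.com:27017 shard1.example.com:27017]
+}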
+
+func (p *parser) addHost(host string) error {
+	if host == "" {
+		return nil
+	}
+	host, err := url.QueryUnescape(host)
+	if err != nil {
+		return internal.WrapErrorf(err, "invalid host \"%s\"", host)
+	}
+
+	_, port, err := net.SplitHostPort(host)
+	// It is unfortunate that net.SplitHostPort requires
+	// a port to exist.
+	if err != nil {
+		if addrError, ok := err.(*net.AddrError); !ok || addrError.Err != "missing port in address" {
+			return err
+		}
+	}
+
+	if port != "" {
+		d, err := strconv.Atoi(port)
+		if err != nil {
+			return internal.WrapErrorf(err, "port must be an integer")
+		}
+		if d <= 0 || d >= 65536 {
+			return fmt.Errorf("port must be in the range [1, 65535]")
+		}
+	}
+	p.Hosts = append(p.Hosts, host)
+	return nil
+}
+
+func (p *parser) addOption(pair string) error {
+	kv := strings.SplitN(pair, "=", 2)
+	if len(kv) != 2 || kv[0] == "" {
+		return fmt.Errorf("invalid option")
+	}
+
+	key, err := url.QueryUnescape(kv[0])
+	if err != nil {
+		return internal.WrapErrorf(err, "invalid option key \"%s\"", kv[0])
+	}
+
+	value, err := url.QueryUnescape(kv[1])
+	if err != nil {
+		return internal.WrapErrorf(err, "invalid option value \"%s\"", kv[1])
+	}
+
+	lowerKey := strings.ToLower(key)
+	switch lowerKey {
+	case "appname":
+		p.AppName = value
+	case "authmechanism":
+		p.AuthMechanism = value
+	case "authmechanismproperties":
+		p.AuthMechanismProperties = make(map[string]string)
+		pairs := strings.Split(value, ",")
+		for _, pair := range pairs {
+			kv := strings.SplitN(pair, ":", 2)
+			if len(kv) != 2 || kv[0] == "" {
+				return fmt.Errorf("invalid authMechanism property")
+			}
+			p.AuthMechanismProperties[kv[0]] = kv[1]
+		}
+	case "authsource":
+		p.AuthSource = value
+	case "compressors":
+		compressors := strings.Split(value, ",")
+		if len(compressors) < 1 {
+			return fmt.Errorf("must have at least 1 compressor")
+		}
+		p.Compressors = compressors
+	case "connect":
+		switch strings.ToLower(value) {
+		case "automatic":
+		case "direct":
+			p.Connect = SingleConnect
+		default:
+			return fmt.Errorf("invalid 'connect' value: %s", value)
+		}
+
+		p.ConnectSet = true
+	case "connecttimeoutms":
+		n, err := strconv.Atoi(value)
+		if err != nil || n < 0 {
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+		p.ConnectTimeout = time.Duration(n) * time.Millisecond
+		p.ConnectTimeoutSet = true
+	case "heartbeatintervalms", "heartbeatfrequencyms":
+		n, err := strconv.Atoi(value)
+		if err != nil || n < 0 {
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+		p.HeartbeatInterval = time.Duration(n) * time.Millisecond
+		p.HeartbeatIntervalSet = true
+	case "journal":
+		switch value {
+		case "true":
+			p.J = true
+		case "false":
+			p.J = false
+		default:
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+
+		p.JSet = true
+	case "localthresholdms":
+		n, err := strconv.Atoi(value)
+		if err != nil || n < 0 {
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+		p.LocalThreshold = time.Duration(n) * time.Millisecond
+		p.LocalThresholdSet = true
+	case "maxidletimems":
+		n, err := strconv.Atoi(value)
+		if err != nil || n < 0 {
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+		p.MaxConnIdleTime = time.Duration(n) * time.Millisecond
+		p.MaxConnIdleTimeSet = true
+	case "maxpoolsize":
+		n, err := strconv.Atoi(value)
+		if err != nil || n < 0 {
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+		p.MaxPoolSize = uint16(n)
+		p.MaxPoolSizeSet = true
+	case "readconcernlevel":
+		p.ReadConcernLevel = value
+	case "readpreference":
+		p.ReadPreference = value
+	case "readpreferencetags":
+		tags := make(map[string]string)
+		items := strings.Split(value, ",")
+		for _, item := range items {
+			parts := strings.Split(item, ":")
+			if len(parts) != 2 {
+				return fmt.Errorf("invalid value for %s: %s", key, value)
+			}
+			tags[parts[0]] = parts[1]
+		}
+		p.ReadPreferenceTagSets = append(p.ReadPreferenceTagSets, tags)
+	case "maxstaleness":
+		n, err := strconv.Atoi(value)
+		if err != nil || n < 0 {
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+		p.MaxStaleness = time.Duration(n) * time.Second
+		p.MaxStalenessSet = true
+	case "replicaset":
+		p.ReplicaSet = value
+	case "retrywrites":
+		p.RetryWrites = value == "true"
+		p.RetryWritesSet = true
+	case "serverselectiontimeoutms":
+		n, err := strconv.Atoi(value)
+		if err != nil || n < 0 {
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+		p.ServerSelectionTimeout = time.Duration(n) * time.Millisecond
+		p.ServerSelectionTimeoutSet = true
+	case "sockettimeoutms":
+		n, err := strconv.Atoi(value)
+		if err != nil || n < 0 {
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+		p.SocketTimeout = time.Duration(n) * time.Millisecond
+		p.SocketTimeoutSet = true
+	case "ssl":
+		switch value {
+		case "true":
+			p.SSL = true
+		case "false":
+			p.SSL = false
+		default:
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+
+		p.SSLSet = true
+	case "sslclientcertificatekeyfile":
+		p.SSL = true
+		p.SSLSet = true
+		p.SSLClientCertificateKeyFile = value
+		p.SSLClientCertificateKeyFileSet = true
+	case "sslclientcertificatekeypassword":
+		p.SSLClientCertificateKeyPassword = func() string { return value }
+		p.SSLClientCertificateKeyPasswordSet = true
+	case "sslinsecure":
+		switch value {
+		case "true":
+			p.SSLInsecure = true
+		case "false":
+			p.SSLInsecure = false
+		default:
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+
+		p.SSLInsecureSet = true
+	case "sslcertificateauthorityfile":
+		p.SSL = true
+		p.SSLSet = true
+		p.SSLCaFile = value
+		p.SSLCaFileSet = true
+	case "w":
+		if w, err := strconv.Atoi(value); err == nil {
+			if w < 0 {
+				return fmt.Errorf("invalid value for %s: %s", key, value)
+			}
+
+			p.WNumber = w
+			p.WNumberSet = true
+			p.WString = ""
+			break
+		}
+
+		p.WString = value
+		p.WNumberSet = false
+
+	case "wtimeoutms":
+		n, err := strconv.Atoi(value)
+		if err != nil || n < 0 {
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+		p.WTimeout = time.Duration(n) * time.Millisecond
+		p.WTimeoutSet = true
+	case "wtimeout":
+		// Defer to wtimeoutms, but not to a manually-set option.
+		if p.WTimeoutSet {
+			break
+		}
+		n, err := strconv.Atoi(value)
+		if err != nil || n < 0 {
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+		p.WTimeout = time.Duration(n) * time.Millisecond
+	case "zlibcompressionlevel":
+		level, err := strconv.Atoi(value)
+		if err != nil || (level < -1 || level > 9) {
+			return fmt.Errorf("invalid value for %s: %s", key, value)
+		}
+
+		if level == -1 {
+			level = wiremessage.DefaultZlibLevel
+		}
+		p.ZlibLevel = level
+	default:
+		if p.UnknownOptions == nil {
+			p.UnknownOptions = make(map[string][]string)
+		}
+		p.UnknownOptions[lowerKey] = append(p.UnknownOptions[lowerKey], value)
+	}
+
+	if p.Options == nil {
+		p.Options = make(map[string][]string)
+	}
+	p.Options[lowerKey] = append(p.Options[lowerKey], value)
+
+	return nil
+}
+
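The `<name>ms` options above all share one parsing pattern: parse the decimal string, reject negatives, and scale to a `time.Duration`. A minimal standalone sketch of that pattern (the `parseMillis` helper is illustrative, not part of the driver):

```go
package main

import (
	"fmt"
	"strconv"
	"time"
)

// parseMillis mirrors the option handling above: a decimal string becomes
// a non-negative millisecond duration; anything else is an error.
func parseMillis(key, value string) (time.Duration, error) {
	n, err := strconv.Atoi(value)
	if err != nil || n < 0 {
		return 0, fmt.Errorf("invalid value for %s: %s", key, value)
	}
	return time.Duration(n) * time.Millisecond, nil
}

func main() {
	d, err := parseMillis("connecttimeoutms", "30000")
	fmt.Println(d, err) // 30s <nil>

	_, err = parseMillis("sockettimeoutms", "-1")
	fmt.Println(err) // invalid value for sockettimeoutms: -1
}
```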
+func validateSRVResult(recordFromSRV, inputHostName string) error {
+	separatedInputDomain := strings.Split(inputHostName, ".")
+	separatedRecord := strings.Split(recordFromSRV, ".")
+	if len(separatedRecord) < 2 {
+		return errors.New("DNS name must contain at least 2 labels")
+	}
+	if len(separatedRecord) < len(separatedInputDomain) {
+		return errors.New("Domain suffix from SRV record not matched input domain")
+	}
+
+	inputDomainSuffix := separatedInputDomain[1:]
+	domainSuffixOffset := len(separatedRecord) - (len(separatedInputDomain) - 1)
+
+	recordDomainSuffix := separatedRecord[domainSuffixOffset:]
+	for ix, label := range inputDomainSuffix {
+		if label != recordDomainSuffix[ix] {
+			return errors.New("Domain suffix from SRV record not matched input domain")
+		}
+	}
+	return nil
+}
+
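validateSRVResult drops the first label of the input hostname and requires the SRV target to end with the remaining labels. A self-contained illustration of the same index arithmetic (hostnames invented):

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

func main() {
	input := "cluster0.example.com"         // hostname from the mongodb+srv URI
	record := "shard-00-00.sub.example.com" // target returned by the SRV lookup

	inputLabels := strings.Split(input, ".")   // [cluster0 example com]
	recordLabels := strings.Split(record, ".") // [shard-00-00 sub example com]

	suffix := inputLabels[1:]                            // [example com]
	offset := len(recordLabels) - (len(inputLabels) - 1) // 4 - 2 = 2

	// The record's trailing labels must equal the input's domain suffix.
	fmt.Println(reflect.DeepEqual(recordLabels[offset:], suffix)) // true
}
```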
+var allowedTXTOptions = map[string]struct{}{
+	"authsource": {},
+	"replicaset": {},
+}
+
+func validateTXTResult(paramsFromTXT []string) error {
+	for _, param := range paramsFromTXT {
+		kv := strings.SplitN(param, "=", 2)
+		if len(kv) != 2 {
+			return errors.New("Invalid TXT record")
+		}
+		key := strings.ToLower(kv[0])
+		if _, ok := allowedTXTOptions[key]; !ok {
+			return fmt.Errorf("Cannot specify option '%s' in TXT record", kv[0])
+		}
+	}
+	return nil
+}
+
+func extractQueryArgsFromURI(uri string) ([]string, error) {
+	if len(uri) == 0 {
+		return nil, nil
+	}
+
+	if uri[0] != '?' {
+		return nil, errors.New("must have a ? separator between path and query")
+	}
+
+	uri = uri[1:]
+	if len(uri) == 0 {
+		return nil, nil
+	}
+	return strings.FieldsFunc(uri, func(r rune) bool { return r == ';' || r == '&' }), nil
+}
+
+type extractedDatabase struct {
+	uri string
+	db  string
+}
+
+// extractDatabaseFromURI is a helper function to retrieve information about
+// the database from the passed-in URI. It accepts as an argument the currently
+// parsed URI and returns the remainder of the URI, the database it found,
+// and any error it encounters while parsing.
+func extractDatabaseFromURI(uri string) (extractedDatabase, error) {
+	if len(uri) == 0 {
+		return extractedDatabase{}, nil
+	}
+
+	if uri[0] != '/' {
+		return extractedDatabase{}, errors.New("must have a / separator between hosts and path")
+	}
+
+	uri = uri[1:]
+	if len(uri) == 0 {
+		return extractedDatabase{}, nil
+	}
+
+	database := uri
+	if idx := strings.IndexRune(uri, '?'); idx != -1 {
+		database = uri[:idx]
+	}
+
+	escapedDatabase, err := url.QueryUnescape(database)
+	if err != nil {
+		return extractedDatabase{}, internal.WrapErrorf(err, "invalid database \"%s\"", database)
+	}
+
+	uri = uri[len(database):]
+
+	return extractedDatabase{
+		uri: uri,
+		db:  escapedDatabase,
+	}, nil
+}
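The function is unexported, but its contract is easy to demonstrate: given the remainder of a connection string after the host list, it peels off the percent-decoded database name and leaves the query string for the option parser. A sketch of the same splitting logic on an invented input:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	rest := "/my%20db?retryWrites=true" // URI remainder after the host list
	rest = rest[1:]                     // drop the leading '/'

	db := rest
	if idx := strings.IndexRune(rest, '?'); idx != -1 {
		db = rest[:idx] // "my%20db"
	}

	decoded, _ := url.QueryUnescape(db)
	fmt.Println(decoded)        // "my db"
	fmt.Println(rest[len(db):]) // "?retryWrites=true"
}
```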
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/description.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/description.go
new file mode 100644
index 0000000..758b112
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/description.go
@@ -0,0 +1,10 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+// Unknown is an unknown server or topology kind.
+const Unknown = 0
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/feature.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/feature.go
new file mode 100644
index 0000000..f0236c0
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/feature.go
@@ -0,0 +1,36 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import (
+	"fmt"
+)
+
+// MaxStalenessSupported returns an error if the given server version
+// does not support max staleness.
+func MaxStalenessSupported(wireVersion *VersionRange) error {
+	if wireVersion != nil && wireVersion.Max < 5 {
+		return fmt.Errorf("max staleness is only supported for servers 3.4 or newer")
+	}
+
+	return nil
+}
+
+// ScramSHA1Supported returns an error if the given server version
+// does not support scram-sha-1.
+func ScramSHA1Supported(wireVersion *VersionRange) error {
+	if wireVersion != nil && wireVersion.Max < 3 {
+		return fmt.Errorf("SCRAM-SHA-1 is only supported for servers 3.0 or newer")
+	}
+
+	return nil
+}
+
+// SessionsSupported returns true if the given wire version indicates that the server supports sessions.
+func SessionsSupported(wireVersion *VersionRange) bool {
+	return wireVersion != nil && wireVersion.Max >= 6
+}
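These gates key off the maximum wire version reported in the handshake: wire version 3 corresponds to server 3.0, 5 to 3.4, and 6 to 3.6. A usage sketch against the vendored package (the VersionRange values are invented):

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/network/description"
)

func main() {
	wv := &description.VersionRange{Min: 0, Max: 6} // e.g. a 3.6 server

	fmt.Println(description.SessionsSupported(wv))     // true  (Max >= 6)
	fmt.Println(description.MaxStalenessSupported(wv)) // <nil> (Max >= 5)
	fmt.Println(description.ScramSHA1Supported(wv))    // <nil> (Max >= 3)
}
```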
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server.go
new file mode 100644
index 0000000..d6857ce
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server.go
@@ -0,0 +1,144 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/tag"
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// UnsetRTT is the unset value for a round trip time.
+const UnsetRTT = -1 * time.Millisecond
+
+// SelectedServer represents a selected server that is a member of a topology.
+type SelectedServer struct {
+	Server
+	Kind TopologyKind
+}
+
+// Server represents a description of a server. This is created from an isMaster
+// command.
+type Server struct {
+	Addr address.Address
+
+	AverageRTT            time.Duration
+	AverageRTTSet         bool
+	Compression           []string // compression methods returned by server
+	CanonicalAddr         address.Address
+	ElectionID            primitive.ObjectID
+	HeartbeatInterval     time.Duration
+	LastError             error
+	LastUpdateTime        time.Time
+	LastWriteTime         time.Time
+	MaxBatchCount         uint32
+	MaxDocumentSize       uint32
+	MaxMessageSize        uint32
+	Members               []address.Address
+	ReadOnly              bool
+	SessionTimeoutMinutes uint32
+	SetName               string
+	SetVersion            uint32
+	Tags                  tag.Set
+	Kind                  ServerKind
+	WireVersion           *VersionRange
+
+	SaslSupportedMechs []string // user-specific from server handshake
+}
+
+// NewServer creates a new server description from the given parameters.
+func NewServer(addr address.Address, isMaster result.IsMaster) Server {
+	i := Server{
+		Addr: addr,
+
+		CanonicalAddr:         address.Address(isMaster.Me).Canonicalize(),
+		Compression:           isMaster.Compression,
+		ElectionID:            isMaster.ElectionID,
+		LastUpdateTime:        time.Now().UTC(),
+		LastWriteTime:         isMaster.LastWriteTimestamp,
+		MaxBatchCount:         isMaster.MaxWriteBatchSize,
+		MaxDocumentSize:       isMaster.MaxBSONObjectSize,
+		MaxMessageSize:        isMaster.MaxMessageSizeBytes,
+		SaslSupportedMechs:    isMaster.SaslSupportedMechs,
+		SessionTimeoutMinutes: isMaster.LogicalSessionTimeoutMinutes,
+		SetName:               isMaster.SetName,
+		SetVersion:            isMaster.SetVersion,
+		Tags:                  tag.NewTagSetFromMap(isMaster.Tags),
+	}
+
+	if i.CanonicalAddr == "" {
+		i.CanonicalAddr = addr
+	}
+
+	if isMaster.OK != 1 {
+		i.LastError = fmt.Errorf("not ok")
+		return i
+	}
+
+	for _, host := range isMaster.Hosts {
+		i.Members = append(i.Members, address.Address(host).Canonicalize())
+	}
+
+	for _, passive := range isMaster.Passives {
+		i.Members = append(i.Members, address.Address(passive).Canonicalize())
+	}
+
+	for _, arbiter := range isMaster.Arbiters {
+		i.Members = append(i.Members, address.Address(arbiter).Canonicalize())
+	}
+
+	i.Kind = Standalone
+
+	if isMaster.IsReplicaSet {
+		i.Kind = RSGhost
+	} else if isMaster.SetName != "" {
+		if isMaster.IsMaster {
+			i.Kind = RSPrimary
+		} else if isMaster.Hidden {
+			i.Kind = RSMember
+		} else if isMaster.Secondary {
+			i.Kind = RSSecondary
+		} else if isMaster.ArbiterOnly {
+			i.Kind = RSArbiter
+		} else {
+			i.Kind = RSMember
+		}
+	} else if isMaster.Msg == "isdbgrid" {
+		i.Kind = Mongos
+	}
+
+	i.WireVersion = &VersionRange{
+		Min: isMaster.MinWireVersion,
+		Max: isMaster.MaxWireVersion,
+	}
+
+	return i
+}
+
+// SetAverageRTT sets the average round trip time for this server description.
+func (s Server) SetAverageRTT(rtt time.Duration) Server {
+	s.AverageRTT = rtt
+	if rtt == UnsetRTT {
+		s.AverageRTTSet = false
+	} else {
+		s.AverageRTTSet = true
+	}
+
+	return s
+}
+
+// DataBearing returns true if the server is a data bearing server.
+func (s Server) DataBearing() bool {
+	return s.Kind == RSPrimary ||
+		s.Kind == RSSecondary ||
+		s.Kind == Mongos ||
+		s.Kind == Standalone
+}
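A sketch of how NewServer classifies a node from a minimal isMaster reply (all field values invented):

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/network/address"
	"github.com/mongodb/mongo-go-driver/x/network/description"
	"github.com/mongodb/mongo-go-driver/x/network/result"
)

func main() {
	im := result.IsMaster{
		OK:             1,
		IsMaster:       true,
		SetName:        "rs0",
		Hosts:          []string{"db0.example.com:27017"},
		MaxWireVersion: 6,
	}

	s := description.NewServer(address.Address("db0.example.com:27017"), im)
	fmt.Println(s.Kind)          // RSPrimary (SetName set and ismaster true)
	fmt.Println(s.DataBearing()) // true
}
```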
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server_kind.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server_kind.go
new file mode 100644
index 0000000..657791b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server_kind.go
@@ -0,0 +1,43 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+// ServerKind represents the type of a server.
+type ServerKind uint32
+
+// These constants are the possible types of servers.
+const (
+	Standalone  ServerKind = 1
+	RSMember    ServerKind = 2
+	RSPrimary   ServerKind = 4 + RSMember
+	RSSecondary ServerKind = 8 + RSMember
+	RSArbiter   ServerKind = 16 + RSMember
+	RSGhost     ServerKind = 32 + RSMember
+	Mongos      ServerKind = 256
+)
+
+// String implements the fmt.Stringer interface.
+func (kind ServerKind) String() string {
+	switch kind {
+	case Standalone:
+		return "Standalone"
+	case RSMember:
+		return "RSOther"
+	case RSPrimary:
+		return "RSPrimary"
+	case RSSecondary:
+		return "RSSecondary"
+	case RSArbiter:
+		return "RSArbiter"
+	case RSGhost:
+		return "RSGhost"
+	case Mongos:
+		return "Mongos"
+	}
+
+	return "Unknown"
+}
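The replica-set kinds are encoded as RSMember plus a distinct bit, so membership can be tested with a single bitwise AND instead of enumerating every RS* kind:

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/network/description"
)

func main() {
	// Every RS* kind carries the RSMember bit.
	fmt.Println(description.RSPrimary&description.RSMember != 0)   // true
	fmt.Println(description.RSSecondary&description.RSMember != 0) // true

	// Mongos and Standalone do not.
	fmt.Println(description.Mongos&description.RSMember != 0) // false
}
```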
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server_selector.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server_selector.go
new file mode 100644
index 0000000..9c31b6e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server_selector.go
@@ -0,0 +1,279 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import (
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/tag"
+)
+
+// ServerSelector is an interface implemented by types that can select a server given a
+// topology description.
+type ServerSelector interface {
+	SelectServer(Topology, []Server) ([]Server, error)
+}
+
+// ServerSelectorFunc is a function that can be used as a ServerSelector.
+type ServerSelectorFunc func(Topology, []Server) ([]Server, error)
+
+// SelectServer implements the ServerSelector interface.
+func (ssf ServerSelectorFunc) SelectServer(t Topology, s []Server) ([]Server, error) {
+	return ssf(t, s)
+}
+
+type compositeSelector struct {
+	selectors []ServerSelector
+}
+
+// CompositeSelector combines multiple selectors into a single selector.
+func CompositeSelector(selectors []ServerSelector) ServerSelector {
+	return &compositeSelector{selectors: selectors}
+}
+
+func (cs *compositeSelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
+	var err error
+	for _, sel := range cs.selectors {
+		candidates, err = sel.SelectServer(t, candidates)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return candidates, nil
+}
+
+type latencySelector struct {
+	latency time.Duration
+}
+
+// LatencySelector creates a ServerSelector which selects servers based on their latency.
+func LatencySelector(latency time.Duration) ServerSelector {
+	return &latencySelector{latency: latency}
+}
+
+func (ls *latencySelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
+	if ls.latency < 0 {
+		return candidates, nil
+	}
+
+	switch len(candidates) {
+	case 0, 1:
+		return candidates, nil
+	default:
+		min := time.Duration(math.MaxInt64)
+		for _, candidate := range candidates {
+			if candidate.AverageRTTSet {
+				if candidate.AverageRTT < min {
+					min = candidate.AverageRTT
+				}
+			}
+		}
+
+		if min == math.MaxInt64 {
+			return candidates, nil
+		}
+
+		max := min + ls.latency
+
+		var result []Server
+		for _, candidate := range candidates {
+			if candidate.AverageRTTSet {
+				if candidate.AverageRTT <= max {
+					result = append(result, candidate)
+				}
+			}
+		}
+
+		return result, nil
+	}
+}
+
+// WriteSelector selects all the writable servers.
+func WriteSelector() ServerSelector {
+	return ServerSelectorFunc(func(t Topology, candidates []Server) ([]Server, error) {
+		switch t.Kind {
+		case Single:
+			return candidates, nil
+		default:
+			result := []Server{}
+			for _, candidate := range candidates {
+				switch candidate.Kind {
+				case Mongos, RSPrimary, Standalone:
+					result = append(result, candidate)
+				}
+			}
+			return result, nil
+		}
+	})
+}
+
+// ReadPrefSelector selects servers based on the provided read preference.
+func ReadPrefSelector(rp *readpref.ReadPref) ServerSelector {
+	return ServerSelectorFunc(func(t Topology, candidates []Server) ([]Server, error) {
+		if _, set := rp.MaxStaleness(); set {
+			for _, s := range candidates {
+				if s.Kind != Unknown {
+					if err := MaxStalenessSupported(s.WireVersion); err != nil {
+						return nil, err
+					}
+				}
+			}
+		}
+
+		switch t.Kind {
+		case Single:
+			return candidates, nil
+		case ReplicaSetNoPrimary, ReplicaSetWithPrimary:
+			return selectForReplicaSet(rp, t, candidates)
+		case Sharded:
+			return selectByKind(candidates, Mongos), nil
+		}
+
+		return nil, nil
+	})
+}
+
+func selectForReplicaSet(rp *readpref.ReadPref, t Topology, candidates []Server) ([]Server, error) {
+	if err := verifyMaxStaleness(rp, t); err != nil {
+		return nil, err
+	}
+
+	switch rp.Mode() {
+	case readpref.PrimaryMode:
+		return selectByKind(candidates, RSPrimary), nil
+	case readpref.PrimaryPreferredMode:
+		selected := selectByKind(candidates, RSPrimary)
+
+		if len(selected) == 0 {
+			selected = selectSecondaries(rp, candidates)
+			return selectByTagSet(selected, rp.TagSets()), nil
+		}
+
+		return selected, nil
+	case readpref.SecondaryPreferredMode:
+		selected := selectSecondaries(rp, candidates)
+		selected = selectByTagSet(selected, rp.TagSets())
+		if len(selected) > 0 {
+			return selected, nil
+		}
+		return selectByKind(candidates, RSPrimary), nil
+	case readpref.SecondaryMode:
+		selected := selectSecondaries(rp, candidates)
+		return selectByTagSet(selected, rp.TagSets()), nil
+	case readpref.NearestMode:
+		selected := selectByKind(candidates, RSPrimary)
+		selected = append(selected, selectSecondaries(rp, candidates)...)
+		return selectByTagSet(selected, rp.TagSets()), nil
+	}
+
+	return nil, fmt.Errorf("unsupported mode: %d", rp.Mode())
+}
+
+func selectSecondaries(rp *readpref.ReadPref, candidates []Server) []Server {
+	secondaries := selectByKind(candidates, RSSecondary)
+	if len(secondaries) == 0 {
+		return secondaries
+	}
+	if maxStaleness, set := rp.MaxStaleness(); set {
+		primaries := selectByKind(candidates, RSPrimary)
+		if len(primaries) == 0 {
+			baseTime := secondaries[0].LastWriteTime
+			for i := 1; i < len(secondaries); i++ {
+				if secondaries[i].LastWriteTime.After(baseTime) {
+					baseTime = secondaries[i].LastWriteTime
+				}
+			}
+
+			var selected []Server
+			for _, secondary := range secondaries {
+				estimatedStaleness := baseTime.Sub(secondary.LastWriteTime) + secondary.HeartbeatInterval
+				if estimatedStaleness <= maxStaleness {
+					selected = append(selected, secondary)
+				}
+			}
+
+			return selected
+		}
+
+		primary := primaries[0]
+
+		var selected []Server
+		for _, secondary := range secondaries {
+			estimatedStaleness := secondary.LastUpdateTime.Sub(secondary.LastWriteTime) - primary.LastUpdateTime.Sub(primary.LastWriteTime) + secondary.HeartbeatInterval
+			if estimatedStaleness <= maxStaleness {
+				selected = append(selected, secondary)
+			}
+		}
+		return selected
+	}
+
+	return secondaries
+}
+
+func selectByTagSet(candidates []Server, tagSets []tag.Set) []Server {
+	if len(tagSets) == 0 {
+		return candidates
+	}
+
+	for _, ts := range tagSets {
+		var results []Server
+		for _, s := range candidates {
+			if len(s.Tags) > 0 && s.Tags.ContainsAll(ts) {
+				results = append(results, s)
+			}
+		}
+
+		if len(results) > 0 {
+			return results
+		}
+	}
+
+	return []Server{}
+}
+
+func selectByKind(candidates []Server, kind ServerKind) []Server {
+	var result []Server
+	for _, s := range candidates {
+		if s.Kind == kind {
+			result = append(result, s)
+		}
+	}
+
+	return result
+}
+
+func verifyMaxStaleness(rp *readpref.ReadPref, t Topology) error {
+	maxStaleness, set := rp.MaxStaleness()
+	if !set {
+		return nil
+	}
+
+	if maxStaleness < 90*time.Second {
+		return fmt.Errorf("max staleness (%s) must be greater than or equal to 90s", maxStaleness)
+	}
+
+	if len(t.Servers) < 1 {
+		// Maybe we should return an error here instead?
+		return nil
+	}
+
+	// we'll assume all candidates have the same heartbeat interval.
+	s := t.Servers[0]
+	idleWritePeriod := 10 * time.Second
+
+	if maxStaleness < s.HeartbeatInterval+idleWritePeriod {
+		return fmt.Errorf(
+			"max staleness (%s) must be greater than or equal to the heartbeat interval (%s) plus idle write period (%s)",
+			maxStaleness, s.HeartbeatInterval, idleWritePeriod,
+		)
+	}
+
+	return nil
+}
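Selectors compose left to right: each stage filters the candidate list produced by the one before it. A sketch (addresses and RTTs invented) that keeps writable servers within 15ms of the fastest candidate:

```go
package main

import (
	"fmt"
	"time"

	"github.com/mongodb/mongo-go-driver/x/network/address"
	"github.com/mongodb/mongo-go-driver/x/network/description"
)

func main() {
	topo := description.Topology{
		Kind: description.Sharded,
		Servers: []description.Server{
			{Addr: address.Address("a:27017"), Kind: description.Mongos,
				AverageRTT: 5 * time.Millisecond, AverageRTTSet: true},
			{Addr: address.Address("b:27017"), Kind: description.Mongos,
				AverageRTT: 40 * time.Millisecond, AverageRTTSet: true},
		},
	}

	sel := description.CompositeSelector([]description.ServerSelector{
		description.WriteSelector(),                        // writable kinds only
		description.LatencySelector(15 * time.Millisecond), // within min RTT + 15ms
	})

	servers, err := sel.SelectServer(topo, topo.Servers)
	fmt.Println(len(servers), err) // 1 <nil>: only a:27017 is within the window
}
```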
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/topology.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/topology.go
new file mode 100644
index 0000000..caf447d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/topology.go
@@ -0,0 +1,89 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import (
+	"sort"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+)
+
+// Topology represents a description of a MongoDB topology.
+type Topology struct {
+	Servers               []Server
+	Kind                  TopologyKind
+	SessionTimeoutMinutes uint32
+}
+
+// Server returns the server for the given address. Returns false if the server
+// could not be found.
+func (t Topology) Server(addr address.Address) (Server, bool) {
+	for _, server := range t.Servers {
+		if server.Addr.String() == addr.String() {
+			return server, true
+		}
+	}
+	return Server{}, false
+}
+
+// TopologyDiff is the difference between two different topology descriptions.
+type TopologyDiff struct {
+	Added   []Server
+	Removed []Server
+}
+
+// DiffTopology compares the two topology descriptions and returns the difference.
+func DiffTopology(old, new Topology) TopologyDiff {
+	var diff TopologyDiff
+
+	// TODO: do this without sorting...
+	oldServers := serverSorter(old.Servers)
+	newServers := serverSorter(new.Servers)
+
+	sort.Sort(oldServers)
+	sort.Sort(newServers)
+
+	i := 0
+	j := 0
+	for {
+		if i < len(oldServers) && j < len(newServers) {
+			comp := strings.Compare(oldServers[i].Addr.String(), newServers[j].Addr.String())
+			switch comp {
+			case 1:
+				// the old address sorts after the new one: newServers[j] is absent from old, so it was added
+				diff.Added = append(diff.Added, newServers[j])
+				j++
+			case -1:
+				// the old address sorts before the new one: oldServers[i] is absent from new, so it was removed
+				diff.Removed = append(diff.Removed, oldServers[i])
+				i++
+			case 0:
+				i++
+				j++
+			}
+		} else if i < len(oldServers) {
+			diff.Removed = append(diff.Removed, oldServers[i])
+			i++
+		} else if j < len(newServers) {
+			diff.Added = append(diff.Added, newServers[j])
+			j++
+		} else {
+			break
+		}
+	}
+
+	return diff
+}
+
+type serverSorter []Server
+
+func (ss serverSorter) Len() int      { return len(ss) }
+func (ss serverSorter) Swap(i, j int) { ss[i], ss[j] = ss[j], ss[i] }
+func (ss serverSorter) Less(i, j int) bool {
+	return strings.Compare(ss[i].Addr.String(), ss[j].Addr.String()) < 0
+}
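A sketch of DiffTopology on two hand-built descriptions (addresses invented):

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/network/address"
	"github.com/mongodb/mongo-go-driver/x/network/description"
)

func main() {
	before := description.Topology{Servers: []description.Server{
		{Addr: address.Address("a:27017")},
		{Addr: address.Address("b:27017")},
	}}
	after := description.Topology{Servers: []description.Server{
		{Addr: address.Address("b:27017")},
		{Addr: address.Address("c:27017")},
	}}

	diff := description.DiffTopology(before, after)
	fmt.Println(len(diff.Removed), diff.Removed[0].Addr) // 1 a:27017
	fmt.Println(len(diff.Added), diff.Added[0].Addr)     // 1 c:27017
}
```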
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/topology_kind.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/topology_kind.go
new file mode 100644
index 0000000..69f8177
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/topology_kind.go
@@ -0,0 +1,37 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+// TopologyKind represents a specific topology configuration.
+type TopologyKind uint32
+
+// These constants are the available topology configurations.
+const (
+	Single                TopologyKind = 1
+	ReplicaSet            TopologyKind = 2
+	ReplicaSetNoPrimary   TopologyKind = 4 + ReplicaSet
+	ReplicaSetWithPrimary TopologyKind = 8 + ReplicaSet
+	Sharded               TopologyKind = 256
+)
+
+// String implements the fmt.Stringer interface.
+func (kind TopologyKind) String() string {
+	switch kind {
+	case Single:
+		return "Single"
+	case ReplicaSet:
+		return "ReplicaSet"
+	case ReplicaSetNoPrimary:
+		return "ReplicaSetNoPrimary"
+	case ReplicaSetWithPrimary:
+		return "ReplicaSetWithPrimary"
+	case Sharded:
+		return "Sharded"
+	}
+
+	return "Unknown"
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/version.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/version.go
new file mode 100644
index 0000000..60cda4e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/version.go
@@ -0,0 +1,44 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import "strconv"
+
+// Version represents a software version.
+type Version struct {
+	Desc  string
+	Parts []uint8
+}
+
+// AtLeast returns true if the version is at least as large as the "other" version.
+func (v Version) AtLeast(other ...uint8) bool {
+	for i := range other {
+		if i == len(v.Parts) {
+			return false
+		}
+		if v.Parts[i] != other[i] {
+			return v.Parts[i] > other[i]
+		}
+	}
+	return true
+}
+
+// String provides the string representation of the Version.
+func (v Version) String() string {
+	if v.Desc == "" {
+		var s string
+		for i, p := range v.Parts {
+			if i != 0 {
+				s += "."
+			}
+			s += strconv.Itoa(int(p))
+		}
+		return s
+	}
+
+	return v.Desc
+}
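A usage sketch for Version (parts invented):

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/network/description"
)

func main() {
	v := description.Version{Parts: []uint8{3, 4, 1}}

	fmt.Println(v.String())      // "3.4.1"
	fmt.Println(v.AtLeast(3, 2)) // true
	fmt.Println(v.AtLeast(3, 6)) // false
}
```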
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/version_range.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/version_range.go
new file mode 100644
index 0000000..984dff8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/version_range.go
@@ -0,0 +1,31 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import "fmt"
+
+// VersionRange represents a range of versions.
+type VersionRange struct {
+	Min int32
+	Max int32
+}
+
+// NewVersionRange creates a new VersionRange given a min and a max.
+func NewVersionRange(min, max int32) VersionRange {
+	return VersionRange{Min: min, Max: max}
+}
+
+// Includes returns a bool indicating whether the supplied integer is included
+// in the range.
+func (vr VersionRange) Includes(v int32) bool {
+	return v >= vr.Min && v <= vr.Max
+}
+
+// String implements the fmt.Stringer interface.
+func (vr VersionRange) String() string {
+	return fmt.Sprintf("[%d, %d]", vr.Min, vr.Max)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/result/result.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/result/result.go
new file mode 100644
index 0000000..d317bb7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/result/result.go
@@ -0,0 +1,173 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package result contains the results from various operations.
+package result
+
+import (
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
+// Upsert contains the information for a single upsert.
+type Upsert struct {
+	Index int64       `bson:"index"`
+	ID    interface{} `bson:"_id"`
+}
+
+// Insert is a result from an Insert command.
+type Insert struct {
+	N                 int
+	WriteErrors       []WriteError       `bson:"writeErrors"`
+	WriteConcernError *WriteConcernError `bson:"writeConcernError"`
+}
+
+// StartSession is a result from a StartSession command.
+type StartSession struct {
+	ID bsonx.Doc `bson:"id"`
+}
+
+// EndSessions is a result from an EndSessions command.
+type EndSessions struct{}
+
+// Delete is a result from a Delete command.
+type Delete struct {
+	N                 int
+	WriteErrors       []WriteError       `bson:"writeErrors"`
+	WriteConcernError *WriteConcernError `bson:"writeConcernError"`
+}
+
+// Update is a result of an Update command.
+type Update struct {
+	MatchedCount      int64              `bson:"n"`
+	ModifiedCount     int64              `bson:"nModified"`
+	Upserted          []Upsert           `bson:"upserted"`
+	WriteErrors       []WriteError       `bson:"writeErrors"`
+	WriteConcernError *WriteConcernError `bson:"writeConcernError"`
+}
+
+// Distinct is a result from a Distinct command.
+type Distinct struct {
+	Values []interface{}
+}
+
+// FindAndModify is a result from a findAndModify command.
+type FindAndModify struct {
+	Value           bson.Raw
+	LastErrorObject struct {
+		UpdatedExisting bool
+		Upserted        interface{}
+	}
+}
+
+// WriteError is an error from a write operation that is not a write concern
+// error.
+type WriteError struct {
+	Index  int
+	Code   int
+	ErrMsg string
+}
+
+// WriteConcernError is an error related to a write concern.
+type WriteConcernError struct {
+	Code    int
+	ErrMsg  string
+	ErrInfo bson.Raw
+}
+
+// ListDatabases is the result from a listDatabases command.
+type ListDatabases struct {
+	Databases []struct {
+		Name       string
+		SizeOnDisk int64 `bson:"sizeOnDisk"`
+		Empty      bool
+	}
+	TotalSize int64 `bson:"totalSize"`
+}
+
+// IsMaster is a result of an IsMaster command.
+type IsMaster struct {
+	Arbiters                     []string           `bson:"arbiters,omitempty"`
+	ArbiterOnly                  bool               `bson:"arbiterOnly,omitempty"`
+	ClusterTime                  bson.Raw           `bson:"$clusterTime,omitempty"`
+	Compression                  []string           `bson:"compression,omitempty"`
+	ElectionID                   primitive.ObjectID `bson:"electionId,omitempty"`
+	Hidden                       bool               `bson:"hidden,omitempty"`
+	Hosts                        []string           `bson:"hosts,omitempty"`
+	IsMaster                     bool               `bson:"ismaster,omitempty"`
+	IsReplicaSet                 bool               `bson:"isreplicaset,omitempty"`
+	LastWriteTimestamp           time.Time          `bson:"lastWriteDate,omitempty"`
+	LogicalSessionTimeoutMinutes uint32             `bson:"logicalSessionTimeoutMinutes,omitempty"`
+	MaxBSONObjectSize            uint32             `bson:"maxBsonObjectSize,omitempty"`
+	MaxMessageSizeBytes          uint32             `bson:"maxMessageSizeBytes,omitempty"`
+	MaxWriteBatchSize            uint32             `bson:"maxWriteBatchSize,omitempty"`
+	Me                           string             `bson:"me,omitempty"`
+	MaxWireVersion               int32              `bson:"maxWireVersion,omitempty"`
+	MinWireVersion               int32              `bson:"minWireVersion,omitempty"`
+	Msg                          string             `bson:"msg,omitempty"`
+	OK                           int32              `bson:"ok"`
+	Passives                     []string           `bson:"passives,omitempty"`
+	ReadOnly                     bool               `bson:"readOnly,omitempty"`
+	SaslSupportedMechs           []string           `bson:"saslSupportedMechs,omitempty"`
+	Secondary                    bool               `bson:"secondary,omitempty"`
+	SetName                      string             `bson:"setName,omitempty"`
+	SetVersion                   uint32             `bson:"setVersion,omitempty"`
+	Tags                         map[string]string  `bson:"tags,omitempty"`
+}
+
+// BuildInfo is a result of a BuildInfo command.
+type BuildInfo struct {
+	OK           bool    `bson:"ok"`
+	GitVersion   string  `bson:"gitVersion,omitempty"`
+	Version      string  `bson:"version,omitempty"`
+	VersionArray []uint8 `bson:"versionArray,omitempty"`
+}
+
+// IsZero returns true if the BuildInfo is the zero value.
+func (bi BuildInfo) IsZero() bool {
+	if !bi.OK && bi.GitVersion == "" && bi.Version == "" && bi.VersionArray == nil {
+		return true
+	}
+
+	return false
+}
+
+// GetLastError is a result of a GetLastError command.
+type GetLastError struct {
+	ConnectionID uint32 `bson:"connectionId"`
+}
+
+// KillCursors is a result of a KillCursors command.
+type KillCursors struct {
+	CursorsKilled   []int64 `bson:"cursorsKilled"`
+	CursorsNotFound []int64 `bson:"cursorsNotFound"`
+	CursorsAlive    []int64 `bson:"cursorsAlive"`
+}
+
+// CreateIndexes is a result of a CreateIndexes command.
+type CreateIndexes struct {
+	CreatedCollectionAutomatically bool `bson:"createdCollectionAutomatically"`
+	IndexesBefore                  int  `bson:"numIndexesBefore"`
+	IndexesAfter                   int  `bson:"numIndexesAfter"`
+}
+
+// TransactionResult holds the result of committing or aborting a transaction.
+type TransactionResult struct {
+	WriteConcernError *WriteConcernError `bson:"writeConcernError"`
+}
+
+// BulkWrite holds the result of a bulk write operation.
+type BulkWrite struct {
+	InsertedCount int64
+	MatchedCount  int64
+	ModifiedCount int64
+	DeletedCount  int64
+	UpsertedCount int64
+	UpsertedIDs   map[int64]interface{}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/appenders.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/appenders.go
new file mode 100644
index 0000000..3d9f784
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/appenders.go
@@ -0,0 +1,20 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+func appendInt32(b []byte, i int32) []byte {
+	return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24))
+}
+
+func appendCString(b []byte, str string) []byte {
+	b = append(b, str...)
+	return append(b, 0x00)
+}
+
+func appendInt64(b []byte, i int64) []byte {
+	return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24), byte(i>>32), byte(i>>40), byte(i>>48), byte(i>>56))
+}
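All wire-protocol integers are little-endian, so the appenders emit the low-order byte first. A standalone copy of the helper to show the byte layout:

```go
package main

import "fmt"

// Same layout as the vendored appendInt32: low byte first.
func appendInt32(b []byte, i int32) []byte {
	return append(b, byte(i), byte(i>>8), byte(i>>16), byte(i>>24))
}

func main() {
	fmt.Printf("% x\n", appendInt32(nil, 0x01020304)) // 04 03 02 01
}
```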
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/command.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/command.go
new file mode 100644
index 0000000..8c339a5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/command.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import "github.com/mongodb/mongo-go-driver/bson"
+
+// Command represents the OP_COMMAND message of the MongoDB wire protocol.
+type Command struct {
+	MsgHeader   Header
+	Database    string
+	CommandName string
+	Metadata    string
+	CommandArgs string
+	InputDocs   []bson.Raw
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+func (c Command) MarshalWireMessage() ([]byte, error) {
+	panic("not implemented")
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+func (c Command) ValidateWireMessage() error {
+	panic("not implemented")
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+func (c Command) AppendWireMessage([]byte) ([]byte, error) {
+	panic("not implemented")
+}
+
+// String implements the fmt.Stringer interface.
+func (c Command) String() string {
+	panic("not implemented")
+}
+
+// Len implements the WireMessage interface.
+func (c Command) Len() int {
+	panic("not implemented")
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+func (c *Command) UnmarshalWireMessage([]byte) error {
+	panic("not implemented")
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/command_reply.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/command_reply.go
new file mode 100644
index 0000000..d5773d6
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/command_reply.go
@@ -0,0 +1,47 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import "github.com/mongodb/mongo-go-driver/bson"
+
+// CommandReply represents the OP_COMMANDREPLY message of the MongoDB wire protocol.
+type CommandReply struct {
+	MsgHeader    Header
+	Metadata     bson.Raw
+	CommandReply bson.Raw
+	OutputDocs   []bson.Raw
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+func (cr CommandReply) MarshalWireMessage() ([]byte, error) {
+	panic("not implemented")
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+func (cr CommandReply) ValidateWireMessage() error {
+	panic("not implemented")
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+func (cr CommandReply) AppendWireMessage([]byte) ([]byte, error) {
+	panic("not implemented")
+}
+
+// String implements the fmt.Stringer interface.
+func (cr CommandReply) String() string {
+	panic("not implemented")
+}
+
+// Len implements the WireMessage interface.
+func (cr CommandReply) Len() int {
+	panic("not implemented")
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+func (cr *CommandReply) UnmarshalWireMessage([]byte) error {
+	panic("not implemented")
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/compressed.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/compressed.go
new file mode 100644
index 0000000..f8f3884
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/compressed.go
@@ -0,0 +1,110 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Compressed represents the OP_COMPRESSED message of the MongoDB wire protocol.
+type Compressed struct {
+	MsgHeader         Header
+	OriginalOpCode    OpCode
+	UncompressedSize  int32
+	CompressorID      CompressorID
+	CompressedMessage []byte
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+func (c Compressed) MarshalWireMessage() ([]byte, error) {
+	b := make([]byte, 0, c.Len())
+	return c.AppendWireMessage(b)
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+func (c Compressed) ValidateWireMessage() error {
+	if int(c.MsgHeader.MessageLength) != c.Len() {
+		return errors.New("incorrect header: message length is not correct")
+	}
+
+	if c.MsgHeader.OpCode != OpCompressed {
+		return errors.New("incorrect header: opcode is not OpCompressed")
+	}
+
+	if c.OriginalOpCode != c.MsgHeader.OpCode {
+		return errors.New("incorrect header: original opcode does not match opcode in message header")
+	}
+	return nil
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+//
+// AppendWireMessage will set the MessageLength property of MsgHeader if it is 0. It will also set the OpCode to
+// OpCompressed if the OpCode is 0. If either of these properties is non-zero and incorrect, this method will return
+// both the []byte with the wire message appended to it and an invalid header error.
+func (c Compressed) AppendWireMessage(b []byte) ([]byte, error) {
+	err := c.MsgHeader.SetDefaults(c.Len(), OpCompressed)
+
+	b = c.MsgHeader.AppendHeader(b)
+	b = appendInt32(b, int32(c.OriginalOpCode))
+	b = appendInt32(b, c.UncompressedSize)
+	b = append(b, byte(c.CompressorID))
+	b = append(b, c.CompressedMessage...)
+
+	return b, err
+}
+
+// String implements the fmt.Stringer interface.
+func (c Compressed) String() string {
+	return fmt.Sprintf(
+		`OP_COMPRESSED{MsgHeader: %s, Uncompressed Size: %d, CompressorId: %d, Compressed message: %s}`,
+		c.MsgHeader, c.UncompressedSize, c.CompressorID, c.CompressedMessage,
+	)
+}
+
+// Len implements the WireMessage interface.
+func (c Compressed) Len() int {
+	// Header + OpCode + UncompressedSize + CompressorId + CompressedMessage
+	return 16 + 4 + 4 + 1 + len(c.CompressedMessage)
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+func (c *Compressed) UnmarshalWireMessage(b []byte) error {
+	var err error
+	c.MsgHeader, err = ReadHeader(b, 0)
+	if err != nil {
+		return err
+	}
+
+	if len(b) < int(c.MsgHeader.MessageLength) {
+		return Error{Type: ErrOpCompressed, Message: "[]byte too small"}
+	}
+
+	c.OriginalOpCode = OpCode(readInt32(b, 16)) // skip first 16 for header
+	c.UncompressedSize = readInt32(b, 20)
+	c.CompressorID = CompressorID(b[24])
+
+	// messageLength - Header - OpCode - UncompressedSize - CompressorId
+	msgLen := c.MsgHeader.MessageLength - 16 - 4 - 4 - 1
+	c.CompressedMessage = b[25 : 25+msgLen]
+
+	return nil
+}
+
+// CompressorID is the ID for each type of Compressor.
+type CompressorID uint8
+
+// These constants represent the individual compressor IDs for an OP_COMPRESSED.
+const (
+	CompressorNoOp CompressorID = iota
+	CompressorSnappy
+	CompressorZLib
+)
+
+// DefaultZlibLevel is the default level for zlib compression
+const DefaultZlibLevel = 6
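Compressed implements both directions, so a marshal/unmarshal roundtrip can be sketched end to end (the payload bytes are arbitrary; real callers pass snappy- or zlib-compressed data):

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

func main() {
	c := wiremessage.Compressed{
		OriginalOpCode:    wiremessage.OpMsg,
		UncompressedSize:  128,
		CompressorID:      wiremessage.CompressorSnappy,
		CompressedMessage: []byte{0xde, 0xad, 0xbe, 0xef},
	}

	wm, err := c.MarshalWireMessage() // SetDefaults fills in length and opcode
	if err != nil {
		panic(err)
	}

	var out wiremessage.Compressed
	if err := out.UnmarshalWireMessage(wm); err != nil {
		panic(err)
	}
	fmt.Println(out.CompressorID == wiremessage.CompressorSnappy) // true
}
```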
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/delete.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/delete.go
new file mode 100644
index 0000000..0a502da
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/delete.go
@@ -0,0 +1,55 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import "github.com/mongodb/mongo-go-driver/bson"
+
+// Delete represents the OP_DELETE message of the MongoDB wire protocol.
+type Delete struct {
+	MsgHeader          Header
+	FullCollectionName string
+	Flags              DeleteFlag
+	Selector           bson.Raw
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+func (d Delete) MarshalWireMessage() ([]byte, error) {
+	panic("not implemented")
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+func (d Delete) ValidateWireMessage() error {
+	panic("not implemented")
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+func (d Delete) AppendWireMessage([]byte) ([]byte, error) {
+	panic("not implemented")
+}
+
+// String implements the fmt.Stringer interface.
+func (d Delete) String() string {
+	panic("not implemented")
+}
+
+// Len implements the WireMessage interface.
+func (d Delete) Len() int {
+	panic("not implemented")
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+func (d *Delete) UnmarshalWireMessage([]byte) error {
+	panic("not implemented")
+}
+
+// DeleteFlag represents the flags on an OP_DELETE message.
+type DeleteFlag int32
+
+// These constants represent the individual flags on an OP_DELETE message.
+const (
+	SingleRemove DeleteFlag = 1 << iota
+)
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/get_more.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/get_more.go
new file mode 100644
index 0000000..f197113
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/get_more.go
@@ -0,0 +1,103 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"errors"
+	"fmt"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"strings"
+)
+
+// GetMore represents the OP_GET_MORE message of the MongoDB wire protocol.
+type GetMore struct {
+	MsgHeader          Header
+	Zero               int32
+	FullCollectionName string
+	NumberToReturn     int32
+	CursorID           int64
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+func (gm GetMore) MarshalWireMessage() ([]byte, error) {
+	b := make([]byte, 0, gm.Len())
+	return gm.AppendWireMessage(b)
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+func (gm GetMore) ValidateWireMessage() error {
+	if int(gm.MsgHeader.MessageLength) != gm.Len() {
+		return errors.New("incorrect header: message length is not correct")
+	}
+	if gm.MsgHeader.OpCode != OpGetMore {
+		return errors.New("incorrect header: op code is not OpGetMore")
+	}
+	if !strings.Contains(gm.FullCollectionName, ".") {
+		return errors.New("incorrect header: collection name does not contain a dot")
+	}
+
+	return nil
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+//
+// AppendWireMessage will set the MessageLength property of the MsgHeader
+// if it is zero. It will also set the OpCode to OpGetMore if the OpCode is
+// zero. If either of these properties is non-zero and incorrect, this
+// method will return both the []byte with the wire message appended to it
+// and an invalid header error.
+func (gm GetMore) AppendWireMessage(b []byte) ([]byte, error) {
+	var err error
+	err = gm.MsgHeader.SetDefaults(gm.Len(), OpGetMore)
+
+	b = gm.MsgHeader.AppendHeader(b)
+	b = appendInt32(b, gm.Zero)
+	b = appendCString(b, gm.FullCollectionName)
+	b = appendInt32(b, gm.NumberToReturn)
+	b = appendInt64(b, gm.CursorID)
+	return b, err
+}
+
+// String implements the fmt.Stringer interface.
+func (gm GetMore) String() string {
+	return fmt.Sprintf(
+		`OP_GET_MORE{MsgHeader: %s, Zero: %d, FullCollectionName: %s, NumberToReturn: %d, CursorID: %d}`,
+		gm.MsgHeader, gm.Zero, gm.FullCollectionName, gm.NumberToReturn, gm.CursorID,
+	)
+}
+
+// Len implements the WireMessage interface.
+func (gm GetMore) Len() int {
+	// Header + Zero + CollectionName + Null Terminator + Return + CursorID
+	return 16 + 4 + len(gm.FullCollectionName) + 1 + 4 + 8
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+func (gm *GetMore) UnmarshalWireMessage([]byte) error {
+	panic("not implemented")
+}
+
+// CommandDocument creates a BSON document representing this command.
+func (gm GetMore) CommandDocument() bsonx.Doc {
+	parts := strings.Split(gm.FullCollectionName, ".")
+	collName := parts[len(parts)-1]
+
+	doc := bsonx.Doc{
+		{"getMore", bsonx.Int64(gm.CursorID)},
+		{"collection", bsonx.String(collName)},
+	}
+	if gm.NumberToReturn != 0 {
+		doc = doc.Append("batchSize", bsonx.Int32(gm.NumberToReturn))
+	}
+
+	return doc
+}
+
+// DatabaseName returns the name of the database for this command.
+func (gm GetMore) DatabaseName() string {
+	return strings.Split(gm.FullCollectionName, ".")[0]
+}
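GetMore is fully implemented on the marshal side; a usage sketch with invented collection and cursor values:

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

func main() {
	gm := wiremessage.GetMore{
		FullCollectionName: "mydb.mycoll",
		NumberToReturn:     100,
		CursorID:           42,
	}

	fmt.Println(gm.DatabaseName()) // "mydb"

	// CommandDocument maps the legacy message onto the modern command shape:
	// {getMore: 42, collection: "mycoll", batchSize: 100}
	_ = gm.CommandDocument()

	wm, err := gm.MarshalWireMessage() // header filled in by SetDefaults
	fmt.Println(len(wm), err)          // 44 <nil> (16+4+11+1+4+8)
}
```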
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/header.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/header.go
new file mode 100644
index 0000000..ad1c291
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/header.go
@@ -0,0 +1,87 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"fmt"
+)
+
+// ErrInvalidHeader is returned when methods are called on a malformed Header.
+var ErrInvalidHeader error = Error{Type: ErrHeader, Message: "invalid header"}
+
+// ErrHeaderTooSmall is returned when the size of the header is too small to be valid.
+var ErrHeaderTooSmall error = Error{Type: ErrHeader, Message: "the header is too small to be valid"}
+
+// ErrHeaderTooFewBytes is returned when a call to ReadHeader does not contain enough
+// bytes to be a valid header.
+var ErrHeaderTooFewBytes error = Error{Type: ErrHeader, Message: "invalid header because []byte too small"}
+
+// ErrHeaderInvalidLength is returned when the MessageLength of a header is
+// set but is not set to the correct size.
+var ErrHeaderInvalidLength error = Error{Type: ErrHeader, Message: "invalid header because MessageLength is improperly set"}
+
+// ErrHeaderIncorrectOpCode is returned when the OpCode on a header is set but
+// is not set to the correct OpCode.
+var ErrHeaderIncorrectOpCode error = Error{Type: ErrHeader, Message: "invalid header because OpCode is improperly set"}
+
+// Header represents the header of a MongoDB wire protocol message.
+type Header struct {
+	MessageLength int32
+	RequestID     int32
+	ResponseTo    int32
+	OpCode        OpCode
+}
+
+// ReadHeader reads a header from the given slice of bytes starting at offset
+// pos.
+func ReadHeader(b []byte, pos int32) (Header, error) {
+	if len(b) < int(pos)+16 {
+		return Header{}, ErrHeaderTooFewBytes
+	}
+	return Header{
+		MessageLength: readInt32(b, pos),
+		RequestID:     readInt32(b, pos+4),
+		ResponseTo:    readInt32(b, pos+8),
+		OpCode:        OpCode(readInt32(b, pos+12)),
+	}, nil
+}
+
+func (h Header) String() string {
+	return fmt.Sprintf(
+		`Header{MessageLength: %d, RequestID: %d, ResponseTo: %d, OpCode: %v}`,
+		h.MessageLength, h.RequestID, h.ResponseTo, h.OpCode,
+	)
+}
+
+// AppendHeader will append this header to the given slice of bytes.
+func (h Header) AppendHeader(b []byte) []byte {
+	b = appendInt32(b, h.MessageLength)
+	b = appendInt32(b, h.RequestID)
+	b = appendInt32(b, h.ResponseTo)
+	b = appendInt32(b, int32(h.OpCode))
+
+	return b
+}
+
+// SetDefaults sets the length and opcode of this header.
+func (h *Header) SetDefaults(length int, opcode OpCode) error {
+	switch h.MessageLength {
+	case int32(length):
+	case 0:
+		h.MessageLength = int32(length)
+	default:
+		return ErrHeaderInvalidLength
+	}
+	switch h.OpCode {
+	case opcode:
+	case OpCode(0):
+		h.OpCode = opcode
+	default:
+		return ErrHeaderIncorrectOpCode
+	}
+	return nil
+}
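A roundtrip sketch for the header helpers (the request ID is invented):

```go
package main

import (
	"fmt"

	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
)

func main() {
	h := wiremessage.Header{RequestID: 7}

	// Both MessageLength and OpCode are zero, so SetDefaults fills them in.
	if err := h.SetDefaults(16, wiremessage.OpGetMore); err != nil {
		panic(err)
	}

	buf := h.AppendHeader(nil) // 16 little-endian bytes

	parsed, err := wiremessage.ReadHeader(buf, 0)
	fmt.Println(parsed.RequestID, err) // 7 <nil>
}
```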
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/insert.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/insert.go
new file mode 100644
index 0000000..498228d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/insert.go
@@ -0,0 +1,55 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import "github.com/mongodb/mongo-go-driver/bson"
+
+// Insert represents the OP_INSERT message of the MongoDB wire protocol.
+type Insert struct {
+	MsgHeader          Header
+	Flags              InsertFlag
+	FullCollectionName string
+	Documents          []bson.Raw
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+func (i Insert) MarshalWireMessage() ([]byte, error) {
+	panic("not implemented")
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+func (i Insert) ValidateWireMessage() error {
+	panic("not implemented")
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+func (i Insert) AppendWireMessage([]byte) ([]byte, error) {
+	panic("not implemented")
+}
+
+// String implements the fmt.Stringer interface.
+func (i Insert) String() string {
+	panic("not implemented")
+}
+
+// Len implements the WireMessage interface.
+func (i Insert) Len() int {
+	panic("not implemented")
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+func (i *Insert) UnmarshalWireMessage([]byte) error {
+	panic("not implemented")
+}
+
+// InsertFlag represents the flags on an OP_INSERT message.
+type InsertFlag int32
+
+// These constants represent the individual flags on an OP_INSERT message.
+const (
+	ContinueOnError InsertFlag = 1 << iota
+)
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/kill_cursors.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/kill_cursors.go
new file mode 100644
index 0000000..228a6ae
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/kill_cursors.go
@@ -0,0 +1,92 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"errors"
+	"fmt"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
+// KillCursors represents the OP_KILL_CURSORS message of the MongoDB wire protocol.
+type KillCursors struct {
+	MsgHeader         Header
+	Zero              int32
+	NumberOfCursorIDs int32
+	CursorIDs         []int64
+
+	DatabaseName   string
+	CollectionName string
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+func (kc KillCursors) MarshalWireMessage() ([]byte, error) {
+	b := make([]byte, 0, kc.Len())
+	return kc.AppendWireMessage(b)
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+func (kc KillCursors) ValidateWireMessage() error {
+	if int(kc.MsgHeader.MessageLength) != kc.Len() {
+		return errors.New("incorrect header: message length is not correct")
+	}
+	if kc.MsgHeader.OpCode != OpKillCursors {
+		return errors.New("incorrect header: op code is not OpGetMore")
+	}
+	if kc.NumberOfCursorIDs != int32(len(kc.CursorIDs)) {
+		return errors.New("incorrect number of cursor IDs")
+	}
+
+	return nil
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+func (kc KillCursors) AppendWireMessage(b []byte) ([]byte, error) {
+	var err error
+	err = kc.MsgHeader.SetDefaults(kc.Len(), OpKillCursors)
+
+	b = kc.MsgHeader.AppendHeader(b)
+	b = appendInt32(b, kc.Zero)
+	b = appendInt32(b, kc.NumberOfCursorIDs)
+	for _, id := range kc.CursorIDs {
+		b = appendInt64(b, id)
+	}
+
+	return b, err
+}
+
+// String implements the fmt.Stringer interface.
+func (kc KillCursors) String() string {
+	return fmt.Sprintf(
+		`OP_KILL_CURSORS{MsgHeader: %s, Zero: %d, Number of Cursor IDS: %d, Cursor IDs: %v}`,
+		kc.MsgHeader, kc.Zero, kc.NumberOfCursorIDs, kc.CursorIDs,
+	)
+}
+
+// Len implements the WireMessage interface.
+func (kc KillCursors) Len() int {
+	// Header + Zero + Number IDs + 8 * Number IDs
+	return 16 + 4 + 4 + int(kc.NumberOfCursorIDs*8)
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+func (kc *KillCursors) UnmarshalWireMessage([]byte) error {
+	panic("not implemented")
+}
+
+// CommandDocument creates a BSON document representing this command.
+func (kc KillCursors) CommandDocument() bsonx.Doc {
+	cursors := make([]bsonx.Val, len(kc.CursorIDs))
+	for i, id := range kc.CursorIDs {
+		cursors[i] = bsonx.Int64(id)
+	}
+
+	return bsonx.Doc{
+		{"killCursors", bsonx.String(kc.CollectionName)},
+		{"cursors", bsonx.Array(cursors)},
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/msg.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/msg.go
new file mode 100644
index 0000000..07f35ab
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/msg.go
@@ -0,0 +1,295 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
+// Msg represents the OP_MSG message of the MongoDB wire protocol.
+type Msg struct {
+	MsgHeader Header
+	FlagBits  MsgFlag
+	Sections  []Section
+	Checksum  uint32
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+func (m Msg) MarshalWireMessage() ([]byte, error) {
+	b := make([]byte, 0, m.Len())
+	return m.AppendWireMessage(b)
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+func (m Msg) ValidateWireMessage() error {
+	if int(m.MsgHeader.MessageLength) != m.Len() {
+		return errors.New("incorrect header: message length is not correct")
+	}
+	if m.MsgHeader.OpCode != OpMsg {
+		return errors.New("incorrect header: opcode is not OpMsg")
+	}
+
+	return nil
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+//
+// AppendWireMessage will set the MessageLength property of the MsgHeader if it is zero. It will also set the OpCode
+// to OpMsg if it is zero. If either of these properties is non-zero and not correct, this method will return both the
+// []byte with the wire message appended to it and an invalid header error.
+func (m Msg) AppendWireMessage(b []byte) ([]byte, error) {
+	err := m.MsgHeader.SetDefaults(m.Len(), OpMsg)
+
+	b = m.MsgHeader.AppendHeader(b)
+	b = appendInt32(b, int32(m.FlagBits))
+
+	for _, section := range m.Sections {
+		b = section.AppendSection(b)
+	}
+
+	return b, err
+}
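+
+// For example (illustrative sketch; rawCommandDoc stands in for a valid BSON
+// command document), appending an OP_MSG with a single body section:
+//
+//	msg := Msg{Sections: []Section{SectionBody{Document: rawCommandDoc}}}
+//	b, err := msg.AppendWireMessage(make([]byte, 0, msg.Len()))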
+
+// String implements the fmt.Stringer interface.
+func (m Msg) String() string {
+	panic("not implemented")
+}
+
+// Len implements the WireMessage interface.
+func (m Msg) Len() int {
+	// Header + Flags + len of each section + optional checksum
+	totalLen := 16 + 4 // header and flag
+
+	for _, section := range m.Sections {
+		totalLen += section.Len()
+	}
+
+	if m.FlagBits&ChecksumPresent > 0 {
+		totalLen += 4
+	}
+
+	return totalLen
+}
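+
+// For example, a Msg with one SectionBody over a 29-byte document and
+// ChecksumPresent set has Len() == 16 + 4 + (1 + 29) + 4 == 54.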
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+func (m *Msg) UnmarshalWireMessage(b []byte) error {
+	var err error
+
+	m.MsgHeader, err = ReadHeader(b, 0)
+	if err != nil {
+		return err
+	}
+	if len(b) < int(m.MsgHeader.MessageLength) {
+		return Error{
+			Type:    ErrOpMsg,
+			Message: "[]byte too small",
+		}
+	}
+
+	m.FlagBits = MsgFlag(readInt32(b, 16))
+
+	// read each section
+	sectionBytes := m.MsgHeader.MessageLength - 16 - 4 // number of bytes taken up by sections
+	hasChecksum := m.FlagBits&ChecksumPresent > 0
+	if hasChecksum {
+		sectionBytes -= 4 // 4 bytes at end for checksum
+	}
+
+	m.Sections = make([]Section, 0)
+	position := 20 // position to read from
+	for sectionBytes > 0 {
+		sectionType := SectionType(b[position])
+		position++
+
+		switch sectionType {
+		case SingleDocument:
+			rdr, size, err := readDocument(b, int32(position))
+			if err.Message != "" {
+				err.Type = ErrOpMsg
+				return err
+			}
+
+			position += size
+			sb := SectionBody{
+				Document: rdr,
+			}
+			sb.PayloadType = sb.Kind()
+
+			sectionBytes -= int32(sb.Len())
+			m.Sections = append(m.Sections, sb)
+		case DocumentSequence:
+			sds := SectionDocumentSequence{}
+			sds.Size = readInt32(b, int32(position))
+			position += 4
+
+			identifier, err := readCString(b, int32(position))
+			if err != nil {
+				return err
+			}
+
+			sds.Identifier = identifier
+			position += len(identifier) + 1 // +1 for \0
+			sds.PayloadType = sds.Kind()
+
+			// length of documents to read
+			// sequenceLen - 4 bytes for size field - identifierLength (including \0)
+			docsLen := int(sds.Size) - 4 - len(identifier) - 1
+			for docsLen > 0 {
+				rdr, size, err := readDocument(b, int32(position))
+				if err.Message != "" {
+					err.Type = ErrOpMsg
+					return err
+				}
+
+				position += size
+				sds.Documents = append(sds.Documents, rdr)
+				docsLen -= size
+			}
+
+			sectionBytes -= int32(sds.Len())
+			m.Sections = append(m.Sections, sds)
+		}
+	}
+
+	if hasChecksum {
+		m.Checksum = uint32(readInt32(b, int32(position)))
+	}
+
+	return nil
+}
+
+// GetMainDocument returns the document containing the message to send.
+func (m *Msg) GetMainDocument() (bsonx.Doc, error) {
+	return bsonx.ReadDoc(m.Sections[0].(SectionBody).Document)
+}
+
+// GetSequenceArray returns this message's document sequence as a BSON array along with the array identifier.
+// If this message has no associated document sequence, a nil array is returned.
+func (m *Msg) GetSequenceArray() (bsonx.Arr, string, error) {
+	if len(m.Sections) == 1 {
+		return nil, "", nil
+	}
+
+	arr := bsonx.Arr{}
+	sds := m.Sections[1].(SectionDocumentSequence)
+
+	for _, rdr := range sds.Documents {
+		doc, err := bsonx.ReadDoc([]byte(rdr))
+		if err != nil {
+			return nil, "", err
+		}
+
+		arr = append(arr, bsonx.Document(doc))
+	}
+
+	return arr, sds.Identifier, nil
+}
+
+// AcknowledgedWrite returns true if this msg represents an acknowledged write command.
+func (m *Msg) AcknowledgedWrite() bool {
+	return m.FlagBits&MoreToCome == 0
+}
+
+// MsgFlag represents the flags on an OP_MSG message.
+type MsgFlag uint32
+
+// These constants represent the individual flags on an OP_MSG message.
+const (
+	ChecksumPresent MsgFlag = 1 << iota
+	MoreToCome
+
+	ExhaustAllowed MsgFlag = 1 << 16
+)
+
+// Section represents a section on an OP_MSG message.
+type Section interface {
+	Kind() SectionType
+	Len() int
+	AppendSection([]byte) []byte
+}
+
+// SectionBody represents a single-document body section (payload type 0) of an OP_MSG message.
+type SectionBody struct {
+	PayloadType SectionType
+	Document    bson.Raw
+}
+
+// Kind implements the Section interface.
+func (sb SectionBody) Kind() SectionType {
+	return SingleDocument
+}
+
+// Len implements the Section interface.
+func (sb SectionBody) Len() int {
+	return 1 + len(sb.Document) // 1 for PayloadType
+}
+
+// AppendSection implements the Section interface.
+func (sb SectionBody) AppendSection(dest []byte) []byte {
+	dest = append(dest, byte(SingleDocument))
+	dest = append(dest, sb.Document...)
+	return dest
+}
+
+// SectionDocumentSequence represents a document sequence section (payload type 1) of an OP_MSG message.
+type SectionDocumentSequence struct {
+	PayloadType SectionType
+	Size        int32
+	Identifier  string
+	Documents   []bson.Raw
+}
+
+// Kind implements the Section interface.
+func (sds SectionDocumentSequence) Kind() SectionType {
+	return DocumentSequence
+}
+
+// Len implements the Section interface.
+func (sds SectionDocumentSequence) Len() int {
+	// PayloadType + Size + Identifier + 1 (null terminator) + totalDocLen
+	totalDocLen := 0
+	for _, doc := range sds.Documents {
+		totalDocLen += len(doc)
+	}
+
+	return 1 + 4 + len(sds.Identifier) + 1 + totalDocLen
+}
+
+// PayloadLen returns the length of the payload, excluding the payload type byte.
+func (sds SectionDocumentSequence) PayloadLen() int {
+	// 4 bytes for size field, len identifier (including \0), and total docs len
+	return sds.Len() - 1
+}
+
+// AppendSection implements the Section interface.
+func (sds SectionDocumentSequence) AppendSection(dest []byte) []byte {
+	dest = append(dest, byte(DocumentSequence))
+	dest = appendInt32(dest, sds.Size)
+	dest = appendCString(dest, sds.Identifier)
+
+	for _, doc := range sds.Documents {
+		dest = append(dest, doc...)
+	}
+
+	return dest
+}
+
+// SectionType represents the type of a single section in an OP_MSG.
+type SectionType uint8
+
+// These constants represent the valid section types for an OP_MSG.
+const (
+	SingleDocument SectionType = iota
+	DocumentSequence
+)
+
+// OpmsgWireVersion is the minimum wire version needed to use OP_MSG.
+const OpmsgWireVersion = 6
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/query.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/query.go
new file mode 100644
index 0000000..568a3ec
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/query.go
@@ -0,0 +1,336 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
+// Query represents the OP_QUERY message of the MongoDB wire protocol.
+type Query struct {
+	MsgHeader            Header
+	Flags                QueryFlag
+	FullCollectionName   string
+	NumberToSkip         int32
+	NumberToReturn       int32
+	Query                bson.Raw
+	ReturnFieldsSelector bson.Raw
+
+	SkipSet   bool
+	Limit     *int32
+	BatchSize *int32
+}
+
+var optionsMap = map[string]string{
+	"$orderby":     "sort",
+	"$hint":        "hint",
+	"$comment":     "comment",
+	"$maxScan":     "maxScan",
+	"$max":         "max",
+	"$min":         "min",
+	"$returnKey":   "returnKey",
+	"$showDiskLoc": "showRecordId",
+	"$maxTimeMS":   "maxTimeMS",
+	"$snapshot":    "snapshot",
+}
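+
+// For example (illustrative sketch), a legacy query document such as
+//
+//	{"$query": {"x": 1}, "$orderby": {"y": 1}, "$maxTimeMS": 1000}
+//
+// is rewritten by legacyCommandDocument into a find command roughly of the form
+//
+//	{"find": <collection>, "sort": {"y": 1}, "maxTimeMS": 1000, "filter": {"x": 1}}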
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+//
+// See AppendWireMessage for a description of the rules this method follows.
+func (q Query) MarshalWireMessage() ([]byte, error) {
+	b := make([]byte, 0, q.Len())
+	return q.AppendWireMessage(b)
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+func (q Query) ValidateWireMessage() error {
+	if int(q.MsgHeader.MessageLength) != q.Len() {
+		return errors.New("incorrect header: message length is not correct")
+	}
+	if q.MsgHeader.OpCode != OpQuery {
+		return errors.New("incorrect header: op code is not OpQuery")
+	}
+	if !strings.Contains(q.FullCollectionName, ".") {
+		return errors.New("incorrect header: collection name does not contain a dot")
+	}
+	if q.Query != nil && len(q.Query) > 0 {
+		err := q.Query.Validate()
+		if err != nil {
+			return err
+		}
+	}
+
+	if len(q.ReturnFieldsSelector) > 0 {
+		err := q.ReturnFieldsSelector.Validate()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+//
+// AppendWireMessage will set the MessageLength property of the MsgHeader
+// if it is zero. It will also set the OpCode to OpQuery if the OpCode is
+// zero. If either of these properties are non-zero and not correct, this
+// method will return both the []byte with the wire message appended to it
+// and an invalid header error.
+func (q Query) AppendWireMessage(b []byte) ([]byte, error) {
+	err := q.MsgHeader.SetDefaults(q.Len(), OpQuery)
+
+	b = q.MsgHeader.AppendHeader(b)
+	b = appendInt32(b, int32(q.Flags))
+	b = appendCString(b, q.FullCollectionName)
+	b = appendInt32(b, q.NumberToSkip)
+	b = appendInt32(b, q.NumberToReturn)
+	b = append(b, q.Query...)
+	b = append(b, q.ReturnFieldsSelector...)
+	return b, err
+}
+
+// String implements the fmt.Stringer interface.
+func (q Query) String() string {
+	return fmt.Sprintf(
+		`OP_QUERY{MsgHeader: %s, Flags: %s, FullCollectionName: %s, NumberToSkip: %d, NumberToReturn: %d, Query: %s, ReturnFieldsSelector: %s}`,
+		q.MsgHeader, q.Flags, q.FullCollectionName, q.NumberToSkip, q.NumberToReturn, q.Query, q.ReturnFieldsSelector,
+	)
+}
+
+// Len implements the WireMessage interface.
+func (q Query) Len() int {
+	// Header + Flags + CollectionName + Null Byte + Skip + Return + Query + ReturnFieldsSelector
+	return 16 + 4 + len(q.FullCollectionName) + 1 + 4 + 4 + len(q.Query) + len(q.ReturnFieldsSelector)
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+func (q *Query) UnmarshalWireMessage(b []byte) error {
+	var err error
+	q.MsgHeader, err = ReadHeader(b, 0)
+	if err != nil {
+		return err
+	}
+	if len(b) < int(q.MsgHeader.MessageLength) {
+		return Error{Type: ErrOpQuery, Message: "[]byte too small"}
+	}
+
+	q.Flags = QueryFlag(readInt32(b, 16))
+	q.FullCollectionName, err = readCString(b, 20)
+	if err != nil {
+		return err
+	}
+	pos := 20 + len(q.FullCollectionName) + 1
+	q.NumberToSkip = readInt32(b, int32(pos))
+	pos += 4
+	q.NumberToReturn = readInt32(b, int32(pos))
+	pos += 4
+
+	var size int
+	var wmerr Error
+	q.Query, size, wmerr = readDocument(b, int32(pos))
+	if wmerr.Message != "" {
+		wmerr.Type = ErrOpQuery
+		return wmerr
+	}
+	pos += size
+	if pos < len(b) {
+		q.ReturnFieldsSelector, size, wmerr = readDocument(b, int32(pos))
+		if wmerr.Message != "" {
+			wmerr.Type = ErrOpQuery
+			return wmerr
+		}
+		pos += size
+	}
+
+	return nil
+}
+
+// AcknowledgedWrite returns true if this command represents an acknowledged write.
+func (q *Query) AcknowledgedWrite() bool {
+	wcElem, err := q.Query.LookupErr("writeConcern")
+	if err != nil {
+		// no writeConcern element means the write is acknowledged by default
+		return true
+	}
+
+	return writeconcern.AcknowledgedValue(wcElem)
+}
+
+// Legacy returns true if the query represents a legacy find operation.
+func (q Query) Legacy() bool {
+	return !strings.Contains(q.FullCollectionName, "$cmd")
+}
+
+// DatabaseName returns the database name for the query.
+func (q Query) DatabaseName() string {
+	if q.Legacy() {
+		return strings.Split(q.FullCollectionName, ".")[0]
+	}
+
+	return q.FullCollectionName[:len(q.FullCollectionName)-5] // remove .$cmd
+}
+
+// CollectionName returns the collection name for the query.
+func (q Query) CollectionName() string {
+	parts := strings.Split(q.FullCollectionName, ".")
+	return parts[len(parts)-1]
+}
+
+// CommandDocument creates a BSON document representing this command.
+func (q Query) CommandDocument() (bsonx.Doc, error) {
+	if q.Legacy() {
+		return q.legacyCommandDocument()
+	}
+
+	cmd, err := bsonx.ReadDoc([]byte(q.Query))
+	if err != nil {
+		return nil, err
+	}
+
+	cmdElem := cmd[0]
+	if cmdElem.Key == "$query" {
+		cmd = cmdElem.Value.Document()
+	}
+
+	return cmd, nil
+}
+
+func (q Query) legacyCommandDocument() (bsonx.Doc, error) {
+	doc, err := bsonx.ReadDoc(q.Query)
+	if err != nil {
+		return nil, err
+	}
+
+	parts := strings.Split(q.FullCollectionName, ".")
+	collName := parts[len(parts)-1]
+	doc = append(bsonx.Doc{{"find", bsonx.String(collName)}}, doc...)
+
+	var filter bsonx.Doc
+	var queryIndex int
+	for i, elem := range doc {
+		if newKey, ok := optionsMap[elem.Key]; ok {
+			doc[i].Key = newKey
+			continue
+		}
+
+		if elem.Key == "$query" {
+			filter = elem.Value.Document()
+		} else {
+			// any other non-modifier element is part of the filter
+			filter = filter.Append(elem.Key, elem.Value)
+		}
+
+		queryIndex = i
+	}
+
+	doc = append(doc[:queryIndex], doc[queryIndex+1:]...) // remove $query
+	if len(filter) != 0 {
+		doc = doc.Append("filter", bsonx.Document(filter))
+	}
+
+	doc, err = q.convertLegacyParams(doc)
+	if err != nil {
+		return nil, err
+	}
+
+	return doc, nil
+}
+
+func (q Query) convertLegacyParams(doc bsonx.Doc) (bsonx.Doc, error) {
+	if q.ReturnFieldsSelector != nil {
+		projDoc, err := bsonx.ReadDoc(q.ReturnFieldsSelector)
+		if err != nil {
+			return nil, err
+		}
+		doc = doc.Append("projection", bsonx.Document(projDoc))
+	}
+	if q.Limit != nil {
+		limit := *q.Limit
+		if limit < 0 {
+			limit *= -1
+			doc = doc.Append("singleBatch", bsonx.Boolean(true))
+		}
+
+		doc = doc.Append("limit", bsonx.Int32(*q.Limit))
+	}
+	if q.BatchSize != nil {
+		doc = doc.Append("batchSize", bsonx.Int32(*q.BatchSize))
+	}
+	if q.SkipSet {
+		doc = doc.Append("skip", bsonx.Int32(q.NumberToSkip))
+	}
+	if q.Flags&TailableCursor > 0 {
+		doc = doc.Append("tailable", bsonx.Boolean(true))
+	}
+	if q.Flags&OplogReplay > 0 {
+		doc = doc.Append("oplogReplay", bsonx.Boolean(true))
+	}
+	if q.Flags&NoCursorTimeout > 0 {
+		doc = doc.Append("noCursorTimeout", bsonx.Boolean(true))
+	}
+	if q.Flags&AwaitData > 0 {
+		doc = doc.Append("awaitData", bsonx.Boolean(true))
+	}
+	if q.Flags&Partial > 0 {
+		doc = doc.Append("allowPartialResults", bsonx.Boolean(true))
+	}
+
+	return doc, nil
+}
+
+// QueryFlag represents the flags on an OP_QUERY message.
+type QueryFlag int32
+
+// These constants represent the individual flags on an OP_QUERY message.
+const (
+	_ QueryFlag = 1 << iota
+	TailableCursor
+	SlaveOK
+	OplogReplay
+	NoCursorTimeout
+	AwaitData
+	Exhaust
+	Partial
+)
+
+// String implements the fmt.Stringer interface.
+func (qf QueryFlag) String() string {
+	strs := make([]string, 0)
+	if qf&TailableCursor == TailableCursor {
+		strs = append(strs, "TailableCursor")
+	}
+	if qf&SlaveOK == SlaveOK {
+		strs = append(strs, "SlaveOK")
+	}
+	if qf&OplogReplay == OplogReplay {
+		strs = append(strs, "OplogReplay")
+	}
+	if qf&NoCursorTimeout == NoCursorTimeout {
+		strs = append(strs, "NoCursorTimeout")
+	}
+	if qf&AwaitData == AwaitData {
+		strs = append(strs, "AwaitData")
+	}
+	if qf&Exhaust == Exhaust {
+		strs = append(strs, "Exhaust")
+	}
+	if qf&Partial == Partial {
+		strs = append(strs, "Partial")
+	}
+	str := "["
+	str += strings.Join(strs, ", ")
+	str += "]"
+	return str
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/readers.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/readers.go
new file mode 100644
index 0000000..bbf394d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/readers.go
@@ -0,0 +1,51 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"bytes"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+)
+
+func readInt32(b []byte, pos int32) int32 {
+	return (int32(b[pos+0])) | (int32(b[pos+1]) << 8) | (int32(b[pos+2]) << 16) | (int32(b[pos+3]) << 24)
+}
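+
+// For example, readInt32([]byte{0x10, 0x27, 0x00, 0x00}, 0) == 10000: wire
+// protocol integers are little-endian.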
+
+func readCString(b []byte, pos int32) (string, error) {
+	null := bytes.IndexByte(b[pos:], 0x00)
+	if null == -1 {
+		return "", errors.New("invalid cstring")
+	}
+	return string(b[pos : int(pos)+null]), nil
+}
+
+func readInt64(b []byte, pos int32) int64 {
+	return (int64(b[pos+0])) | (int64(b[pos+1]) << 8) | (int64(b[pos+2]) << 16) | (int64(b[pos+3]) << 24) | (int64(b[pos+4]) << 32) |
+		(int64(b[pos+5]) << 40) | (int64(b[pos+6]) << 48) | (int64(b[pos+7]) << 56)
+}
+
+// readDocument attempts to read a bson.Raw document from the given slice of
+// bytes at the given position.
+func readDocument(b []byte, pos int32) (bson.Raw, int, Error) {
+	if int(pos)+4 > len(b) {
+		return nil, 0, Error{Message: "document too small to be valid"}
+	}
+	size := int(readInt32(b, int32(pos)))
+	if int(pos)+size > len(b) {
+		return nil, 0, Error{Message: "document size is larger than available bytes"}
+	}
+	if b[int(pos)+size-1] != 0x00 {
+		return nil, 0, Error{Message: "document invalid, last byte is not null"}
+	}
+	// TODO(GODRIVER-138): When we add 3.0 support, alter this so we either do one larger make or use a pool.
+	rdr := make(bson.Raw, size)
+	copy(rdr, b[pos:int(pos)+size])
+	return rdr, size, Error{Type: ErrNil}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/reply.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/reply.go
new file mode 100644
index 0000000..dc8e450
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/reply.go
@@ -0,0 +1,201 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
+// Reply represents the OP_REPLY message of the MongoDB wire protocol.
+type Reply struct {
+	MsgHeader      Header
+	ResponseFlags  ReplyFlag
+	CursorID       int64
+	StartingFrom   int32
+	NumberReturned int32
+	Documents      []bson.Raw
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+//
+// See AppendWireMessage for a description of the rules this method follows.
+func (r Reply) MarshalWireMessage() ([]byte, error) {
+	b := make([]byte, 0, r.Len())
+	return r.AppendWireMessage(b)
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+func (r Reply) ValidateWireMessage() error {
+	if int(r.MsgHeader.MessageLength) != r.Len() {
+		return errors.New("incorrect header: message length is not correct")
+	}
+	if r.MsgHeader.OpCode != OpReply {
+		return errors.New("incorrect header: op code is not OpReply")
+	}
+
+	return nil
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+//
+// AppendWireMessage will set the MessageLength property of the MsgHeader
+// if it is zero. It will also set the OpCode to OpReply if the OpCode is
+// zero. If either of these properties are non-zero and not correct, this
+// method will return both the []byte with the wire message appended to it
+// and an invalid header error.
+func (r Reply) AppendWireMessage(b []byte) ([]byte, error) {
+	err := r.MsgHeader.SetDefaults(r.Len(), OpReply)
+
+	b = r.MsgHeader.AppendHeader(b)
+	b = appendInt32(b, int32(r.ResponseFlags))
+	b = appendInt64(b, r.CursorID)
+	b = appendInt32(b, r.StartingFrom)
+	b = appendInt32(b, r.NumberReturned)
+	for _, d := range r.Documents {
+		b = append(b, d...)
+	}
+	return b, err
+}
+
+// String implements the fmt.Stringer interface.
+func (r Reply) String() string {
+	return fmt.Sprintf(
+		`OP_REPLY{MsgHeader: %s, ResponseFlags: %s, CursorID: %d, StartingFrom: %d, NumberReturned: %d, Documents: %v}`,
+		r.MsgHeader, r.ResponseFlags, r.CursorID, r.StartingFrom, r.NumberReturned, r.Documents,
+	)
+}
+
+// Len implements the WireMessage interface.
+func (r Reply) Len() int {
+	// Header + Flags + CursorID + StartingFrom + NumberReturned + total length of Documents
+	docsLen := 0
+	for _, d := range r.Documents {
+		docsLen += len(d)
+	}
+	return 16 + 4 + 8 + 4 + 4 + docsLen
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+func (r *Reply) UnmarshalWireMessage(b []byte) error {
+	var err error
+	r.MsgHeader, err = ReadHeader(b, 0)
+	if err != nil {
+		return err
+	}
+	if r.MsgHeader.MessageLength < 36 {
+		return errors.New("invalid OP_REPLY: header length too small")
+	}
+	if len(b) < int(r.MsgHeader.MessageLength) {
+		return errors.New("invalid OP_REPLY: []byte too small")
+	}
+
+	r.ResponseFlags = ReplyFlag(readInt32(b, 16))
+	r.CursorID = readInt64(b, 20)
+	r.StartingFrom = readInt32(b, 28)
+	r.NumberReturned = readInt32(b, 32)
+	pos := 36
+	for pos < len(b) {
+		rdr, size, err := readDocument(b, int32(pos))
+		if err.Message != "" {
+			err.Type = ErrOpReply
+			return err
+		}
+		r.Documents = append(r.Documents, rdr)
+		pos += size
+	}
+
+	return nil
+}
+
+// GetMainLegacyDocument constructs and returns a BSON document for this reply.
+func (r *Reply) GetMainLegacyDocument(fullCollectionName string) (bsonx.Doc, error) {
+	if r.ResponseFlags&CursorNotFound > 0 {
+		fmt.Println("cursor not found err")
+		return bsonx.Doc{
+			{"ok", bsonx.Int32(0)},
+		}, nil
+	}
+	if r.ResponseFlags&QueryFailure > 0 {
+		firstDoc := r.Documents[0]
+		return bsonx.Doc{
+			{"ok", bsonx.Int32(0)},
+			{"errmsg", bsonx.String(firstDoc.Lookup("$err").StringValue())},
+			{"code", bsonx.Int32(firstDoc.Lookup("code").Int32())},
+		}, nil
+	}
+
+	doc := bsonx.Doc{
+		{"ok", bsonx.Int32(1)},
+	}
+
+	batchStr := "firstBatch"
+	if r.StartingFrom != 0 {
+		batchStr = "nextBatch"
+	}
+
+	batchArr := make([]bsonx.Val, len(r.Documents))
+	for i, docRaw := range r.Documents {
+		doc, err := bsonx.ReadDoc(docRaw)
+		if err != nil {
+			return nil, err
+		}
+
+		batchArr[i] = bsonx.Document(doc)
+	}
+
+	cursorDoc := bsonx.Doc{
+		{"id", bsonx.Int64(r.CursorID)},
+		{"ns", bsonx.String(fullCollectionName)},
+		{batchStr, bsonx.Array(batchArr)},
+	}
+
+	doc = doc.Append("cursor", bsonx.Document(cursorDoc))
+	return doc, nil
+}
+
+// GetMainDocument returns the main BSON document for this reply.
+func (r *Reply) GetMainDocument() (bsonx.Doc, error) {
+	return bsonx.ReadDoc([]byte(r.Documents[0]))
+}
+
+// ReplyFlag represents the flags of an OP_REPLY message.
+type ReplyFlag int32
+
+// These constants represent the individual flags of an OP_REPLY message.
+const (
+	CursorNotFound ReplyFlag = 1 << iota
+	QueryFailure
+	ShardConfigStale
+	AwaitCapable
+)
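+
+// Flags are tested with a bitwise AND; for example (illustrative sketch):
+//
+//	if r.ResponseFlags&QueryFailure == QueryFailure {
+//		// the server put an $err document in r.Documents[0]
+//	}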
+
+// String implements the fmt.Stringer interface.
+func (rf ReplyFlag) String() string {
+	strs := make([]string, 0)
+	if rf&CursorNotFound == CursorNotFound {
+		strs = append(strs, "CursorNotFound")
+	}
+	if rf&QueryFailure == QueryFailure {
+		strs = append(strs, "QueryFailure")
+	}
+	if rf&ShardConfigStale == ShardConfigStale {
+		strs = append(strs, "ShardConfigStale")
+	}
+	if rf&AwaitCapable == AwaitCapable {
+		strs = append(strs, "AwaitCapable")
+	}
+	str := "["
+	str += strings.Join(strs, ", ")
+	str += "]"
+	return str
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/update.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/update.go
new file mode 100644
index 0000000..a987327
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/update.go
@@ -0,0 +1,57 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import "github.com/mongodb/mongo-go-driver/bson"
+
+// Update represents the OP_UPDATE message of the MongoDB wire protocol.
+type Update struct {
+	MsgHeader          Header
+	FullCollectionName string
+	Flags              UpdateFlag
+	Selector           bson.Raw
+	Update             bson.Raw
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+func (u Update) MarshalWireMessage() ([]byte, error) {
+	panic("not implemented")
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+func (u Update) ValidateWireMessage() error {
+	panic("not implemented")
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+func (u Update) AppendWireMessage([]byte) ([]byte, error) {
+	panic("not implemented")
+}
+
+// String implements the fmt.Stringer interface.
+func (u Update) String() string {
+	panic("not implemented")
+}
+
+// Len implements the WireMessage interface.
+func (u Update) Len() int {
+	panic("not implemented")
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+func (u *Update) UnmarshalWireMessage([]byte) error {
+	panic("not implemented")
+}
+
+// UpdateFlag represents the flags on an OP_UPDATE message.
+type UpdateFlag int32
+
+// These constants represent the individual flags on an OP_UPDATE message.
+const (
+	Upsert UpdateFlag = 1 << iota
+	MultiUpdate
+)
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/wiremessage.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/wiremessage.go
new file mode 100644
index 0000000..c2cb153
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/wiremessage.go
@@ -0,0 +1,206 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package wiremessage contains types for speaking the MongoDB Wire Protocol. Since this
+// low-level library is meant to be used both in the context of a driver and in the context
+// of a server, all of the flags and types of the wire protocol are implemented. For each op
+// there are two corresponding implementations: one prefixed with Immutable, which can be
+// created by casting a []byte to the type, and another prefixed with Mutable, which is a
+// struct with methods to mutate the op.
+package wiremessage
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"sync/atomic"
+)
+
+// ErrInvalidMessageLength is returned when the provided message length is too small to be valid.
+var ErrInvalidMessageLength = errors.New("the message length is too small, it must be at least 16")
+
+// ErrUnknownOpCode is returned when the provided opcode is not a valid opcode.
+var ErrUnknownOpCode = errors.New("the opcode is unknown")
+
+var globalRequestID int32
+
+// CurrentRequestID returns the current request ID.
+func CurrentRequestID() int32 { return atomic.LoadInt32(&globalRequestID) }
+
+// NextRequestID returns the next request ID.
+func NextRequestID() int32 { return atomic.AddInt32(&globalRequestID, 1) }
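+
+// A caller constructing a message would typically stamp the header with a
+// fresh ID, e.g. (illustrative sketch, assuming the Header type exposes a
+// RequestID field):
+//
+//	header := Header{RequestID: NextRequestID(), OpCode: OpQuery}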
+
+// Error represents an error related to wire protocol messages.
+type Error struct {
+	Type    ErrorType
+	Message string
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+	return e.Message
+}
+
+// ErrorType is the type of error, which indicates from which part of the code
+// the error originated.
+type ErrorType uint16
+
+// These constants are the types of errors exposed by this package.
+const (
+	ErrNil ErrorType = iota
+	ErrHeader
+	ErrOpQuery
+	ErrOpReply
+	ErrOpCompressed
+	ErrOpMsg
+	ErrRead
+)
+
+// OpCode represents a MongoDB wire protocol opcode.
+type OpCode int32
+
+// These constants are the valid opcodes for the version of the wire protocol
+// supported by this library. The skipped OpCodes are historical OpCodes that
+// are no longer used.
+const (
+	OpReply        OpCode = 1
+	_              OpCode = 1001
+	OpUpdate       OpCode = 2001
+	OpInsert       OpCode = 2002
+	_              OpCode = 2003
+	OpQuery        OpCode = 2004
+	OpGetMore      OpCode = 2005
+	OpDelete       OpCode = 2006
+	OpKillCursors  OpCode = 2007
+	OpCommand      OpCode = 2010
+	OpCommandReply OpCode = 2011
+	OpCompressed   OpCode = 2012
+	OpMsg          OpCode = 2013
+)
+
+// String implements the fmt.Stringer interface.
+func (oc OpCode) String() string {
+	switch oc {
+	case OpReply:
+		return "OP_REPLY"
+	case OpUpdate:
+		return "OP_UPDATE"
+	case OpInsert:
+		return "OP_INSERT"
+	case OpQuery:
+		return "OP_QUERY"
+	case OpGetMore:
+		return "OP_GET_MORE"
+	case OpDelete:
+		return "OP_DELETE"
+	case OpKillCursors:
+		return "OP_KILL_CURSORS"
+	case OpCommand:
+		return "OP_COMMAND"
+	case OpCommandReply:
+		return "OP_COMMANDREPLY"
+	case OpCompressed:
+		return "OP_COMPRESSED"
+	case OpMsg:
+		return "OP_MSG"
+	default:
+		return "<invalid opcode>"
+	}
+}
+
+// WireMessage represents a message in the MongoDB wire protocol.
+type WireMessage interface {
+	Marshaler
+	Validator
+	Appender
+	fmt.Stringer
+
+	// Len returns the length in bytes of this WireMessage.
+	Len() int
+}
+
+// Validator is the interface implemented by types that can validate
+// themselves as a MongoDB wire protocol message.
+type Validator interface {
+	ValidateWireMessage() error
+}
+
+// Marshaler is the interface implemented by types that can marshal
+// themselves into a valid MongoDB wire protocol message.
+type Marshaler interface {
+	MarshalWireMessage() ([]byte, error)
+}
+
+// Appender is the interface implemented by types that can append themselves, as
+// a MongoDB wire protocol message, to the provided slice of bytes.
+type Appender interface {
+	AppendWireMessage([]byte) ([]byte, error)
+}
+
+// Unmarshaler is the interface implemented by types that can unmarshal a
+// MongoDB wire protocol message version of themselves. The input can be
+// assumed to be a valid MongoDB wire protocol message. UnmarshalWireMessage
+// must copy the data if it wishes to retain the data after returning.
+type Unmarshaler interface {
+	UnmarshalWireMessage([]byte) error
+}
+
+// Writer is the interface implemented by types that can have WireMessages
+// written to them.
+//
+// Implementations must obey the cancellation, timeouts, and deadlines of the
+// provided context.Context object.
+type Writer interface {
+	WriteWireMessage(context.Context, WireMessage) error
+}
+
+// Reader is the interface implemented by types that can have WireMessages
+// read from them.
+//
+// Implementations must obey the cancellation, timeouts, and deadlines of the
+// provided context.Context object.
+type Reader interface {
+	ReadWireMessage(context.Context) (WireMessage, error)
+}
+
+// ReadWriter is the interface implemented by types that can both read and write
+// WireMessages.
+type ReadWriter interface {
+	Reader
+	Writer
+}
+
+// ReadWriteCloser is the interface implemented by types that can read and write
+// WireMessages and can also be closed.
+type ReadWriteCloser interface {
+	Reader
+	Writer
+	io.Closer
+}
+
+// Transformer is the interface implemented by types that can alter a WireMessage.
+// Implementations should not directly alter the provided WireMessage; instead
+// they should make a copy of the message, alter it, and return the new message.
+type Transformer interface {
+	TransformWireMessage(WireMessage) (WireMessage, error)
+}
+
+// ReadFrom will read a single WireMessage from the given io.Reader. This function will
+// validate the WireMessage. If the WireMessage is not valid, this method will
+// return both the error and the invalid WireMessage. If another type of processing
+// error occurs, WireMessage will be nil.
+//
+// This function will return the immutable versions of wire protocol messages. The
+// Convert function can be used to retrieve a mutable version of wire protocol
+// messages.
+func ReadFrom(io.Reader) (WireMessage, error) { return nil, nil }
+
+// Unmarshal will unmarshal data into a WireMessage.
+func Unmarshal([]byte) (WireMessage, error) { return nil, nil }
+
+// Validate will validate that data is a valid MongoDB wire protocol message.
+func Validate([]byte) error { return nil }
diff --git a/vendor/github.com/xdg/scram/.gitignore b/vendor/github.com/xdg/scram/.gitignore
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/xdg/scram/.gitignore
diff --git a/vendor/github.com/xdg/scram/.travis.yml b/vendor/github.com/xdg/scram/.travis.yml
new file mode 100644
index 0000000..f391327
--- /dev/null
+++ b/vendor/github.com/xdg/scram/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+sudo: false
+go:
+  - "1.7"
+  - "1.8"
+  - "1.9"
+  - "1.10"
+  - master
+matrix:
+  allow_failures:
+    - go: master
diff --git a/vendor/github.com/xdg/scram/LICENSE b/vendor/github.com/xdg/scram/LICENSE
new file mode 100644
index 0000000..67db858
--- /dev/null
+++ b/vendor/github.com/xdg/scram/LICENSE
@@ -0,0 +1,175 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/xdg/scram/README.md b/vendor/github.com/xdg/scram/README.md
new file mode 100644
index 0000000..6782d94
--- /dev/null
+++ b/vendor/github.com/xdg/scram/README.md
@@ -0,0 +1,71 @@
+[![GoDoc](https://godoc.org/github.com/xdg/scram?status.svg)](https://godoc.org/github.com/xdg/scram)
+[![Build Status](https://travis-ci.org/xdg/scram.svg?branch=master)](https://travis-ci.org/xdg/scram)
+
+# scram – Go implementation of RFC-5802
+
+## Description
+
+Package scram provides client and server implementations of the Salted
+Challenge Response Authentication Mechanism (SCRAM) described in
+[RFC-5802](https://tools.ietf.org/html/rfc5802) and
+[RFC-7677](https://tools.ietf.org/html/rfc7677).
+
+It includes both client and server side support.
+
+Channel binding and extensions are not (yet) supported.
+
+## Examples
+
+### Client side
+
+    package main
+
+    import "github.com/xdg/scram"
+
+    func main() {
+        // Get Client with username, password and (optional) authorization ID.
+        clientSHA1, err := scram.SHA1.NewClient("mulder", "trustno1", "")
+        if err != nil {
+            panic(err)
+        }
+
+        // Prepare the authentication conversation. Use the empty string as the
+        // initial server message argument to start the conversation.
+        conv := clientSHA1.NewConversation()
+        var serverMsg string
+
+        // Get the first message, send it and read the response.
+        firstMsg, err := conv.Step(serverMsg)
+        if err != nil {
+            panic(err)
+        }
+        serverMsg = sendClientMsg(firstMsg)
+
+        // Get the second message, send it, and read the response.
+        secondMsg, err := conv.Step(serverMsg)
+        if err != nil {
+            panic(err)
+        }
+        serverMsg = sendClientMsg(secondMsg)
+
+        // Validate the server's final message.  We have no further message to
+        // send so ignore that return value.
+        _, err = conv.Step(serverMsg)
+        if err != nil {
+            panic(err)
+        }
+    }
+
+    func sendClientMsg(s string) string {
+        // A real implementation would send this to a server and read a reply.
+        return ""
+    }
+
+## Copyright and License
+
+Copyright 2018 by David A. Golden. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"). You may
+obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
diff --git a/vendor/github.com/xdg/scram/client.go b/vendor/github.com/xdg/scram/client.go
new file mode 100644
index 0000000..ca0c4c7
--- /dev/null
+++ b/vendor/github.com/xdg/scram/client.go
@@ -0,0 +1,130 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"sync"
+
+	"golang.org/x/crypto/pbkdf2"
+)
+
+// Client implements the client side of SCRAM authentication.  It holds
+// configuration values needed to initialize new client-side conversations for
+// a specific username, password and authorization ID tuple.  Client caches
+// the computationally-expensive parts of a SCRAM conversation as described in
+// RFC-5802.  If repeated authentication conversations may be required for a
+// user (e.g. disconnect/reconnect), the user's Client should be preserved.
+//
+// For security reasons, Clients have a default minimum PBKDF2 iteration count
+// of 4096.  If a server requests a smaller iteration count, an authentication
+// conversation will error.
+//
+// A Client can also be used by a server application to construct the hashed
+// authentication values to be stored for a new user.  See GetStoredCredentials()
+// for more.
+type Client struct {
+	sync.RWMutex
+	username string
+	password string
+	authzID  string
+	minIters int
+	nonceGen NonceGeneratorFcn
+	hashGen  HashGeneratorFcn
+	cache    map[KeyFactors]derivedKeys
+}
+
+func newClient(username, password, authzID string, fcn HashGeneratorFcn) *Client {
+	return &Client{
+		username: username,
+		password: password,
+		authzID:  authzID,
+		minIters: 4096,
+		nonceGen: defaultNonceGenerator,
+		hashGen:  fcn,
+		cache:    make(map[KeyFactors]derivedKeys),
+	}
+}
+
+// WithMinIterations changes the minimum required PBKDF2 iteration count.
+func (c *Client) WithMinIterations(n int) *Client {
+	c.Lock()
+	defer c.Unlock()
+	c.minIters = n
+	return c
+}
+
+// WithNonceGenerator replaces the default nonce generator (base64 encoding of
+// 24 bytes from crypto/rand) with a custom generator.  This is provided for
+// testing or for users with custom nonce requirements.
+func (c *Client) WithNonceGenerator(ng NonceGeneratorFcn) *Client {
+	c.Lock()
+	defer c.Unlock()
+	c.nonceGen = ng
+	return c
+}
+
+// NewConversation constructs a client-side authentication conversation.
+// Conversations cannot be reused, so this must be called for each new
+// authentication attempt.
+func (c *Client) NewConversation() *ClientConversation {
+	c.RLock()
+	defer c.RUnlock()
+	return &ClientConversation{
+		client:   c,
+		nonceGen: c.nonceGen,
+		hashGen:  c.hashGen,
+		minIters: c.minIters,
+	}
+}
+
+func (c *Client) getDerivedKeys(kf KeyFactors) derivedKeys {
+	dk, ok := c.getCache(kf)
+	if !ok {
+		dk = c.computeKeys(kf)
+		c.setCache(kf, dk)
+	}
+	return dk
+}
+
+// GetStoredCredentials takes a salt and iteration count structure and
+// provides the values that must be stored by a server to authenticate a
+// user.  These values are what the Server credential lookup function must
+// return for a given username.
+func (c *Client) GetStoredCredentials(kf KeyFactors) StoredCredentials {
+	dk := c.getDerivedKeys(kf)
+	return StoredCredentials{
+		KeyFactors: kf,
+		StoredKey:  dk.StoredKey,
+		ServerKey:  dk.ServerKey,
+	}
+}
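+
+// For example (illustrative sketch; salt generation and storage are up to the
+// caller), a server provisioning a new user could do:
+//
+//	kf := KeyFactors{Salt: string(saltBytes), Iters: 10000}
+//	creds := client.GetStoredCredentials(kf)
+//	// persist creds.StoredKey, creds.ServerKey, kf.Salt and kf.Iters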
+
+func (c *Client) computeKeys(kf KeyFactors) derivedKeys {
+	h := c.hashGen()
+	saltedPassword := pbkdf2.Key([]byte(c.password), []byte(kf.Salt), kf.Iters, h.Size(), c.hashGen)
+	clientKey := computeHMAC(c.hashGen, saltedPassword, []byte("Client Key"))
+
+	return derivedKeys{
+		ClientKey: clientKey,
+		StoredKey: computeHash(c.hashGen, clientKey),
+		ServerKey: computeHMAC(c.hashGen, saltedPassword, []byte("Server Key")),
+	}
+}
+
+func (c *Client) getCache(kf KeyFactors) (derivedKeys, bool) {
+	c.RLock()
+	defer c.RUnlock()
+	dk, ok := c.cache[kf]
+	return dk, ok
+}
+
+func (c *Client) setCache(kf KeyFactors, dk derivedKeys) {
+	c.Lock()
+	defer c.Unlock()
+	c.cache[kf] = dk
+}
diff --git a/vendor/github.com/xdg/scram/client_conv.go b/vendor/github.com/xdg/scram/client_conv.go
new file mode 100644
index 0000000..8340568
--- /dev/null
+++ b/vendor/github.com/xdg/scram/client_conv.go
@@ -0,0 +1,149 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"crypto/hmac"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"strings"
+)
+
+type clientState int
+
+const (
+	clientStarting clientState = iota
+	clientFirst
+	clientFinal
+	clientDone
+)
+
+// ClientConversation implements the client-side of an authentication
+// conversation with a server.  A new conversation must be created for
+// each authentication attempt.
+type ClientConversation struct {
+	client   *Client
+	nonceGen NonceGeneratorFcn
+	hashGen  HashGeneratorFcn
+	minIters int
+	state    clientState
+	valid    bool
+	gs2      string
+	nonce    string
+	c1b      string
+	serveSig []byte
+}
+
+// Step takes a string provided from a server (or just an empty string for the
+// very first conversation step) and attempts to move the authentication
+// conversation forward.  It returns a string to be sent to the server or an
+// error if the server message is invalid.  Calling Step after a conversation
+// completes is also an error.
+func (cc *ClientConversation) Step(challenge string) (response string, err error) {
+	switch cc.state {
+	case clientStarting:
+		cc.state = clientFirst
+		response, err = cc.firstMsg()
+	case clientFirst:
+		cc.state = clientFinal
+		response, err = cc.finalMsg(challenge)
+	case clientFinal:
+		cc.state = clientDone
+		response, err = cc.validateServer(challenge)
+	default:
+		response, err = "", errors.New("Conversation already completed")
+	}
+	return
+}
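+
+// A typical client loop (illustrative sketch; sendToServer is a hypothetical
+// application transport):
+//
+//	conv := client.NewConversation()
+//	serverMsg := ""
+//	for !conv.Done() {
+//		msg, err := conv.Step(serverMsg)
+//		if err != nil {
+//			return err
+//		}
+//		if msg != "" {
+//			serverMsg = sendToServer(msg)
+//		}
+//	}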
+
+// Done returns true if the conversation is completed or has errored.
+func (cc *ClientConversation) Done() bool {
+	return cc.state == clientDone
+}
+
+// Valid returns true if the conversation successfully authenticated with the
+// server, including counter-validation that the server actually has the
+// user's stored credentials.
+func (cc *ClientConversation) Valid() bool {
+	return cc.valid
+}
+
+func (cc *ClientConversation) firstMsg() (string, error) {
+	// Values are cached for use in final message parameters
+	cc.gs2 = cc.gs2Header()
+	cc.nonce = cc.client.nonceGen()
+	cc.c1b = fmt.Sprintf("n=%s,r=%s", encodeName(cc.client.username), cc.nonce)
+
+	return cc.gs2 + cc.c1b, nil
+}
+
+func (cc *ClientConversation) finalMsg(s1 string) (string, error) {
+	msg, err := parseServerFirst(s1)
+	if err != nil {
+		return "", err
+	}
+
+	// Check nonce prefix and update
+	if !strings.HasPrefix(msg.nonce, cc.nonce) {
+		return "", errors.New("server nonce did not extend client nonce")
+	}
+	cc.nonce = msg.nonce
+
+	// Check iteration count vs minimum
+	if msg.iters < cc.minIters {
+		return "", fmt.Errorf("server requested too few iterations (%d)", msg.iters)
+	}
+
+	// Create client-final-message-without-proof
+	c2wop := fmt.Sprintf(
+		"c=%s,r=%s",
+		base64.StdEncoding.EncodeToString([]byte(cc.gs2)),
+		cc.nonce,
+	)
+
+	// Create auth message
+	authMsg := cc.c1b + "," + s1 + "," + c2wop
+
+	// Get derived keys from client cache
+	dk := cc.client.getDerivedKeys(KeyFactors{Salt: string(msg.salt), Iters: msg.iters})
+
+	// Create proof as clientkey XOR clientsignature
+	clientSignature := computeHMAC(cc.hashGen, dk.StoredKey, []byte(authMsg))
+	clientProof := xorBytes(dk.ClientKey, clientSignature)
+	proof := base64.StdEncoding.EncodeToString(clientProof)
+
+	// Cache ServerSignature for later validation
+	cc.serveSig = computeHMAC(cc.hashGen, dk.ServerKey, []byte(authMsg))
+
+	return fmt.Sprintf("%s,p=%s", c2wop, proof), nil
+}
+
+func (cc *ClientConversation) validateServer(s2 string) (string, error) {
+	msg, err := parseServerFinal(s2)
+	if err != nil {
+		return "", err
+	}
+
+	if len(msg.err) > 0 {
+		return "", fmt.Errorf("server error: %s", msg.err)
+	}
+
+	if !hmac.Equal(msg.verifier, cc.serveSig) {
+		return "", errors.New("server validation failed")
+	}
+
+	cc.valid = true
+	return "", nil
+}
+
+func (cc *ClientConversation) gs2Header() string {
+	if cc.client.authzID == "" {
+		return "n,,"
+	}
+	return fmt.Sprintf("n,%s,", encodeName(cc.client.authzID))
+}
diff --git a/vendor/github.com/xdg/scram/common.go b/vendor/github.com/xdg/scram/common.go
new file mode 100644
index 0000000..cb705cb
--- /dev/null
+++ b/vendor/github.com/xdg/scram/common.go
@@ -0,0 +1,97 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"crypto/hmac"
+	"crypto/rand"
+	"encoding/base64"
+	"strings"
+)
+
+// NonceGeneratorFcn defines a function that returns a string of high-quality
+// random printable ASCII characters EXCLUDING the comma (',') character.  The
+// default nonce generator provides Base64 encoding of 24 bytes from
+// crypto/rand.
+type NonceGeneratorFcn func() string
+
+// derivedKeys collects the three cryptographically derived values
+// into one struct for caching.
+type derivedKeys struct {
+	ClientKey []byte
+	StoredKey []byte
+	ServerKey []byte
+}
+
+// KeyFactors represent the two server-provided factors needed to compute
+// client credentials for authentication.  Salt is decoded bytes (i.e. not
+// base64), but in string form so that KeyFactors can be used as a map key for
+// cached credentials.
+type KeyFactors struct {
+	Salt  string
+	Iters int
+}
+
+// StoredCredentials are the values that a server must store for a given
+// username to allow authentication.  They include the salt and iteration
+// count, plus the derived values to authenticate a client and for the server
+// to authenticate itself back to the client.
+//
+// NOTE: these are specific to a given hash function.  To allow a user to
+// authenticate with either SCRAM-SHA-1 or SCRAM-SHA-256, two sets of
+// StoredCredentials must be created and stored, one for each hash function.
+type StoredCredentials struct {
+	KeyFactors
+	StoredKey []byte
+	ServerKey []byte
+}
+
+// CredentialLookup is a callback to provide StoredCredentials for a given
+// username.  This is used to configure Server objects.
+//
+// NOTE: these are specific to a given hash function.  The callback provided
+// to a Server with a given hash function must provide the corresponding
+// StoredCredentials.
+type CredentialLookup func(string) (StoredCredentials, error)
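+
+// An illustrative sketch (the creds map and error text are assumptions
+// for the example): a CredentialLookup backed by an in-memory map.
+//
+//     lookup := func(username string) (StoredCredentials, error) {
+//         sc, ok := creds[username]
+//         if !ok {
+//             return StoredCredentials{}, fmt.Errorf("unknown user %q", username)
+//         }
+//         return sc, nil
+//     }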
+
+func defaultNonceGenerator() string {
+	raw := make([]byte, 24)
+	nonce := make([]byte, base64.StdEncoding.EncodedLen(len(raw)))
+	if _, err := rand.Read(raw); err != nil {
+		panic(err) // a nonce without entropy would be unusable
+	}
+	base64.StdEncoding.Encode(nonce, raw)
+	return string(nonce)
+}
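+
+// For reproducible tests, a fixed-nonce generator can be substituted
+// via WithNonceGenerator (a sketch; real conversations must keep the
+// default high-entropy source):
+//
+//     fixed := func() string { return "c2FtcGxlLW5vbmNl" }
+//     server = server.WithNonceGenerator(fixed)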
+
+func encodeName(s string) string {
+	return strings.Replace(strings.Replace(s, "=", "=3D", -1), ",", "=2C", -1)
+}
+
+func decodeName(s string) (string, error) {
+	// TODO Check for = not followed by 2C or 3D
+	return strings.Replace(strings.Replace(s, "=2C", ",", -1), "=3D", "=", -1), nil
+}
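+
+// For example, encodeName("a,b=c") yields "a=2Cb=3Dc", and decodeName
+// reverses the escaping:
+//
+//     s, _ := decodeName(encodeName("a,b=c")) // s == "a,b=c"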
+
+func computeHash(hg HashGeneratorFcn, b []byte) []byte {
+	h := hg()
+	h.Write(b)
+	return h.Sum(nil)
+}
+
+func computeHMAC(hg HashGeneratorFcn, key, data []byte) []byte {
+	mac := hmac.New(hg, key)
+	mac.Write(data)
+	return mac.Sum(nil)
+}
+
+func xorBytes(a, b []byte) []byte {
+	// TODO check a & b are same length, or just xor to smallest
+	xor := make([]byte, len(a))
+	for i := range a {
+		xor[i] = a[i] ^ b[i]
+	}
+	return xor
+}
diff --git a/vendor/github.com/xdg/scram/doc.go b/vendor/github.com/xdg/scram/doc.go
new file mode 100644
index 0000000..d43bee6
--- /dev/null
+++ b/vendor/github.com/xdg/scram/doc.go
@@ -0,0 +1,24 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package scram provides client and server implementations of the Salted
+// Challenge Response Authentication Mechanism (SCRAM) described in RFC-5802
+// and RFC-7677.
+//
+// Usage
+//
+// The scram package provides two variables, `SHA1` and `SHA256`, that are
+// used to construct Client or Server objects.
+//
+//     clientSHA1,   err := scram.SHA1.NewClient(username, password, authID)
+//     clientSHA256, err := scram.SHA256.NewClient(username, password, authID)
+//
+//     serverSHA1,   err := scram.SHA1.NewServer(credentialLookupFcn)
+//     serverSHA256, err := scram.SHA256.NewServer(credentialLookupFcn)
+//
+// These objects are used to construct ClientConversation or
+// ServerConversation objects that are used to carry out authentication.
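+//
+// A conversation loop drives Step until Done (a sketch; it assumes
+// Client.NewConversation from this package's client code and a
+// hypothetical transport helper roundTrip):
+//
+//     conv := clientSHA256.NewConversation()
+//     var serverMsg string
+//     for !conv.Done() {
+//         msg, err := conv.Step(serverMsg)
+//         if err != nil {
+//             break
+//         }
+//         if msg != "" {
+//             serverMsg = roundTrip(msg)
+//         }
+//     }
+//     if !conv.Valid() {
+//         // authentication failed
+//     }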
+package scram
diff --git a/vendor/github.com/xdg/scram/parse.go b/vendor/github.com/xdg/scram/parse.go
new file mode 100644
index 0000000..722f604
--- /dev/null
+++ b/vendor/github.com/xdg/scram/parse.go
@@ -0,0 +1,205 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+type c1Msg struct {
+	gs2Header string
+	authzID   string
+	username  string
+	nonce     string
+	c1b       string
+}
+
+type c2Msg struct {
+	cbind []byte
+	nonce string
+	proof []byte
+	c2wop string
+}
+
+type s1Msg struct {
+	nonce string
+	salt  []byte
+	iters int
+}
+
+type s2Msg struct {
+	verifier []byte
+	err      string
+}
+
+func parseField(s, k string) (string, error) {
+	t := strings.TrimPrefix(s, k+"=")
+	if t == s {
+		return "", fmt.Errorf("error parsing '%s' for field '%s'", s, k)
+	}
+	return t, nil
+}
+
+func parseGS2Flag(s string) (string, error) {
+	if len(s) == 0 {
+		return "", errors.New("empty gs2 flag")
+	}
+	if s[0] == 'p' {
+		return "", fmt.Errorf("channel binding requested but not supported")
+	}
+
+	if s == "n" || s == "y" {
+		return s, nil
+	}
+
+	return "", fmt.Errorf("error parsing '%s' for gs2 flag", s)
+}
+
+func parseFieldBase64(s, k string) ([]byte, error) {
+	raw, err := parseField(s, k)
+	if err != nil {
+		return nil, err
+	}
+
+	dec, err := base64.StdEncoding.DecodeString(raw)
+	if err != nil {
+		return nil, err
+	}
+
+	return dec, nil
+}
+
+func parseFieldInt(s, k string) (int, error) {
+	raw, err := parseField(s, k)
+	if err != nil {
+		return 0, err
+	}
+
+	num, err := strconv.Atoi(raw)
+	if err != nil {
+		return 0, fmt.Errorf("error parsing field '%s': %v", k, err)
+	}
+
+	return num, nil
+}
+
+func parseClientFirst(c1 string) (msg c1Msg, err error) {
+	fields := strings.Split(c1, ",")
+	if len(fields) < 4 {
+		err = errors.New("not enough fields in first server message")
+		return
+	}
+
+	gs2flag, err := parseGS2Flag(fields[0])
+	if err != nil {
+		return
+	}
+
+	// 'a' field is optional
+	if len(fields[1]) > 0 {
+		msg.authzID, err = parseField(fields[1], "a")
+		if err != nil {
+			return
+		}
+	}
+
+	// Recombine and save the gs2 header
+	msg.gs2Header = gs2flag + "," + msg.authzID + ","
+
+	// Check for unsupported extensions field "m".
+	if strings.HasPrefix(fields[2], "m=") {
+		err = errors.New("SCRAM message extensions are not supported")
+		return
+	}
+
+	msg.username, err = parseField(fields[2], "n")
+	if err != nil {
+		return
+	}
+
+	msg.nonce, err = parseField(fields[3], "r")
+	if err != nil {
+		return
+	}
+
+	msg.c1b = strings.Join(fields[2:], ",")
+
+	return
+}
+
+func parseClientFinal(c2 string) (msg c2Msg, err error) {
+	fields := strings.Split(c2, ",")
+	if len(fields) < 3 {
+		err = errors.New("not enough fields in first server message")
+		return
+	}
+
+	msg.cbind, err = parseFieldBase64(fields[0], "c")
+	if err != nil {
+		return
+	}
+
+	msg.nonce, err = parseField(fields[1], "r")
+	if err != nil {
+		return
+	}
+
+	// Extension fields may come between nonce and proof, so we
+	// grab the *last* fields as proof.
+	msg.proof, err = parseFieldBase64(fields[len(fields)-1], "p")
+	if err != nil {
+		return
+	}
+
+	msg.c2wop = c2[:strings.LastIndex(c2, ",")]
+
+	return
+}
+
+func parseServerFirst(s1 string) (msg s1Msg, err error) {
+	// Check for unsupported extensions field "m".
+	if strings.HasPrefix(s1, "m=") {
+		err = errors.New("SCRAM message extensions are not supported")
+		return
+	}
+
+	fields := strings.Split(s1, ",")
+	if len(fields) < 3 {
+		err = errors.New("not enough fields in first server message")
+		return
+	}
+
+	msg.nonce, err = parseField(fields[0], "r")
+	if err != nil {
+		return
+	}
+
+	msg.salt, err = parseFieldBase64(fields[1], "s")
+	if err != nil {
+		return
+	}
+
+	msg.iters, err = parseFieldInt(fields[2], "i")
+
+	return
+}
+
+func parseServerFinal(s2 string) (msg s2Msg, err error) {
+	fields := strings.Split(s2, ",")
+
+	msg.verifier, err = parseFieldBase64(fields[0], "v")
+	if err == nil {
+		return
+	}
+
+	msg.err, err = parseField(fields[0], "e")
+
+	return
+}
diff --git a/vendor/github.com/xdg/scram/scram.go b/vendor/github.com/xdg/scram/scram.go
new file mode 100644
index 0000000..9e9836a
--- /dev/null
+++ b/vendor/github.com/xdg/scram/scram.go
@@ -0,0 +1,66 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"crypto/sha1"
+	"crypto/sha256"
+	"fmt"
+	"hash"
+
+	"github.com/xdg/stringprep"
+)
+
+// HashGeneratorFcn abstracts a factory function that returns a hash.Hash
+// value to be used for SCRAM operations.  Generally, one would use the
+// provided package variables, `scram.SHA1` and `scram.SHA256`, for the most
+// common forms of SCRAM.
+type HashGeneratorFcn func() hash.Hash
+
+// SHA1 is a function that returns a crypto/sha1 hasher and should be used to
+// create Client objects configured for SHA-1 hashing.
+var SHA1 HashGeneratorFcn = func() hash.Hash { return sha1.New() }
+
+// SHA256 is a function that returns a crypto/sha256 hasher and should be used
+// to create Client objects configured for SHA-256 hashing.
+var SHA256 HashGeneratorFcn = func() hash.Hash { return sha256.New() }
+
+// NewClient constructs a SCRAM client component based on a given hash.Hash
+// factory receiver.  This constructor will normalize the username, password
+// and authzID via the SASLprep algorithm, as recommended by RFC-5802.  If
+// SASLprep fails, the method returns an error.
+func (f HashGeneratorFcn) NewClient(username, password, authzID string) (*Client, error) {
+	var userprep, passprep, authprep string
+	var err error
+
+	if userprep, err = stringprep.SASLprep.Prepare(username); err != nil {
+		return nil, fmt.Errorf("error SASLprepping username '%s': %v", username, err)
+	}
+	if passprep, err = stringprep.SASLprep.Prepare(password); err != nil {
+		return nil, fmt.Errorf("error SASLprepping password: %v", err)
+	}
+	if authprep, err = stringprep.SASLprep.Prepare(authzID); err != nil {
+		return nil, fmt.Errorf("error SASLprepping authzID '%s': %v", authzID, err)
+	}
+
+	return newClient(userprep, passprep, authprep, f), nil
+}
+
+// NewClientUnprepped acts like NewClient, except none of the arguments will
+// be normalized via SASLprep.  This is not generally recommended, but is
+// provided for users that may have custom normalization needs.
+func (f HashGeneratorFcn) NewClientUnprepped(username, password, authzID string) (*Client, error) {
+	return newClient(username, password, authzID, f), nil
+}
+
+// NewServer constructs a SCRAM server component based on a given hash.Hash
+// factory receiver.  To be maximally generic, it uses dependency injection to
+// handle credential lookup, which is the process of turning a username string
+// into a struct with stored credentials for authentication.
+func (f HashGeneratorFcn) NewServer(cl CredentialLookup) (*Server, error) {
+	return newServer(cl, f)
+}
diff --git a/vendor/github.com/xdg/scram/server.go b/vendor/github.com/xdg/scram/server.go
new file mode 100644
index 0000000..b119b36
--- /dev/null
+++ b/vendor/github.com/xdg/scram/server.go
@@ -0,0 +1,50 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import "sync"
+
+// Server implements the server side of SCRAM authentication.  It holds
+// configuration values needed to initialize new server-side conversations.
+// Generally, this can be persistent within an application.
+type Server struct {
+	sync.RWMutex
+	credentialCB CredentialLookup
+	nonceGen     NonceGeneratorFcn
+	hashGen      HashGeneratorFcn
+}
+
+func newServer(cl CredentialLookup, fcn HashGeneratorFcn) (*Server, error) {
+	return &Server{
+		credentialCB: cl,
+		nonceGen:     defaultNonceGenerator,
+		hashGen:      fcn,
+	}, nil
+}
+
+// WithNonceGenerator replaces the default nonce generator (base64 encoding of
+// 24 bytes from crypto/rand) with a custom generator.  This is provided for
+// testing or for users with custom nonce requirements.
+func (s *Server) WithNonceGenerator(ng NonceGeneratorFcn) *Server {
+	s.Lock()
+	defer s.Unlock()
+	s.nonceGen = ng
+	return s
+}
+
+// NewConversation constructs a server-side authentication conversation.
+// Conversations cannot be reused, so this must be called for each new
+// authentication attempt.
+func (s *Server) NewConversation() *ServerConversation {
+	s.RLock()
+	defer s.RUnlock()
+	return &ServerConversation{
+		nonceGen:     s.nonceGen,
+		hashGen:      s.hashGen,
+		credentialCB: s.credentialCB,
+	}
+}
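+
+// A server-side sketch (recv and send are hypothetical transport
+// helpers for whole SCRAM messages):
+//
+//     conv := server.NewConversation()
+//     for !conv.Done() {
+//         resp, err := conv.Step(recv())
+//         if err != nil {
+//             break // optionally send resp, which may carry "e=..."
+//         }
+//         send(resp)
+//     }
+//     if conv.Valid() {
+//         // conv.Username() identifies the authenticated client
+//     }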
diff --git a/vendor/github.com/xdg/scram/server_conv.go b/vendor/github.com/xdg/scram/server_conv.go
new file mode 100644
index 0000000..9c8838c
--- /dev/null
+++ b/vendor/github.com/xdg/scram/server_conv.go
@@ -0,0 +1,151 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package scram
+
+import (
+	"crypto/hmac"
+	"encoding/base64"
+	"errors"
+	"fmt"
+)
+
+type serverState int
+
+const (
+	serverFirst serverState = iota
+	serverFinal
+	serverDone
+)
+
+// ServerConversation implements the server-side of an authentication
+// conversation with a client.  A new conversation must be created for
+// each authentication attempt.
+type ServerConversation struct {
+	nonceGen     NonceGeneratorFcn
+	hashGen      HashGeneratorFcn
+	credentialCB CredentialLookup
+	state        serverState
+	credential   StoredCredentials
+	valid        bool
+	gs2Header    string
+	username     string
+	authzID      string
+	nonce        string
+	c1b          string
+	s1           string
+}
+
+// Step takes a string provided from a client and attempts to move the
+// authentication conversation forward.  It returns a string to be sent to the
+// client or an error if the client message is invalid.  Calling Step after a
+// conversation completes is also an error.
+func (sc *ServerConversation) Step(challenge string) (response string, err error) {
+	switch sc.state {
+	case serverFirst:
+		sc.state = serverFinal
+		response, err = sc.firstMsg(challenge)
+	case serverFinal:
+		sc.state = serverDone
+		response, err = sc.finalMsg(challenge)
+	default:
+		response, err = "", errors.New("conversation already completed")
+	}
+	return
+}
+
+// Done returns true if the conversation is completed or has errored.
+func (sc *ServerConversation) Done() bool {
+	return sc.state == serverDone
+}
+
+// Valid returns true if the conversation successfully authenticated the
+// client.
+func (sc *ServerConversation) Valid() bool {
+	return sc.valid
+}
+
+// Username returns the client-provided username.  This is valid to call
+// only after the first conversation Step() has succeeded.
+func (sc *ServerConversation) Username() string {
+	return sc.username
+}
+
+// AuthzID returns the optional client-provided authorization identity.
+// If one was not provided, it returns the empty string.  This is valid
+// to call only after the first conversation Step() has succeeded.
+func (sc *ServerConversation) AuthzID() string {
+	return sc.authzID
+}
+
+func (sc *ServerConversation) firstMsg(c1 string) (string, error) {
+	msg, err := parseClientFirst(c1)
+	if err != nil {
+		sc.state = serverDone
+		return "", err
+	}
+
+	sc.gs2Header = msg.gs2Header
+	sc.username = msg.username
+	sc.authzID = msg.authzID
+
+	sc.credential, err = sc.credentialCB(msg.username)
+	if err != nil {
+		sc.state = serverDone
+		return "e=unknown-user", err
+	}
+
+	sc.nonce = msg.nonce + sc.nonceGen()
+	sc.c1b = msg.c1b
+	sc.s1 = fmt.Sprintf("r=%s,s=%s,i=%d",
+		sc.nonce,
+		base64.StdEncoding.EncodeToString([]byte(sc.credential.Salt)),
+		sc.credential.Iters,
+	)
+
+	return sc.s1, nil
+}
+
+// finalMsg returns a server-format error string as well as a non-nil
+// error when validation fails; callers can choose whether to send the
+// server error string to the client.
+func (sc *ServerConversation) finalMsg(c2 string) (string, error) {
+	msg, err := parseClientFinal(c2)
+	if err != nil {
+		return "", err
+	}
+
+	// Check channel binding matches what we expect; in this case, we expect
+	// just the gs2 header we received as we don't support channel binding
+	// with a data payload.  If we add binding, we need to independently
+	// compute the header to match here.
+	if string(msg.cbind) != sc.gs2Header {
+		return "e=channel-bindings-dont-match", fmt.Errorf("channel binding received '%s' doesn't match expected '%s'", msg.cbind, sc.gs2Header)
+	}
+
+	// Check nonce received matches what we sent
+	if msg.nonce != sc.nonce {
+		return "e=other-error", errors.New("nonce received did not match nonce sent")
+	}
+
+	// Create auth message
+	authMsg := sc.c1b + "," + sc.s1 + "," + msg.c2wop
+
+	// Retrieve ClientKey from proof and verify it
+	clientSignature := computeHMAC(sc.hashGen, sc.credential.StoredKey, []byte(authMsg))
+	clientKey := xorBytes([]byte(msg.proof), clientSignature)
+	storedKey := computeHash(sc.hashGen, clientKey)
+
+	// Compare with constant-time function
+	if !hmac.Equal(storedKey, sc.credential.StoredKey) {
+		return "e=invalid-proof", errors.New("challenge proof invalid")
+	}
+
+	sc.valid = true
+
+	// Compute and return server verifier
+	serverSignature := computeHMAC(sc.hashGen, sc.credential.ServerKey, []byte(authMsg))
+	return "v=" + base64.StdEncoding.EncodeToString(serverSignature), nil
+}
diff --git a/vendor/github.com/xdg/stringprep/.gitignore b/vendor/github.com/xdg/stringprep/.gitignore
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/xdg/stringprep/.gitignore
diff --git a/vendor/github.com/xdg/stringprep/.travis.yml b/vendor/github.com/xdg/stringprep/.travis.yml
new file mode 100644
index 0000000..f391327
--- /dev/null
+++ b/vendor/github.com/xdg/stringprep/.travis.yml
@@ -0,0 +1,11 @@
+language: go
+sudo: false
+go:
+  - "1.7"
+  - "1.8"
+  - "1.9"
+  - "1.10"
+  - master
+matrix:
+  allow_failures:
+    - go: master
diff --git a/vendor/github.com/xdg/stringprep/LICENSE b/vendor/github.com/xdg/stringprep/LICENSE
new file mode 100644
index 0000000..67db858
--- /dev/null
+++ b/vendor/github.com/xdg/stringprep/LICENSE
@@ -0,0 +1,175 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
diff --git a/vendor/github.com/xdg/stringprep/README.md b/vendor/github.com/xdg/stringprep/README.md
new file mode 100644
index 0000000..87279e3
--- /dev/null
+++ b/vendor/github.com/xdg/stringprep/README.md
@@ -0,0 +1,27 @@
+[![GoDoc](https://godoc.org/github.com/xdg/stringprep?status.svg)](https://godoc.org/github.com/xdg/stringprep)
+[![Build Status](https://travis-ci.org/xdg/stringprep.svg?branch=master)](https://travis-ci.org/xdg/stringprep)
+
+# stringprep – Go implementation of RFC-3454 stringprep and RFC-4013 SASLprep
+
+## Synopsis
+
+```go
+import "github.com/xdg/stringprep"
+
+prepped, err := stringprep.SASLprep.Prepare("TrustNô1")
+```
+
+## Description
+
+This library provides an implementation of the stringprep algorithm
+(RFC-3454) in Go, including all data tables.
+
+A pre-built SASLprep (RFC-4013) profile is provided as well.
+
+## Copyright and License
+
+Copyright 2018 by David A. Golden. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License"). You may
+obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
diff --git a/vendor/github.com/xdg/stringprep/bidi.go b/vendor/github.com/xdg/stringprep/bidi.go
new file mode 100644
index 0000000..6f6d321
--- /dev/null
+++ b/vendor/github.com/xdg/stringprep/bidi.go
@@ -0,0 +1,73 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package stringprep
+
+var errHasLCat = "BiDi string can't have runes from category L"
+var errFirstRune = "BiDi string first rune must have category R or AL"
+var errLastRune = "BiDi string last rune must have category R or AL"
+
+// Check for prohibited characters from table C.8
+func checkBiDiProhibitedRune(s string) error {
+	for _, r := range s {
+		if TableC8.Contains(r) {
+			return Error{Msg: errProhibited, Rune: r}
+		}
+	}
+	return nil
+}
+
+// Check for LCat characters from table D.2
+func checkBiDiLCat(s string) error {
+	for _, r := range s {
+		if TableD2.Contains(r) {
+			return Error{Msg: errHasLCat, Rune: r}
+		}
+	}
+	return nil
+}
+
+// Check first and last characters are in table D.1; requires non-empty string
+func checkBadFirstAndLastRandALCat(s string) error {
+	rs := []rune(s)
+	if !TableD1.Contains(rs[0]) {
+		return Error{Msg: errFirstRune, Rune: rs[0]}
+	}
+	n := len(rs) - 1
+	if !TableD1.Contains(rs[n]) {
+		return Error{Msg: errLastRune, Rune: rs[n]}
+	}
+	return nil
+}
+
+// Look for RandALCat characters from table D.1
+func hasBiDiRandALCat(s string) bool {
+	for _, r := range s {
+		if TableD1.Contains(r) {
+			return true
+		}
+	}
+	return false
+}
+
+// Check that BiDi rules are satisfied; let the empty string pass this rule
+func passesBiDiRules(s string) error {
+	if len(s) == 0 {
+		return nil
+	}
+	if err := checkBiDiProhibitedRune(s); err != nil {
+		return err
+	}
+	if hasBiDiRandALCat(s) {
+		if err := checkBiDiLCat(s); err != nil {
+			return err
+		}
+		if err := checkBadFirstAndLastRandALCat(s); err != nil {
+			return err
+		}
+	}
+	return nil
+}
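+
+// For example, "\u05D0a" (Hebrew Alef followed by Latin 'a') fails the
+// rules above: it contains an RandALCat rune, so LCat runes are
+// forbidden and the last rune must be RandALCat, while an all-Hebrew
+// or all-Latin string passes.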
diff --git a/vendor/github.com/xdg/stringprep/doc.go b/vendor/github.com/xdg/stringprep/doc.go
new file mode 100644
index 0000000..b319e08
--- /dev/null
+++ b/vendor/github.com/xdg/stringprep/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package stringprep provides data tables and algorithms for RFC-3454,
+// including errata (as of 2018-02).  It also provides a profile for
+// SASLprep as defined in RFC-4013.
+package stringprep
diff --git a/vendor/github.com/xdg/stringprep/error.go b/vendor/github.com/xdg/stringprep/error.go
new file mode 100644
index 0000000..7403e49
--- /dev/null
+++ b/vendor/github.com/xdg/stringprep/error.go
@@ -0,0 +1,14 @@
+package stringprep
+
+import "fmt"
+
+// Error describes problems encountered during stringprep, including what rune
+// was problematic.
+type Error struct {
+	Msg  string
+	Rune rune
+}
+
+func (e Error) Error() string {
+	return fmt.Sprintf("%s (rune: '\\u%04x')", e.Msg, e.Rune)
+}
diff --git a/vendor/github.com/xdg/stringprep/map.go b/vendor/github.com/xdg/stringprep/map.go
new file mode 100644
index 0000000..e56a0dd
--- /dev/null
+++ b/vendor/github.com/xdg/stringprep/map.go
@@ -0,0 +1,21 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package stringprep
+
+// Mapping represents a stringprep mapping, from a single rune to zero or more
+// runes.
+type Mapping map[rune][]rune
+
+// Map maps a rune to a (possibly empty) rune slice via a stringprep Mapping.
+// The ok return value is false if the rune was not found.
+func (m Mapping) Map(r rune) (replacement []rune, ok bool) {
+	rs, ok := m[r]
+	if !ok {
+		return nil, false
+	}
+	return rs, true
+}
diff --git a/vendor/github.com/xdg/stringprep/profile.go b/vendor/github.com/xdg/stringprep/profile.go
new file mode 100644
index 0000000..5a73be9
--- /dev/null
+++ b/vendor/github.com/xdg/stringprep/profile.go
@@ -0,0 +1,75 @@
+package stringprep
+
+import (
+	"golang.org/x/text/unicode/norm"
+)
+
+// Profile represents a stringprep profile.
+type Profile struct {
+	Mappings  []Mapping
+	Normalize bool
+	Prohibits []Set
+	CheckBiDi bool
+}
+
+var errProhibited = "prohibited character"
+
+// Prepare transforms an input string to an output string following
+// the rules defined in the profile as defined by RFC-3454.
+func (p Profile) Prepare(s string) (string, error) {
+	// Optimistically, assume output will be same length as input
+	temp := make([]rune, 0, len(s))
+
+	// Apply maps
+	for _, r := range s {
+		rs, ok := p.applyMaps(r)
+		if ok {
+			temp = append(temp, rs...)
+		} else {
+			temp = append(temp, r)
+		}
+	}
+
+	// Normalize
+	var out string
+	if p.Normalize {
+		out = norm.NFKC.String(string(temp))
+	} else {
+		out = string(temp)
+	}
+
+	// Check prohibited
+	for _, r := range out {
+		if p.runeIsProhibited(r) {
+			return "", Error{Msg: errProhibited, Rune: r}
+		}
+	}
+
+	// Check BiDi allowed
+	if p.CheckBiDi {
+		if err := passesBiDiRules(out); err != nil {
+			return "", err
+		}
+	}
+
+	return out, nil
+}
+
+func (p Profile) applyMaps(r rune) ([]rune, bool) {
+	for _, m := range p.Mappings {
+		rs, ok := m.Map(r)
+		if ok {
+			return rs, true
+		}
+	}
+	return nil, false
+}
+
+func (p Profile) runeIsProhibited(r rune) bool {
+	for _, s := range p.Prohibits {
+		if s.Contains(r) {
+			return true
+		}
+	}
+	return false
+}
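+
+// A custom profile can be assembled from the exported tables (a sketch;
+// it assumes the case-folding table TableB2 defined in tables.go):
+//
+//     caseFold := Profile{
+//         Mappings:  []Mapping{TableB2},
+//         Normalize: true,
+//         CheckBiDi: true,
+//     }
+//     out, err := caseFold.Prepare("SomeUser") // out == "someuser"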
diff --git a/vendor/github.com/xdg/stringprep/saslprep.go b/vendor/github.com/xdg/stringprep/saslprep.go
new file mode 100644
index 0000000..4001348
--- /dev/null
+++ b/vendor/github.com/xdg/stringprep/saslprep.go
@@ -0,0 +1,52 @@
+package stringprep
+
+var mapNonASCIISpaceToASCIISpace = Mapping{
+	0x00A0: []rune{0x0020},
+	0x1680: []rune{0x0020},
+	0x2000: []rune{0x0020},
+	0x2001: []rune{0x0020},
+	0x2002: []rune{0x0020},
+	0x2003: []rune{0x0020},
+	0x2004: []rune{0x0020},
+	0x2005: []rune{0x0020},
+	0x2006: []rune{0x0020},
+	0x2007: []rune{0x0020},
+	0x2008: []rune{0x0020},
+	0x2009: []rune{0x0020},
+	0x200A: []rune{0x0020},
+	0x200B: []rune{0x0020},
+	0x202F: []rune{0x0020},
+	0x205F: []rune{0x0020},
+	0x3000: []rune{0x0020},
+}
+
+// SASLprep is a pre-defined stringprep profile for user names and passwords
+// as described in RFC-4013.
+//
+// Because the stringprep distinction between query and stored strings was
+// intended for compatibility across profile versions, but SASLprep was never
+// updated and is now deprecated, this profile only operates in stored
+// strings mode, prohibiting unassigned code points.
+var SASLprep Profile = saslprep
+
+var saslprep = Profile{
+	Mappings: []Mapping{
+		TableB1,
+		mapNonASCIISpaceToASCIISpace,
+	},
+	Normalize: true,
+	Prohibits: []Set{
+		TableA1,
+		TableC1_2,
+		TableC2_1,
+		TableC2_2,
+		TableC3,
+		TableC4,
+		TableC5,
+		TableC6,
+		TableC7,
+		TableC8,
+		TableC9,
+	},
+	CheckBiDi: true,
+}
diff --git a/vendor/github.com/xdg/stringprep/set.go b/vendor/github.com/xdg/stringprep/set.go
new file mode 100644
index 0000000..c837e28
--- /dev/null
+++ b/vendor/github.com/xdg/stringprep/set.go
@@ -0,0 +1,36 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package stringprep
+
+import "sort"
+
+// RuneRange represents a closed range of runes: [N,M].  For a range
+// consisting of a single rune, N and M will be equal.
+type RuneRange [2]rune
+
+// Contains returns true if a rune is within the bounds of the RuneRange.
+func (rr RuneRange) Contains(r rune) bool {
+	return rr[0] <= r && r <= rr[1]
+}
+
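+// isAbove reports whether the range begins at or above the given rune,
+// so the whole range lies at or above r; Set.Contains uses it as the
+// sort.Search predicate to binary-search the sorted ranges.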
+func (rr RuneRange) isAbove(r rune) bool {
+	return r <= rr[0]
+}
+
+// Set represents a stringprep data table used to identify runes of a
+// particular type.
+type Set []RuneRange
+
+// Contains returns true if a rune is within any of the RuneRanges in the
+// Set.
+func (s Set) Contains(r rune) bool {
+	i := sort.Search(len(s), func(i int) bool { return s[i].Contains(r) || s[i].isAbove(r) })
+	if i < len(s) && s[i].Contains(r) {
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/xdg/stringprep/tables.go b/vendor/github.com/xdg/stringprep/tables.go
new file mode 100644
index 0000000..c3fc1fa
--- /dev/null
+++ b/vendor/github.com/xdg/stringprep/tables.go
@@ -0,0 +1,3215 @@
+// Copyright 2018 by David A. Golden. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package stringprep
+
+var tableA1 = Set{
+	RuneRange{0x0221, 0x0221},
+	RuneRange{0x0234, 0x024F},
+	RuneRange{0x02AE, 0x02AF},
+	RuneRange{0x02EF, 0x02FF},
+	RuneRange{0x0350, 0x035F},
+	RuneRange{0x0370, 0x0373},
+	RuneRange{0x0376, 0x0379},
+	RuneRange{0x037B, 0x037D},
+	RuneRange{0x037F, 0x0383},
+	RuneRange{0x038B, 0x038B},
+	RuneRange{0x038D, 0x038D},
+	RuneRange{0x03A2, 0x03A2},
+	RuneRange{0x03CF, 0x03CF},
+	RuneRange{0x03F7, 0x03FF},
+	RuneRange{0x0487, 0x0487},
+	RuneRange{0x04CF, 0x04CF},
+	RuneRange{0x04F6, 0x04F7},
+	RuneRange{0x04FA, 0x04FF},
+	RuneRange{0x0510, 0x0530},
+	RuneRange{0x0557, 0x0558},
+	RuneRange{0x0560, 0x0560},
+	RuneRange{0x0588, 0x0588},
+	RuneRange{0x058B, 0x0590},
+	RuneRange{0x05A2, 0x05A2},
+	RuneRange{0x05BA, 0x05BA},
+	RuneRange{0x05C5, 0x05CF},
+	RuneRange{0x05EB, 0x05EF},
+	RuneRange{0x05F5, 0x060B},
+	RuneRange{0x060D, 0x061A},
+	RuneRange{0x061C, 0x061E},
+	RuneRange{0x0620, 0x0620},
+	RuneRange{0x063B, 0x063F},
+	RuneRange{0x0656, 0x065F},
+	RuneRange{0x06EE, 0x06EF},
+	RuneRange{0x06FF, 0x06FF},
+	RuneRange{0x070E, 0x070E},
+	RuneRange{0x072D, 0x072F},
+	RuneRange{0x074B, 0x077F},
+	RuneRange{0x07B2, 0x0900},
+	RuneRange{0x0904, 0x0904},
+	RuneRange{0x093A, 0x093B},
+	RuneRange{0x094E, 0x094F},
+	RuneRange{0x0955, 0x0957},
+	RuneRange{0x0971, 0x0980},
+	RuneRange{0x0984, 0x0984},
+	RuneRange{0x098D, 0x098E},
+	RuneRange{0x0991, 0x0992},
+	RuneRange{0x09A9, 0x09A9},
+	RuneRange{0x09B1, 0x09B1},
+	RuneRange{0x09B3, 0x09B5},
+	RuneRange{0x09BA, 0x09BB},
+	RuneRange{0x09BD, 0x09BD},
+	RuneRange{0x09C5, 0x09C6},
+	RuneRange{0x09C9, 0x09CA},
+	RuneRange{0x09CE, 0x09D6},
+	RuneRange{0x09D8, 0x09DB},
+	RuneRange{0x09DE, 0x09DE},
+	RuneRange{0x09E4, 0x09E5},
+	RuneRange{0x09FB, 0x0A01},
+	RuneRange{0x0A03, 0x0A04},
+	RuneRange{0x0A0B, 0x0A0E},
+	RuneRange{0x0A11, 0x0A12},
+	RuneRange{0x0A29, 0x0A29},
+	RuneRange{0x0A31, 0x0A31},
+	RuneRange{0x0A34, 0x0A34},
+	RuneRange{0x0A37, 0x0A37},
+	RuneRange{0x0A3A, 0x0A3B},
+	RuneRange{0x0A3D, 0x0A3D},
+	RuneRange{0x0A43, 0x0A46},
+	RuneRange{0x0A49, 0x0A4A},
+	RuneRange{0x0A4E, 0x0A58},
+	RuneRange{0x0A5D, 0x0A5D},
+	RuneRange{0x0A5F, 0x0A65},
+	RuneRange{0x0A75, 0x0A80},
+	RuneRange{0x0A84, 0x0A84},
+	RuneRange{0x0A8C, 0x0A8C},
+	RuneRange{0x0A8E, 0x0A8E},
+	RuneRange{0x0A92, 0x0A92},
+	RuneRange{0x0AA9, 0x0AA9},
+	RuneRange{0x0AB1, 0x0AB1},
+	RuneRange{0x0AB4, 0x0AB4},
+	RuneRange{0x0ABA, 0x0ABB},
+	RuneRange{0x0AC6, 0x0AC6},
+	RuneRange{0x0ACA, 0x0ACA},
+	RuneRange{0x0ACE, 0x0ACF},
+	RuneRange{0x0AD1, 0x0ADF},
+	RuneRange{0x0AE1, 0x0AE5},
+	RuneRange{0x0AF0, 0x0B00},
+	RuneRange{0x0B04, 0x0B04},
+	RuneRange{0x0B0D, 0x0B0E},
+	RuneRange{0x0B11, 0x0B12},
+	RuneRange{0x0B29, 0x0B29},
+	RuneRange{0x0B31, 0x0B31},
+	RuneRange{0x0B34, 0x0B35},
+	RuneRange{0x0B3A, 0x0B3B},
+	RuneRange{0x0B44, 0x0B46},
+	RuneRange{0x0B49, 0x0B4A},
+	RuneRange{0x0B4E, 0x0B55},
+	RuneRange{0x0B58, 0x0B5B},
+	RuneRange{0x0B5E, 0x0B5E},
+	RuneRange{0x0B62, 0x0B65},
+	RuneRange{0x0B71, 0x0B81},
+	RuneRange{0x0B84, 0x0B84},
+	RuneRange{0x0B8B, 0x0B8D},
+	RuneRange{0x0B91, 0x0B91},
+	RuneRange{0x0B96, 0x0B98},
+	RuneRange{0x0B9B, 0x0B9B},
+	RuneRange{0x0B9D, 0x0B9D},
+	RuneRange{0x0BA0, 0x0BA2},
+	RuneRange{0x0BA5, 0x0BA7},
+	RuneRange{0x0BAB, 0x0BAD},
+	RuneRange{0x0BB6, 0x0BB6},
+	RuneRange{0x0BBA, 0x0BBD},
+	RuneRange{0x0BC3, 0x0BC5},
+	RuneRange{0x0BC9, 0x0BC9},
+	RuneRange{0x0BCE, 0x0BD6},
+	RuneRange{0x0BD8, 0x0BE6},
+	RuneRange{0x0BF3, 0x0C00},
+	RuneRange{0x0C04, 0x0C04},
+	RuneRange{0x0C0D, 0x0C0D},
+	RuneRange{0x0C11, 0x0C11},
+	RuneRange{0x0C29, 0x0C29},
+	RuneRange{0x0C34, 0x0C34},
+	RuneRange{0x0C3A, 0x0C3D},
+	RuneRange{0x0C45, 0x0C45},
+	RuneRange{0x0C49, 0x0C49},
+	RuneRange{0x0C4E, 0x0C54},
+	RuneRange{0x0C57, 0x0C5F},
+	RuneRange{0x0C62, 0x0C65},
+	RuneRange{0x0C70, 0x0C81},
+	RuneRange{0x0C84, 0x0C84},
+	RuneRange{0x0C8D, 0x0C8D},
+	RuneRange{0x0C91, 0x0C91},
+	RuneRange{0x0CA9, 0x0CA9},
+	RuneRange{0x0CB4, 0x0CB4},
+	RuneRange{0x0CBA, 0x0CBD},
+	RuneRange{0x0CC5, 0x0CC5},
+	RuneRange{0x0CC9, 0x0CC9},
+	RuneRange{0x0CCE, 0x0CD4},
+	RuneRange{0x0CD7, 0x0CDD},
+	RuneRange{0x0CDF, 0x0CDF},
+	RuneRange{0x0CE2, 0x0CE5},
+	RuneRange{0x0CF0, 0x0D01},
+	RuneRange{0x0D04, 0x0D04},
+	RuneRange{0x0D0D, 0x0D0D},
+	RuneRange{0x0D11, 0x0D11},
+	RuneRange{0x0D29, 0x0D29},
+	RuneRange{0x0D3A, 0x0D3D},
+	RuneRange{0x0D44, 0x0D45},
+	RuneRange{0x0D49, 0x0D49},
+	RuneRange{0x0D4E, 0x0D56},
+	RuneRange{0x0D58, 0x0D5F},
+	RuneRange{0x0D62, 0x0D65},
+	RuneRange{0x0D70, 0x0D81},
+	RuneRange{0x0D84, 0x0D84},
+	RuneRange{0x0D97, 0x0D99},
+	RuneRange{0x0DB2, 0x0DB2},
+	RuneRange{0x0DBC, 0x0DBC},
+	RuneRange{0x0DBE, 0x0DBF},
+	RuneRange{0x0DC7, 0x0DC9},
+	RuneRange{0x0DCB, 0x0DCE},
+	RuneRange{0x0DD5, 0x0DD5},
+	RuneRange{0x0DD7, 0x0DD7},
+	RuneRange{0x0DE0, 0x0DF1},
+	RuneRange{0x0DF5, 0x0E00},
+	RuneRange{0x0E3B, 0x0E3E},
+	RuneRange{0x0E5C, 0x0E80},
+	RuneRange{0x0E83, 0x0E83},
+	RuneRange{0x0E85, 0x0E86},
+	RuneRange{0x0E89, 0x0E89},
+	RuneRange{0x0E8B, 0x0E8C},
+	RuneRange{0x0E8E, 0x0E93},
+	RuneRange{0x0E98, 0x0E98},
+	RuneRange{0x0EA0, 0x0EA0},
+	RuneRange{0x0EA4, 0x0EA4},
+	RuneRange{0x0EA6, 0x0EA6},
+	RuneRange{0x0EA8, 0x0EA9},
+	RuneRange{0x0EAC, 0x0EAC},
+	RuneRange{0x0EBA, 0x0EBA},
+	RuneRange{0x0EBE, 0x0EBF},
+	RuneRange{0x0EC5, 0x0EC5},
+	RuneRange{0x0EC7, 0x0EC7},
+	RuneRange{0x0ECE, 0x0ECF},
+	RuneRange{0x0EDA, 0x0EDB},
+	RuneRange{0x0EDE, 0x0EFF},
+	RuneRange{0x0F48, 0x0F48},
+	RuneRange{0x0F6B, 0x0F70},
+	RuneRange{0x0F8C, 0x0F8F},
+	RuneRange{0x0F98, 0x0F98},
+	RuneRange{0x0FBD, 0x0FBD},
+	RuneRange{0x0FCD, 0x0FCE},
+	RuneRange{0x0FD0, 0x0FFF},
+	RuneRange{0x1022, 0x1022},
+	RuneRange{0x1028, 0x1028},
+	RuneRange{0x102B, 0x102B},
+	RuneRange{0x1033, 0x1035},
+	RuneRange{0x103A, 0x103F},
+	RuneRange{0x105A, 0x109F},
+	RuneRange{0x10C6, 0x10CF},
+	RuneRange{0x10F9, 0x10FA},
+	RuneRange{0x10FC, 0x10FF},
+	RuneRange{0x115A, 0x115E},
+	RuneRange{0x11A3, 0x11A7},
+	RuneRange{0x11FA, 0x11FF},
+	RuneRange{0x1207, 0x1207},
+	RuneRange{0x1247, 0x1247},
+	RuneRange{0x1249, 0x1249},
+	RuneRange{0x124E, 0x124F},
+	RuneRange{0x1257, 0x1257},
+	RuneRange{0x1259, 0x1259},
+	RuneRange{0x125E, 0x125F},
+	RuneRange{0x1287, 0x1287},
+	RuneRange{0x1289, 0x1289},
+	RuneRange{0x128E, 0x128F},
+	RuneRange{0x12AF, 0x12AF},
+	RuneRange{0x12B1, 0x12B1},
+	RuneRange{0x12B6, 0x12B7},
+	RuneRange{0x12BF, 0x12BF},
+	RuneRange{0x12C1, 0x12C1},
+	RuneRange{0x12C6, 0x12C7},
+	RuneRange{0x12CF, 0x12CF},
+	RuneRange{0x12D7, 0x12D7},
+	RuneRange{0x12EF, 0x12EF},
+	RuneRange{0x130F, 0x130F},
+	RuneRange{0x1311, 0x1311},
+	RuneRange{0x1316, 0x1317},
+	RuneRange{0x131F, 0x131F},
+	RuneRange{0x1347, 0x1347},
+	RuneRange{0x135B, 0x1360},
+	RuneRange{0x137D, 0x139F},
+	RuneRange{0x13F5, 0x1400},
+	RuneRange{0x1677, 0x167F},
+	RuneRange{0x169D, 0x169F},
+	RuneRange{0x16F1, 0x16FF},
+	RuneRange{0x170D, 0x170D},
+	RuneRange{0x1715, 0x171F},
+	RuneRange{0x1737, 0x173F},
+	RuneRange{0x1754, 0x175F},
+	RuneRange{0x176D, 0x176D},
+	RuneRange{0x1771, 0x1771},
+	RuneRange{0x1774, 0x177F},
+	RuneRange{0x17DD, 0x17DF},
+	RuneRange{0x17EA, 0x17FF},
+	RuneRange{0x180F, 0x180F},
+	RuneRange{0x181A, 0x181F},
+	RuneRange{0x1878, 0x187F},
+	RuneRange{0x18AA, 0x1DFF},
+	RuneRange{0x1E9C, 0x1E9F},
+	RuneRange{0x1EFA, 0x1EFF},
+	RuneRange{0x1F16, 0x1F17},
+	RuneRange{0x1F1E, 0x1F1F},
+	RuneRange{0x1F46, 0x1F47},
+	RuneRange{0x1F4E, 0x1F4F},
+	RuneRange{0x1F58, 0x1F58},
+	RuneRange{0x1F5A, 0x1F5A},
+	RuneRange{0x1F5C, 0x1F5C},
+	RuneRange{0x1F5E, 0x1F5E},
+	RuneRange{0x1F7E, 0x1F7F},
+	RuneRange{0x1FB5, 0x1FB5},
+	RuneRange{0x1FC5, 0x1FC5},
+	RuneRange{0x1FD4, 0x1FD5},
+	RuneRange{0x1FDC, 0x1FDC},
+	RuneRange{0x1FF0, 0x1FF1},
+	RuneRange{0x1FF5, 0x1FF5},
+	RuneRange{0x1FFF, 0x1FFF},
+	RuneRange{0x2053, 0x2056},
+	RuneRange{0x2058, 0x205E},
+	RuneRange{0x2064, 0x2069},
+	RuneRange{0x2072, 0x2073},
+	RuneRange{0x208F, 0x209F},
+	RuneRange{0x20B2, 0x20CF},
+	RuneRange{0x20EB, 0x20FF},
+	RuneRange{0x213B, 0x213C},
+	RuneRange{0x214C, 0x2152},
+	RuneRange{0x2184, 0x218F},
+	RuneRange{0x23CF, 0x23FF},
+	RuneRange{0x2427, 0x243F},
+	RuneRange{0x244B, 0x245F},
+	RuneRange{0x24FF, 0x24FF},
+	RuneRange{0x2614, 0x2615},
+	RuneRange{0x2618, 0x2618},
+	RuneRange{0x267E, 0x267F},
+	RuneRange{0x268A, 0x2700},
+	RuneRange{0x2705, 0x2705},
+	RuneRange{0x270A, 0x270B},
+	RuneRange{0x2728, 0x2728},
+	RuneRange{0x274C, 0x274C},
+	RuneRange{0x274E, 0x274E},
+	RuneRange{0x2753, 0x2755},
+	RuneRange{0x2757, 0x2757},
+	RuneRange{0x275F, 0x2760},
+	RuneRange{0x2795, 0x2797},
+	RuneRange{0x27B0, 0x27B0},
+	RuneRange{0x27BF, 0x27CF},
+	RuneRange{0x27EC, 0x27EF},
+	RuneRange{0x2B00, 0x2E7F},
+	RuneRange{0x2E9A, 0x2E9A},
+	RuneRange{0x2EF4, 0x2EFF},
+	RuneRange{0x2FD6, 0x2FEF},
+	RuneRange{0x2FFC, 0x2FFF},
+	RuneRange{0x3040, 0x3040},
+	RuneRange{0x3097, 0x3098},
+	RuneRange{0x3100, 0x3104},
+	RuneRange{0x312D, 0x3130},
+	RuneRange{0x318F, 0x318F},
+	RuneRange{0x31B8, 0x31EF},
+	RuneRange{0x321D, 0x321F},
+	RuneRange{0x3244, 0x3250},
+	RuneRange{0x327C, 0x327E},
+	RuneRange{0x32CC, 0x32CF},
+	RuneRange{0x32FF, 0x32FF},
+	RuneRange{0x3377, 0x337A},
+	RuneRange{0x33DE, 0x33DF},
+	RuneRange{0x33FF, 0x33FF},
+	RuneRange{0x4DB6, 0x4DFF},
+	RuneRange{0x9FA6, 0x9FFF},
+	RuneRange{0xA48D, 0xA48F},
+	RuneRange{0xA4C7, 0xABFF},
+	RuneRange{0xD7A4, 0xD7FF},
+	RuneRange{0xFA2E, 0xFA2F},
+	RuneRange{0xFA6B, 0xFAFF},
+	RuneRange{0xFB07, 0xFB12},
+	RuneRange{0xFB18, 0xFB1C},
+	RuneRange{0xFB37, 0xFB37},
+	RuneRange{0xFB3D, 0xFB3D},
+	RuneRange{0xFB3F, 0xFB3F},
+	RuneRange{0xFB42, 0xFB42},
+	RuneRange{0xFB45, 0xFB45},
+	RuneRange{0xFBB2, 0xFBD2},
+	RuneRange{0xFD40, 0xFD4F},
+	RuneRange{0xFD90, 0xFD91},
+	RuneRange{0xFDC8, 0xFDCF},
+	RuneRange{0xFDFD, 0xFDFF},
+	RuneRange{0xFE10, 0xFE1F},
+	RuneRange{0xFE24, 0xFE2F},
+	RuneRange{0xFE47, 0xFE48},
+	RuneRange{0xFE53, 0xFE53},
+	RuneRange{0xFE67, 0xFE67},
+	RuneRange{0xFE6C, 0xFE6F},
+	RuneRange{0xFE75, 0xFE75},
+	RuneRange{0xFEFD, 0xFEFE},
+	RuneRange{0xFF00, 0xFF00},
+	RuneRange{0xFFBF, 0xFFC1},
+	RuneRange{0xFFC8, 0xFFC9},
+	RuneRange{0xFFD0, 0xFFD1},
+	RuneRange{0xFFD8, 0xFFD9},
+	RuneRange{0xFFDD, 0xFFDF},
+	RuneRange{0xFFE7, 0xFFE7},
+	RuneRange{0xFFEF, 0xFFF8},
+	RuneRange{0x10000, 0x102FF},
+	RuneRange{0x1031F, 0x1031F},
+	RuneRange{0x10324, 0x1032F},
+	RuneRange{0x1034B, 0x103FF},
+	RuneRange{0x10426, 0x10427},
+	RuneRange{0x1044E, 0x1CFFF},
+	RuneRange{0x1D0F6, 0x1D0FF},
+	RuneRange{0x1D127, 0x1D129},
+	RuneRange{0x1D1DE, 0x1D3FF},
+	RuneRange{0x1D455, 0x1D455},
+	RuneRange{0x1D49D, 0x1D49D},
+	RuneRange{0x1D4A0, 0x1D4A1},
+	RuneRange{0x1D4A3, 0x1D4A4},
+	RuneRange{0x1D4A7, 0x1D4A8},
+	RuneRange{0x1D4AD, 0x1D4AD},
+	RuneRange{0x1D4BA, 0x1D4BA},
+	RuneRange{0x1D4BC, 0x1D4BC},
+	RuneRange{0x1D4C1, 0x1D4C1},
+	RuneRange{0x1D4C4, 0x1D4C4},
+	RuneRange{0x1D506, 0x1D506},
+	RuneRange{0x1D50B, 0x1D50C},
+	RuneRange{0x1D515, 0x1D515},
+	RuneRange{0x1D51D, 0x1D51D},
+	RuneRange{0x1D53A, 0x1D53A},
+	RuneRange{0x1D53F, 0x1D53F},
+	RuneRange{0x1D545, 0x1D545},
+	RuneRange{0x1D547, 0x1D549},
+	RuneRange{0x1D551, 0x1D551},
+	RuneRange{0x1D6A4, 0x1D6A7},
+	RuneRange{0x1D7CA, 0x1D7CD},
+	RuneRange{0x1D800, 0x1FFFD},
+	RuneRange{0x2A6D7, 0x2F7FF},
+	RuneRange{0x2FA1E, 0x2FFFD},
+	RuneRange{0x30000, 0x3FFFD},
+	RuneRange{0x40000, 0x4FFFD},
+	RuneRange{0x50000, 0x5FFFD},
+	RuneRange{0x60000, 0x6FFFD},
+	RuneRange{0x70000, 0x7FFFD},
+	RuneRange{0x80000, 0x8FFFD},
+	RuneRange{0x90000, 0x9FFFD},
+	RuneRange{0xA0000, 0xAFFFD},
+	RuneRange{0xB0000, 0xBFFFD},
+	RuneRange{0xC0000, 0xCFFFD},
+	RuneRange{0xD0000, 0xDFFFD},
+	RuneRange{0xE0000, 0xE0000},
+	RuneRange{0xE0002, 0xE001F},
+	RuneRange{0xE0080, 0xEFFFD},
+}
+
+// TableA1 represents RFC-3454 Table A.1.
+var TableA1 Set = tableA1
+
+var tableB1 = Mapping{
+	0x00AD: []rune{}, // Map to nothing
+	0x034F: []rune{}, // Map to nothing
+	0x180B: []rune{}, // Map to nothing
+	0x180C: []rune{}, // Map to nothing
+	0x180D: []rune{}, // Map to nothing
+	0x200B: []rune{}, // Map to nothing
+	0x200C: []rune{}, // Map to nothing
+	0x200D: []rune{}, // Map to nothing
+	0x2060: []rune{}, // Map to nothing
+	0xFE00: []rune{}, // Map to nothing
+	0xFE01: []rune{}, // Map to nothing
+	0xFE02: []rune{}, // Map to nothing
+	0xFE03: []rune{}, // Map to nothing
+	0xFE04: []rune{}, // Map to nothing
+	0xFE05: []rune{}, // Map to nothing
+	0xFE06: []rune{}, // Map to nothing
+	0xFE07: []rune{}, // Map to nothing
+	0xFE08: []rune{}, // Map to nothing
+	0xFE09: []rune{}, // Map to nothing
+	0xFE0A: []rune{}, // Map to nothing
+	0xFE0B: []rune{}, // Map to nothing
+	0xFE0C: []rune{}, // Map to nothing
+	0xFE0D: []rune{}, // Map to nothing
+	0xFE0E: []rune{}, // Map to nothing
+	0xFE0F: []rune{}, // Map to nothing
+	0xFEFF: []rune{}, // Map to nothing
+}
+
+// TableB1 represents RFC-3454 Table B.1.
+var TableB1 Mapping = tableB1
+
+var tableB2 = Mapping{
+	0x0041:  []rune{0x0061},                         // Case map
+	0x0042:  []rune{0x0062},                         // Case map
+	0x0043:  []rune{0x0063},                         // Case map
+	0x0044:  []rune{0x0064},                         // Case map
+	0x0045:  []rune{0x0065},                         // Case map
+	0x0046:  []rune{0x0066},                         // Case map
+	0x0047:  []rune{0x0067},                         // Case map
+	0x0048:  []rune{0x0068},                         // Case map
+	0x0049:  []rune{0x0069},                         // Case map
+	0x004A:  []rune{0x006A},                         // Case map
+	0x004B:  []rune{0x006B},                         // Case map
+	0x004C:  []rune{0x006C},                         // Case map
+	0x004D:  []rune{0x006D},                         // Case map
+	0x004E:  []rune{0x006E},                         // Case map
+	0x004F:  []rune{0x006F},                         // Case map
+	0x0050:  []rune{0x0070},                         // Case map
+	0x0051:  []rune{0x0071},                         // Case map
+	0x0052:  []rune{0x0072},                         // Case map
+	0x0053:  []rune{0x0073},                         // Case map
+	0x0054:  []rune{0x0074},                         // Case map
+	0x0055:  []rune{0x0075},                         // Case map
+	0x0056:  []rune{0x0076},                         // Case map
+	0x0057:  []rune{0x0077},                         // Case map
+	0x0058:  []rune{0x0078},                         // Case map
+	0x0059:  []rune{0x0079},                         // Case map
+	0x005A:  []rune{0x007A},                         // Case map
+	0x00B5:  []rune{0x03BC},                         // Case map
+	0x00C0:  []rune{0x00E0},                         // Case map
+	0x00C1:  []rune{0x00E1},                         // Case map
+	0x00C2:  []rune{0x00E2},                         // Case map
+	0x00C3:  []rune{0x00E3},                         // Case map
+	0x00C4:  []rune{0x00E4},                         // Case map
+	0x00C5:  []rune{0x00E5},                         // Case map
+	0x00C6:  []rune{0x00E6},                         // Case map
+	0x00C7:  []rune{0x00E7},                         // Case map
+	0x00C8:  []rune{0x00E8},                         // Case map
+	0x00C9:  []rune{0x00E9},                         // Case map
+	0x00CA:  []rune{0x00EA},                         // Case map
+	0x00CB:  []rune{0x00EB},                         // Case map
+	0x00CC:  []rune{0x00EC},                         // Case map
+	0x00CD:  []rune{0x00ED},                         // Case map
+	0x00CE:  []rune{0x00EE},                         // Case map
+	0x00CF:  []rune{0x00EF},                         // Case map
+	0x00D0:  []rune{0x00F0},                         // Case map
+	0x00D1:  []rune{0x00F1},                         // Case map
+	0x00D2:  []rune{0x00F2},                         // Case map
+	0x00D3:  []rune{0x00F3},                         // Case map
+	0x00D4:  []rune{0x00F4},                         // Case map
+	0x00D5:  []rune{0x00F5},                         // Case map
+	0x00D6:  []rune{0x00F6},                         // Case map
+	0x00D8:  []rune{0x00F8},                         // Case map
+	0x00D9:  []rune{0x00F9},                         // Case map
+	0x00DA:  []rune{0x00FA},                         // Case map
+	0x00DB:  []rune{0x00FB},                         // Case map
+	0x00DC:  []rune{0x00FC},                         // Case map
+	0x00DD:  []rune{0x00FD},                         // Case map
+	0x00DE:  []rune{0x00FE},                         // Case map
+	0x00DF:  []rune{0x0073, 0x0073},                 // Case map
+	0x0100:  []rune{0x0101},                         // Case map
+	0x0102:  []rune{0x0103},                         // Case map
+	0x0104:  []rune{0x0105},                         // Case map
+	0x0106:  []rune{0x0107},                         // Case map
+	0x0108:  []rune{0x0109},                         // Case map
+	0x010A:  []rune{0x010B},                         // Case map
+	0x010C:  []rune{0x010D},                         // Case map
+	0x010E:  []rune{0x010F},                         // Case map
+	0x0110:  []rune{0x0111},                         // Case map
+	0x0112:  []rune{0x0113},                         // Case map
+	0x0114:  []rune{0x0115},                         // Case map
+	0x0116:  []rune{0x0117},                         // Case map
+	0x0118:  []rune{0x0119},                         // Case map
+	0x011A:  []rune{0x011B},                         // Case map
+	0x011C:  []rune{0x011D},                         // Case map
+	0x011E:  []rune{0x011F},                         // Case map
+	0x0120:  []rune{0x0121},                         // Case map
+	0x0122:  []rune{0x0123},                         // Case map
+	0x0124:  []rune{0x0125},                         // Case map
+	0x0126:  []rune{0x0127},                         // Case map
+	0x0128:  []rune{0x0129},                         // Case map
+	0x012A:  []rune{0x012B},                         // Case map
+	0x012C:  []rune{0x012D},                         // Case map
+	0x012E:  []rune{0x012F},                         // Case map
+	0x0130:  []rune{0x0069, 0x0307},                 // Case map
+	0x0132:  []rune{0x0133},                         // Case map
+	0x0134:  []rune{0x0135},                         // Case map
+	0x0136:  []rune{0x0137},                         // Case map
+	0x0139:  []rune{0x013A},                         // Case map
+	0x013B:  []rune{0x013C},                         // Case map
+	0x013D:  []rune{0x013E},                         // Case map
+	0x013F:  []rune{0x0140},                         // Case map
+	0x0141:  []rune{0x0142},                         // Case map
+	0x0143:  []rune{0x0144},                         // Case map
+	0x0145:  []rune{0x0146},                         // Case map
+	0x0147:  []rune{0x0148},                         // Case map
+	0x0149:  []rune{0x02BC, 0x006E},                 // Case map
+	0x014A:  []rune{0x014B},                         // Case map
+	0x014C:  []rune{0x014D},                         // Case map
+	0x014E:  []rune{0x014F},                         // Case map
+	0x0150:  []rune{0x0151},                         // Case map
+	0x0152:  []rune{0x0153},                         // Case map
+	0x0154:  []rune{0x0155},                         // Case map
+	0x0156:  []rune{0x0157},                         // Case map
+	0x0158:  []rune{0x0159},                         // Case map
+	0x015A:  []rune{0x015B},                         // Case map
+	0x015C:  []rune{0x015D},                         // Case map
+	0x015E:  []rune{0x015F},                         // Case map
+	0x0160:  []rune{0x0161},                         // Case map
+	0x0162:  []rune{0x0163},                         // Case map
+	0x0164:  []rune{0x0165},                         // Case map
+	0x0166:  []rune{0x0167},                         // Case map
+	0x0168:  []rune{0x0169},                         // Case map
+	0x016A:  []rune{0x016B},                         // Case map
+	0x016C:  []rune{0x016D},                         // Case map
+	0x016E:  []rune{0x016F},                         // Case map
+	0x0170:  []rune{0x0171},                         // Case map
+	0x0172:  []rune{0x0173},                         // Case map
+	0x0174:  []rune{0x0175},                         // Case map
+	0x0176:  []rune{0x0177},                         // Case map
+	0x0178:  []rune{0x00FF},                         // Case map
+	0x0179:  []rune{0x017A},                         // Case map
+	0x017B:  []rune{0x017C},                         // Case map
+	0x017D:  []rune{0x017E},                         // Case map
+	0x017F:  []rune{0x0073},                         // Case map
+	0x0181:  []rune{0x0253},                         // Case map
+	0x0182:  []rune{0x0183},                         // Case map
+	0x0184:  []rune{0x0185},                         // Case map
+	0x0186:  []rune{0x0254},                         // Case map
+	0x0187:  []rune{0x0188},                         // Case map
+	0x0189:  []rune{0x0256},                         // Case map
+	0x018A:  []rune{0x0257},                         // Case map
+	0x018B:  []rune{0x018C},                         // Case map
+	0x018E:  []rune{0x01DD},                         // Case map
+	0x018F:  []rune{0x0259},                         // Case map
+	0x0190:  []rune{0x025B},                         // Case map
+	0x0191:  []rune{0x0192},                         // Case map
+	0x0193:  []rune{0x0260},                         // Case map
+	0x0194:  []rune{0x0263},                         // Case map
+	0x0196:  []rune{0x0269},                         // Case map
+	0x0197:  []rune{0x0268},                         // Case map
+	0x0198:  []rune{0x0199},                         // Case map
+	0x019C:  []rune{0x026F},                         // Case map
+	0x019D:  []rune{0x0272},                         // Case map
+	0x019F:  []rune{0x0275},                         // Case map
+	0x01A0:  []rune{0x01A1},                         // Case map
+	0x01A2:  []rune{0x01A3},                         // Case map
+	0x01A4:  []rune{0x01A5},                         // Case map
+	0x01A6:  []rune{0x0280},                         // Case map
+	0x01A7:  []rune{0x01A8},                         // Case map
+	0x01A9:  []rune{0x0283},                         // Case map
+	0x01AC:  []rune{0x01AD},                         // Case map
+	0x01AE:  []rune{0x0288},                         // Case map
+	0x01AF:  []rune{0x01B0},                         // Case map
+	0x01B1:  []rune{0x028A},                         // Case map
+	0x01B2:  []rune{0x028B},                         // Case map
+	0x01B3:  []rune{0x01B4},                         // Case map
+	0x01B5:  []rune{0x01B6},                         // Case map
+	0x01B7:  []rune{0x0292},                         // Case map
+	0x01B8:  []rune{0x01B9},                         // Case map
+	0x01BC:  []rune{0x01BD},                         // Case map
+	0x01C4:  []rune{0x01C6},                         // Case map
+	0x01C5:  []rune{0x01C6},                         // Case map
+	0x01C7:  []rune{0x01C9},                         // Case map
+	0x01C8:  []rune{0x01C9},                         // Case map
+	0x01CA:  []rune{0x01CC},                         // Case map
+	0x01CB:  []rune{0x01CC},                         // Case map
+	0x01CD:  []rune{0x01CE},                         // Case map
+	0x01CF:  []rune{0x01D0},                         // Case map
+	0x01D1:  []rune{0x01D2},                         // Case map
+	0x01D3:  []rune{0x01D4},                         // Case map
+	0x01D5:  []rune{0x01D6},                         // Case map
+	0x01D7:  []rune{0x01D8},                         // Case map
+	0x01D9:  []rune{0x01DA},                         // Case map
+	0x01DB:  []rune{0x01DC},                         // Case map
+	0x01DE:  []rune{0x01DF},                         // Case map
+	0x01E0:  []rune{0x01E1},                         // Case map
+	0x01E2:  []rune{0x01E3},                         // Case map
+	0x01E4:  []rune{0x01E5},                         // Case map
+	0x01E6:  []rune{0x01E7},                         // Case map
+	0x01E8:  []rune{0x01E9},                         // Case map
+	0x01EA:  []rune{0x01EB},                         // Case map
+	0x01EC:  []rune{0x01ED},                         // Case map
+	0x01EE:  []rune{0x01EF},                         // Case map
+	0x01F0:  []rune{0x006A, 0x030C},                 // Case map
+	0x01F1:  []rune{0x01F3},                         // Case map
+	0x01F2:  []rune{0x01F3},                         // Case map
+	0x01F4:  []rune{0x01F5},                         // Case map
+	0x01F6:  []rune{0x0195},                         // Case map
+	0x01F7:  []rune{0x01BF},                         // Case map
+	0x01F8:  []rune{0x01F9},                         // Case map
+	0x01FA:  []rune{0x01FB},                         // Case map
+	0x01FC:  []rune{0x01FD},                         // Case map
+	0x01FE:  []rune{0x01FF},                         // Case map
+	0x0200:  []rune{0x0201},                         // Case map
+	0x0202:  []rune{0x0203},                         // Case map
+	0x0204:  []rune{0x0205},                         // Case map
+	0x0206:  []rune{0x0207},                         // Case map
+	0x0208:  []rune{0x0209},                         // Case map
+	0x020A:  []rune{0x020B},                         // Case map
+	0x020C:  []rune{0x020D},                         // Case map
+	0x020E:  []rune{0x020F},                         // Case map
+	0x0210:  []rune{0x0211},                         // Case map
+	0x0212:  []rune{0x0213},                         // Case map
+	0x0214:  []rune{0x0215},                         // Case map
+	0x0216:  []rune{0x0217},                         // Case map
+	0x0218:  []rune{0x0219},                         // Case map
+	0x021A:  []rune{0x021B},                         // Case map
+	0x021C:  []rune{0x021D},                         // Case map
+	0x021E:  []rune{0x021F},                         // Case map
+	0x0220:  []rune{0x019E},                         // Case map
+	0x0222:  []rune{0x0223},                         // Case map
+	0x0224:  []rune{0x0225},                         // Case map
+	0x0226:  []rune{0x0227},                         // Case map
+	0x0228:  []rune{0x0229},                         // Case map
+	0x022A:  []rune{0x022B},                         // Case map
+	0x022C:  []rune{0x022D},                         // Case map
+	0x022E:  []rune{0x022F},                         // Case map
+	0x0230:  []rune{0x0231},                         // Case map
+	0x0232:  []rune{0x0233},                         // Case map
+	0x0345:  []rune{0x03B9},                         // Case map
+	0x037A:  []rune{0x0020, 0x03B9},                 // Additional folding
+	0x0386:  []rune{0x03AC},                         // Case map
+	0x0388:  []rune{0x03AD},                         // Case map
+	0x0389:  []rune{0x03AE},                         // Case map
+	0x038A:  []rune{0x03AF},                         // Case map
+	0x038C:  []rune{0x03CC},                         // Case map
+	0x038E:  []rune{0x03CD},                         // Case map
+	0x038F:  []rune{0x03CE},                         // Case map
+	0x0390:  []rune{0x03B9, 0x0308, 0x0301},         // Case map
+	0x0391:  []rune{0x03B1},                         // Case map
+	0x0392:  []rune{0x03B2},                         // Case map
+	0x0393:  []rune{0x03B3},                         // Case map
+	0x0394:  []rune{0x03B4},                         // Case map
+	0x0395:  []rune{0x03B5},                         // Case map
+	0x0396:  []rune{0x03B6},                         // Case map
+	0x0397:  []rune{0x03B7},                         // Case map
+	0x0398:  []rune{0x03B8},                         // Case map
+	0x0399:  []rune{0x03B9},                         // Case map
+	0x039A:  []rune{0x03BA},                         // Case map
+	0x039B:  []rune{0x03BB},                         // Case map
+	0x039C:  []rune{0x03BC},                         // Case map
+	0x039D:  []rune{0x03BD},                         // Case map
+	0x039E:  []rune{0x03BE},                         // Case map
+	0x039F:  []rune{0x03BF},                         // Case map
+	0x03A0:  []rune{0x03C0},                         // Case map
+	0x03A1:  []rune{0x03C1},                         // Case map
+	0x03A3:  []rune{0x03C3},                         // Case map
+	0x03A4:  []rune{0x03C4},                         // Case map
+	0x03A5:  []rune{0x03C5},                         // Case map
+	0x03A6:  []rune{0x03C6},                         // Case map
+	0x03A7:  []rune{0x03C7},                         // Case map
+	0x03A8:  []rune{0x03C8},                         // Case map
+	0x03A9:  []rune{0x03C9},                         // Case map
+	0x03AA:  []rune{0x03CA},                         // Case map
+	0x03AB:  []rune{0x03CB},                         // Case map
+	0x03B0:  []rune{0x03C5, 0x0308, 0x0301},         // Case map
+	0x03C2:  []rune{0x03C3},                         // Case map
+	0x03D0:  []rune{0x03B2},                         // Case map
+	0x03D1:  []rune{0x03B8},                         // Case map
+	0x03D2:  []rune{0x03C5},                         // Additional folding
+	0x03D3:  []rune{0x03CD},                         // Additional folding
+	0x03D4:  []rune{0x03CB},                         // Additional folding
+	0x03D5:  []rune{0x03C6},                         // Case map
+	0x03D6:  []rune{0x03C0},                         // Case map
+	0x03D8:  []rune{0x03D9},                         // Case map
+	0x03DA:  []rune{0x03DB},                         // Case map
+	0x03DC:  []rune{0x03DD},                         // Case map
+	0x03DE:  []rune{0x03DF},                         // Case map
+	0x03E0:  []rune{0x03E1},                         // Case map
+	0x03E2:  []rune{0x03E3},                         // Case map
+	0x03E4:  []rune{0x03E5},                         // Case map
+	0x03E6:  []rune{0x03E7},                         // Case map
+	0x03E8:  []rune{0x03E9},                         // Case map
+	0x03EA:  []rune{0x03EB},                         // Case map
+	0x03EC:  []rune{0x03ED},                         // Case map
+	0x03EE:  []rune{0x03EF},                         // Case map
+	0x03F0:  []rune{0x03BA},                         // Case map
+	0x03F1:  []rune{0x03C1},                         // Case map
+	0x03F2:  []rune{0x03C3},                         // Case map
+	0x03F4:  []rune{0x03B8},                         // Case map
+	0x03F5:  []rune{0x03B5},                         // Case map
+	0x0400:  []rune{0x0450},                         // Case map
+	0x0401:  []rune{0x0451},                         // Case map
+	0x0402:  []rune{0x0452},                         // Case map
+	0x0403:  []rune{0x0453},                         // Case map
+	0x0404:  []rune{0x0454},                         // Case map
+	0x0405:  []rune{0x0455},                         // Case map
+	0x0406:  []rune{0x0456},                         // Case map
+	0x0407:  []rune{0x0457},                         // Case map
+	0x0408:  []rune{0x0458},                         // Case map
+	0x0409:  []rune{0x0459},                         // Case map
+	0x040A:  []rune{0x045A},                         // Case map
+	0x040B:  []rune{0x045B},                         // Case map
+	0x040C:  []rune{0x045C},                         // Case map
+	0x040D:  []rune{0x045D},                         // Case map
+	0x040E:  []rune{0x045E},                         // Case map
+	0x040F:  []rune{0x045F},                         // Case map
+	0x0410:  []rune{0x0430},                         // Case map
+	0x0411:  []rune{0x0431},                         // Case map
+	0x0412:  []rune{0x0432},                         // Case map
+	0x0413:  []rune{0x0433},                         // Case map
+	0x0414:  []rune{0x0434},                         // Case map
+	0x0415:  []rune{0x0435},                         // Case map
+	0x0416:  []rune{0x0436},                         // Case map
+	0x0417:  []rune{0x0437},                         // Case map
+	0x0418:  []rune{0x0438},                         // Case map
+	0x0419:  []rune{0x0439},                         // Case map
+	0x041A:  []rune{0x043A},                         // Case map
+	0x041B:  []rune{0x043B},                         // Case map
+	0x041C:  []rune{0x043C},                         // Case map
+	0x041D:  []rune{0x043D},                         // Case map
+	0x041E:  []rune{0x043E},                         // Case map
+	0x041F:  []rune{0x043F},                         // Case map
+	0x0420:  []rune{0x0440},                         // Case map
+	0x0421:  []rune{0x0441},                         // Case map
+	0x0422:  []rune{0x0442},                         // Case map
+	0x0423:  []rune{0x0443},                         // Case map
+	0x0424:  []rune{0x0444},                         // Case map
+	0x0425:  []rune{0x0445},                         // Case map
+	0x0426:  []rune{0x0446},                         // Case map
+	0x0427:  []rune{0x0447},                         // Case map
+	0x0428:  []rune{0x0448},                         // Case map
+	0x0429:  []rune{0x0449},                         // Case map
+	0x042A:  []rune{0x044A},                         // Case map
+	0x042B:  []rune{0x044B},                         // Case map
+	0x042C:  []rune{0x044C},                         // Case map
+	0x042D:  []rune{0x044D},                         // Case map
+	0x042E:  []rune{0x044E},                         // Case map
+	0x042F:  []rune{0x044F},                         // Case map
+	0x0460:  []rune{0x0461},                         // Case map
+	0x0462:  []rune{0x0463},                         // Case map
+	0x0464:  []rune{0x0465},                         // Case map
+	0x0466:  []rune{0x0467},                         // Case map
+	0x0468:  []rune{0x0469},                         // Case map
+	0x046A:  []rune{0x046B},                         // Case map
+	0x046C:  []rune{0x046D},                         // Case map
+	0x046E:  []rune{0x046F},                         // Case map
+	0x0470:  []rune{0x0471},                         // Case map
+	0x0472:  []rune{0x0473},                         // Case map
+	0x0474:  []rune{0x0475},                         // Case map
+	0x0476:  []rune{0x0477},                         // Case map
+	0x0478:  []rune{0x0479},                         // Case map
+	0x047A:  []rune{0x047B},                         // Case map
+	0x047C:  []rune{0x047D},                         // Case map
+	0x047E:  []rune{0x047F},                         // Case map
+	0x0480:  []rune{0x0481},                         // Case map
+	0x048A:  []rune{0x048B},                         // Case map
+	0x048C:  []rune{0x048D},                         // Case map
+	0x048E:  []rune{0x048F},                         // Case map
+	0x0490:  []rune{0x0491},                         // Case map
+	0x0492:  []rune{0x0493},                         // Case map
+	0x0494:  []rune{0x0495},                         // Case map
+	0x0496:  []rune{0x0497},                         // Case map
+	0x0498:  []rune{0x0499},                         // Case map
+	0x049A:  []rune{0x049B},                         // Case map
+	0x049C:  []rune{0x049D},                         // Case map
+	0x049E:  []rune{0x049F},                         // Case map
+	0x04A0:  []rune{0x04A1},                         // Case map
+	0x04A2:  []rune{0x04A3},                         // Case map
+	0x04A4:  []rune{0x04A5},                         // Case map
+	0x04A6:  []rune{0x04A7},                         // Case map
+	0x04A8:  []rune{0x04A9},                         // Case map
+	0x04AA:  []rune{0x04AB},                         // Case map
+	0x04AC:  []rune{0x04AD},                         // Case map
+	0x04AE:  []rune{0x04AF},                         // Case map
+	0x04B0:  []rune{0x04B1},                         // Case map
+	0x04B2:  []rune{0x04B3},                         // Case map
+	0x04B4:  []rune{0x04B5},                         // Case map
+	0x04B6:  []rune{0x04B7},                         // Case map
+	0x04B8:  []rune{0x04B9},                         // Case map
+	0x04BA:  []rune{0x04BB},                         // Case map
+	0x04BC:  []rune{0x04BD},                         // Case map
+	0x04BE:  []rune{0x04BF},                         // Case map
+	0x04C1:  []rune{0x04C2},                         // Case map
+	0x04C3:  []rune{0x04C4},                         // Case map
+	0x04C5:  []rune{0x04C6},                         // Case map
+	0x04C7:  []rune{0x04C8},                         // Case map
+	0x04C9:  []rune{0x04CA},                         // Case map
+	0x04CB:  []rune{0x04CC},                         // Case map
+	0x04CD:  []rune{0x04CE},                         // Case map
+	0x04D0:  []rune{0x04D1},                         // Case map
+	0x04D2:  []rune{0x04D3},                         // Case map
+	0x04D4:  []rune{0x04D5},                         // Case map
+	0x04D6:  []rune{0x04D7},                         // Case map
+	0x04D8:  []rune{0x04D9},                         // Case map
+	0x04DA:  []rune{0x04DB},                         // Case map
+	0x04DC:  []rune{0x04DD},                         // Case map
+	0x04DE:  []rune{0x04DF},                         // Case map
+	0x04E0:  []rune{0x04E1},                         // Case map
+	0x04E2:  []rune{0x04E3},                         // Case map
+	0x04E4:  []rune{0x04E5},                         // Case map
+	0x04E6:  []rune{0x04E7},                         // Case map
+	0x04E8:  []rune{0x04E9},                         // Case map
+	0x04EA:  []rune{0x04EB},                         // Case map
+	0x04EC:  []rune{0x04ED},                         // Case map
+	0x04EE:  []rune{0x04EF},                         // Case map
+	0x04F0:  []rune{0x04F1},                         // Case map
+	0x04F2:  []rune{0x04F3},                         // Case map
+	0x04F4:  []rune{0x04F5},                         // Case map
+	0x04F8:  []rune{0x04F9},                         // Case map
+	0x0500:  []rune{0x0501},                         // Case map
+	0x0502:  []rune{0x0503},                         // Case map
+	0x0504:  []rune{0x0505},                         // Case map
+	0x0506:  []rune{0x0507},                         // Case map
+	0x0508:  []rune{0x0509},                         // Case map
+	0x050A:  []rune{0x050B},                         // Case map
+	0x050C:  []rune{0x050D},                         // Case map
+	0x050E:  []rune{0x050F},                         // Case map
+	0x0531:  []rune{0x0561},                         // Case map
+	0x0532:  []rune{0x0562},                         // Case map
+	0x0533:  []rune{0x0563},                         // Case map
+	0x0534:  []rune{0x0564},                         // Case map
+	0x0535:  []rune{0x0565},                         // Case map
+	0x0536:  []rune{0x0566},                         // Case map
+	0x0537:  []rune{0x0567},                         // Case map
+	0x0538:  []rune{0x0568},                         // Case map
+	0x0539:  []rune{0x0569},                         // Case map
+	0x053A:  []rune{0x056A},                         // Case map
+	0x053B:  []rune{0x056B},                         // Case map
+	0x053C:  []rune{0x056C},                         // Case map
+	0x053D:  []rune{0x056D},                         // Case map
+	0x053E:  []rune{0x056E},                         // Case map
+	0x053F:  []rune{0x056F},                         // Case map
+	0x0540:  []rune{0x0570},                         // Case map
+	0x0541:  []rune{0x0571},                         // Case map
+	0x0542:  []rune{0x0572},                         // Case map
+	0x0543:  []rune{0x0573},                         // Case map
+	0x0544:  []rune{0x0574},                         // Case map
+	0x0545:  []rune{0x0575},                         // Case map
+	0x0546:  []rune{0x0576},                         // Case map
+	0x0547:  []rune{0x0577},                         // Case map
+	0x0548:  []rune{0x0578},                         // Case map
+	0x0549:  []rune{0x0579},                         // Case map
+	0x054A:  []rune{0x057A},                         // Case map
+	0x054B:  []rune{0x057B},                         // Case map
+	0x054C:  []rune{0x057C},                         // Case map
+	0x054D:  []rune{0x057D},                         // Case map
+	0x054E:  []rune{0x057E},                         // Case map
+	0x054F:  []rune{0x057F},                         // Case map
+	0x0550:  []rune{0x0580},                         // Case map
+	0x0551:  []rune{0x0581},                         // Case map
+	0x0552:  []rune{0x0582},                         // Case map
+	0x0553:  []rune{0x0583},                         // Case map
+	0x0554:  []rune{0x0584},                         // Case map
+	0x0555:  []rune{0x0585},                         // Case map
+	0x0556:  []rune{0x0586},                         // Case map
+	0x0587:  []rune{0x0565, 0x0582},                 // Case map
+	0x1E00:  []rune{0x1E01},                         // Case map
+	0x1E02:  []rune{0x1E03},                         // Case map
+	0x1E04:  []rune{0x1E05},                         // Case map
+	0x1E06:  []rune{0x1E07},                         // Case map
+	0x1E08:  []rune{0x1E09},                         // Case map
+	0x1E0A:  []rune{0x1E0B},                         // Case map
+	0x1E0C:  []rune{0x1E0D},                         // Case map
+	0x1E0E:  []rune{0x1E0F},                         // Case map
+	0x1E10:  []rune{0x1E11},                         // Case map
+	0x1E12:  []rune{0x1E13},                         // Case map
+	0x1E14:  []rune{0x1E15},                         // Case map
+	0x1E16:  []rune{0x1E17},                         // Case map
+	0x1E18:  []rune{0x1E19},                         // Case map
+	0x1E1A:  []rune{0x1E1B},                         // Case map
+	0x1E1C:  []rune{0x1E1D},                         // Case map
+	0x1E1E:  []rune{0x1E1F},                         // Case map
+	0x1E20:  []rune{0x1E21},                         // Case map
+	0x1E22:  []rune{0x1E23},                         // Case map
+	0x1E24:  []rune{0x1E25},                         // Case map
+	0x1E26:  []rune{0x1E27},                         // Case map
+	0x1E28:  []rune{0x1E29},                         // Case map
+	0x1E2A:  []rune{0x1E2B},                         // Case map
+	0x1E2C:  []rune{0x1E2D},                         // Case map
+	0x1E2E:  []rune{0x1E2F},                         // Case map
+	0x1E30:  []rune{0x1E31},                         // Case map
+	0x1E32:  []rune{0x1E33},                         // Case map
+	0x1E34:  []rune{0x1E35},                         // Case map
+	0x1E36:  []rune{0x1E37},                         // Case map
+	0x1E38:  []rune{0x1E39},                         // Case map
+	0x1E3A:  []rune{0x1E3B},                         // Case map
+	0x1E3C:  []rune{0x1E3D},                         // Case map
+	0x1E3E:  []rune{0x1E3F},                         // Case map
+	0x1E40:  []rune{0x1E41},                         // Case map
+	0x1E42:  []rune{0x1E43},                         // Case map
+	0x1E44:  []rune{0x1E45},                         // Case map
+	0x1E46:  []rune{0x1E47},                         // Case map
+	0x1E48:  []rune{0x1E49},                         // Case map
+	0x1E4A:  []rune{0x1E4B},                         // Case map
+	0x1E4C:  []rune{0x1E4D},                         // Case map
+	0x1E4E:  []rune{0x1E4F},                         // Case map
+	0x1E50:  []rune{0x1E51},                         // Case map
+	0x1E52:  []rune{0x1E53},                         // Case map
+	0x1E54:  []rune{0x1E55},                         // Case map
+	0x1E56:  []rune{0x1E57},                         // Case map
+	0x1E58:  []rune{0x1E59},                         // Case map
+	0x1E5A:  []rune{0x1E5B},                         // Case map
+	0x1E5C:  []rune{0x1E5D},                         // Case map
+	0x1E5E:  []rune{0x1E5F},                         // Case map
+	0x1E60:  []rune{0x1E61},                         // Case map
+	0x1E62:  []rune{0x1E63},                         // Case map
+	0x1E64:  []rune{0x1E65},                         // Case map
+	0x1E66:  []rune{0x1E67},                         // Case map
+	0x1E68:  []rune{0x1E69},                         // Case map
+	0x1E6A:  []rune{0x1E6B},                         // Case map
+	0x1E6C:  []rune{0x1E6D},                         // Case map
+	0x1E6E:  []rune{0x1E6F},                         // Case map
+	0x1E70:  []rune{0x1E71},                         // Case map
+	0x1E72:  []rune{0x1E73},                         // Case map
+	0x1E74:  []rune{0x1E75},                         // Case map
+	0x1E76:  []rune{0x1E77},                         // Case map
+	0x1E78:  []rune{0x1E79},                         // Case map
+	0x1E7A:  []rune{0x1E7B},                         // Case map
+	0x1E7C:  []rune{0x1E7D},                         // Case map
+	0x1E7E:  []rune{0x1E7F},                         // Case map
+	0x1E80:  []rune{0x1E81},                         // Case map
+	0x1E82:  []rune{0x1E83},                         // Case map
+	0x1E84:  []rune{0x1E85},                         // Case map
+	0x1E86:  []rune{0x1E87},                         // Case map
+	0x1E88:  []rune{0x1E89},                         // Case map
+	0x1E8A:  []rune{0x1E8B},                         // Case map
+	0x1E8C:  []rune{0x1E8D},                         // Case map
+	0x1E8E:  []rune{0x1E8F},                         // Case map
+	0x1E90:  []rune{0x1E91},                         // Case map
+	0x1E92:  []rune{0x1E93},                         // Case map
+	0x1E94:  []rune{0x1E95},                         // Case map
+	0x1E96:  []rune{0x0068, 0x0331},                 // Case map
+	0x1E97:  []rune{0x0074, 0x0308},                 // Case map
+	0x1E98:  []rune{0x0077, 0x030A},                 // Case map
+	0x1E99:  []rune{0x0079, 0x030A},                 // Case map
+	0x1E9A:  []rune{0x0061, 0x02BE},                 // Case map
+	0x1E9B:  []rune{0x1E61},                         // Case map
+	0x1EA0:  []rune{0x1EA1},                         // Case map
+	0x1EA2:  []rune{0x1EA3},                         // Case map
+	0x1EA4:  []rune{0x1EA5},                         // Case map
+	0x1EA6:  []rune{0x1EA7},                         // Case map
+	0x1EA8:  []rune{0x1EA9},                         // Case map
+	0x1EAA:  []rune{0x1EAB},                         // Case map
+	0x1EAC:  []rune{0x1EAD},                         // Case map
+	0x1EAE:  []rune{0x1EAF},                         // Case map
+	0x1EB0:  []rune{0x1EB1},                         // Case map
+	0x1EB2:  []rune{0x1EB3},                         // Case map
+	0x1EB4:  []rune{0x1EB5},                         // Case map
+	0x1EB6:  []rune{0x1EB7},                         // Case map
+	0x1EB8:  []rune{0x1EB9},                         // Case map
+	0x1EBA:  []rune{0x1EBB},                         // Case map
+	0x1EBC:  []rune{0x1EBD},                         // Case map
+	0x1EBE:  []rune{0x1EBF},                         // Case map
+	0x1EC0:  []rune{0x1EC1},                         // Case map
+	0x1EC2:  []rune{0x1EC3},                         // Case map
+	0x1EC4:  []rune{0x1EC5},                         // Case map
+	0x1EC6:  []rune{0x1EC7},                         // Case map
+	0x1EC8:  []rune{0x1EC9},                         // Case map
+	0x1ECA:  []rune{0x1ECB},                         // Case map
+	0x1ECC:  []rune{0x1ECD},                         // Case map
+	0x1ECE:  []rune{0x1ECF},                         // Case map
+	0x1ED0:  []rune{0x1ED1},                         // Case map
+	0x1ED2:  []rune{0x1ED3},                         // Case map
+	0x1ED4:  []rune{0x1ED5},                         // Case map
+	0x1ED6:  []rune{0x1ED7},                         // Case map
+	0x1ED8:  []rune{0x1ED9},                         // Case map
+	0x1EDA:  []rune{0x1EDB},                         // Case map
+	0x1EDC:  []rune{0x1EDD},                         // Case map
+	0x1EDE:  []rune{0x1EDF},                         // Case map
+	0x1EE0:  []rune{0x1EE1},                         // Case map
+	0x1EE2:  []rune{0x1EE3},                         // Case map
+	0x1EE4:  []rune{0x1EE5},                         // Case map
+	0x1EE6:  []rune{0x1EE7},                         // Case map
+	0x1EE8:  []rune{0x1EE9},                         // Case map
+	0x1EEA:  []rune{0x1EEB},                         // Case map
+	0x1EEC:  []rune{0x1EED},                         // Case map
+	0x1EEE:  []rune{0x1EEF},                         // Case map
+	0x1EF0:  []rune{0x1EF1},                         // Case map
+	0x1EF2:  []rune{0x1EF3},                         // Case map
+	0x1EF4:  []rune{0x1EF5},                         // Case map
+	0x1EF6:  []rune{0x1EF7},                         // Case map
+	0x1EF8:  []rune{0x1EF9},                         // Case map
+	0x1F08:  []rune{0x1F00},                         // Case map
+	0x1F09:  []rune{0x1F01},                         // Case map
+	0x1F0A:  []rune{0x1F02},                         // Case map
+	0x1F0B:  []rune{0x1F03},                         // Case map
+	0x1F0C:  []rune{0x1F04},                         // Case map
+	0x1F0D:  []rune{0x1F05},                         // Case map
+	0x1F0E:  []rune{0x1F06},                         // Case map
+	0x1F0F:  []rune{0x1F07},                         // Case map
+	0x1F18:  []rune{0x1F10},                         // Case map
+	0x1F19:  []rune{0x1F11},                         // Case map
+	0x1F1A:  []rune{0x1F12},                         // Case map
+	0x1F1B:  []rune{0x1F13},                         // Case map
+	0x1F1C:  []rune{0x1F14},                         // Case map
+	0x1F1D:  []rune{0x1F15},                         // Case map
+	0x1F28:  []rune{0x1F20},                         // Case map
+	0x1F29:  []rune{0x1F21},                         // Case map
+	0x1F2A:  []rune{0x1F22},                         // Case map
+	0x1F2B:  []rune{0x1F23},                         // Case map
+	0x1F2C:  []rune{0x1F24},                         // Case map
+	0x1F2D:  []rune{0x1F25},                         // Case map
+	0x1F2E:  []rune{0x1F26},                         // Case map
+	0x1F2F:  []rune{0x1F27},                         // Case map
+	0x1F38:  []rune{0x1F30},                         // Case map
+	0x1F39:  []rune{0x1F31},                         // Case map
+	0x1F3A:  []rune{0x1F32},                         // Case map
+	0x1F3B:  []rune{0x1F33},                         // Case map
+	0x1F3C:  []rune{0x1F34},                         // Case map
+	0x1F3D:  []rune{0x1F35},                         // Case map
+	0x1F3E:  []rune{0x1F36},                         // Case map
+	0x1F3F:  []rune{0x1F37},                         // Case map
+	0x1F48:  []rune{0x1F40},                         // Case map
+	0x1F49:  []rune{0x1F41},                         // Case map
+	0x1F4A:  []rune{0x1F42},                         // Case map
+	0x1F4B:  []rune{0x1F43},                         // Case map
+	0x1F4C:  []rune{0x1F44},                         // Case map
+	0x1F4D:  []rune{0x1F45},                         // Case map
+	0x1F50:  []rune{0x03C5, 0x0313},                 // Case map
+	0x1F52:  []rune{0x03C5, 0x0313, 0x0300},         // Case map
+	0x1F54:  []rune{0x03C5, 0x0313, 0x0301},         // Case map
+	0x1F56:  []rune{0x03C5, 0x0313, 0x0342},         // Case map
+	0x1F59:  []rune{0x1F51},                         // Case map
+	0x1F5B:  []rune{0x1F53},                         // Case map
+	0x1F5D:  []rune{0x1F55},                         // Case map
+	0x1F5F:  []rune{0x1F57},                         // Case map
+	0x1F68:  []rune{0x1F60},                         // Case map
+	0x1F69:  []rune{0x1F61},                         // Case map
+	0x1F6A:  []rune{0x1F62},                         // Case map
+	0x1F6B:  []rune{0x1F63},                         // Case map
+	0x1F6C:  []rune{0x1F64},                         // Case map
+	0x1F6D:  []rune{0x1F65},                         // Case map
+	0x1F6E:  []rune{0x1F66},                         // Case map
+	0x1F6F:  []rune{0x1F67},                         // Case map
+	0x1F80:  []rune{0x1F00, 0x03B9},                 // Case map
+	0x1F81:  []rune{0x1F01, 0x03B9},                 // Case map
+	0x1F82:  []rune{0x1F02, 0x03B9},                 // Case map
+	0x1F83:  []rune{0x1F03, 0x03B9},                 // Case map
+	0x1F84:  []rune{0x1F04, 0x03B9},                 // Case map
+	0x1F85:  []rune{0x1F05, 0x03B9},                 // Case map
+	0x1F86:  []rune{0x1F06, 0x03B9},                 // Case map
+	0x1F87:  []rune{0x1F07, 0x03B9},                 // Case map
+	0x1F88:  []rune{0x1F00, 0x03B9},                 // Case map
+	0x1F89:  []rune{0x1F01, 0x03B9},                 // Case map
+	0x1F8A:  []rune{0x1F02, 0x03B9},                 // Case map
+	0x1F8B:  []rune{0x1F03, 0x03B9},                 // Case map
+	0x1F8C:  []rune{0x1F04, 0x03B9},                 // Case map
+	0x1F8D:  []rune{0x1F05, 0x03B9},                 // Case map
+	0x1F8E:  []rune{0x1F06, 0x03B9},                 // Case map
+	0x1F8F:  []rune{0x1F07, 0x03B9},                 // Case map
+	0x1F90:  []rune{0x1F20, 0x03B9},                 // Case map
+	0x1F91:  []rune{0x1F21, 0x03B9},                 // Case map
+	0x1F92:  []rune{0x1F22, 0x03B9},                 // Case map
+	0x1F93:  []rune{0x1F23, 0x03B9},                 // Case map
+	0x1F94:  []rune{0x1F24, 0x03B9},                 // Case map
+	0x1F95:  []rune{0x1F25, 0x03B9},                 // Case map
+	0x1F96:  []rune{0x1F26, 0x03B9},                 // Case map
+	0x1F97:  []rune{0x1F27, 0x03B9},                 // Case map
+	0x1F98:  []rune{0x1F20, 0x03B9},                 // Case map
+	0x1F99:  []rune{0x1F21, 0x03B9},                 // Case map
+	0x1F9A:  []rune{0x1F22, 0x03B9},                 // Case map
+	0x1F9B:  []rune{0x1F23, 0x03B9},                 // Case map
+	0x1F9C:  []rune{0x1F24, 0x03B9},                 // Case map
+	0x1F9D:  []rune{0x1F25, 0x03B9},                 // Case map
+	0x1F9E:  []rune{0x1F26, 0x03B9},                 // Case map
+	0x1F9F:  []rune{0x1F27, 0x03B9},                 // Case map
+	0x1FA0:  []rune{0x1F60, 0x03B9},                 // Case map
+	0x1FA1:  []rune{0x1F61, 0x03B9},                 // Case map
+	0x1FA2:  []rune{0x1F62, 0x03B9},                 // Case map
+	0x1FA3:  []rune{0x1F63, 0x03B9},                 // Case map
+	0x1FA4:  []rune{0x1F64, 0x03B9},                 // Case map
+	0x1FA5:  []rune{0x1F65, 0x03B9},                 // Case map
+	0x1FA6:  []rune{0x1F66, 0x03B9},                 // Case map
+	0x1FA7:  []rune{0x1F67, 0x03B9},                 // Case map
+	0x1FA8:  []rune{0x1F60, 0x03B9},                 // Case map
+	0x1FA9:  []rune{0x1F61, 0x03B9},                 // Case map
+	0x1FAA:  []rune{0x1F62, 0x03B9},                 // Case map
+	0x1FAB:  []rune{0x1F63, 0x03B9},                 // Case map
+	0x1FAC:  []rune{0x1F64, 0x03B9},                 // Case map
+	0x1FAD:  []rune{0x1F65, 0x03B9},                 // Case map
+	0x1FAE:  []rune{0x1F66, 0x03B9},                 // Case map
+	0x1FAF:  []rune{0x1F67, 0x03B9},                 // Case map
+	0x1FB2:  []rune{0x1F70, 0x03B9},                 // Case map
+	0x1FB3:  []rune{0x03B1, 0x03B9},                 // Case map
+	0x1FB4:  []rune{0x03AC, 0x03B9},                 // Case map
+	0x1FB6:  []rune{0x03B1, 0x0342},                 // Case map
+	0x1FB7:  []rune{0x03B1, 0x0342, 0x03B9},         // Case map
+	0x1FB8:  []rune{0x1FB0},                         // Case map
+	0x1FB9:  []rune{0x1FB1},                         // Case map
+	0x1FBA:  []rune{0x1F70},                         // Case map
+	0x1FBB:  []rune{0x1F71},                         // Case map
+	0x1FBC:  []rune{0x03B1, 0x03B9},                 // Case map
+	0x1FBE:  []rune{0x03B9},                         // Case map
+	0x1FC2:  []rune{0x1F74, 0x03B9},                 // Case map
+	0x1FC3:  []rune{0x03B7, 0x03B9},                 // Case map
+	0x1FC4:  []rune{0x03AE, 0x03B9},                 // Case map
+	0x1FC6:  []rune{0x03B7, 0x0342},                 // Case map
+	0x1FC7:  []rune{0x03B7, 0x0342, 0x03B9},         // Case map
+	0x1FC8:  []rune{0x1F72},                         // Case map
+	0x1FC9:  []rune{0x1F73},                         // Case map
+	0x1FCA:  []rune{0x1F74},                         // Case map
+	0x1FCB:  []rune{0x1F75},                         // Case map
+	0x1FCC:  []rune{0x03B7, 0x03B9},                 // Case map
+	0x1FD2:  []rune{0x03B9, 0x0308, 0x0300},         // Case map
+	0x1FD3:  []rune{0x03B9, 0x0308, 0x0301},         // Case map
+	0x1FD6:  []rune{0x03B9, 0x0342},                 // Case map
+	0x1FD7:  []rune{0x03B9, 0x0308, 0x0342},         // Case map
+	0x1FD8:  []rune{0x1FD0},                         // Case map
+	0x1FD9:  []rune{0x1FD1},                         // Case map
+	0x1FDA:  []rune{0x1F76},                         // Case map
+	0x1FDB:  []rune{0x1F77},                         // Case map
+	0x1FE2:  []rune{0x03C5, 0x0308, 0x0300},         // Case map
+	0x1FE3:  []rune{0x03C5, 0x0308, 0x0301},         // Case map
+	0x1FE4:  []rune{0x03C1, 0x0313},                 // Case map
+	0x1FE6:  []rune{0x03C5, 0x0342},                 // Case map
+	0x1FE7:  []rune{0x03C5, 0x0308, 0x0342},         // Case map
+	0x1FE8:  []rune{0x1FE0},                         // Case map
+	0x1FE9:  []rune{0x1FE1},                         // Case map
+	0x1FEA:  []rune{0x1F7A},                         // Case map
+	0x1FEB:  []rune{0x1F7B},                         // Case map
+	0x1FEC:  []rune{0x1FE5},                         // Case map
+	0x1FF2:  []rune{0x1F7C, 0x03B9},                 // Case map
+	0x1FF3:  []rune{0x03C9, 0x03B9},                 // Case map
+	0x1FF4:  []rune{0x03CE, 0x03B9},                 // Case map
+	0x1FF6:  []rune{0x03C9, 0x0342},                 // Case map
+	0x1FF7:  []rune{0x03C9, 0x0342, 0x03B9},         // Case map
+	0x1FF8:  []rune{0x1F78},                         // Case map
+	0x1FF9:  []rune{0x1F79},                         // Case map
+	0x1FFA:  []rune{0x1F7C},                         // Case map
+	0x1FFB:  []rune{0x1F7D},                         // Case map
+	0x1FFC:  []rune{0x03C9, 0x03B9},                 // Case map
+	0x20A8:  []rune{0x0072, 0x0073},                 // Additional folding
+	0x2102:  []rune{0x0063},                         // Additional folding
+	0x2103:  []rune{0x00B0, 0x0063},                 // Additional folding
+	0x2107:  []rune{0x025B},                         // Additional folding
+	0x2109:  []rune{0x00B0, 0x0066},                 // Additional folding
+	0x210B:  []rune{0x0068},                         // Additional folding
+	0x210C:  []rune{0x0068},                         // Additional folding
+	0x210D:  []rune{0x0068},                         // Additional folding
+	0x2110:  []rune{0x0069},                         // Additional folding
+	0x2111:  []rune{0x0069},                         // Additional folding
+	0x2112:  []rune{0x006C},                         // Additional folding
+	0x2115:  []rune{0x006E},                         // Additional folding
+	0x2116:  []rune{0x006E, 0x006F},                 // Additional folding
+	0x2119:  []rune{0x0070},                         // Additional folding
+	0x211A:  []rune{0x0071},                         // Additional folding
+	0x211B:  []rune{0x0072},                         // Additional folding
+	0x211C:  []rune{0x0072},                         // Additional folding
+	0x211D:  []rune{0x0072},                         // Additional folding
+	0x2120:  []rune{0x0073, 0x006D},                 // Additional folding
+	0x2121:  []rune{0x0074, 0x0065, 0x006C},         // Additional folding
+	0x2122:  []rune{0x0074, 0x006D},                 // Additional folding
+	0x2124:  []rune{0x007A},                         // Additional folding
+	0x2126:  []rune{0x03C9},                         // Case map
+	0x2128:  []rune{0x007A},                         // Additional folding
+	0x212A:  []rune{0x006B},                         // Case map
+	0x212B:  []rune{0x00E5},                         // Case map
+	0x212C:  []rune{0x0062},                         // Additional folding
+	0x212D:  []rune{0x0063},                         // Additional folding
+	0x2130:  []rune{0x0065},                         // Additional folding
+	0x2131:  []rune{0x0066},                         // Additional folding
+	0x2133:  []rune{0x006D},                         // Additional folding
+	0x213E:  []rune{0x03B3},                         // Additional folding
+	0x213F:  []rune{0x03C0},                         // Additional folding
+	0x2145:  []rune{0x0064},                         // Additional folding
+	0x2160:  []rune{0x2170},                         // Case map
+	0x2161:  []rune{0x2171},                         // Case map
+	0x2162:  []rune{0x2172},                         // Case map
+	0x2163:  []rune{0x2173},                         // Case map
+	0x2164:  []rune{0x2174},                         // Case map
+	0x2165:  []rune{0x2175},                         // Case map
+	0x2166:  []rune{0x2176},                         // Case map
+	0x2167:  []rune{0x2177},                         // Case map
+	0x2168:  []rune{0x2178},                         // Case map
+	0x2169:  []rune{0x2179},                         // Case map
+	0x216A:  []rune{0x217A},                         // Case map
+	0x216B:  []rune{0x217B},                         // Case map
+	0x216C:  []rune{0x217C},                         // Case map
+	0x216D:  []rune{0x217D},                         // Case map
+	0x216E:  []rune{0x217E},                         // Case map
+	0x216F:  []rune{0x217F},                         // Case map
+	0x24B6:  []rune{0x24D0},                         // Case map
+	0x24B7:  []rune{0x24D1},                         // Case map
+	0x24B8:  []rune{0x24D2},                         // Case map
+	0x24B9:  []rune{0x24D3},                         // Case map
+	0x24BA:  []rune{0x24D4},                         // Case map
+	0x24BB:  []rune{0x24D5},                         // Case map
+	0x24BC:  []rune{0x24D6},                         // Case map
+	0x24BD:  []rune{0x24D7},                         // Case map
+	0x24BE:  []rune{0x24D8},                         // Case map
+	0x24BF:  []rune{0x24D9},                         // Case map
+	0x24C0:  []rune{0x24DA},                         // Case map
+	0x24C1:  []rune{0x24DB},                         // Case map
+	0x24C2:  []rune{0x24DC},                         // Case map
+	0x24C3:  []rune{0x24DD},                         // Case map
+	0x24C4:  []rune{0x24DE},                         // Case map
+	0x24C5:  []rune{0x24DF},                         // Case map
+	0x24C6:  []rune{0x24E0},                         // Case map
+	0x24C7:  []rune{0x24E1},                         // Case map
+	0x24C8:  []rune{0x24E2},                         // Case map
+	0x24C9:  []rune{0x24E3},                         // Case map
+	0x24CA:  []rune{0x24E4},                         // Case map
+	0x24CB:  []rune{0x24E5},                         // Case map
+	0x24CC:  []rune{0x24E6},                         // Case map
+	0x24CD:  []rune{0x24E7},                         // Case map
+	0x24CE:  []rune{0x24E8},                         // Case map
+	0x24CF:  []rune{0x24E9},                         // Case map
+	0x3371:  []rune{0x0068, 0x0070, 0x0061},         // Additional folding
+	0x3373:  []rune{0x0061, 0x0075},                 // Additional folding
+	0x3375:  []rune{0x006F, 0x0076},                 // Additional folding
+	0x3380:  []rune{0x0070, 0x0061},                 // Additional folding
+	0x3381:  []rune{0x006E, 0x0061},                 // Additional folding
+	0x3382:  []rune{0x03BC, 0x0061},                 // Additional folding
+	0x3383:  []rune{0x006D, 0x0061},                 // Additional folding
+	0x3384:  []rune{0x006B, 0x0061},                 // Additional folding
+	0x3385:  []rune{0x006B, 0x0062},                 // Additional folding
+	0x3386:  []rune{0x006D, 0x0062},                 // Additional folding
+	0x3387:  []rune{0x0067, 0x0062},                 // Additional folding
+	0x338A:  []rune{0x0070, 0x0066},                 // Additional folding
+	0x338B:  []rune{0x006E, 0x0066},                 // Additional folding
+	0x338C:  []rune{0x03BC, 0x0066},                 // Additional folding
+	0x3390:  []rune{0x0068, 0x007A},                 // Additional folding
+	0x3391:  []rune{0x006B, 0x0068, 0x007A},         // Additional folding
+	0x3392:  []rune{0x006D, 0x0068, 0x007A},         // Additional folding
+	0x3393:  []rune{0x0067, 0x0068, 0x007A},         // Additional folding
+	0x3394:  []rune{0x0074, 0x0068, 0x007A},         // Additional folding
+	0x33A9:  []rune{0x0070, 0x0061},                 // Additional folding
+	0x33AA:  []rune{0x006B, 0x0070, 0x0061},         // Additional folding
+	0x33AB:  []rune{0x006D, 0x0070, 0x0061},         // Additional folding
+	0x33AC:  []rune{0x0067, 0x0070, 0x0061},         // Additional folding
+	0x33B4:  []rune{0x0070, 0x0076},                 // Additional folding
+	0x33B5:  []rune{0x006E, 0x0076},                 // Additional folding
+	0x33B6:  []rune{0x03BC, 0x0076},                 // Additional folding
+	0x33B7:  []rune{0x006D, 0x0076},                 // Additional folding
+	0x33B8:  []rune{0x006B, 0x0076},                 // Additional folding
+	0x33B9:  []rune{0x006D, 0x0076},                 // Additional folding
+	0x33BA:  []rune{0x0070, 0x0077},                 // Additional folding
+	0x33BB:  []rune{0x006E, 0x0077},                 // Additional folding
+	0x33BC:  []rune{0x03BC, 0x0077},                 // Additional folding
+	0x33BD:  []rune{0x006D, 0x0077},                 // Additional folding
+	0x33BE:  []rune{0x006B, 0x0077},                 // Additional folding
+	0x33BF:  []rune{0x006D, 0x0077},                 // Additional folding
+	0x33C0:  []rune{0x006B, 0x03C9},                 // Additional folding
+	0x33C1:  []rune{0x006D, 0x03C9},                 // Additional folding
+	0x33C3:  []rune{0x0062, 0x0071},                 // Additional folding
+	0x33C6:  []rune{0x0063, 0x2215, 0x006B, 0x0067}, // Additional folding
+	0x33C7:  []rune{0x0063, 0x006F, 0x002E},         // Additional folding
+	0x33C8:  []rune{0x0064, 0x0062},                 // Additional folding
+	0x33C9:  []rune{0x0067, 0x0079},                 // Additional folding
+	0x33CB:  []rune{0x0068, 0x0070},                 // Additional folding
+	0x33CD:  []rune{0x006B, 0x006B},                 // Additional folding
+	0x33CE:  []rune{0x006B, 0x006D},                 // Additional folding
+	0x33D7:  []rune{0x0070, 0x0068},                 // Additional folding
+	0x33D9:  []rune{0x0070, 0x0070, 0x006D},         // Additional folding
+	0x33DA:  []rune{0x0070, 0x0072},                 // Additional folding
+	0x33DC:  []rune{0x0073, 0x0076},                 // Additional folding
+	0x33DD:  []rune{0x0077, 0x0062},                 // Additional folding
+	0xFB00:  []rune{0x0066, 0x0066},                 // Case map
+	0xFB01:  []rune{0x0066, 0x0069},                 // Case map
+	0xFB02:  []rune{0x0066, 0x006C},                 // Case map
+	0xFB03:  []rune{0x0066, 0x0066, 0x0069},         // Case map
+	0xFB04:  []rune{0x0066, 0x0066, 0x006C},         // Case map
+	0xFB05:  []rune{0x0073, 0x0074},                 // Case map
+	0xFB06:  []rune{0x0073, 0x0074},                 // Case map
+	0xFB13:  []rune{0x0574, 0x0576},                 // Case map
+	0xFB14:  []rune{0x0574, 0x0565},                 // Case map
+	0xFB15:  []rune{0x0574, 0x056B},                 // Case map
+	0xFB16:  []rune{0x057E, 0x0576},                 // Case map
+	0xFB17:  []rune{0x0574, 0x056D},                 // Case map
+	0xFF21:  []rune{0xFF41},                         // Case map
+	0xFF22:  []rune{0xFF42},                         // Case map
+	0xFF23:  []rune{0xFF43},                         // Case map
+	0xFF24:  []rune{0xFF44},                         // Case map
+	0xFF25:  []rune{0xFF45},                         // Case map
+	0xFF26:  []rune{0xFF46},                         // Case map
+	0xFF27:  []rune{0xFF47},                         // Case map
+	0xFF28:  []rune{0xFF48},                         // Case map
+	0xFF29:  []rune{0xFF49},                         // Case map
+	0xFF2A:  []rune{0xFF4A},                         // Case map
+	0xFF2B:  []rune{0xFF4B},                         // Case map
+	0xFF2C:  []rune{0xFF4C},                         // Case map
+	0xFF2D:  []rune{0xFF4D},                         // Case map
+	0xFF2E:  []rune{0xFF4E},                         // Case map
+	0xFF2F:  []rune{0xFF4F},                         // Case map
+	0xFF30:  []rune{0xFF50},                         // Case map
+	0xFF31:  []rune{0xFF51},                         // Case map
+	0xFF32:  []rune{0xFF52},                         // Case map
+	0xFF33:  []rune{0xFF53},                         // Case map
+	0xFF34:  []rune{0xFF54},                         // Case map
+	0xFF35:  []rune{0xFF55},                         // Case map
+	0xFF36:  []rune{0xFF56},                         // Case map
+	0xFF37:  []rune{0xFF57},                         // Case map
+	0xFF38:  []rune{0xFF58},                         // Case map
+	0xFF39:  []rune{0xFF59},                         // Case map
+	0xFF3A:  []rune{0xFF5A},                         // Case map
+	0x10400: []rune{0x10428},                        // Case map
+	0x10401: []rune{0x10429},                        // Case map
+	0x10402: []rune{0x1042A},                        // Case map
+	0x10403: []rune{0x1042B},                        // Case map
+	0x10404: []rune{0x1042C},                        // Case map
+	0x10405: []rune{0x1042D},                        // Case map
+	0x10406: []rune{0x1042E},                        // Case map
+	0x10407: []rune{0x1042F},                        // Case map
+	0x10408: []rune{0x10430},                        // Case map
+	0x10409: []rune{0x10431},                        // Case map
+	0x1040A: []rune{0x10432},                        // Case map
+	0x1040B: []rune{0x10433},                        // Case map
+	0x1040C: []rune{0x10434},                        // Case map
+	0x1040D: []rune{0x10435},                        // Case map
+	0x1040E: []rune{0x10436},                        // Case map
+	0x1040F: []rune{0x10437},                        // Case map
+	0x10410: []rune{0x10438},                        // Case map
+	0x10411: []rune{0x10439},                        // Case map
+	0x10412: []rune{0x1043A},                        // Case map
+	0x10413: []rune{0x1043B},                        // Case map
+	0x10414: []rune{0x1043C},                        // Case map
+	0x10415: []rune{0x1043D},                        // Case map
+	0x10416: []rune{0x1043E},                        // Case map
+	0x10417: []rune{0x1043F},                        // Case map
+	0x10418: []rune{0x10440},                        // Case map
+	0x10419: []rune{0x10441},                        // Case map
+	0x1041A: []rune{0x10442},                        // Case map
+	0x1041B: []rune{0x10443},                        // Case map
+	0x1041C: []rune{0x10444},                        // Case map
+	0x1041D: []rune{0x10445},                        // Case map
+	0x1041E: []rune{0x10446},                        // Case map
+	0x1041F: []rune{0x10447},                        // Case map
+	0x10420: []rune{0x10448},                        // Case map
+	0x10421: []rune{0x10449},                        // Case map
+	0x10422: []rune{0x1044A},                        // Case map
+	0x10423: []rune{0x1044B},                        // Case map
+	0x10424: []rune{0x1044C},                        // Case map
+	0x10425: []rune{0x1044D},                        // Case map
+	0x1D400: []rune{0x0061},                         // Additional folding
+	0x1D401: []rune{0x0062},                         // Additional folding
+	0x1D402: []rune{0x0063},                         // Additional folding
+	0x1D403: []rune{0x0064},                         // Additional folding
+	0x1D404: []rune{0x0065},                         // Additional folding
+	0x1D405: []rune{0x0066},                         // Additional folding
+	0x1D406: []rune{0x0067},                         // Additional folding
+	0x1D407: []rune{0x0068},                         // Additional folding
+	0x1D408: []rune{0x0069},                         // Additional folding
+	0x1D409: []rune{0x006A},                         // Additional folding
+	0x1D40A: []rune{0x006B},                         // Additional folding
+	0x1D40B: []rune{0x006C},                         // Additional folding
+	0x1D40C: []rune{0x006D},                         // Additional folding
+	0x1D40D: []rune{0x006E},                         // Additional folding
+	0x1D40E: []rune{0x006F},                         // Additional folding
+	0x1D40F: []rune{0x0070},                         // Additional folding
+	0x1D410: []rune{0x0071},                         // Additional folding
+	0x1D411: []rune{0x0072},                         // Additional folding
+	0x1D412: []rune{0x0073},                         // Additional folding
+	0x1D413: []rune{0x0074},                         // Additional folding
+	0x1D414: []rune{0x0075},                         // Additional folding
+	0x1D415: []rune{0x0076},                         // Additional folding
+	0x1D416: []rune{0x0077},                         // Additional folding
+	0x1D417: []rune{0x0078},                         // Additional folding
+	0x1D418: []rune{0x0079},                         // Additional folding
+	0x1D419: []rune{0x007A},                         // Additional folding
+	0x1D434: []rune{0x0061},                         // Additional folding
+	0x1D435: []rune{0x0062},                         // Additional folding
+	0x1D436: []rune{0x0063},                         // Additional folding
+	0x1D437: []rune{0x0064},                         // Additional folding
+	0x1D438: []rune{0x0065},                         // Additional folding
+	0x1D439: []rune{0x0066},                         // Additional folding
+	0x1D43A: []rune{0x0067},                         // Additional folding
+	0x1D43B: []rune{0x0068},                         // Additional folding
+	0x1D43C: []rune{0x0069},                         // Additional folding
+	0x1D43D: []rune{0x006A},                         // Additional folding
+	0x1D43E: []rune{0x006B},                         // Additional folding
+	0x1D43F: []rune{0x006C},                         // Additional folding
+	0x1D440: []rune{0x006D},                         // Additional folding
+	0x1D441: []rune{0x006E},                         // Additional folding
+	0x1D442: []rune{0x006F},                         // Additional folding
+	0x1D443: []rune{0x0070},                         // Additional folding
+	0x1D444: []rune{0x0071},                         // Additional folding
+	0x1D445: []rune{0x0072},                         // Additional folding
+	0x1D446: []rune{0x0073},                         // Additional folding
+	0x1D447: []rune{0x0074},                         // Additional folding
+	0x1D448: []rune{0x0075},                         // Additional folding
+	0x1D449: []rune{0x0076},                         // Additional folding
+	0x1D44A: []rune{0x0077},                         // Additional folding
+	0x1D44B: []rune{0x0078},                         // Additional folding
+	0x1D44C: []rune{0x0079},                         // Additional folding
+	0x1D44D: []rune{0x007A},                         // Additional folding
+	0x1D468: []rune{0x0061},                         // Additional folding
+	0x1D469: []rune{0x0062},                         // Additional folding
+	0x1D46A: []rune{0x0063},                         // Additional folding
+	0x1D46B: []rune{0x0064},                         // Additional folding
+	0x1D46C: []rune{0x0065},                         // Additional folding
+	0x1D46D: []rune{0x0066},                         // Additional folding
+	0x1D46E: []rune{0x0067},                         // Additional folding
+	0x1D46F: []rune{0x0068},                         // Additional folding
+	0x1D470: []rune{0x0069},                         // Additional folding
+	0x1D471: []rune{0x006A},                         // Additional folding
+	0x1D472: []rune{0x006B},                         // Additional folding
+	0x1D473: []rune{0x006C},                         // Additional folding
+	0x1D474: []rune{0x006D},                         // Additional folding
+	0x1D475: []rune{0x006E},                         // Additional folding
+	0x1D476: []rune{0x006F},                         // Additional folding
+	0x1D477: []rune{0x0070},                         // Additional folding
+	0x1D478: []rune{0x0071},                         // Additional folding
+	0x1D479: []rune{0x0072},                         // Additional folding
+	0x1D47A: []rune{0x0073},                         // Additional folding
+	0x1D47B: []rune{0x0074},                         // Additional folding
+	0x1D47C: []rune{0x0075},                         // Additional folding
+	0x1D47D: []rune{0x0076},                         // Additional folding
+	0x1D47E: []rune{0x0077},                         // Additional folding
+	0x1D47F: []rune{0x0078},                         // Additional folding
+	0x1D480: []rune{0x0079},                         // Additional folding
+	0x1D481: []rune{0x007A},                         // Additional folding
+	0x1D49C: []rune{0x0061},                         // Additional folding
+	0x1D49E: []rune{0x0063},                         // Additional folding
+	0x1D49F: []rune{0x0064},                         // Additional folding
+	0x1D4A2: []rune{0x0067},                         // Additional folding
+	0x1D4A5: []rune{0x006A},                         // Additional folding
+	0x1D4A6: []rune{0x006B},                         // Additional folding
+	0x1D4A9: []rune{0x006E},                         // Additional folding
+	0x1D4AA: []rune{0x006F},                         // Additional folding
+	0x1D4AB: []rune{0x0070},                         // Additional folding
+	0x1D4AC: []rune{0x0071},                         // Additional folding
+	0x1D4AE: []rune{0x0073},                         // Additional folding
+	0x1D4AF: []rune{0x0074},                         // Additional folding
+	0x1D4B0: []rune{0x0075},                         // Additional folding
+	0x1D4B1: []rune{0x0076},                         // Additional folding
+	0x1D4B2: []rune{0x0077},                         // Additional folding
+	0x1D4B3: []rune{0x0078},                         // Additional folding
+	0x1D4B4: []rune{0x0079},                         // Additional folding
+	0x1D4B5: []rune{0x007A},                         // Additional folding
+	0x1D4D0: []rune{0x0061},                         // Additional folding
+	0x1D4D1: []rune{0x0062},                         // Additional folding
+	0x1D4D2: []rune{0x0063},                         // Additional folding
+	0x1D4D3: []rune{0x0064},                         // Additional folding
+	0x1D4D4: []rune{0x0065},                         // Additional folding
+	0x1D4D5: []rune{0x0066},                         // Additional folding
+	0x1D4D6: []rune{0x0067},                         // Additional folding
+	0x1D4D7: []rune{0x0068},                         // Additional folding
+	0x1D4D8: []rune{0x0069},                         // Additional folding
+	0x1D4D9: []rune{0x006A},                         // Additional folding
+	0x1D4DA: []rune{0x006B},                         // Additional folding
+	0x1D4DB: []rune{0x006C},                         // Additional folding
+	0x1D4DC: []rune{0x006D},                         // Additional folding
+	0x1D4DD: []rune{0x006E},                         // Additional folding
+	0x1D4DE: []rune{0x006F},                         // Additional folding
+	0x1D4DF: []rune{0x0070},                         // Additional folding
+	0x1D4E0: []rune{0x0071},                         // Additional folding
+	0x1D4E1: []rune{0x0072},                         // Additional folding
+	0x1D4E2: []rune{0x0073},                         // Additional folding
+	0x1D4E3: []rune{0x0074},                         // Additional folding
+	0x1D4E4: []rune{0x0075},                         // Additional folding
+	0x1D4E5: []rune{0x0076},                         // Additional folding
+	0x1D4E6: []rune{0x0077},                         // Additional folding
+	0x1D4E7: []rune{0x0078},                         // Additional folding
+	0x1D4E8: []rune{0x0079},                         // Additional folding
+	0x1D4E9: []rune{0x007A},                         // Additional folding
+	0x1D504: []rune{0x0061},                         // Additional folding
+	0x1D505: []rune{0x0062},                         // Additional folding
+	0x1D507: []rune{0x0064},                         // Additional folding
+	0x1D508: []rune{0x0065},                         // Additional folding
+	0x1D509: []rune{0x0066},                         // Additional folding
+	0x1D50A: []rune{0x0067},                         // Additional folding
+	0x1D50D: []rune{0x006A},                         // Additional folding
+	0x1D50E: []rune{0x006B},                         // Additional folding
+	0x1D50F: []rune{0x006C},                         // Additional folding
+	0x1D510: []rune{0x006D},                         // Additional folding
+	0x1D511: []rune{0x006E},                         // Additional folding
+	0x1D512: []rune{0x006F},                         // Additional folding
+	0x1D513: []rune{0x0070},                         // Additional folding
+	0x1D514: []rune{0x0071},                         // Additional folding
+	0x1D516: []rune{0x0073},                         // Additional folding
+	0x1D517: []rune{0x0074},                         // Additional folding
+	0x1D518: []rune{0x0075},                         // Additional folding
+	0x1D519: []rune{0x0076},                         // Additional folding
+	0x1D51A: []rune{0x0077},                         // Additional folding
+	0x1D51B: []rune{0x0078},                         // Additional folding
+	0x1D51C: []rune{0x0079},                         // Additional folding
+	0x1D538: []rune{0x0061},                         // Additional folding
+	0x1D539: []rune{0x0062},                         // Additional folding
+	0x1D53B: []rune{0x0064},                         // Additional folding
+	0x1D53C: []rune{0x0065},                         // Additional folding
+	0x1D53D: []rune{0x0066},                         // Additional folding
+	0x1D53E: []rune{0x0067},                         // Additional folding
+	0x1D540: []rune{0x0069},                         // Additional folding
+	0x1D541: []rune{0x006A},                         // Additional folding
+	0x1D542: []rune{0x006B},                         // Additional folding
+	0x1D543: []rune{0x006C},                         // Additional folding
+	0x1D544: []rune{0x006D},                         // Additional folding
+	0x1D546: []rune{0x006F},                         // Additional folding
+	0x1D54A: []rune{0x0073},                         // Additional folding
+	0x1D54B: []rune{0x0074},                         // Additional folding
+	0x1D54C: []rune{0x0075},                         // Additional folding
+	0x1D54D: []rune{0x0076},                         // Additional folding
+	0x1D54E: []rune{0x0077},                         // Additional folding
+	0x1D54F: []rune{0x0078},                         // Additional folding
+	0x1D550: []rune{0x0079},                         // Additional folding
+	0x1D56C: []rune{0x0061},                         // Additional folding
+	0x1D56D: []rune{0x0062},                         // Additional folding
+	0x1D56E: []rune{0x0063},                         // Additional folding
+	0x1D56F: []rune{0x0064},                         // Additional folding
+	0x1D570: []rune{0x0065},                         // Additional folding
+	0x1D571: []rune{0x0066},                         // Additional folding
+	0x1D572: []rune{0x0067},                         // Additional folding
+	0x1D573: []rune{0x0068},                         // Additional folding
+	0x1D574: []rune{0x0069},                         // Additional folding
+	0x1D575: []rune{0x006A},                         // Additional folding
+	0x1D576: []rune{0x006B},                         // Additional folding
+	0x1D577: []rune{0x006C},                         // Additional folding
+	0x1D578: []rune{0x006D},                         // Additional folding
+	0x1D579: []rune{0x006E},                         // Additional folding
+	0x1D57A: []rune{0x006F},                         // Additional folding
+	0x1D57B: []rune{0x0070},                         // Additional folding
+	0x1D57C: []rune{0x0071},                         // Additional folding
+	0x1D57D: []rune{0x0072},                         // Additional folding
+	0x1D57E: []rune{0x0073},                         // Additional folding
+	0x1D57F: []rune{0x0074},                         // Additional folding
+	0x1D580: []rune{0x0075},                         // Additional folding
+	0x1D581: []rune{0x0076},                         // Additional folding
+	0x1D582: []rune{0x0077},                         // Additional folding
+	0x1D583: []rune{0x0078},                         // Additional folding
+	0x1D584: []rune{0x0079},                         // Additional folding
+	0x1D585: []rune{0x007A},                         // Additional folding
+	0x1D5A0: []rune{0x0061},                         // Additional folding
+	0x1D5A1: []rune{0x0062},                         // Additional folding
+	0x1D5A2: []rune{0x0063},                         // Additional folding
+	0x1D5A3: []rune{0x0064},                         // Additional folding
+	0x1D5A4: []rune{0x0065},                         // Additional folding
+	0x1D5A5: []rune{0x0066},                         // Additional folding
+	0x1D5A6: []rune{0x0067},                         // Additional folding
+	0x1D5A7: []rune{0x0068},                         // Additional folding
+	0x1D5A8: []rune{0x0069},                         // Additional folding
+	0x1D5A9: []rune{0x006A},                         // Additional folding
+	0x1D5AA: []rune{0x006B},                         // Additional folding
+	0x1D5AB: []rune{0x006C},                         // Additional folding
+	0x1D5AC: []rune{0x006D},                         // Additional folding
+	0x1D5AD: []rune{0x006E},                         // Additional folding
+	0x1D5AE: []rune{0x006F},                         // Additional folding
+	0x1D5AF: []rune{0x0070},                         // Additional folding
+	0x1D5B0: []rune{0x0071},                         // Additional folding
+	0x1D5B1: []rune{0x0072},                         // Additional folding
+	0x1D5B2: []rune{0x0073},                         // Additional folding
+	0x1D5B3: []rune{0x0074},                         // Additional folding
+	0x1D5B4: []rune{0x0075},                         // Additional folding
+	0x1D5B5: []rune{0x0076},                         // Additional folding
+	0x1D5B6: []rune{0x0077},                         // Additional folding
+	0x1D5B7: []rune{0x0078},                         // Additional folding
+	0x1D5B8: []rune{0x0079},                         // Additional folding
+	0x1D5B9: []rune{0x007A},                         // Additional folding
+	0x1D5D4: []rune{0x0061},                         // Additional folding
+	0x1D5D5: []rune{0x0062},                         // Additional folding
+	0x1D5D6: []rune{0x0063},                         // Additional folding
+	0x1D5D7: []rune{0x0064},                         // Additional folding
+	0x1D5D8: []rune{0x0065},                         // Additional folding
+	0x1D5D9: []rune{0x0066},                         // Additional folding
+	0x1D5DA: []rune{0x0067},                         // Additional folding
+	0x1D5DB: []rune{0x0068},                         // Additional folding
+	0x1D5DC: []rune{0x0069},                         // Additional folding
+	0x1D5DD: []rune{0x006A},                         // Additional folding
+	0x1D5DE: []rune{0x006B},                         // Additional folding
+	0x1D5DF: []rune{0x006C},                         // Additional folding
+	0x1D5E0: []rune{0x006D},                         // Additional folding
+	0x1D5E1: []rune{0x006E},                         // Additional folding
+	0x1D5E2: []rune{0x006F},                         // Additional folding
+	0x1D5E3: []rune{0x0070},                         // Additional folding
+	0x1D5E4: []rune{0x0071},                         // Additional folding
+	0x1D5E5: []rune{0x0072},                         // Additional folding
+	0x1D5E6: []rune{0x0073},                         // Additional folding
+	0x1D5E7: []rune{0x0074},                         // Additional folding
+	0x1D5E8: []rune{0x0075},                         // Additional folding
+	0x1D5E9: []rune{0x0076},                         // Additional folding
+	0x1D5EA: []rune{0x0077},                         // Additional folding
+	0x1D5EB: []rune{0x0078},                         // Additional folding
+	0x1D5EC: []rune{0x0079},                         // Additional folding
+	0x1D5ED: []rune{0x007A},                         // Additional folding
+	0x1D608: []rune{0x0061},                         // Additional folding
+	0x1D609: []rune{0x0062},                         // Additional folding
+	0x1D60A: []rune{0x0063},                         // Additional folding
+	0x1D60B: []rune{0x0064},                         // Additional folding
+	0x1D60C: []rune{0x0065},                         // Additional folding
+	0x1D60D: []rune{0x0066},                         // Additional folding
+	0x1D60E: []rune{0x0067},                         // Additional folding
+	0x1D60F: []rune{0x0068},                         // Additional folding
+	0x1D610: []rune{0x0069},                         // Additional folding
+	0x1D611: []rune{0x006A},                         // Additional folding
+	0x1D612: []rune{0x006B},                         // Additional folding
+	0x1D613: []rune{0x006C},                         // Additional folding
+	0x1D614: []rune{0x006D},                         // Additional folding
+	0x1D615: []rune{0x006E},                         // Additional folding
+	0x1D616: []rune{0x006F},                         // Additional folding
+	0x1D617: []rune{0x0070},                         // Additional folding
+	0x1D618: []rune{0x0071},                         // Additional folding
+	0x1D619: []rune{0x0072},                         // Additional folding
+	0x1D61A: []rune{0x0073},                         // Additional folding
+	0x1D61B: []rune{0x0074},                         // Additional folding
+	0x1D61C: []rune{0x0075},                         // Additional folding
+	0x1D61D: []rune{0x0076},                         // Additional folding
+	0x1D61E: []rune{0x0077},                         // Additional folding
+	0x1D61F: []rune{0x0078},                         // Additional folding
+	0x1D620: []rune{0x0079},                         // Additional folding
+	0x1D621: []rune{0x007A},                         // Additional folding
+	0x1D63C: []rune{0x0061},                         // Additional folding
+	0x1D63D: []rune{0x0062},                         // Additional folding
+	0x1D63E: []rune{0x0063},                         // Additional folding
+	0x1D63F: []rune{0x0064},                         // Additional folding
+	0x1D640: []rune{0x0065},                         // Additional folding
+	0x1D641: []rune{0x0066},                         // Additional folding
+	0x1D642: []rune{0x0067},                         // Additional folding
+	0x1D643: []rune{0x0068},                         // Additional folding
+	0x1D644: []rune{0x0069},                         // Additional folding
+	0x1D645: []rune{0x006A},                         // Additional folding
+	0x1D646: []rune{0x006B},                         // Additional folding
+	0x1D647: []rune{0x006C},                         // Additional folding
+	0x1D648: []rune{0x006D},                         // Additional folding
+	0x1D649: []rune{0x006E},                         // Additional folding
+	0x1D64A: []rune{0x006F},                         // Additional folding
+	0x1D64B: []rune{0x0070},                         // Additional folding
+	0x1D64C: []rune{0x0071},                         // Additional folding
+	0x1D64D: []rune{0x0072},                         // Additional folding
+	0x1D64E: []rune{0x0073},                         // Additional folding
+	0x1D64F: []rune{0x0074},                         // Additional folding
+	0x1D650: []rune{0x0075},                         // Additional folding
+	0x1D651: []rune{0x0076},                         // Additional folding
+	0x1D652: []rune{0x0077},                         // Additional folding
+	0x1D653: []rune{0x0078},                         // Additional folding
+	0x1D654: []rune{0x0079},                         // Additional folding
+	0x1D655: []rune{0x007A},                         // Additional folding
+	0x1D670: []rune{0x0061},                         // Additional folding
+	0x1D671: []rune{0x0062},                         // Additional folding
+	0x1D672: []rune{0x0063},                         // Additional folding
+	0x1D673: []rune{0x0064},                         // Additional folding
+	0x1D674: []rune{0x0065},                         // Additional folding
+	0x1D675: []rune{0x0066},                         // Additional folding
+	0x1D676: []rune{0x0067},                         // Additional folding
+	0x1D677: []rune{0x0068},                         // Additional folding
+	0x1D678: []rune{0x0069},                         // Additional folding
+	0x1D679: []rune{0x006A},                         // Additional folding
+	0x1D67A: []rune{0x006B},                         // Additional folding
+	0x1D67B: []rune{0x006C},                         // Additional folding
+	0x1D67C: []rune{0x006D},                         // Additional folding
+	0x1D67D: []rune{0x006E},                         // Additional folding
+	0x1D67E: []rune{0x006F},                         // Additional folding
+	0x1D67F: []rune{0x0070},                         // Additional folding
+	0x1D680: []rune{0x0071},                         // Additional folding
+	0x1D681: []rune{0x0072},                         // Additional folding
+	0x1D682: []rune{0x0073},                         // Additional folding
+	0x1D683: []rune{0x0074},                         // Additional folding
+	0x1D684: []rune{0x0075},                         // Additional folding
+	0x1D685: []rune{0x0076},                         // Additional folding
+	0x1D686: []rune{0x0077},                         // Additional folding
+	0x1D687: []rune{0x0078},                         // Additional folding
+	0x1D688: []rune{0x0079},                         // Additional folding
+	0x1D689: []rune{0x007A},                         // Additional folding
+	0x1D6A8: []rune{0x03B1},                         // Additional folding
+	0x1D6A9: []rune{0x03B2},                         // Additional folding
+	0x1D6AA: []rune{0x03B3},                         // Additional folding
+	0x1D6AB: []rune{0x03B4},                         // Additional folding
+	0x1D6AC: []rune{0x03B5},                         // Additional folding
+	0x1D6AD: []rune{0x03B6},                         // Additional folding
+	0x1D6AE: []rune{0x03B7},                         // Additional folding
+	0x1D6AF: []rune{0x03B8},                         // Additional folding
+	0x1D6B0: []rune{0x03B9},                         // Additional folding
+	0x1D6B1: []rune{0x03BA},                         // Additional folding
+	0x1D6B2: []rune{0x03BB},                         // Additional folding
+	0x1D6B3: []rune{0x03BC},                         // Additional folding
+	0x1D6B4: []rune{0x03BD},                         // Additional folding
+	0x1D6B5: []rune{0x03BE},                         // Additional folding
+	0x1D6B6: []rune{0x03BF},                         // Additional folding
+	0x1D6B7: []rune{0x03C0},                         // Additional folding
+	0x1D6B8: []rune{0x03C1},                         // Additional folding
+	0x1D6B9: []rune{0x03B8},                         // Additional folding
+	0x1D6BA: []rune{0x03C3},                         // Additional folding
+	0x1D6BB: []rune{0x03C4},                         // Additional folding
+	0x1D6BC: []rune{0x03C5},                         // Additional folding
+	0x1D6BD: []rune{0x03C6},                         // Additional folding
+	0x1D6BE: []rune{0x03C7},                         // Additional folding
+	0x1D6BF: []rune{0x03C8},                         // Additional folding
+	0x1D6C0: []rune{0x03C9},                         // Additional folding
+	0x1D6D3: []rune{0x03C3},                         // Additional folding
+	0x1D6E2: []rune{0x03B1},                         // Additional folding
+	0x1D6E3: []rune{0x03B2},                         // Additional folding
+	0x1D6E4: []rune{0x03B3},                         // Additional folding
+	0x1D6E5: []rune{0x03B4},                         // Additional folding
+	0x1D6E6: []rune{0x03B5},                         // Additional folding
+	0x1D6E7: []rune{0x03B6},                         // Additional folding
+	0x1D6E8: []rune{0x03B7},                         // Additional folding
+	0x1D6E9: []rune{0x03B8},                         // Additional folding
+	0x1D6EA: []rune{0x03B9},                         // Additional folding
+	0x1D6EB: []rune{0x03BA},                         // Additional folding
+	0x1D6EC: []rune{0x03BB},                         // Additional folding
+	0x1D6ED: []rune{0x03BC},                         // Additional folding
+	0x1D6EE: []rune{0x03BD},                         // Additional folding
+	0x1D6EF: []rune{0x03BE},                         // Additional folding
+	0x1D6F0: []rune{0x03BF},                         // Additional folding
+	0x1D6F1: []rune{0x03C0},                         // Additional folding
+	0x1D6F2: []rune{0x03C1},                         // Additional folding
+	0x1D6F3: []rune{0x03B8},                         // Additional folding
+	0x1D6F4: []rune{0x03C3},                         // Additional folding
+	0x1D6F5: []rune{0x03C4},                         // Additional folding
+	0x1D6F6: []rune{0x03C5},                         // Additional folding
+	0x1D6F7: []rune{0x03C6},                         // Additional folding
+	0x1D6F8: []rune{0x03C7},                         // Additional folding
+	0x1D6F9: []rune{0x03C8},                         // Additional folding
+	0x1D6FA: []rune{0x03C9},                         // Additional folding
+	0x1D70D: []rune{0x03C3},                         // Additional folding
+	0x1D71C: []rune{0x03B1},                         // Additional folding
+	0x1D71D: []rune{0x03B2},                         // Additional folding
+	0x1D71E: []rune{0x03B3},                         // Additional folding
+	0x1D71F: []rune{0x03B4},                         // Additional folding
+	0x1D720: []rune{0x03B5},                         // Additional folding
+	0x1D721: []rune{0x03B6},                         // Additional folding
+	0x1D722: []rune{0x03B7},                         // Additional folding
+	0x1D723: []rune{0x03B8},                         // Additional folding
+	0x1D724: []rune{0x03B9},                         // Additional folding
+	0x1D725: []rune{0x03BA},                         // Additional folding
+	0x1D726: []rune{0x03BB},                         // Additional folding
+	0x1D727: []rune{0x03BC},                         // Additional folding
+	0x1D728: []rune{0x03BD},                         // Additional folding
+	0x1D729: []rune{0x03BE},                         // Additional folding
+	0x1D72A: []rune{0x03BF},                         // Additional folding
+	0x1D72B: []rune{0x03C0},                         // Additional folding
+	0x1D72C: []rune{0x03C1},                         // Additional folding
+	0x1D72D: []rune{0x03B8},                         // Additional folding
+	0x1D72E: []rune{0x03C3},                         // Additional folding
+	0x1D72F: []rune{0x03C4},                         // Additional folding
+	0x1D730: []rune{0x03C5},                         // Additional folding
+	0x1D731: []rune{0x03C6},                         // Additional folding
+	0x1D732: []rune{0x03C7},                         // Additional folding
+	0x1D733: []rune{0x03C8},                         // Additional folding
+	0x1D734: []rune{0x03C9},                         // Additional folding
+	0x1D747: []rune{0x03C3},                         // Additional folding
+	0x1D756: []rune{0x03B1},                         // Additional folding
+	0x1D757: []rune{0x03B2},                         // Additional folding
+	0x1D758: []rune{0x03B3},                         // Additional folding
+	0x1D759: []rune{0x03B4},                         // Additional folding
+	0x1D75A: []rune{0x03B5},                         // Additional folding
+	0x1D75B: []rune{0x03B6},                         // Additional folding
+	0x1D75C: []rune{0x03B7},                         // Additional folding
+	0x1D75D: []rune{0x03B8},                         // Additional folding
+	0x1D75E: []rune{0x03B9},                         // Additional folding
+	0x1D75F: []rune{0x03BA},                         // Additional folding
+	0x1D760: []rune{0x03BB},                         // Additional folding
+	0x1D761: []rune{0x03BC},                         // Additional folding
+	0x1D762: []rune{0x03BD},                         // Additional folding
+	0x1D763: []rune{0x03BE},                         // Additional folding
+	0x1D764: []rune{0x03BF},                         // Additional folding
+	0x1D765: []rune{0x03C0},                         // Additional folding
+	0x1D766: []rune{0x03C1},                         // Additional folding
+	0x1D767: []rune{0x03B8},                         // Additional folding
+	0x1D768: []rune{0x03C3},                         // Additional folding
+	0x1D769: []rune{0x03C4},                         // Additional folding
+	0x1D76A: []rune{0x03C5},                         // Additional folding
+	0x1D76B: []rune{0x03C6},                         // Additional folding
+	0x1D76C: []rune{0x03C7},                         // Additional folding
+	0x1D76D: []rune{0x03C8},                         // Additional folding
+	0x1D76E: []rune{0x03C9},                         // Additional folding
+	0x1D781: []rune{0x03C3},                         // Additional folding
+	0x1D790: []rune{0x03B1},                         // Additional folding
+	0x1D791: []rune{0x03B2},                         // Additional folding
+	0x1D792: []rune{0x03B3},                         // Additional folding
+	0x1D793: []rune{0x03B4},                         // Additional folding
+	0x1D794: []rune{0x03B5},                         // Additional folding
+	0x1D795: []rune{0x03B6},                         // Additional folding
+	0x1D796: []rune{0x03B7},                         // Additional folding
+	0x1D797: []rune{0x03B8},                         // Additional folding
+	0x1D798: []rune{0x03B9},                         // Additional folding
+	0x1D799: []rune{0x03BA},                         // Additional folding
+	0x1D79A: []rune{0x03BB},                         // Additional folding
+	0x1D79B: []rune{0x03BC},                         // Additional folding
+	0x1D79C: []rune{0x03BD},                         // Additional folding
+	0x1D79D: []rune{0x03BE},                         // Additional folding
+	0x1D79E: []rune{0x03BF},                         // Additional folding
+	0x1D79F: []rune{0x03C0},                         // Additional folding
+	0x1D7A0: []rune{0x03C1},                         // Additional folding
+	0x1D7A1: []rune{0x03B8},                         // Additional folding
+	0x1D7A2: []rune{0x03C3},                         // Additional folding
+	0x1D7A3: []rune{0x03C4},                         // Additional folding
+	0x1D7A4: []rune{0x03C5},                         // Additional folding
+	0x1D7A5: []rune{0x03C6},                         // Additional folding
+	0x1D7A6: []rune{0x03C7},                         // Additional folding
+	0x1D7A7: []rune{0x03C8},                         // Additional folding
+	0x1D7A8: []rune{0x03C9},                         // Additional folding
+	0x1D7BB: []rune{0x03C3},                         // Additional folding
+}
+
+// TableB2 represents RFC-3454 Table B.2.
+var TableB2 Mapping = tableB2
+
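+// Illustrative sketch (not part of the generated tables): Mapping is declared
+// as a map composite literal, so a TableB2 lookup is a plain map index. The
+// hypothetical helper foldB2 below shows how this table might drive RFC-3454
+// case-folding; unmapped runes pass through unchanged.
+//
+//	func foldB2(s string) string {
+//		var out []rune
+//		for _, r := range s {
+//			if repl, ok := TableB2[r]; ok {
+//				out = append(out, repl...) // fold to the mapped rune(s)
+//			} else {
+//				out = append(out, r) // not in Table B.2: keep as-is
+//			}
+//		}
+//		return string(out)
+//	}
+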
+var tableB3 = Mapping{
+	0x0041:  []rune{0x0061},                 // Case map
+	0x0042:  []rune{0x0062},                 // Case map
+	0x0043:  []rune{0x0063},                 // Case map
+	0x0044:  []rune{0x0064},                 // Case map
+	0x0045:  []rune{0x0065},                 // Case map
+	0x0046:  []rune{0x0066},                 // Case map
+	0x0047:  []rune{0x0067},                 // Case map
+	0x0048:  []rune{0x0068},                 // Case map
+	0x0049:  []rune{0x0069},                 // Case map
+	0x004A:  []rune{0x006A},                 // Case map
+	0x004B:  []rune{0x006B},                 // Case map
+	0x004C:  []rune{0x006C},                 // Case map
+	0x004D:  []rune{0x006D},                 // Case map
+	0x004E:  []rune{0x006E},                 // Case map
+	0x004F:  []rune{0x006F},                 // Case map
+	0x0050:  []rune{0x0070},                 // Case map
+	0x0051:  []rune{0x0071},                 // Case map
+	0x0052:  []rune{0x0072},                 // Case map
+	0x0053:  []rune{0x0073},                 // Case map
+	0x0054:  []rune{0x0074},                 // Case map
+	0x0055:  []rune{0x0075},                 // Case map
+	0x0056:  []rune{0x0076},                 // Case map
+	0x0057:  []rune{0x0077},                 // Case map
+	0x0058:  []rune{0x0078},                 // Case map
+	0x0059:  []rune{0x0079},                 // Case map
+	0x005A:  []rune{0x007A},                 // Case map
+	0x00B5:  []rune{0x03BC},                 // Case map
+	0x00C0:  []rune{0x00E0},                 // Case map
+	0x00C1:  []rune{0x00E1},                 // Case map
+	0x00C2:  []rune{0x00E2},                 // Case map
+	0x00C3:  []rune{0x00E3},                 // Case map
+	0x00C4:  []rune{0x00E4},                 // Case map
+	0x00C5:  []rune{0x00E5},                 // Case map
+	0x00C6:  []rune{0x00E6},                 // Case map
+	0x00C7:  []rune{0x00E7},                 // Case map
+	0x00C8:  []rune{0x00E8},                 // Case map
+	0x00C9:  []rune{0x00E9},                 // Case map
+	0x00CA:  []rune{0x00EA},                 // Case map
+	0x00CB:  []rune{0x00EB},                 // Case map
+	0x00CC:  []rune{0x00EC},                 // Case map
+	0x00CD:  []rune{0x00ED},                 // Case map
+	0x00CE:  []rune{0x00EE},                 // Case map
+	0x00CF:  []rune{0x00EF},                 // Case map
+	0x00D0:  []rune{0x00F0},                 // Case map
+	0x00D1:  []rune{0x00F1},                 // Case map
+	0x00D2:  []rune{0x00F2},                 // Case map
+	0x00D3:  []rune{0x00F3},                 // Case map
+	0x00D4:  []rune{0x00F4},                 // Case map
+	0x00D5:  []rune{0x00F5},                 // Case map
+	0x00D6:  []rune{0x00F6},                 // Case map
+	0x00D8:  []rune{0x00F8},                 // Case map
+	0x00D9:  []rune{0x00F9},                 // Case map
+	0x00DA:  []rune{0x00FA},                 // Case map
+	0x00DB:  []rune{0x00FB},                 // Case map
+	0x00DC:  []rune{0x00FC},                 // Case map
+	0x00DD:  []rune{0x00FD},                 // Case map
+	0x00DE:  []rune{0x00FE},                 // Case map
+	0x00DF:  []rune{0x0073, 0x0073},         // Case map
+	0x0100:  []rune{0x0101},                 // Case map
+	0x0102:  []rune{0x0103},                 // Case map
+	0x0104:  []rune{0x0105},                 // Case map
+	0x0106:  []rune{0x0107},                 // Case map
+	0x0108:  []rune{0x0109},                 // Case map
+	0x010A:  []rune{0x010B},                 // Case map
+	0x010C:  []rune{0x010D},                 // Case map
+	0x010E:  []rune{0x010F},                 // Case map
+	0x0110:  []rune{0x0111},                 // Case map
+	0x0112:  []rune{0x0113},                 // Case map
+	0x0114:  []rune{0x0115},                 // Case map
+	0x0116:  []rune{0x0117},                 // Case map
+	0x0118:  []rune{0x0119},                 // Case map
+	0x011A:  []rune{0x011B},                 // Case map
+	0x011C:  []rune{0x011D},                 // Case map
+	0x011E:  []rune{0x011F},                 // Case map
+	0x0120:  []rune{0x0121},                 // Case map
+	0x0122:  []rune{0x0123},                 // Case map
+	0x0124:  []rune{0x0125},                 // Case map
+	0x0126:  []rune{0x0127},                 // Case map
+	0x0128:  []rune{0x0129},                 // Case map
+	0x012A:  []rune{0x012B},                 // Case map
+	0x012C:  []rune{0x012D},                 // Case map
+	0x012E:  []rune{0x012F},                 // Case map
+	0x0130:  []rune{0x0069, 0x0307},         // Case map
+	0x0132:  []rune{0x0133},                 // Case map
+	0x0134:  []rune{0x0135},                 // Case map
+	0x0136:  []rune{0x0137},                 // Case map
+	0x0139:  []rune{0x013A},                 // Case map
+	0x013B:  []rune{0x013C},                 // Case map
+	0x013D:  []rune{0x013E},                 // Case map
+	0x013F:  []rune{0x0140},                 // Case map
+	0x0141:  []rune{0x0142},                 // Case map
+	0x0143:  []rune{0x0144},                 // Case map
+	0x0145:  []rune{0x0146},                 // Case map
+	0x0147:  []rune{0x0148},                 // Case map
+	0x0149:  []rune{0x02BC, 0x006E},         // Case map
+	0x014A:  []rune{0x014B},                 // Case map
+	0x014C:  []rune{0x014D},                 // Case map
+	0x014E:  []rune{0x014F},                 // Case map
+	0x0150:  []rune{0x0151},                 // Case map
+	0x0152:  []rune{0x0153},                 // Case map
+	0x0154:  []rune{0x0155},                 // Case map
+	0x0156:  []rune{0x0157},                 // Case map
+	0x0158:  []rune{0x0159},                 // Case map
+	0x015A:  []rune{0x015B},                 // Case map
+	0x015C:  []rune{0x015D},                 // Case map
+	0x015E:  []rune{0x015F},                 // Case map
+	0x0160:  []rune{0x0161},                 // Case map
+	0x0162:  []rune{0x0163},                 // Case map
+	0x0164:  []rune{0x0165},                 // Case map
+	0x0166:  []rune{0x0167},                 // Case map
+	0x0168:  []rune{0x0169},                 // Case map
+	0x016A:  []rune{0x016B},                 // Case map
+	0x016C:  []rune{0x016D},                 // Case map
+	0x016E:  []rune{0x016F},                 // Case map
+	0x0170:  []rune{0x0171},                 // Case map
+	0x0172:  []rune{0x0173},                 // Case map
+	0x0174:  []rune{0x0175},                 // Case map
+	0x0176:  []rune{0x0177},                 // Case map
+	0x0178:  []rune{0x00FF},                 // Case map
+	0x0179:  []rune{0x017A},                 // Case map
+	0x017B:  []rune{0x017C},                 // Case map
+	0x017D:  []rune{0x017E},                 // Case map
+	0x017F:  []rune{0x0073},                 // Case map
+	0x0181:  []rune{0x0253},                 // Case map
+	0x0182:  []rune{0x0183},                 // Case map
+	0x0184:  []rune{0x0185},                 // Case map
+	0x0186:  []rune{0x0254},                 // Case map
+	0x0187:  []rune{0x0188},                 // Case map
+	0x0189:  []rune{0x0256},                 // Case map
+	0x018A:  []rune{0x0257},                 // Case map
+	0x018B:  []rune{0x018C},                 // Case map
+	0x018E:  []rune{0x01DD},                 // Case map
+	0x018F:  []rune{0x0259},                 // Case map
+	0x0190:  []rune{0x025B},                 // Case map
+	0x0191:  []rune{0x0192},                 // Case map
+	0x0193:  []rune{0x0260},                 // Case map
+	0x0194:  []rune{0x0263},                 // Case map
+	0x0196:  []rune{0x0269},                 // Case map
+	0x0197:  []rune{0x0268},                 // Case map
+	0x0198:  []rune{0x0199},                 // Case map
+	0x019C:  []rune{0x026F},                 // Case map
+	0x019D:  []rune{0x0272},                 // Case map
+	0x019F:  []rune{0x0275},                 // Case map
+	0x01A0:  []rune{0x01A1},                 // Case map
+	0x01A2:  []rune{0x01A3},                 // Case map
+	0x01A4:  []rune{0x01A5},                 // Case map
+	0x01A6:  []rune{0x0280},                 // Case map
+	0x01A7:  []rune{0x01A8},                 // Case map
+	0x01A9:  []rune{0x0283},                 // Case map
+	0x01AC:  []rune{0x01AD},                 // Case map
+	0x01AE:  []rune{0x0288},                 // Case map
+	0x01AF:  []rune{0x01B0},                 // Case map
+	0x01B1:  []rune{0x028A},                 // Case map
+	0x01B2:  []rune{0x028B},                 // Case map
+	0x01B3:  []rune{0x01B4},                 // Case map
+	0x01B5:  []rune{0x01B6},                 // Case map
+	0x01B7:  []rune{0x0292},                 // Case map
+	0x01B8:  []rune{0x01B9},                 // Case map
+	0x01BC:  []rune{0x01BD},                 // Case map
+	0x01C4:  []rune{0x01C6},                 // Case map
+	0x01C5:  []rune{0x01C6},                 // Case map
+	0x01C7:  []rune{0x01C9},                 // Case map
+	0x01C8:  []rune{0x01C9},                 // Case map
+	0x01CA:  []rune{0x01CC},                 // Case map
+	0x01CB:  []rune{0x01CC},                 // Case map
+	0x01CD:  []rune{0x01CE},                 // Case map
+	0x01CF:  []rune{0x01D0},                 // Case map
+	0x01D1:  []rune{0x01D2},                 // Case map
+	0x01D3:  []rune{0x01D4},                 // Case map
+	0x01D5:  []rune{0x01D6},                 // Case map
+	0x01D7:  []rune{0x01D8},                 // Case map
+	0x01D9:  []rune{0x01DA},                 // Case map
+	0x01DB:  []rune{0x01DC},                 // Case map
+	0x01DE:  []rune{0x01DF},                 // Case map
+	0x01E0:  []rune{0x01E1},                 // Case map
+	0x01E2:  []rune{0x01E3},                 // Case map
+	0x01E4:  []rune{0x01E5},                 // Case map
+	0x01E6:  []rune{0x01E7},                 // Case map
+	0x01E8:  []rune{0x01E9},                 // Case map
+	0x01EA:  []rune{0x01EB},                 // Case map
+	0x01EC:  []rune{0x01ED},                 // Case map
+	0x01EE:  []rune{0x01EF},                 // Case map
+	0x01F0:  []rune{0x006A, 0x030C},         // Case map
+	0x01F1:  []rune{0x01F3},                 // Case map
+	0x01F2:  []rune{0x01F3},                 // Case map
+	0x01F4:  []rune{0x01F5},                 // Case map
+	0x01F6:  []rune{0x0195},                 // Case map
+	0x01F7:  []rune{0x01BF},                 // Case map
+	0x01F8:  []rune{0x01F9},                 // Case map
+	0x01FA:  []rune{0x01FB},                 // Case map
+	0x01FC:  []rune{0x01FD},                 // Case map
+	0x01FE:  []rune{0x01FF},                 // Case map
+	0x0200:  []rune{0x0201},                 // Case map
+	0x0202:  []rune{0x0203},                 // Case map
+	0x0204:  []rune{0x0205},                 // Case map
+	0x0206:  []rune{0x0207},                 // Case map
+	0x0208:  []rune{0x0209},                 // Case map
+	0x020A:  []rune{0x020B},                 // Case map
+	0x020C:  []rune{0x020D},                 // Case map
+	0x020E:  []rune{0x020F},                 // Case map
+	0x0210:  []rune{0x0211},                 // Case map
+	0x0212:  []rune{0x0213},                 // Case map
+	0x0214:  []rune{0x0215},                 // Case map
+	0x0216:  []rune{0x0217},                 // Case map
+	0x0218:  []rune{0x0219},                 // Case map
+	0x021A:  []rune{0x021B},                 // Case map
+	0x021C:  []rune{0x021D},                 // Case map
+	0x021E:  []rune{0x021F},                 // Case map
+	0x0220:  []rune{0x019E},                 // Case map
+	0x0222:  []rune{0x0223},                 // Case map
+	0x0224:  []rune{0x0225},                 // Case map
+	0x0226:  []rune{0x0227},                 // Case map
+	0x0228:  []rune{0x0229},                 // Case map
+	0x022A:  []rune{0x022B},                 // Case map
+	0x022C:  []rune{0x022D},                 // Case map
+	0x022E:  []rune{0x022F},                 // Case map
+	0x0230:  []rune{0x0231},                 // Case map
+	0x0232:  []rune{0x0233},                 // Case map
+	0x0345:  []rune{0x03B9},                 // Case map
+	0x0386:  []rune{0x03AC},                 // Case map
+	0x0388:  []rune{0x03AD},                 // Case map
+	0x0389:  []rune{0x03AE},                 // Case map
+	0x038A:  []rune{0x03AF},                 // Case map
+	0x038C:  []rune{0x03CC},                 // Case map
+	0x038E:  []rune{0x03CD},                 // Case map
+	0x038F:  []rune{0x03CE},                 // Case map
+	0x0390:  []rune{0x03B9, 0x0308, 0x0301}, // Case map
+	0x0391:  []rune{0x03B1},                 // Case map
+	0x0392:  []rune{0x03B2},                 // Case map
+	0x0393:  []rune{0x03B3},                 // Case map
+	0x0394:  []rune{0x03B4},                 // Case map
+	0x0395:  []rune{0x03B5},                 // Case map
+	0x0396:  []rune{0x03B6},                 // Case map
+	0x0397:  []rune{0x03B7},                 // Case map
+	0x0398:  []rune{0x03B8},                 // Case map
+	0x0399:  []rune{0x03B9},                 // Case map
+	0x039A:  []rune{0x03BA},                 // Case map
+	0x039B:  []rune{0x03BB},                 // Case map
+	0x039C:  []rune{0x03BC},                 // Case map
+	0x039D:  []rune{0x03BD},                 // Case map
+	0x039E:  []rune{0x03BE},                 // Case map
+	0x039F:  []rune{0x03BF},                 // Case map
+	0x03A0:  []rune{0x03C0},                 // Case map
+	0x03A1:  []rune{0x03C1},                 // Case map
+	0x03A3:  []rune{0x03C3},                 // Case map
+	0x03A4:  []rune{0x03C4},                 // Case map
+	0x03A5:  []rune{0x03C5},                 // Case map
+	0x03A6:  []rune{0x03C6},                 // Case map
+	0x03A7:  []rune{0x03C7},                 // Case map
+	0x03A8:  []rune{0x03C8},                 // Case map
+	0x03A9:  []rune{0x03C9},                 // Case map
+	0x03AA:  []rune{0x03CA},                 // Case map
+	0x03AB:  []rune{0x03CB},                 // Case map
+	0x03B0:  []rune{0x03C5, 0x0308, 0x0301}, // Case map
+	0x03C2:  []rune{0x03C3},                 // Case map
+	0x03D0:  []rune{0x03B2},                 // Case map
+	0x03D1:  []rune{0x03B8},                 // Case map
+	0x03D5:  []rune{0x03C6},                 // Case map
+	0x03D6:  []rune{0x03C0},                 // Case map
+	0x03D8:  []rune{0x03D9},                 // Case map
+	0x03DA:  []rune{0x03DB},                 // Case map
+	0x03DC:  []rune{0x03DD},                 // Case map
+	0x03DE:  []rune{0x03DF},                 // Case map
+	0x03E0:  []rune{0x03E1},                 // Case map
+	0x03E2:  []rune{0x03E3},                 // Case map
+	0x03E4:  []rune{0x03E5},                 // Case map
+	0x03E6:  []rune{0x03E7},                 // Case map
+	0x03E8:  []rune{0x03E9},                 // Case map
+	0x03EA:  []rune{0x03EB},                 // Case map
+	0x03EC:  []rune{0x03ED},                 // Case map
+	0x03EE:  []rune{0x03EF},                 // Case map
+	0x03F0:  []rune{0x03BA},                 // Case map
+	0x03F1:  []rune{0x03C1},                 // Case map
+	0x03F2:  []rune{0x03C3},                 // Case map
+	0x03F4:  []rune{0x03B8},                 // Case map
+	0x03F5:  []rune{0x03B5},                 // Case map
+	0x0400:  []rune{0x0450},                 // Case map
+	0x0401:  []rune{0x0451},                 // Case map
+	0x0402:  []rune{0x0452},                 // Case map
+	0x0403:  []rune{0x0453},                 // Case map
+	0x0404:  []rune{0x0454},                 // Case map
+	0x0405:  []rune{0x0455},                 // Case map
+	0x0406:  []rune{0x0456},                 // Case map
+	0x0407:  []rune{0x0457},                 // Case map
+	0x0408:  []rune{0x0458},                 // Case map
+	0x0409:  []rune{0x0459},                 // Case map
+	0x040A:  []rune{0x045A},                 // Case map
+	0x040B:  []rune{0x045B},                 // Case map
+	0x040C:  []rune{0x045C},                 // Case map
+	0x040D:  []rune{0x045D},                 // Case map
+	0x040E:  []rune{0x045E},                 // Case map
+	0x040F:  []rune{0x045F},                 // Case map
+	0x0410:  []rune{0x0430},                 // Case map
+	0x0411:  []rune{0x0431},                 // Case map
+	0x0412:  []rune{0x0432},                 // Case map
+	0x0413:  []rune{0x0433},                 // Case map
+	0x0414:  []rune{0x0434},                 // Case map
+	0x0415:  []rune{0x0435},                 // Case map
+	0x0416:  []rune{0x0436},                 // Case map
+	0x0417:  []rune{0x0437},                 // Case map
+	0x0418:  []rune{0x0438},                 // Case map
+	0x0419:  []rune{0x0439},                 // Case map
+	0x041A:  []rune{0x043A},                 // Case map
+	0x041B:  []rune{0x043B},                 // Case map
+	0x041C:  []rune{0x043C},                 // Case map
+	0x041D:  []rune{0x043D},                 // Case map
+	0x041E:  []rune{0x043E},                 // Case map
+	0x041F:  []rune{0x043F},                 // Case map
+	0x0420:  []rune{0x0440},                 // Case map
+	0x0421:  []rune{0x0441},                 // Case map
+	0x0422:  []rune{0x0442},                 // Case map
+	0x0423:  []rune{0x0443},                 // Case map
+	0x0424:  []rune{0x0444},                 // Case map
+	0x0425:  []rune{0x0445},                 // Case map
+	0x0426:  []rune{0x0446},                 // Case map
+	0x0427:  []rune{0x0447},                 // Case map
+	0x0428:  []rune{0x0448},                 // Case map
+	0x0429:  []rune{0x0449},                 // Case map
+	0x042A:  []rune{0x044A},                 // Case map
+	0x042B:  []rune{0x044B},                 // Case map
+	0x042C:  []rune{0x044C},                 // Case map
+	0x042D:  []rune{0x044D},                 // Case map
+	0x042E:  []rune{0x044E},                 // Case map
+	0x042F:  []rune{0x044F},                 // Case map
+	0x0460:  []rune{0x0461},                 // Case map
+	0x0462:  []rune{0x0463},                 // Case map
+	0x0464:  []rune{0x0465},                 // Case map
+	0x0466:  []rune{0x0467},                 // Case map
+	0x0468:  []rune{0x0469},                 // Case map
+	0x046A:  []rune{0x046B},                 // Case map
+	0x046C:  []rune{0x046D},                 // Case map
+	0x046E:  []rune{0x046F},                 // Case map
+	0x0470:  []rune{0x0471},                 // Case map
+	0x0472:  []rune{0x0473},                 // Case map
+	0x0474:  []rune{0x0475},                 // Case map
+	0x0476:  []rune{0x0477},                 // Case map
+	0x0478:  []rune{0x0479},                 // Case map
+	0x047A:  []rune{0x047B},                 // Case map
+	0x047C:  []rune{0x047D},                 // Case map
+	0x047E:  []rune{0x047F},                 // Case map
+	0x0480:  []rune{0x0481},                 // Case map
+	0x048A:  []rune{0x048B},                 // Case map
+	0x048C:  []rune{0x048D},                 // Case map
+	0x048E:  []rune{0x048F},                 // Case map
+	0x0490:  []rune{0x0491},                 // Case map
+	0x0492:  []rune{0x0493},                 // Case map
+	0x0494:  []rune{0x0495},                 // Case map
+	0x0496:  []rune{0x0497},                 // Case map
+	0x0498:  []rune{0x0499},                 // Case map
+	0x049A:  []rune{0x049B},                 // Case map
+	0x049C:  []rune{0x049D},                 // Case map
+	0x049E:  []rune{0x049F},                 // Case map
+	0x04A0:  []rune{0x04A1},                 // Case map
+	0x04A2:  []rune{0x04A3},                 // Case map
+	0x04A4:  []rune{0x04A5},                 // Case map
+	0x04A6:  []rune{0x04A7},                 // Case map
+	0x04A8:  []rune{0x04A9},                 // Case map
+	0x04AA:  []rune{0x04AB},                 // Case map
+	0x04AC:  []rune{0x04AD},                 // Case map
+	0x04AE:  []rune{0x04AF},                 // Case map
+	0x04B0:  []rune{0x04B1},                 // Case map
+	0x04B2:  []rune{0x04B3},                 // Case map
+	0x04B4:  []rune{0x04B5},                 // Case map
+	0x04B6:  []rune{0x04B7},                 // Case map
+	0x04B8:  []rune{0x04B9},                 // Case map
+	0x04BA:  []rune{0x04BB},                 // Case map
+	0x04BC:  []rune{0x04BD},                 // Case map
+	0x04BE:  []rune{0x04BF},                 // Case map
+	0x04C1:  []rune{0x04C2},                 // Case map
+	0x04C3:  []rune{0x04C4},                 // Case map
+	0x04C5:  []rune{0x04C6},                 // Case map
+	0x04C7:  []rune{0x04C8},                 // Case map
+	0x04C9:  []rune{0x04CA},                 // Case map
+	0x04CB:  []rune{0x04CC},                 // Case map
+	0x04CD:  []rune{0x04CE},                 // Case map
+	0x04D0:  []rune{0x04D1},                 // Case map
+	0x04D2:  []rune{0x04D3},                 // Case map
+	0x04D4:  []rune{0x04D5},                 // Case map
+	0x04D6:  []rune{0x04D7},                 // Case map
+	0x04D8:  []rune{0x04D9},                 // Case map
+	0x04DA:  []rune{0x04DB},                 // Case map
+	0x04DC:  []rune{0x04DD},                 // Case map
+	0x04DE:  []rune{0x04DF},                 // Case map
+	0x04E0:  []rune{0x04E1},                 // Case map
+	0x04E2:  []rune{0x04E3},                 // Case map
+	0x04E4:  []rune{0x04E5},                 // Case map
+	0x04E6:  []rune{0x04E7},                 // Case map
+	0x04E8:  []rune{0x04E9},                 // Case map
+	0x04EA:  []rune{0x04EB},                 // Case map
+	0x04EC:  []rune{0x04ED},                 // Case map
+	0x04EE:  []rune{0x04EF},                 // Case map
+	0x04F0:  []rune{0x04F1},                 // Case map
+	0x04F2:  []rune{0x04F3},                 // Case map
+	0x04F4:  []rune{0x04F5},                 // Case map
+	0x04F8:  []rune{0x04F9},                 // Case map
+	0x0500:  []rune{0x0501},                 // Case map
+	0x0502:  []rune{0x0503},                 // Case map
+	0x0504:  []rune{0x0505},                 // Case map
+	0x0506:  []rune{0x0507},                 // Case map
+	0x0508:  []rune{0x0509},                 // Case map
+	0x050A:  []rune{0x050B},                 // Case map
+	0x050C:  []rune{0x050D},                 // Case map
+	0x050E:  []rune{0x050F},                 // Case map
+	0x0531:  []rune{0x0561},                 // Case map
+	0x0532:  []rune{0x0562},                 // Case map
+	0x0533:  []rune{0x0563},                 // Case map
+	0x0534:  []rune{0x0564},                 // Case map
+	0x0535:  []rune{0x0565},                 // Case map
+	0x0536:  []rune{0x0566},                 // Case map
+	0x0537:  []rune{0x0567},                 // Case map
+	0x0538:  []rune{0x0568},                 // Case map
+	0x0539:  []rune{0x0569},                 // Case map
+	0x053A:  []rune{0x056A},                 // Case map
+	0x053B:  []rune{0x056B},                 // Case map
+	0x053C:  []rune{0x056C},                 // Case map
+	0x053D:  []rune{0x056D},                 // Case map
+	0x053E:  []rune{0x056E},                 // Case map
+	0x053F:  []rune{0x056F},                 // Case map
+	0x0540:  []rune{0x0570},                 // Case map
+	0x0541:  []rune{0x0571},                 // Case map
+	0x0542:  []rune{0x0572},                 // Case map
+	0x0543:  []rune{0x0573},                 // Case map
+	0x0544:  []rune{0x0574},                 // Case map
+	0x0545:  []rune{0x0575},                 // Case map
+	0x0546:  []rune{0x0576},                 // Case map
+	0x0547:  []rune{0x0577},                 // Case map
+	0x0548:  []rune{0x0578},                 // Case map
+	0x0549:  []rune{0x0579},                 // Case map
+	0x054A:  []rune{0x057A},                 // Case map
+	0x054B:  []rune{0x057B},                 // Case map
+	0x054C:  []rune{0x057C},                 // Case map
+	0x054D:  []rune{0x057D},                 // Case map
+	0x054E:  []rune{0x057E},                 // Case map
+	0x054F:  []rune{0x057F},                 // Case map
+	0x0550:  []rune{0x0580},                 // Case map
+	0x0551:  []rune{0x0581},                 // Case map
+	0x0552:  []rune{0x0582},                 // Case map
+	0x0553:  []rune{0x0583},                 // Case map
+	0x0554:  []rune{0x0584},                 // Case map
+	0x0555:  []rune{0x0585},                 // Case map
+	0x0556:  []rune{0x0586},                 // Case map
+	0x0587:  []rune{0x0565, 0x0582},         // Case map
+	0x1E00:  []rune{0x1E01},                 // Case map
+	0x1E02:  []rune{0x1E03},                 // Case map
+	0x1E04:  []rune{0x1E05},                 // Case map
+	0x1E06:  []rune{0x1E07},                 // Case map
+	0x1E08:  []rune{0x1E09},                 // Case map
+	0x1E0A:  []rune{0x1E0B},                 // Case map
+	0x1E0C:  []rune{0x1E0D},                 // Case map
+	0x1E0E:  []rune{0x1E0F},                 // Case map
+	0x1E10:  []rune{0x1E11},                 // Case map
+	0x1E12:  []rune{0x1E13},                 // Case map
+	0x1E14:  []rune{0x1E15},                 // Case map
+	0x1E16:  []rune{0x1E17},                 // Case map
+	0x1E18:  []rune{0x1E19},                 // Case map
+	0x1E1A:  []rune{0x1E1B},                 // Case map
+	0x1E1C:  []rune{0x1E1D},                 // Case map
+	0x1E1E:  []rune{0x1E1F},                 // Case map
+	0x1E20:  []rune{0x1E21},                 // Case map
+	0x1E22:  []rune{0x1E23},                 // Case map
+	0x1E24:  []rune{0x1E25},                 // Case map
+	0x1E26:  []rune{0x1E27},                 // Case map
+	0x1E28:  []rune{0x1E29},                 // Case map
+	0x1E2A:  []rune{0x1E2B},                 // Case map
+	0x1E2C:  []rune{0x1E2D},                 // Case map
+	0x1E2E:  []rune{0x1E2F},                 // Case map
+	0x1E30:  []rune{0x1E31},                 // Case map
+	0x1E32:  []rune{0x1E33},                 // Case map
+	0x1E34:  []rune{0x1E35},                 // Case map
+	0x1E36:  []rune{0x1E37},                 // Case map
+	0x1E38:  []rune{0x1E39},                 // Case map
+	0x1E3A:  []rune{0x1E3B},                 // Case map
+	0x1E3C:  []rune{0x1E3D},                 // Case map
+	0x1E3E:  []rune{0x1E3F},                 // Case map
+	0x1E40:  []rune{0x1E41},                 // Case map
+	0x1E42:  []rune{0x1E43},                 // Case map
+	0x1E44:  []rune{0x1E45},                 // Case map
+	0x1E46:  []rune{0x1E47},                 // Case map
+	0x1E48:  []rune{0x1E49},                 // Case map
+	0x1E4A:  []rune{0x1E4B},                 // Case map
+	0x1E4C:  []rune{0x1E4D},                 // Case map
+	0x1E4E:  []rune{0x1E4F},                 // Case map
+	0x1E50:  []rune{0x1E51},                 // Case map
+	0x1E52:  []rune{0x1E53},                 // Case map
+	0x1E54:  []rune{0x1E55},                 // Case map
+	0x1E56:  []rune{0x1E57},                 // Case map
+	0x1E58:  []rune{0x1E59},                 // Case map
+	0x1E5A:  []rune{0x1E5B},                 // Case map
+	0x1E5C:  []rune{0x1E5D},                 // Case map
+	0x1E5E:  []rune{0x1E5F},                 // Case map
+	0x1E60:  []rune{0x1E61},                 // Case map
+	0x1E62:  []rune{0x1E63},                 // Case map
+	0x1E64:  []rune{0x1E65},                 // Case map
+	0x1E66:  []rune{0x1E67},                 // Case map
+	0x1E68:  []rune{0x1E69},                 // Case map
+	0x1E6A:  []rune{0x1E6B},                 // Case map
+	0x1E6C:  []rune{0x1E6D},                 // Case map
+	0x1E6E:  []rune{0x1E6F},                 // Case map
+	0x1E70:  []rune{0x1E71},                 // Case map
+	0x1E72:  []rune{0x1E73},                 // Case map
+	0x1E74:  []rune{0x1E75},                 // Case map
+	0x1E76:  []rune{0x1E77},                 // Case map
+	0x1E78:  []rune{0x1E79},                 // Case map
+	0x1E7A:  []rune{0x1E7B},                 // Case map
+	0x1E7C:  []rune{0x1E7D},                 // Case map
+	0x1E7E:  []rune{0x1E7F},                 // Case map
+	0x1E80:  []rune{0x1E81},                 // Case map
+	0x1E82:  []rune{0x1E83},                 // Case map
+	0x1E84:  []rune{0x1E85},                 // Case map
+	0x1E86:  []rune{0x1E87},                 // Case map
+	0x1E88:  []rune{0x1E89},                 // Case map
+	0x1E8A:  []rune{0x1E8B},                 // Case map
+	0x1E8C:  []rune{0x1E8D},                 // Case map
+	0x1E8E:  []rune{0x1E8F},                 // Case map
+	0x1E90:  []rune{0x1E91},                 // Case map
+	0x1E92:  []rune{0x1E93},                 // Case map
+	0x1E94:  []rune{0x1E95},                 // Case map
+	0x1E96:  []rune{0x0068, 0x0331},         // Case map
+	0x1E97:  []rune{0x0074, 0x0308},         // Case map
+	0x1E98:  []rune{0x0077, 0x030A},         // Case map
+	0x1E99:  []rune{0x0079, 0x030A},         // Case map
+	0x1E9A:  []rune{0x0061, 0x02BE},         // Case map
+	0x1E9B:  []rune{0x1E61},                 // Case map
+	0x1EA0:  []rune{0x1EA1},                 // Case map
+	0x1EA2:  []rune{0x1EA3},                 // Case map
+	0x1EA4:  []rune{0x1EA5},                 // Case map
+	0x1EA6:  []rune{0x1EA7},                 // Case map
+	0x1EA8:  []rune{0x1EA9},                 // Case map
+	0x1EAA:  []rune{0x1EAB},                 // Case map
+	0x1EAC:  []rune{0x1EAD},                 // Case map
+	0x1EAE:  []rune{0x1EAF},                 // Case map
+	0x1EB0:  []rune{0x1EB1},                 // Case map
+	0x1EB2:  []rune{0x1EB3},                 // Case map
+	0x1EB4:  []rune{0x1EB5},                 // Case map
+	0x1EB6:  []rune{0x1EB7},                 // Case map
+	0x1EB8:  []rune{0x1EB9},                 // Case map
+	0x1EBA:  []rune{0x1EBB},                 // Case map
+	0x1EBC:  []rune{0x1EBD},                 // Case map
+	0x1EBE:  []rune{0x1EBF},                 // Case map
+	0x1EC0:  []rune{0x1EC1},                 // Case map
+	0x1EC2:  []rune{0x1EC3},                 // Case map
+	0x1EC4:  []rune{0x1EC5},                 // Case map
+	0x1EC6:  []rune{0x1EC7},                 // Case map
+	0x1EC8:  []rune{0x1EC9},                 // Case map
+	0x1ECA:  []rune{0x1ECB},                 // Case map
+	0x1ECC:  []rune{0x1ECD},                 // Case map
+	0x1ECE:  []rune{0x1ECF},                 // Case map
+	0x1ED0:  []rune{0x1ED1},                 // Case map
+	0x1ED2:  []rune{0x1ED3},                 // Case map
+	0x1ED4:  []rune{0x1ED5},                 // Case map
+	0x1ED6:  []rune{0x1ED7},                 // Case map
+	0x1ED8:  []rune{0x1ED9},                 // Case map
+	0x1EDA:  []rune{0x1EDB},                 // Case map
+	0x1EDC:  []rune{0x1EDD},                 // Case map
+	0x1EDE:  []rune{0x1EDF},                 // Case map
+	0x1EE0:  []rune{0x1EE1},                 // Case map
+	0x1EE2:  []rune{0x1EE3},                 // Case map
+	0x1EE4:  []rune{0x1EE5},                 // Case map
+	0x1EE6:  []rune{0x1EE7},                 // Case map
+	0x1EE8:  []rune{0x1EE9},                 // Case map
+	0x1EEA:  []rune{0x1EEB},                 // Case map
+	0x1EEC:  []rune{0x1EED},                 // Case map
+	0x1EEE:  []rune{0x1EEF},                 // Case map
+	0x1EF0:  []rune{0x1EF1},                 // Case map
+	0x1EF2:  []rune{0x1EF3},                 // Case map
+	0x1EF4:  []rune{0x1EF5},                 // Case map
+	0x1EF6:  []rune{0x1EF7},                 // Case map
+	0x1EF8:  []rune{0x1EF9},                 // Case map
+	0x1F08:  []rune{0x1F00},                 // Case map
+	0x1F09:  []rune{0x1F01},                 // Case map
+	0x1F0A:  []rune{0x1F02},                 // Case map
+	0x1F0B:  []rune{0x1F03},                 // Case map
+	0x1F0C:  []rune{0x1F04},                 // Case map
+	0x1F0D:  []rune{0x1F05},                 // Case map
+	0x1F0E:  []rune{0x1F06},                 // Case map
+	0x1F0F:  []rune{0x1F07},                 // Case map
+	0x1F18:  []rune{0x1F10},                 // Case map
+	0x1F19:  []rune{0x1F11},                 // Case map
+	0x1F1A:  []rune{0x1F12},                 // Case map
+	0x1F1B:  []rune{0x1F13},                 // Case map
+	0x1F1C:  []rune{0x1F14},                 // Case map
+	0x1F1D:  []rune{0x1F15},                 // Case map
+	0x1F28:  []rune{0x1F20},                 // Case map
+	0x1F29:  []rune{0x1F21},                 // Case map
+	0x1F2A:  []rune{0x1F22},                 // Case map
+	0x1F2B:  []rune{0x1F23},                 // Case map
+	0x1F2C:  []rune{0x1F24},                 // Case map
+	0x1F2D:  []rune{0x1F25},                 // Case map
+	0x1F2E:  []rune{0x1F26},                 // Case map
+	0x1F2F:  []rune{0x1F27},                 // Case map
+	0x1F38:  []rune{0x1F30},                 // Case map
+	0x1F39:  []rune{0x1F31},                 // Case map
+	0x1F3A:  []rune{0x1F32},                 // Case map
+	0x1F3B:  []rune{0x1F33},                 // Case map
+	0x1F3C:  []rune{0x1F34},                 // Case map
+	0x1F3D:  []rune{0x1F35},                 // Case map
+	0x1F3E:  []rune{0x1F36},                 // Case map
+	0x1F3F:  []rune{0x1F37},                 // Case map
+	0x1F48:  []rune{0x1F40},                 // Case map
+	0x1F49:  []rune{0x1F41},                 // Case map
+	0x1F4A:  []rune{0x1F42},                 // Case map
+	0x1F4B:  []rune{0x1F43},                 // Case map
+	0x1F4C:  []rune{0x1F44},                 // Case map
+	0x1F4D:  []rune{0x1F45},                 // Case map
+	0x1F50:  []rune{0x03C5, 0x0313},         // Case map
+	0x1F52:  []rune{0x03C5, 0x0313, 0x0300}, // Case map
+	0x1F54:  []rune{0x03C5, 0x0313, 0x0301}, // Case map
+	0x1F56:  []rune{0x03C5, 0x0313, 0x0342}, // Case map
+	0x1F59:  []rune{0x1F51},                 // Case map
+	0x1F5B:  []rune{0x1F53},                 // Case map
+	0x1F5D:  []rune{0x1F55},                 // Case map
+	0x1F5F:  []rune{0x1F57},                 // Case map
+	0x1F68:  []rune{0x1F60},                 // Case map
+	0x1F69:  []rune{0x1F61},                 // Case map
+	0x1F6A:  []rune{0x1F62},                 // Case map
+	0x1F6B:  []rune{0x1F63},                 // Case map
+	0x1F6C:  []rune{0x1F64},                 // Case map
+	0x1F6D:  []rune{0x1F65},                 // Case map
+	0x1F6E:  []rune{0x1F66},                 // Case map
+	0x1F6F:  []rune{0x1F67},                 // Case map
+	0x1F80:  []rune{0x1F00, 0x03B9},         // Case map
+	0x1F81:  []rune{0x1F01, 0x03B9},         // Case map
+	0x1F82:  []rune{0x1F02, 0x03B9},         // Case map
+	0x1F83:  []rune{0x1F03, 0x03B9},         // Case map
+	0x1F84:  []rune{0x1F04, 0x03B9},         // Case map
+	0x1F85:  []rune{0x1F05, 0x03B9},         // Case map
+	0x1F86:  []rune{0x1F06, 0x03B9},         // Case map
+	0x1F87:  []rune{0x1F07, 0x03B9},         // Case map
+	0x1F88:  []rune{0x1F00, 0x03B9},         // Case map
+	0x1F89:  []rune{0x1F01, 0x03B9},         // Case map
+	0x1F8A:  []rune{0x1F02, 0x03B9},         // Case map
+	0x1F8B:  []rune{0x1F03, 0x03B9},         // Case map
+	0x1F8C:  []rune{0x1F04, 0x03B9},         // Case map
+	0x1F8D:  []rune{0x1F05, 0x03B9},         // Case map
+	0x1F8E:  []rune{0x1F06, 0x03B9},         // Case map
+	0x1F8F:  []rune{0x1F07, 0x03B9},         // Case map
+	0x1F90:  []rune{0x1F20, 0x03B9},         // Case map
+	0x1F91:  []rune{0x1F21, 0x03B9},         // Case map
+	0x1F92:  []rune{0x1F22, 0x03B9},         // Case map
+	0x1F93:  []rune{0x1F23, 0x03B9},         // Case map
+	0x1F94:  []rune{0x1F24, 0x03B9},         // Case map
+	0x1F95:  []rune{0x1F25, 0x03B9},         // Case map
+	0x1F96:  []rune{0x1F26, 0x03B9},         // Case map
+	0x1F97:  []rune{0x1F27, 0x03B9},         // Case map
+	0x1F98:  []rune{0x1F20, 0x03B9},         // Case map
+	0x1F99:  []rune{0x1F21, 0x03B9},         // Case map
+	0x1F9A:  []rune{0x1F22, 0x03B9},         // Case map
+	0x1F9B:  []rune{0x1F23, 0x03B9},         // Case map
+	0x1F9C:  []rune{0x1F24, 0x03B9},         // Case map
+	0x1F9D:  []rune{0x1F25, 0x03B9},         // Case map
+	0x1F9E:  []rune{0x1F26, 0x03B9},         // Case map
+	0x1F9F:  []rune{0x1F27, 0x03B9},         // Case map
+	0x1FA0:  []rune{0x1F60, 0x03B9},         // Case map
+	0x1FA1:  []rune{0x1F61, 0x03B9},         // Case map
+	0x1FA2:  []rune{0x1F62, 0x03B9},         // Case map
+	0x1FA3:  []rune{0x1F63, 0x03B9},         // Case map
+	0x1FA4:  []rune{0x1F64, 0x03B9},         // Case map
+	0x1FA5:  []rune{0x1F65, 0x03B9},         // Case map
+	0x1FA6:  []rune{0x1F66, 0x03B9},         // Case map
+	0x1FA7:  []rune{0x1F67, 0x03B9},         // Case map
+	0x1FA8:  []rune{0x1F60, 0x03B9},         // Case map
+	0x1FA9:  []rune{0x1F61, 0x03B9},         // Case map
+	0x1FAA:  []rune{0x1F62, 0x03B9},         // Case map
+	0x1FAB:  []rune{0x1F63, 0x03B9},         // Case map
+	0x1FAC:  []rune{0x1F64, 0x03B9},         // Case map
+	0x1FAD:  []rune{0x1F65, 0x03B9},         // Case map
+	0x1FAE:  []rune{0x1F66, 0x03B9},         // Case map
+	0x1FAF:  []rune{0x1F67, 0x03B9},         // Case map
+	0x1FB2:  []rune{0x1F70, 0x03B9},         // Case map
+	0x1FB3:  []rune{0x03B1, 0x03B9},         // Case map
+	0x1FB4:  []rune{0x03AC, 0x03B9},         // Case map
+	0x1FB6:  []rune{0x03B1, 0x0342},         // Case map
+	0x1FB7:  []rune{0x03B1, 0x0342, 0x03B9}, // Case map
+	0x1FB8:  []rune{0x1FB0},                 // Case map
+	0x1FB9:  []rune{0x1FB1},                 // Case map
+	0x1FBA:  []rune{0x1F70},                 // Case map
+	0x1FBB:  []rune{0x1F71},                 // Case map
+	0x1FBC:  []rune{0x03B1, 0x03B9},         // Case map
+	0x1FBE:  []rune{0x03B9},                 // Case map
+	0x1FC2:  []rune{0x1F74, 0x03B9},         // Case map
+	0x1FC3:  []rune{0x03B7, 0x03B9},         // Case map
+	0x1FC4:  []rune{0x03AE, 0x03B9},         // Case map
+	0x1FC6:  []rune{0x03B7, 0x0342},         // Case map
+	0x1FC7:  []rune{0x03B7, 0x0342, 0x03B9}, // Case map
+	0x1FC8:  []rune{0x1F72},                 // Case map
+	0x1FC9:  []rune{0x1F73},                 // Case map
+	0x1FCA:  []rune{0x1F74},                 // Case map
+	0x1FCB:  []rune{0x1F75},                 // Case map
+	0x1FCC:  []rune{0x03B7, 0x03B9},         // Case map
+	0x1FD2:  []rune{0x03B9, 0x0308, 0x0300}, // Case map
+	0x1FD3:  []rune{0x03B9, 0x0308, 0x0301}, // Case map
+	0x1FD6:  []rune{0x03B9, 0x0342},         // Case map
+	0x1FD7:  []rune{0x03B9, 0x0308, 0x0342}, // Case map
+	0x1FD8:  []rune{0x1FD0},                 // Case map
+	0x1FD9:  []rune{0x1FD1},                 // Case map
+	0x1FDA:  []rune{0x1F76},                 // Case map
+	0x1FDB:  []rune{0x1F77},                 // Case map
+	0x1FE2:  []rune{0x03C5, 0x0308, 0x0300}, // Case map
+	0x1FE3:  []rune{0x03C5, 0x0308, 0x0301}, // Case map
+	0x1FE4:  []rune{0x03C1, 0x0313},         // Case map
+	0x1FE6:  []rune{0x03C5, 0x0342},         // Case map
+	0x1FE7:  []rune{0x03C5, 0x0308, 0x0342}, // Case map
+	0x1FE8:  []rune{0x1FE0},                 // Case map
+	0x1FE9:  []rune{0x1FE1},                 // Case map
+	0x1FEA:  []rune{0x1F7A},                 // Case map
+	0x1FEB:  []rune{0x1F7B},                 // Case map
+	0x1FEC:  []rune{0x1FE5},                 // Case map
+	0x1FF2:  []rune{0x1F7C, 0x03B9},         // Case map
+	0x1FF3:  []rune{0x03C9, 0x03B9},         // Case map
+	0x1FF4:  []rune{0x03CE, 0x03B9},         // Case map
+	0x1FF6:  []rune{0x03C9, 0x0342},         // Case map
+	0x1FF7:  []rune{0x03C9, 0x0342, 0x03B9}, // Case map
+	0x1FF8:  []rune{0x1F78},                 // Case map
+	0x1FF9:  []rune{0x1F79},                 // Case map
+	0x1FFA:  []rune{0x1F7C},                 // Case map
+	0x1FFB:  []rune{0x1F7D},                 // Case map
+	0x1FFC:  []rune{0x03C9, 0x03B9},         // Case map
+	0x2126:  []rune{0x03C9},                 // Case map
+	0x212A:  []rune{0x006B},                 // Case map
+	0x212B:  []rune{0x00E5},                 // Case map
+	0x2160:  []rune{0x2170},                 // Case map
+	0x2161:  []rune{0x2171},                 // Case map
+	0x2162:  []rune{0x2172},                 // Case map
+	0x2163:  []rune{0x2173},                 // Case map
+	0x2164:  []rune{0x2174},                 // Case map
+	0x2165:  []rune{0x2175},                 // Case map
+	0x2166:  []rune{0x2176},                 // Case map
+	0x2167:  []rune{0x2177},                 // Case map
+	0x2168:  []rune{0x2178},                 // Case map
+	0x2169:  []rune{0x2179},                 // Case map
+	0x216A:  []rune{0x217A},                 // Case map
+	0x216B:  []rune{0x217B},                 // Case map
+	0x216C:  []rune{0x217C},                 // Case map
+	0x216D:  []rune{0x217D},                 // Case map
+	0x216E:  []rune{0x217E},                 // Case map
+	0x216F:  []rune{0x217F},                 // Case map
+	0x24B6:  []rune{0x24D0},                 // Case map
+	0x24B7:  []rune{0x24D1},                 // Case map
+	0x24B8:  []rune{0x24D2},                 // Case map
+	0x24B9:  []rune{0x24D3},                 // Case map
+	0x24BA:  []rune{0x24D4},                 // Case map
+	0x24BB:  []rune{0x24D5},                 // Case map
+	0x24BC:  []rune{0x24D6},                 // Case map
+	0x24BD:  []rune{0x24D7},                 // Case map
+	0x24BE:  []rune{0x24D8},                 // Case map
+	0x24BF:  []rune{0x24D9},                 // Case map
+	0x24C0:  []rune{0x24DA},                 // Case map
+	0x24C1:  []rune{0x24DB},                 // Case map
+	0x24C2:  []rune{0x24DC},                 // Case map
+	0x24C3:  []rune{0x24DD},                 // Case map
+	0x24C4:  []rune{0x24DE},                 // Case map
+	0x24C5:  []rune{0x24DF},                 // Case map
+	0x24C6:  []rune{0x24E0},                 // Case map
+	0x24C7:  []rune{0x24E1},                 // Case map
+	0x24C8:  []rune{0x24E2},                 // Case map
+	0x24C9:  []rune{0x24E3},                 // Case map
+	0x24CA:  []rune{0x24E4},                 // Case map
+	0x24CB:  []rune{0x24E5},                 // Case map
+	0x24CC:  []rune{0x24E6},                 // Case map
+	0x24CD:  []rune{0x24E7},                 // Case map
+	0x24CE:  []rune{0x24E8},                 // Case map
+	0x24CF:  []rune{0x24E9},                 // Case map
+	0xFB00:  []rune{0x0066, 0x0066},         // Case map
+	0xFB01:  []rune{0x0066, 0x0069},         // Case map
+	0xFB02:  []rune{0x0066, 0x006C},         // Case map
+	0xFB03:  []rune{0x0066, 0x0066, 0x0069}, // Case map
+	0xFB04:  []rune{0x0066, 0x0066, 0x006C}, // Case map
+	0xFB05:  []rune{0x0073, 0x0074},         // Case map
+	0xFB06:  []rune{0x0073, 0x0074},         // Case map
+	0xFB13:  []rune{0x0574, 0x0576},         // Case map
+	0xFB14:  []rune{0x0574, 0x0565},         // Case map
+	0xFB15:  []rune{0x0574, 0x056B},         // Case map
+	0xFB16:  []rune{0x057E, 0x0576},         // Case map
+	0xFB17:  []rune{0x0574, 0x056D},         // Case map
+	0xFF21:  []rune{0xFF41},                 // Case map
+	0xFF22:  []rune{0xFF42},                 // Case map
+	0xFF23:  []rune{0xFF43},                 // Case map
+	0xFF24:  []rune{0xFF44},                 // Case map
+	0xFF25:  []rune{0xFF45},                 // Case map
+	0xFF26:  []rune{0xFF46},                 // Case map
+	0xFF27:  []rune{0xFF47},                 // Case map
+	0xFF28:  []rune{0xFF48},                 // Case map
+	0xFF29:  []rune{0xFF49},                 // Case map
+	0xFF2A:  []rune{0xFF4A},                 // Case map
+	0xFF2B:  []rune{0xFF4B},                 // Case map
+	0xFF2C:  []rune{0xFF4C},                 // Case map
+	0xFF2D:  []rune{0xFF4D},                 // Case map
+	0xFF2E:  []rune{0xFF4E},                 // Case map
+	0xFF2F:  []rune{0xFF4F},                 // Case map
+	0xFF30:  []rune{0xFF50},                 // Case map
+	0xFF31:  []rune{0xFF51},                 // Case map
+	0xFF32:  []rune{0xFF52},                 // Case map
+	0xFF33:  []rune{0xFF53},                 // Case map
+	0xFF34:  []rune{0xFF54},                 // Case map
+	0xFF35:  []rune{0xFF55},                 // Case map
+	0xFF36:  []rune{0xFF56},                 // Case map
+	0xFF37:  []rune{0xFF57},                 // Case map
+	0xFF38:  []rune{0xFF58},                 // Case map
+	0xFF39:  []rune{0xFF59},                 // Case map
+	0xFF3A:  []rune{0xFF5A},                 // Case map
+	0x10400: []rune{0x10428},                // Case map
+	0x10401: []rune{0x10429},                // Case map
+	0x10402: []rune{0x1042A},                // Case map
+	0x10403: []rune{0x1042B},                // Case map
+	0x10404: []rune{0x1042C},                // Case map
+	0x10405: []rune{0x1042D},                // Case map
+	0x10406: []rune{0x1042E},                // Case map
+	0x10407: []rune{0x1042F},                // Case map
+	0x10408: []rune{0x10430},                // Case map
+	0x10409: []rune{0x10431},                // Case map
+	0x1040A: []rune{0x10432},                // Case map
+	0x1040B: []rune{0x10433},                // Case map
+	0x1040C: []rune{0x10434},                // Case map
+	0x1040D: []rune{0x10435},                // Case map
+	0x1040E: []rune{0x10436},                // Case map
+	0x1040F: []rune{0x10437},                // Case map
+	0x10410: []rune{0x10438},                // Case map
+	0x10411: []rune{0x10439},                // Case map
+	0x10412: []rune{0x1043A},                // Case map
+	0x10413: []rune{0x1043B},                // Case map
+	0x10414: []rune{0x1043C},                // Case map
+	0x10415: []rune{0x1043D},                // Case map
+	0x10416: []rune{0x1043E},                // Case map
+	0x10417: []rune{0x1043F},                // Case map
+	0x10418: []rune{0x10440},                // Case map
+	0x10419: []rune{0x10441},                // Case map
+	0x1041A: []rune{0x10442},                // Case map
+	0x1041B: []rune{0x10443},                // Case map
+	0x1041C: []rune{0x10444},                // Case map
+	0x1041D: []rune{0x10445},                // Case map
+	0x1041E: []rune{0x10446},                // Case map
+	0x1041F: []rune{0x10447},                // Case map
+	0x10420: []rune{0x10448},                // Case map
+	0x10421: []rune{0x10449},                // Case map
+	0x10422: []rune{0x1044A},                // Case map
+	0x10423: []rune{0x1044B},                // Case map
+	0x10424: []rune{0x1044C},                // Case map
+	0x10425: []rune{0x1044D},                // Case map
+}
+
+// TableB3 represents RFC-3454 Table B.3.
+var TableB3 Mapping = tableB3
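+
+// Illustrative sketch only, not part of the generated tables: applying a
+// Mapping such as TableB3 to case-fold a string. This assumes Mapping's
+// underlying type is map[rune][]rune, as the []rune{...} literals above
+// suggest; the foldWith name is a hypothetical helper, not an API of
+// this package.
+func foldWith(m map[rune][]rune, s string) string {
+	out := make([]rune, 0, len(s))
+	for _, r := range s {
+		if repl, ok := m[r]; ok {
+			// A single rune may fold to several runes, e.g.
+			// 0xFB01 (LATIN SMALL LIGATURE FI) -> "fi".
+			out = append(out, repl...)
+		} else {
+			out = append(out, r)
+		}
+	}
+	return string(out)
+}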
+
+var tableC1_1 = Set{
+	RuneRange{0x0020, 0x0020}, // SPACE
+}
+
+// TableC1_1 represents RFC-3454 Table C.1.1.
+var TableC1_1 Set = tableC1_1
+
+var tableC1_2 = Set{
+	RuneRange{0x00A0, 0x00A0}, // NO-BREAK SPACE
+	RuneRange{0x1680, 0x1680}, // OGHAM SPACE MARK
+	RuneRange{0x2000, 0x2000}, // EN QUAD
+	RuneRange{0x2001, 0x2001}, // EM QUAD
+	RuneRange{0x2002, 0x2002}, // EN SPACE
+	RuneRange{0x2003, 0x2003}, // EM SPACE
+	RuneRange{0x2004, 0x2004}, // THREE-PER-EM SPACE
+	RuneRange{0x2005, 0x2005}, // FOUR-PER-EM SPACE
+	RuneRange{0x2006, 0x2006}, // SIX-PER-EM SPACE
+	RuneRange{0x2007, 0x2007}, // FIGURE SPACE
+	RuneRange{0x2008, 0x2008}, // PUNCTUATION SPACE
+	RuneRange{0x2009, 0x2009}, // THIN SPACE
+	RuneRange{0x200A, 0x200A}, // HAIR SPACE
+	RuneRange{0x200B, 0x200B}, // ZERO WIDTH SPACE
+	RuneRange{0x202F, 0x202F}, // NARROW NO-BREAK SPACE
+	RuneRange{0x205F, 0x205F}, // MEDIUM MATHEMATICAL SPACE
+	RuneRange{0x3000, 0x3000}, // IDEOGRAPHIC SPACE
+}
+
+// TableC1_2 represents RFC-3454 Table C.1.2.
+var TableC1_2 Set = tableC1_2
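+
+// Illustrative sketch only, not generated: membership in one of these
+// tables is an inclusive-range check. This assumes each RuneRange
+// literal above denotes a closed interval [lo, hi]; the runeInterval
+// type and inSpan helper are hypothetical stand-ins rather than this
+// package's API. Since the ranges are listed in ascending order, a
+// binary search would also work; a linear scan keeps the sketch short.
+type runeInterval struct{ lo, hi rune }
+
+func inSpan(spans []runeInterval, r rune) bool {
+	for _, span := range spans {
+		if span.lo <= r && r <= span.hi {
+			// e.g. 0x2003 EM SPACE falls in the
+			// single-rune interval {0x2003, 0x2003}.
+			return true
+		}
+	}
+	return false
+}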
+
+var tableC2_1 = Set{
+	RuneRange{0x0000, 0x001F}, // [CONTROL CHARACTERS]
+	RuneRange{0x007F, 0x007F}, // DELETE
+}
+
+// TableC2_1 represents RFC-3454 Table C.2.1.
+var TableC2_1 Set = tableC2_1
+
+var tableC2_2 = Set{
+	RuneRange{0x0080, 0x009F},   // [CONTROL CHARACTERS]
+	RuneRange{0x06DD, 0x06DD},   // ARABIC END OF AYAH
+	RuneRange{0x070F, 0x070F},   // SYRIAC ABBREVIATION MARK
+	RuneRange{0x180E, 0x180E},   // MONGOLIAN VOWEL SEPARATOR
+	RuneRange{0x200C, 0x200C},   // ZERO WIDTH NON-JOINER
+	RuneRange{0x200D, 0x200D},   // ZERO WIDTH JOINER
+	RuneRange{0x2028, 0x2028},   // LINE SEPARATOR
+	RuneRange{0x2029, 0x2029},   // PARAGRAPH SEPARATOR
+	RuneRange{0x2060, 0x2060},   // WORD JOINER
+	RuneRange{0x2061, 0x2061},   // FUNCTION APPLICATION
+	RuneRange{0x2062, 0x2062},   // INVISIBLE TIMES
+	RuneRange{0x2063, 0x2063},   // INVISIBLE SEPARATOR
+	RuneRange{0x206A, 0x206F},   // [CONTROL CHARACTERS]
+	RuneRange{0xFEFF, 0xFEFF},   // ZERO WIDTH NO-BREAK SPACE
+	RuneRange{0xFFF9, 0xFFFC},   // [CONTROL CHARACTERS]
+	RuneRange{0x1D173, 0x1D17A}, // [MUSICAL CONTROL CHARACTERS]
+}
+
+// TableC2_2 represents RFC-3454 Table C.2.2.
+var TableC2_2 Set = tableC2_2
+
+var tableC3 = Set{
+	RuneRange{0xE000, 0xF8FF},     // [PRIVATE USE, PLANE 0]
+	RuneRange{0xF0000, 0xFFFFD},   // [PRIVATE USE, PLANE 15]
+	RuneRange{0x100000, 0x10FFFD}, // [PRIVATE USE, PLANE 16]
+}
+
+// TableC3 represents RFC-3454 Table C.3.
+var TableC3 Set = tableC3
+
+var tableC4 = Set{
+	RuneRange{0xFDD0, 0xFDEF},     // [NONCHARACTER CODE POINTS]
+	RuneRange{0xFFFE, 0xFFFF},     // [NONCHARACTER CODE POINTS]
+	RuneRange{0x1FFFE, 0x1FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x2FFFE, 0x2FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x3FFFE, 0x3FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x4FFFE, 0x4FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x5FFFE, 0x5FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x6FFFE, 0x6FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x7FFFE, 0x7FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x8FFFE, 0x8FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x9FFFE, 0x9FFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xAFFFE, 0xAFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xBFFFE, 0xBFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xCFFFE, 0xCFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xDFFFE, 0xDFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xEFFFE, 0xEFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0xFFFFE, 0xFFFFF},   // [NONCHARACTER CODE POINTS]
+	RuneRange{0x10FFFE, 0x10FFFF}, // [NONCHARACTER CODE POINTS]
+}
+
+// TableC4 represents RFC-3454 Table C.4.
+var TableC4 Set = tableC4
+
+var tableC5 = Set{
+	RuneRange{0xD800, 0xDFFF}, // [SURROGATE CODES]
+}
+
+// TableC5 represents RFC-3454 Table C.5.
+var TableC5 Set = tableC5
+
+var tableC6 = Set{
+	RuneRange{0xFFF9, 0xFFF9}, // INTERLINEAR ANNOTATION ANCHOR
+	RuneRange{0xFFFA, 0xFFFA}, // INTERLINEAR ANNOTATION SEPARATOR
+	RuneRange{0xFFFB, 0xFFFB}, // INTERLINEAR ANNOTATION TERMINATOR
+	RuneRange{0xFFFC, 0xFFFC}, // OBJECT REPLACEMENT CHARACTER
+	RuneRange{0xFFFD, 0xFFFD}, // REPLACEMENT CHARACTER
+}
+
+// TableC6 represents RFC-3454 Table C.6.
+var TableC6 Set = tableC6
+
+var tableC7 = Set{
+	RuneRange{0x2FF0, 0x2FFB}, // [IDEOGRAPHIC DESCRIPTION CHARACTERS]
+}
+
+// TableC7 represents RFC-3454 Table C.7.
+var TableC7 Set = tableC7
+
+var tableC8 = Set{
+	RuneRange{0x0340, 0x0340}, // COMBINING GRAVE TONE MARK
+	RuneRange{0x0341, 0x0341}, // COMBINING ACUTE TONE MARK
+	RuneRange{0x200E, 0x200E}, // LEFT-TO-RIGHT MARK
+	RuneRange{0x200F, 0x200F}, // RIGHT-TO-LEFT MARK
+	RuneRange{0x202A, 0x202A}, // LEFT-TO-RIGHT EMBEDDING
+	RuneRange{0x202B, 0x202B}, // RIGHT-TO-LEFT EMBEDDING
+	RuneRange{0x202C, 0x202C}, // POP DIRECTIONAL FORMATTING
+	RuneRange{0x202D, 0x202D}, // LEFT-TO-RIGHT OVERRIDE
+	RuneRange{0x202E, 0x202E}, // RIGHT-TO-LEFT OVERRIDE
+	RuneRange{0x206A, 0x206A}, // INHIBIT SYMMETRIC SWAPPING
+	RuneRange{0x206B, 0x206B}, // ACTIVATE SYMMETRIC SWAPPING
+	RuneRange{0x206C, 0x206C}, // INHIBIT ARABIC FORM SHAPING
+	RuneRange{0x206D, 0x206D}, // ACTIVATE ARABIC FORM SHAPING
+	RuneRange{0x206E, 0x206E}, // NATIONAL DIGIT SHAPES
+	RuneRange{0x206F, 0x206F}, // NOMINAL DIGIT SHAPES
+}
+
+// TableC8 represents RFC-3454 Table C.8.
+var TableC8 Set = tableC8
+
+var tableC9 = Set{
+	RuneRange{0xE0001, 0xE0001}, // LANGUAGE TAG
+	RuneRange{0xE0020, 0xE007F}, // [TAGGING CHARACTERS]
+}
+
+// TableC9 represents RFC-3454 Table C.9.
+var TableC9 Set = tableC9
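+
+// Illustrative sketch only, not generated: an RFC 3454 profile typically
+// prohibits the union of several of the C.* tables above. Using the
+// hypothetical runeInterval/inSpan helpers sketched earlier, a
+// prohibition check over a profile's chosen tables could look like this.
+func prohibited(tables [][]runeInterval, r rune) bool {
+	for _, t := range tables {
+		if inSpan(t, r) {
+			return true
+		}
+	}
+	return false
+}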
+
+var tableD1 = Set{
+	RuneRange{0x05BE, 0x05BE},
+	RuneRange{0x05C0, 0x05C0},
+	RuneRange{0x05C3, 0x05C3},
+	RuneRange{0x05D0, 0x05EA},
+	RuneRange{0x05F0, 0x05F4},
+	RuneRange{0x061B, 0x061B},
+	RuneRange{0x061F, 0x061F},
+	RuneRange{0x0621, 0x063A},
+	RuneRange{0x0640, 0x064A},
+	RuneRange{0x066D, 0x066F},
+	RuneRange{0x0671, 0x06D5},
+	RuneRange{0x06DD, 0x06DD},
+	RuneRange{0x06E5, 0x06E6},
+	RuneRange{0x06FA, 0x06FE},
+	RuneRange{0x0700, 0x070D},
+	RuneRange{0x0710, 0x0710},
+	RuneRange{0x0712, 0x072C},
+	RuneRange{0x0780, 0x07A5},
+	RuneRange{0x07B1, 0x07B1},
+	RuneRange{0x200F, 0x200F},
+	RuneRange{0xFB1D, 0xFB1D},
+	RuneRange{0xFB1F, 0xFB28},
+	RuneRange{0xFB2A, 0xFB36},
+	RuneRange{0xFB38, 0xFB3C},
+	RuneRange{0xFB3E, 0xFB3E},
+	RuneRange{0xFB40, 0xFB41},
+	RuneRange{0xFB43, 0xFB44},
+	RuneRange{0xFB46, 0xFBB1},
+	RuneRange{0xFBD3, 0xFD3D},
+	RuneRange{0xFD50, 0xFD8F},
+	RuneRange{0xFD92, 0xFDC7},
+	RuneRange{0xFDF0, 0xFDFC},
+	RuneRange{0xFE70, 0xFE74},
+	RuneRange{0xFE76, 0xFEFC},
+}
+
+// TableD1 represents RFC-3454 Table D.1.
+var TableD1 Set = tableD1
+
+var tableD2 = Set{
+	RuneRange{0x0041, 0x005A},
+	RuneRange{0x0061, 0x007A},
+	RuneRange{0x00AA, 0x00AA},
+	RuneRange{0x00B5, 0x00B5},
+	RuneRange{0x00BA, 0x00BA},
+	RuneRange{0x00C0, 0x00D6},
+	RuneRange{0x00D8, 0x00F6},
+	RuneRange{0x00F8, 0x0220},
+	RuneRange{0x0222, 0x0233},
+	RuneRange{0x0250, 0x02AD},
+	RuneRange{0x02B0, 0x02B8},
+	RuneRange{0x02BB, 0x02C1},
+	RuneRange{0x02D0, 0x02D1},
+	RuneRange{0x02E0, 0x02E4},
+	RuneRange{0x02EE, 0x02EE},
+	RuneRange{0x037A, 0x037A},
+	RuneRange{0x0386, 0x0386},
+	RuneRange{0x0388, 0x038A},
+	RuneRange{0x038C, 0x038C},
+	RuneRange{0x038E, 0x03A1},
+	RuneRange{0x03A3, 0x03CE},
+	RuneRange{0x03D0, 0x03F5},
+	RuneRange{0x0400, 0x0482},
+	RuneRange{0x048A, 0x04CE},
+	RuneRange{0x04D0, 0x04F5},
+	RuneRange{0x04F8, 0x04F9},
+	RuneRange{0x0500, 0x050F},
+	RuneRange{0x0531, 0x0556},
+	RuneRange{0x0559, 0x055F},
+	RuneRange{0x0561, 0x0587},
+	RuneRange{0x0589, 0x0589},
+	RuneRange{0x0903, 0x0903},
+	RuneRange{0x0905, 0x0939},
+	RuneRange{0x093D, 0x0940},
+	RuneRange{0x0949, 0x094C},
+	RuneRange{0x0950, 0x0950},
+	RuneRange{0x0958, 0x0961},
+	RuneRange{0x0964, 0x0970},
+	RuneRange{0x0982, 0x0983},
+	RuneRange{0x0985, 0x098C},
+	RuneRange{0x098F, 0x0990},
+	RuneRange{0x0993, 0x09A8},
+	RuneRange{0x09AA, 0x09B0},
+	RuneRange{0x09B2, 0x09B2},
+	RuneRange{0x09B6, 0x09B9},
+	RuneRange{0x09BE, 0x09C0},
+	RuneRange{0x09C7, 0x09C8},
+	RuneRange{0x09CB, 0x09CC},
+	RuneRange{0x09D7, 0x09D7},
+	RuneRange{0x09DC, 0x09DD},
+	RuneRange{0x09DF, 0x09E1},
+	RuneRange{0x09E6, 0x09F1},
+	RuneRange{0x09F4, 0x09FA},
+	RuneRange{0x0A05, 0x0A0A},
+	RuneRange{0x0A0F, 0x0A10},
+	RuneRange{0x0A13, 0x0A28},
+	RuneRange{0x0A2A, 0x0A30},
+	RuneRange{0x0A32, 0x0A33},
+	RuneRange{0x0A35, 0x0A36},
+	RuneRange{0x0A38, 0x0A39},
+	RuneRange{0x0A3E, 0x0A40},
+	RuneRange{0x0A59, 0x0A5C},
+	RuneRange{0x0A5E, 0x0A5E},
+	RuneRange{0x0A66, 0x0A6F},
+	RuneRange{0x0A72, 0x0A74},
+	RuneRange{0x0A83, 0x0A83},
+	RuneRange{0x0A85, 0x0A8B},
+	RuneRange{0x0A8D, 0x0A8D},
+	RuneRange{0x0A8F, 0x0A91},
+	RuneRange{0x0A93, 0x0AA8},
+	RuneRange{0x0AAA, 0x0AB0},
+	RuneRange{0x0AB2, 0x0AB3},
+	RuneRange{0x0AB5, 0x0AB9},
+	RuneRange{0x0ABD, 0x0AC0},
+	RuneRange{0x0AC9, 0x0AC9},
+	RuneRange{0x0ACB, 0x0ACC},
+	RuneRange{0x0AD0, 0x0AD0},
+	RuneRange{0x0AE0, 0x0AE0},
+	RuneRange{0x0AE6, 0x0AEF},
+	RuneRange{0x0B02, 0x0B03},
+	RuneRange{0x0B05, 0x0B0C},
+	RuneRange{0x0B0F, 0x0B10},
+	RuneRange{0x0B13, 0x0B28},
+	RuneRange{0x0B2A, 0x0B30},
+	RuneRange{0x0B32, 0x0B33},
+	RuneRange{0x0B36, 0x0B39},
+	RuneRange{0x0B3D, 0x0B3E},
+	RuneRange{0x0B40, 0x0B40},
+	RuneRange{0x0B47, 0x0B48},
+	RuneRange{0x0B4B, 0x0B4C},
+	RuneRange{0x0B57, 0x0B57},
+	RuneRange{0x0B5C, 0x0B5D},
+	RuneRange{0x0B5F, 0x0B61},
+	RuneRange{0x0B66, 0x0B70},
+	RuneRange{0x0B83, 0x0B83},
+	RuneRange{0x0B85, 0x0B8A},
+	RuneRange{0x0B8E, 0x0B90},
+	RuneRange{0x0B92, 0x0B95},
+	RuneRange{0x0B99, 0x0B9A},
+	RuneRange{0x0B9C, 0x0B9C},
+	RuneRange{0x0B9E, 0x0B9F},
+	RuneRange{0x0BA3, 0x0BA4},
+	RuneRange{0x0BA8, 0x0BAA},
+	RuneRange{0x0BAE, 0x0BB5},
+	RuneRange{0x0BB7, 0x0BB9},
+	RuneRange{0x0BBE, 0x0BBF},
+	RuneRange{0x0BC1, 0x0BC2},
+	RuneRange{0x0BC6, 0x0BC8},
+	RuneRange{0x0BCA, 0x0BCC},
+	RuneRange{0x0BD7, 0x0BD7},
+	RuneRange{0x0BE7, 0x0BF2},
+	RuneRange{0x0C01, 0x0C03},
+	RuneRange{0x0C05, 0x0C0C},
+	RuneRange{0x0C0E, 0x0C10},
+	RuneRange{0x0C12, 0x0C28},
+	RuneRange{0x0C2A, 0x0C33},
+	RuneRange{0x0C35, 0x0C39},
+	RuneRange{0x0C41, 0x0C44},
+	RuneRange{0x0C60, 0x0C61},
+	RuneRange{0x0C66, 0x0C6F},
+	RuneRange{0x0C82, 0x0C83},
+	RuneRange{0x0C85, 0x0C8C},
+	RuneRange{0x0C8E, 0x0C90},
+	RuneRange{0x0C92, 0x0CA8},
+	RuneRange{0x0CAA, 0x0CB3},
+	RuneRange{0x0CB5, 0x0CB9},
+	RuneRange{0x0CBE, 0x0CBE},
+	RuneRange{0x0CC0, 0x0CC4},
+	RuneRange{0x0CC7, 0x0CC8},
+	RuneRange{0x0CCA, 0x0CCB},
+	RuneRange{0x0CD5, 0x0CD6},
+	RuneRange{0x0CDE, 0x0CDE},
+	RuneRange{0x0CE0, 0x0CE1},
+	RuneRange{0x0CE6, 0x0CEF},
+	RuneRange{0x0D02, 0x0D03},
+	RuneRange{0x0D05, 0x0D0C},
+	RuneRange{0x0D0E, 0x0D10},
+	RuneRange{0x0D12, 0x0D28},
+	RuneRange{0x0D2A, 0x0D39},
+	RuneRange{0x0D3E, 0x0D40},
+	RuneRange{0x0D46, 0x0D48},
+	RuneRange{0x0D4A, 0x0D4C},
+	RuneRange{0x0D57, 0x0D57},
+	RuneRange{0x0D60, 0x0D61},
+	RuneRange{0x0D66, 0x0D6F},
+	RuneRange{0x0D82, 0x0D83},
+	RuneRange{0x0D85, 0x0D96},
+	RuneRange{0x0D9A, 0x0DB1},
+	RuneRange{0x0DB3, 0x0DBB},
+	RuneRange{0x0DBD, 0x0DBD},
+	RuneRange{0x0DC0, 0x0DC6},
+	RuneRange{0x0DCF, 0x0DD1},
+	RuneRange{0x0DD8, 0x0DDF},
+	RuneRange{0x0DF2, 0x0DF4},
+	RuneRange{0x0E01, 0x0E30},
+	RuneRange{0x0E32, 0x0E33},
+	RuneRange{0x0E40, 0x0E46},
+	RuneRange{0x0E4F, 0x0E5B},
+	RuneRange{0x0E81, 0x0E82},
+	RuneRange{0x0E84, 0x0E84},
+	RuneRange{0x0E87, 0x0E88},
+	RuneRange{0x0E8A, 0x0E8A},
+	RuneRange{0x0E8D, 0x0E8D},
+	RuneRange{0x0E94, 0x0E97},
+	RuneRange{0x0E99, 0x0E9F},
+	RuneRange{0x0EA1, 0x0EA3},
+	RuneRange{0x0EA5, 0x0EA5},
+	RuneRange{0x0EA7, 0x0EA7},
+	RuneRange{0x0EAA, 0x0EAB},
+	RuneRange{0x0EAD, 0x0EB0},
+	RuneRange{0x0EB2, 0x0EB3},
+	RuneRange{0x0EBD, 0x0EBD},
+	RuneRange{0x0EC0, 0x0EC4},
+	RuneRange{0x0EC6, 0x0EC6},
+	RuneRange{0x0ED0, 0x0ED9},
+	RuneRange{0x0EDC, 0x0EDD},
+	RuneRange{0x0F00, 0x0F17},
+	RuneRange{0x0F1A, 0x0F34},
+	RuneRange{0x0F36, 0x0F36},
+	RuneRange{0x0F38, 0x0F38},
+	RuneRange{0x0F3E, 0x0F47},
+	RuneRange{0x0F49, 0x0F6A},
+	RuneRange{0x0F7F, 0x0F7F},
+	RuneRange{0x0F85, 0x0F85},
+	RuneRange{0x0F88, 0x0F8B},
+	RuneRange{0x0FBE, 0x0FC5},
+	RuneRange{0x0FC7, 0x0FCC},
+	RuneRange{0x0FCF, 0x0FCF},
+	RuneRange{0x1000, 0x1021},
+	RuneRange{0x1023, 0x1027},
+	RuneRange{0x1029, 0x102A},
+	RuneRange{0x102C, 0x102C},
+	RuneRange{0x1031, 0x1031},
+	RuneRange{0x1038, 0x1038},
+	RuneRange{0x1040, 0x1057},
+	RuneRange{0x10A0, 0x10C5},
+	RuneRange{0x10D0, 0x10F8},
+	RuneRange{0x10FB, 0x10FB},
+	RuneRange{0x1100, 0x1159},
+	RuneRange{0x115F, 0x11A2},
+	RuneRange{0x11A8, 0x11F9},
+	RuneRange{0x1200, 0x1206},
+	RuneRange{0x1208, 0x1246},
+	RuneRange{0x1248, 0x1248},
+	RuneRange{0x124A, 0x124D},
+	RuneRange{0x1250, 0x1256},
+	RuneRange{0x1258, 0x1258},
+	RuneRange{0x125A, 0x125D},
+	RuneRange{0x1260, 0x1286},
+	RuneRange{0x1288, 0x1288},
+	RuneRange{0x128A, 0x128D},
+	RuneRange{0x1290, 0x12AE},
+	RuneRange{0x12B0, 0x12B0},
+	RuneRange{0x12B2, 0x12B5},
+	RuneRange{0x12B8, 0x12BE},
+	RuneRange{0x12C0, 0x12C0},
+	RuneRange{0x12C2, 0x12C5},
+	RuneRange{0x12C8, 0x12CE},
+	RuneRange{0x12D0, 0x12D6},
+	RuneRange{0x12D8, 0x12EE},
+	RuneRange{0x12F0, 0x130E},
+	RuneRange{0x1310, 0x1310},
+	RuneRange{0x1312, 0x1315},
+	RuneRange{0x1318, 0x131E},
+	RuneRange{0x1320, 0x1346},
+	RuneRange{0x1348, 0x135A},
+	RuneRange{0x1361, 0x137C},
+	RuneRange{0x13A0, 0x13F4},
+	RuneRange{0x1401, 0x1676},
+	RuneRange{0x1681, 0x169A},
+	RuneRange{0x16A0, 0x16F0},
+	RuneRange{0x1700, 0x170C},
+	RuneRange{0x170E, 0x1711},
+	RuneRange{0x1720, 0x1731},
+	RuneRange{0x1735, 0x1736},
+	RuneRange{0x1740, 0x1751},
+	RuneRange{0x1760, 0x176C},
+	RuneRange{0x176E, 0x1770},
+	RuneRange{0x1780, 0x17B6},
+	RuneRange{0x17BE, 0x17C5},
+	RuneRange{0x17C7, 0x17C8},
+	RuneRange{0x17D4, 0x17DA},
+	RuneRange{0x17DC, 0x17DC},
+	RuneRange{0x17E0, 0x17E9},
+	RuneRange{0x1810, 0x1819},
+	RuneRange{0x1820, 0x1877},
+	RuneRange{0x1880, 0x18A8},
+	RuneRange{0x1E00, 0x1E9B},
+	RuneRange{0x1EA0, 0x1EF9},
+	RuneRange{0x1F00, 0x1F15},
+	RuneRange{0x1F18, 0x1F1D},
+	RuneRange{0x1F20, 0x1F45},
+	RuneRange{0x1F48, 0x1F4D},
+	RuneRange{0x1F50, 0x1F57},
+	RuneRange{0x1F59, 0x1F59},
+	RuneRange{0x1F5B, 0x1F5B},
+	RuneRange{0x1F5D, 0x1F5D},
+	RuneRange{0x1F5F, 0x1F7D},
+	RuneRange{0x1F80, 0x1FB4},
+	RuneRange{0x1FB6, 0x1FBC},
+	RuneRange{0x1FBE, 0x1FBE},
+	RuneRange{0x1FC2, 0x1FC4},
+	RuneRange{0x1FC6, 0x1FCC},
+	RuneRange{0x1FD0, 0x1FD3},
+	RuneRange{0x1FD6, 0x1FDB},
+	RuneRange{0x1FE0, 0x1FEC},
+	RuneRange{0x1FF2, 0x1FF4},
+	RuneRange{0x1FF6, 0x1FFC},
+	RuneRange{0x200E, 0x200E},
+	RuneRange{0x2071, 0x2071},
+	RuneRange{0x207F, 0x207F},
+	RuneRange{0x2102, 0x2102},
+	RuneRange{0x2107, 0x2107},
+	RuneRange{0x210A, 0x2113},
+	RuneRange{0x2115, 0x2115},
+	RuneRange{0x2119, 0x211D},
+	RuneRange{0x2124, 0x2124},
+	RuneRange{0x2126, 0x2126},
+	RuneRange{0x2128, 0x2128},
+	RuneRange{0x212A, 0x212D},
+	RuneRange{0x212F, 0x2131},
+	RuneRange{0x2133, 0x2139},
+	RuneRange{0x213D, 0x213F},
+	RuneRange{0x2145, 0x2149},
+	RuneRange{0x2160, 0x2183},
+	RuneRange{0x2336, 0x237A},
+	RuneRange{0x2395, 0x2395},
+	RuneRange{0x249C, 0x24E9},
+	RuneRange{0x3005, 0x3007},
+	RuneRange{0x3021, 0x3029},
+	RuneRange{0x3031, 0x3035},
+	RuneRange{0x3038, 0x303C},
+	RuneRange{0x3041, 0x3096},
+	RuneRange{0x309D, 0x309F},
+	RuneRange{0x30A1, 0x30FA},
+	RuneRange{0x30FC, 0x30FF},
+	RuneRange{0x3105, 0x312C},
+	RuneRange{0x3131, 0x318E},
+	RuneRange{0x3190, 0x31B7},
+	RuneRange{0x31F0, 0x321C},
+	RuneRange{0x3220, 0x3243},
+	RuneRange{0x3260, 0x327B},
+	RuneRange{0x327F, 0x32B0},
+	RuneRange{0x32C0, 0x32CB},
+	RuneRange{0x32D0, 0x32FE},
+	RuneRange{0x3300, 0x3376},
+	RuneRange{0x337B, 0x33DD},
+	RuneRange{0x33E0, 0x33FE},
+	RuneRange{0x3400, 0x4DB5},
+	RuneRange{0x4E00, 0x9FA5},
+	RuneRange{0xA000, 0xA48C},
+	RuneRange{0xAC00, 0xD7A3},
+	RuneRange{0xD800, 0xFA2D},
+	RuneRange{0xFA30, 0xFA6A},
+	RuneRange{0xFB00, 0xFB06},
+	RuneRange{0xFB13, 0xFB17},
+	RuneRange{0xFF21, 0xFF3A},
+	RuneRange{0xFF41, 0xFF5A},
+	RuneRange{0xFF66, 0xFFBE},
+	RuneRange{0xFFC2, 0xFFC7},
+	RuneRange{0xFFCA, 0xFFCF},
+	RuneRange{0xFFD2, 0xFFD7},
+	RuneRange{0xFFDA, 0xFFDC},
+	RuneRange{0x10300, 0x1031E},
+	RuneRange{0x10320, 0x10323},
+	RuneRange{0x10330, 0x1034A},
+	RuneRange{0x10400, 0x10425},
+	RuneRange{0x10428, 0x1044D},
+	RuneRange{0x1D000, 0x1D0F5},
+	RuneRange{0x1D100, 0x1D126},
+	RuneRange{0x1D12A, 0x1D166},
+	RuneRange{0x1D16A, 0x1D172},
+	RuneRange{0x1D183, 0x1D184},
+	RuneRange{0x1D18C, 0x1D1A9},
+	RuneRange{0x1D1AE, 0x1D1DD},
+	RuneRange{0x1D400, 0x1D454},
+	RuneRange{0x1D456, 0x1D49C},
+	RuneRange{0x1D49E, 0x1D49F},
+	RuneRange{0x1D4A2, 0x1D4A2},
+	RuneRange{0x1D4A5, 0x1D4A6},
+	RuneRange{0x1D4A9, 0x1D4AC},
+	RuneRange{0x1D4AE, 0x1D4B9},
+	RuneRange{0x1D4BB, 0x1D4BB},
+	RuneRange{0x1D4BD, 0x1D4C0},
+	RuneRange{0x1D4C2, 0x1D4C3},
+	RuneRange{0x1D4C5, 0x1D505},
+	RuneRange{0x1D507, 0x1D50A},
+	RuneRange{0x1D50D, 0x1D514},
+	RuneRange{0x1D516, 0x1D51C},
+	RuneRange{0x1D51E, 0x1D539},
+	RuneRange{0x1D53B, 0x1D53E},
+	RuneRange{0x1D540, 0x1D544},
+	RuneRange{0x1D546, 0x1D546},
+	RuneRange{0x1D54A, 0x1D550},
+	RuneRange{0x1D552, 0x1D6A3},
+	RuneRange{0x1D6A8, 0x1D7C9},
+	RuneRange{0x20000, 0x2A6D6},
+	RuneRange{0x2F800, 0x2FA1D},
+	RuneRange{0xF0000, 0xFFFFD},
+	RuneRange{0x100000, 0x10FFFD},
+}
+
+// TableD2 represents RFC-3454 Table D.2.
+var TableD2 Set = tableD2
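+
+// Illustrative sketch only, not generated: RFC 3454 section 6 uses
+// Tables D.1 and D.2 for its bidi rule. If a string contains any
+// RandALCat rune (Table D.1), it must contain no LCat rune (Table D.2)
+// and must both begin and end with a RandALCat rune. Expressed with the
+// hypothetical runeInterval/inSpan helpers sketched above:
+func bidiOK(randAL, lCat []runeInterval, s []rune) bool {
+	hasRandAL := false
+	for _, r := range s {
+		if inSpan(randAL, r) {
+			hasRandAL = true
+		}
+	}
+	if !hasRandAL {
+		return true // no RandALCat runes: the rule does not apply
+	}
+	for _, r := range s {
+		if inSpan(lCat, r) {
+			return false // RandALCat and LCat must not mix
+		}
+	}
+	// The string must begin and end with a RandALCat rune.
+	return len(s) > 0 && inSpan(randAL, s[0]) && inSpan(randAL, s[len(s)-1])
+}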