seba-365 - implemented dep

Change-Id: Ia6226d50e7615935a0c8876809a687427ff88c22
diff --git a/vendor/github.com/mongodb/mongo-go-driver/LICENSE b/vendor/github.com/mongodb/mongo-go-driver/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/mongodb/mongo-go-driver/THIRD-PARTY-NOTICES b/vendor/github.com/mongodb/mongo-go-driver/THIRD-PARTY-NOTICES
new file mode 100644
index 0000000..6e6cd4b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/THIRD-PARTY-NOTICES
@@ -0,0 +1,445 @@
+---------------------------------------------------------------------
+License notice for gopkg.in/mgo.v2/bson
+---------------------------------------------------------------------
+
+BSON library for Go
+
+Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+---------------------------------------------------------------------
+License notice for JSON and CSV code from github.com/golang/go
+---------------------------------------------------------------------
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for github.com/buger/jsonparser
+----------------------------------------------------------------------
+
+MIT License
+
+Copyright (c) 2016 Leonid Bugaev
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for github.com/davecgh/go-spew
+----------------------------------------------------------------------
+
+ISC License
+
+Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
+
+Permission to use, copy, modify, and distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for github.com/golang/snappy
+----------------------------------------------------------------------
+
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for github.com/google/go-cmp
+----------------------------------------------------------------------
+
+Copyright (c) 2017 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for github.com/kr/pretty
+----------------------------------------------------------------------
+
+The MIT License (MIT)
+
+Copyright 2012 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for github.com/kr/text
+----------------------------------------------------------------------
+
+Copyright 2012 Keith Rarick
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for github.com/montanaflynn/stats
+----------------------------------------------------------------------
+
+The MIT License (MIT)
+
+Copyright (c) 2014-2015 Montana Flynn (https://anonfunction.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for github.com/pmezard/go-difflib
+----------------------------------------------------------------------
+
+Copyright (c) 2013, Patrick Mezard
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+    The names of its contributors may not be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for github.com/stretchr/testify
+----------------------------------------------------------------------
+
+Copyright (c) 2012 - 2013 Mat Ryer and Tyler Bunnell
+
+Please consider promoting this project if you find it useful.
+
+Permission is hereby granted, free of charge, to any person
+obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without restriction,
+including without limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of the Software,
+and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT
+OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
+OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for github.com/tidwall/pretty
+----------------------------------------------------------------------
+
+The MIT License (MIT)
+
+Copyright (c) 2017 Josh Baker
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+----------------------------------------------------------------------
+License notice for golang.org/x/crypto
+----------------------------------------------------------------------
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for golang.org/x/net
+----------------------------------------------------------------------
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for golang.org/x/sync
+----------------------------------------------------------------------
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+----------------------------------------------------------------------
+License notice for gopkg.in/yaml.v2
+----------------------------------------------------------------------
+
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bson.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bson.go
new file mode 100644
index 0000000..44cdec8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bson.go
@@ -0,0 +1,60 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
+// See THIRD-PARTY-NOTICES for original license terms.
+
+// +build go1.9
+
+package bson
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// Zeroer allows custom struct types to implement a report of zero
+// state. All struct types that don't implement Zeroer or where IsZero
+// returns false are considered to be not zero.
+type Zeroer interface {
+	IsZero() bool
+}
+
+// D represents a BSON Document. This type can be used to represent BSON in a concise and readable
+// manner. It should generally be used when serializing to BSON. For deserializing, the Raw or
+// Document types should be used.
+//
+// Example usage:
+//
+// 		bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
+//
+// This type should be used in situations where order matters, such as MongoDB commands. If the
+// order is not important, a map is more comfortable and concise.
+type D = primitive.D
+
+// E represents a BSON element for a D. It is usually used inside a D.
+type E = primitive.E
+
+// M is an unordered, concise representation of a BSON Document. It should generally be used to
+// serialize BSON when the order of the elements of a BSON document do not matter. If the element
+// order matters, use a D instead.
+//
+// Example usage:
+//
+// 		bson.M{"foo": "bar", "hello": "world", "pi": 3.14159}
+//
+// This type is handled in the encoders as a regular map[string]interface{}. The elements will be
+// serialized in an undefined, random order, and the order will be different each time.
+type M = primitive.M
+
+// An A represents a BSON array. This type can be used to represent a BSON array in a concise and
+// readable manner. It should generally be used when serializing to BSON. For deserializing, the
+// RawArray or Array types should be used.
+//
+// Example usage:
+//
+// 		bson.A{"bar", "world", 3.14159, bson.D{{"qux", 12345}}}
+//
+type A = primitive.A
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bson_1_8.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bson_1_8.go
new file mode 100644
index 0000000..beac40b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bson_1_8.go
@@ -0,0 +1,91 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// +build !go1.9
+
+package bson
+
+import (
+	"math"
+	"strconv"
+	"strings"
+)
+
+// Zeroer allows custom struct types to implement a report of zero
+// state. All struct types that don't implement Zeroer or where IsZero
+// returns false are considered to be not zero.
+type Zeroer interface {
+	IsZero() bool
+}
+
+// D represents a BSON Document. This type can be used to represent BSON in a concise and readable
+// manner. It should generally be used when serializing to BSON. For deserializing, the Raw or
+// Document types should be used.
+//
+// Example usage:
+//
+// 		primitive.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
+//
+// This type should be used in situations where order matters, such as MongoDB commands. If the
+// order is not important, a map is more comfortable and concise.
+type D []E
+
+// Map creates a map from the elements of the D.
+func (d D) Map() M {
+	m := make(M, len(d))
+	for _, e := range d {
+		m[e.Key] = e.Value
+	}
+	return m
+}
+
+// E represents a BSON element for a D. It is usually used inside a D.
+type E struct {
+	Key   string
+	Value interface{}
+}
+
+// M is an unordered, concise representation of a BSON Document. It should generally be used to
+// serialize BSON when the order of the elements of a BSON document do not matter. If the element
+// order matters, use a D instead.
+//
+// Example usage:
+//
+// 		primitive.M{"foo": "bar", "hello": "world", "pi": 3.14159}
+//
+// This type is handled in the encoders as a regular map[string]interface{}. The elements will be
+// serialized in an undefined, random order, and the order will be different each time.
+type M map[string]interface{}
+
+// An A represents a BSON array. This type can be used to represent a BSON array in a concise and
+// readable manner. It should generally be used when serializing to BSON. For deserializing, the
+// RawArray or Array types should be used.
+//
+// Example usage:
+//
+// 		primitive.A{"bar", "world", 3.14159, primitive.D{{"qux", 12345}}}
+//
+type A []interface{}
+
+func formatDouble(f float64) string {
+	var s string
+	if math.IsInf(f, 1) {
+		s = "Infinity"
+	} else if math.IsInf(f, -1) {
+		s = "-Infinity"
+	} else if math.IsNaN(f) {
+		s = "NaN"
+	} else {
+		// Print exactly one decimalType place for integers; otherwise, print as many are necessary to
+		// perfectly represent it.
+		s = strconv.FormatFloat(f, 'G', -1, 64)
+		if !strings.ContainsRune(s, '.') {
+			s += ".0"
+		}
+	}
+
+	return s
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/bsoncodec.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/bsoncodec.go
new file mode 100644
index 0000000..4c5530c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/bsoncodec.go
@@ -0,0 +1,163 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+// Marshaler is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+type Marshaler interface {
+	MarshalBSON() ([]byte, error)
+}
+
+// ValueMarshaler is an interface implemented by types that can marshal
+// themselves into a BSON value as bytes. The type must be the valid type for
+// the bytes returned. The bytes and byte type together must be valid if the
+// error is nil.
+type ValueMarshaler interface {
+	MarshalBSONValue() (bsontype.Type, []byte, error)
+}
+
+// Unmarshaler is an interface implemented by types that can unmarshal a BSON
+// document representation of themselves. The BSON bytes can be assumed to be
+// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data
+// after returning.
+type Unmarshaler interface {
+	UnmarshalBSON([]byte) error
+}
+
+// ValueUnmarshaler is an interface implemented by types that can unmarshal a
+// BSON value representaiton of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+type ValueUnmarshaler interface {
+	UnmarshalBSONValue(bsontype.Type, []byte) error
+}
+
+// ValueEncoderError is an error returned from a ValueEncoder when the provided value can't be
+// encoded by the ValueEncoder.
+type ValueEncoderError struct {
+	Name     string
+	Types    []reflect.Type
+	Kinds    []reflect.Kind
+	Received reflect.Value
+}
+
+func (vee ValueEncoderError) Error() string {
+	typeKinds := make([]string, 0, len(vee.Types)+len(vee.Kinds))
+	for _, t := range vee.Types {
+		typeKinds = append(typeKinds, t.String())
+	}
+	for _, k := range vee.Kinds {
+		if k == reflect.Map {
+			typeKinds = append(typeKinds, "map[string]*")
+			continue
+		}
+		typeKinds = append(typeKinds, k.String())
+	}
+	received := vee.Received.Kind().String()
+	if vee.Received.IsValid() {
+		received = vee.Received.Type().String()
+	}
+	return fmt.Sprintf("%s can only encode valid %s, but got %s", vee.Name, strings.Join(typeKinds, ", "), received)
+}
+
+// ValueDecoderError is an error returned from a ValueDecoder when the provided value can't be
+// decoded by the ValueDecoder.
+type ValueDecoderError struct {
+	Name     string
+	Types    []reflect.Type
+	Kinds    []reflect.Kind
+	Received reflect.Value
+}
+
+func (vde ValueDecoderError) Error() string {
+	typeKinds := make([]string, 0, len(vde.Types)+len(vde.Kinds))
+	for _, t := range vde.Types {
+		typeKinds = append(typeKinds, t.String())
+	}
+	for _, k := range vde.Kinds {
+		if k == reflect.Map {
+			typeKinds = append(typeKinds, "map[string]*")
+			continue
+		}
+		typeKinds = append(typeKinds, k.String())
+	}
+	received := vde.Received.Kind().String()
+	if vde.Received.IsValid() {
+		received = vde.Received.Type().String()
+	}
+	return fmt.Sprintf("%s can only decode valid and settable %s, but got %s", vde.Name, strings.Join(typeKinds, ", "), received)
+}
+
+// EncodeContext is the contextual information required for a Codec to encode a
+// value.
+type EncodeContext struct {
+	*Registry
+	MinSize bool
+}
+
+// DecodeContext is the contextual information required for a Codec to decode a
+// value.
+type DecodeContext struct {
+	*Registry
+	Truncate bool
+	// Ancestor is the type of a containing document. This is mainly used to determine what type
+	// should be used when decoding an embedded document into an empty interface. For example, if
+	// Ancestor is a bson.M, BSON embedded document values being decoded into an empty interface
+	// will be decoded into a bson.M.
+	Ancestor reflect.Type
+}
+
+// ValueCodec is the interface that groups the methods to encode and decode
+// values.
+type ValueCodec interface {
+	ValueEncoder
+	ValueDecoder
+}
+
+// ValueEncoder is the interface implemented by types that can handle the encoding of a value.
+type ValueEncoder interface {
+	EncodeValue(EncodeContext, bsonrw.ValueWriter, reflect.Value) error
+}
+
+// ValueEncoderFunc is an adapter function that allows a function with the correct signature to be
+// used as a ValueEncoder.
+type ValueEncoderFunc func(EncodeContext, bsonrw.ValueWriter, reflect.Value) error
+
+// EncodeValue implements the ValueEncoder interface.
+func (fn ValueEncoderFunc) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	return fn(ec, vw, val)
+}
+
+// ValueDecoder is the interface implemented by types that can handle the decoding of a value.
+type ValueDecoder interface {
+	DecodeValue(DecodeContext, bsonrw.ValueReader, reflect.Value) error
+}
+
+// ValueDecoderFunc is an adapter function that allows a function with the correct signature to be
+// used as a ValueDecoder.
+type ValueDecoderFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) error
+
+// DecodeValue implements the ValueDecoder interface.
+func (fn ValueDecoderFunc) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	return fn(dc, vr, val)
+}
+
+// CodecZeroer is the interface implemented by Codecs that can also determine if
+// a value of the type that would be encoded is zero.
+type CodecZeroer interface {
+	IsTypeZero(interface{}) bool
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/default_value_decoders.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/default_value_decoders.go
new file mode 100644
index 0000000..645f89e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/default_value_decoders.go
@@ -0,0 +1,1014 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+var defaultValueDecoders DefaultValueDecoders
+
+// DefaultValueDecoders is a namespace type for the default ValueDecoders used
+// when creating a registry.
+type DefaultValueDecoders struct{}
+
+// RegisterDefaultDecoders will register the decoder methods attached to DefaultValueDecoders with
+// the provided RegistryBuilder.
+//
+// There is no support for decoding map[string]interface{} becuase there is no decoder for
+// interface{}, so users must either register this decoder themselves or use the
+// EmptyInterfaceDecoder avaialble in the bson package.
+func (dvd DefaultValueDecoders) RegisterDefaultDecoders(rb *RegistryBuilder) {
+	if rb == nil {
+		panic(errors.New("argument to RegisterDefaultDecoders must not be nil"))
+	}
+
+	rb.
+		RegisterDecoder(tBinary, ValueDecoderFunc(dvd.BinaryDecodeValue)).
+		RegisterDecoder(tUndefined, ValueDecoderFunc(dvd.UndefinedDecodeValue)).
+		RegisterDecoder(tDateTime, ValueDecoderFunc(dvd.DateTimeDecodeValue)).
+		RegisterDecoder(tNull, ValueDecoderFunc(dvd.NullDecodeValue)).
+		RegisterDecoder(tRegex, ValueDecoderFunc(dvd.RegexDecodeValue)).
+		RegisterDecoder(tDBPointer, ValueDecoderFunc(dvd.DBPointerDecodeValue)).
+		RegisterDecoder(tTimestamp, ValueDecoderFunc(dvd.TimestampDecodeValue)).
+		RegisterDecoder(tMinKey, ValueDecoderFunc(dvd.MinKeyDecodeValue)).
+		RegisterDecoder(tMaxKey, ValueDecoderFunc(dvd.MaxKeyDecodeValue)).
+		RegisterDecoder(tJavaScript, ValueDecoderFunc(dvd.JavaScriptDecodeValue)).
+		RegisterDecoder(tSymbol, ValueDecoderFunc(dvd.SymbolDecodeValue)).
+		RegisterDecoder(tByteSlice, ValueDecoderFunc(dvd.ByteSliceDecodeValue)).
+		RegisterDecoder(tTime, ValueDecoderFunc(dvd.TimeDecodeValue)).
+		RegisterDecoder(tEmpty, ValueDecoderFunc(dvd.EmptyInterfaceDecodeValue)).
+		RegisterDecoder(tOID, ValueDecoderFunc(dvd.ObjectIDDecodeValue)).
+		RegisterDecoder(tDecimal, ValueDecoderFunc(dvd.Decimal128DecodeValue)).
+		RegisterDecoder(tJSONNumber, ValueDecoderFunc(dvd.JSONNumberDecodeValue)).
+		RegisterDecoder(tURL, ValueDecoderFunc(dvd.URLDecodeValue)).
+		RegisterDecoder(tValueUnmarshaler, ValueDecoderFunc(dvd.ValueUnmarshalerDecodeValue)).
+		RegisterDecoder(tUnmarshaler, ValueDecoderFunc(dvd.UnmarshalerDecodeValue)).
+		RegisterDecoder(tCoreDocument, ValueDecoderFunc(dvd.CoreDocumentDecodeValue)).
+		RegisterDecoder(tCodeWithScope, ValueDecoderFunc(dvd.CodeWithScopeDecodeValue)).
+		RegisterDefaultDecoder(reflect.Bool, ValueDecoderFunc(dvd.BooleanDecodeValue)).
+		RegisterDefaultDecoder(reflect.Int, ValueDecoderFunc(dvd.IntDecodeValue)).
+		RegisterDefaultDecoder(reflect.Int8, ValueDecoderFunc(dvd.IntDecodeValue)).
+		RegisterDefaultDecoder(reflect.Int16, ValueDecoderFunc(dvd.IntDecodeValue)).
+		RegisterDefaultDecoder(reflect.Int32, ValueDecoderFunc(dvd.IntDecodeValue)).
+		RegisterDefaultDecoder(reflect.Int64, ValueDecoderFunc(dvd.IntDecodeValue)).
+		RegisterDefaultDecoder(reflect.Uint, ValueDecoderFunc(dvd.UintDecodeValue)).
+		RegisterDefaultDecoder(reflect.Uint8, ValueDecoderFunc(dvd.UintDecodeValue)).
+		RegisterDefaultDecoder(reflect.Uint16, ValueDecoderFunc(dvd.UintDecodeValue)).
+		RegisterDefaultDecoder(reflect.Uint32, ValueDecoderFunc(dvd.UintDecodeValue)).
+		RegisterDefaultDecoder(reflect.Uint64, ValueDecoderFunc(dvd.UintDecodeValue)).
+		RegisterDefaultDecoder(reflect.Float32, ValueDecoderFunc(dvd.FloatDecodeValue)).
+		RegisterDefaultDecoder(reflect.Float64, ValueDecoderFunc(dvd.FloatDecodeValue)).
+		RegisterDefaultDecoder(reflect.Array, ValueDecoderFunc(dvd.ArrayDecodeValue)).
+		RegisterDefaultDecoder(reflect.Map, ValueDecoderFunc(dvd.MapDecodeValue)).
+		RegisterDefaultDecoder(reflect.Slice, ValueDecoderFunc(dvd.SliceDecodeValue)).
+		RegisterDefaultDecoder(reflect.String, ValueDecoderFunc(dvd.StringDecodeValue)).
+		RegisterDefaultDecoder(reflect.Struct, &StructCodec{cache: make(map[reflect.Type]*structDescription), parser: DefaultStructTagParser}).
+		RegisterDefaultDecoder(reflect.Ptr, NewPointerCodec()).
+		RegisterTypeMapEntry(bsontype.Double, tFloat64).
+		RegisterTypeMapEntry(bsontype.String, tString).
+		RegisterTypeMapEntry(bsontype.Array, tA).
+		RegisterTypeMapEntry(bsontype.Binary, tBinary).
+		RegisterTypeMapEntry(bsontype.Undefined, tUndefined).
+		RegisterTypeMapEntry(bsontype.ObjectID, tOID).
+		RegisterTypeMapEntry(bsontype.Boolean, tBool).
+		RegisterTypeMapEntry(bsontype.DateTime, tDateTime).
+		RegisterTypeMapEntry(bsontype.Regex, tRegex).
+		RegisterTypeMapEntry(bsontype.DBPointer, tDBPointer).
+		RegisterTypeMapEntry(bsontype.JavaScript, tJavaScript).
+		RegisterTypeMapEntry(bsontype.Symbol, tSymbol).
+		RegisterTypeMapEntry(bsontype.CodeWithScope, tCodeWithScope).
+		RegisterTypeMapEntry(bsontype.Int32, tInt32).
+		RegisterTypeMapEntry(bsontype.Int64, tInt64).
+		RegisterTypeMapEntry(bsontype.Timestamp, tTimestamp).
+		RegisterTypeMapEntry(bsontype.Decimal128, tDecimal).
+		RegisterTypeMapEntry(bsontype.MinKey, tMinKey).
+		RegisterTypeMapEntry(bsontype.MaxKey, tMaxKey).
+		RegisterTypeMapEntry(bsontype.Type(0), tD)
+}
+
+// BooleanDecodeValue is the ValueDecoderFunc for bool types.
+func (dvd DefaultValueDecoders) BooleanDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if vr.Type() != bsontype.Boolean {
+		return fmt.Errorf("cannot decode %v into a boolean", vr.Type())
+	}
+	if !val.IsValid() || !val.CanSet() || val.Kind() != reflect.Bool {
+		return ValueDecoderError{Name: "BooleanDecodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
+	}
+
+	b, err := vr.ReadBoolean()
+	val.SetBool(b)
+	return err
+}
+
+// IntDecodeValue is the ValueDecoderFunc for bool types.
+func (dvd DefaultValueDecoders) IntDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	var i64 int64
+	var err error
+	switch vr.Type() {
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return err
+		}
+		i64 = int64(i32)
+	case bsontype.Int64:
+		i64, err = vr.ReadInt64()
+		if err != nil {
+			return err
+		}
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return err
+		}
+		if !dc.Truncate && math.Floor(f64) != f64 {
+			return errors.New("IntDecodeValue can only truncate float64 to an integer type when truncation is enabled")
+		}
+		if f64 > float64(math.MaxInt64) {
+			return fmt.Errorf("%g overflows int64", f64)
+		}
+		i64 = int64(f64)
+	default:
+		return fmt.Errorf("cannot decode %v into an integer type", vr.Type())
+	}
+
+	if !val.CanSet() {
+		return ValueDecoderError{
+			Name:     "IntDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
+			Received: val,
+		}
+	}
+
+	switch val.Kind() {
+	case reflect.Int8:
+		if i64 < math.MinInt8 || i64 > math.MaxInt8 {
+			return fmt.Errorf("%d overflows int8", i64)
+		}
+	case reflect.Int16:
+		if i64 < math.MinInt16 || i64 > math.MaxInt16 {
+			return fmt.Errorf("%d overflows int16", i64)
+		}
+	case reflect.Int32:
+		if i64 < math.MinInt32 || i64 > math.MaxInt32 {
+			return fmt.Errorf("%d overflows int32", i64)
+		}
+	case reflect.Int64:
+	case reflect.Int:
+		if int64(int(i64)) != i64 { // Can we fit this inside of an int
+			return fmt.Errorf("%d overflows int", i64)
+		}
+	default:
+		return ValueDecoderError{
+			Name:     "IntDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
+			Received: val,
+		}
+	}
+
+	val.SetInt(i64)
+	return nil
+}
+
+// UintDecodeValue is the ValueDecoderFunc for uint types.
+func (dvd DefaultValueDecoders) UintDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	var i64 int64
+	var err error
+	switch vr.Type() {
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return err
+		}
+		i64 = int64(i32)
+	case bsontype.Int64:
+		i64, err = vr.ReadInt64()
+		if err != nil {
+			return err
+		}
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return err
+		}
+		if !dc.Truncate && math.Floor(f64) != f64 {
+			return errors.New("UintDecodeValue can only truncate float64 to an integer type when truncation is enabled")
+		}
+		if f64 > float64(math.MaxInt64) {
+			return fmt.Errorf("%g overflows int64", f64)
+		}
+		i64 = int64(f64)
+	default:
+		return fmt.Errorf("cannot decode %v into an integer type", vr.Type())
+	}
+
+	if !val.CanSet() {
+		return ValueDecoderError{
+			Name:     "UintDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+			Received: val,
+		}
+	}
+
+	switch val.Kind() {
+	case reflect.Uint8:
+		if i64 < 0 || i64 > math.MaxUint8 {
+			return fmt.Errorf("%d overflows uint8", i64)
+		}
+	case reflect.Uint16:
+		if i64 < 0 || i64 > math.MaxUint16 {
+			return fmt.Errorf("%d overflows uint16", i64)
+		}
+	case reflect.Uint32:
+		if i64 < 0 || i64 > math.MaxUint32 {
+			return fmt.Errorf("%d overflows uint32", i64)
+		}
+	case reflect.Uint64:
+		if i64 < 0 {
+			return fmt.Errorf("%d overflows uint64", i64)
+		}
+	case reflect.Uint:
+		if i64 < 0 || int64(uint(i64)) != i64 { // Can we fit this inside of an uint
+			return fmt.Errorf("%d overflows uint", i64)
+		}
+	default:
+		return ValueDecoderError{
+			Name:     "UintDecodeValue",
+			Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+			Received: val,
+		}
+	}
+
+	val.SetUint(uint64(i64))
+	return nil
+}
+
+// FloatDecodeValue is the ValueDecoderFunc for float types.
+func (dvd DefaultValueDecoders) FloatDecodeValue(ec DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	var f float64
+	var err error
+	switch vr.Type() {
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return err
+		}
+		f = float64(i32)
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return err
+		}
+		f = float64(i64)
+	case bsontype.Double:
+		f, err = vr.ReadDouble()
+		if err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("cannot decode %v into a float32 or float64 type", vr.Type())
+	}
+
+	if !val.CanSet() {
+		return ValueDecoderError{Name: "FloatDecodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val}
+	}
+
+	switch val.Kind() {
+	case reflect.Float32:
+		if !ec.Truncate && float64(float32(f)) != f {
+			return errors.New("FloatDecodeValue can only convert float64 to float32 when truncation is allowed")
+		}
+	case reflect.Float64:
+	default:
+		return ValueDecoderError{Name: "FloatDecodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val}
+	}
+
+	val.SetFloat(f)
+	return nil
+}
+
+// StringDecodeValue is the ValueDecoderFunc for string types.
+func (dvd DefaultValueDecoders) StringDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	var str string
+	var err error
+	switch vr.Type() {
+	// TODO(GODRIVER-577): Handle JavaScript and Symbol BSON types when allowed.
+	case bsontype.String:
+		str, err = vr.ReadString()
+		if err != nil {
+			return err
+		}
+	default:
+		return fmt.Errorf("cannot decode %v into a string type", vr.Type())
+	}
+	if !val.CanSet() || val.Kind() != reflect.String {
+		return ValueDecoderError{Name: "StringDecodeValue", Kinds: []reflect.Kind{reflect.String}, Received: val}
+	}
+
+	val.SetString(str)
+	return nil
+}
+
+// JavaScriptDecodeValue is the ValueDecoderFunc for the primitive.JavaScript type.
+func (DefaultValueDecoders) JavaScriptDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tJavaScript {
+		return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
+	}
+
+	if vr.Type() != bsontype.JavaScript {
+		return fmt.Errorf("cannot decode %v into a primitive.JavaScript", vr.Type())
+	}
+
+	js, err := vr.ReadJavascript()
+	if err != nil {
+		return err
+	}
+
+	val.SetString(js)
+	return nil
+}
+
+// SymbolDecodeValue is the ValueDecoderFunc for the primitive.Symbol type.
+func (DefaultValueDecoders) SymbolDecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tSymbol {
+		return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tSymbol}, Received: val}
+	}
+
+	if vr.Type() != bsontype.Symbol {
+		return fmt.Errorf("cannot decode %v into a primitive.Symbol", vr.Type())
+	}
+
+	symbol, err := vr.ReadSymbol()
+	if err != nil {
+		return err
+	}
+
+	val.SetString(symbol)
+	return nil
+}
+
+// BinaryDecodeValue is the ValueDecoderFunc for Binary.
+func (DefaultValueDecoders) BinaryDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tBinary {
+		return ValueDecoderError{Name: "BinaryDecodeValue", Types: []reflect.Type{tBinary}, Received: val}
+	}
+
+	if vr.Type() != bsontype.Binary {
+		return fmt.Errorf("cannot decode %v into a Binary", vr.Type())
+	}
+
+	data, subtype, err := vr.ReadBinary()
+	if err != nil {
+		return err
+	}
+
+	val.Set(reflect.ValueOf(primitive.Binary{Subtype: subtype, Data: data}))
+	return nil
+}
+
+// UndefinedDecodeValue is the ValueDecoderFunc for Undefined.
+func (DefaultValueDecoders) UndefinedDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tUndefined {
+		return ValueDecoderError{Name: "UndefinedDecodeValue", Types: []reflect.Type{tUndefined}, Received: val}
+	}
+
+	if vr.Type() != bsontype.Undefined {
+		return fmt.Errorf("cannot decode %v into an Undefined", vr.Type())
+	}
+
+	val.Set(reflect.ValueOf(primitive.Undefined{}))
+	return vr.ReadUndefined()
+}
+
+// ObjectIDDecodeValue is the ValueDecoderFunc for primitive.ObjectID.
+func (dvd DefaultValueDecoders) ObjectIDDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tOID {
+		return ValueDecoderError{Name: "ObjectIDDecodeValue", Types: []reflect.Type{tOID}, Received: val}
+	}
+
+	if vr.Type() != bsontype.ObjectID {
+		return fmt.Errorf("cannot decode %v into an ObjectID", vr.Type())
+	}
+	oid, err := vr.ReadObjectID()
+	val.Set(reflect.ValueOf(oid))
+	return err
+}
+
+// DateTimeDecodeValue is the ValueDecoderFunc for DateTime.
+func (DefaultValueDecoders) DateTimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tDateTime {
+		return ValueDecoderError{Name: "DateTimeDecodeValue", Types: []reflect.Type{tDateTime}, Received: val}
+	}
+
+	if vr.Type() != bsontype.DateTime {
+		return fmt.Errorf("cannot decode %v into a DateTime", vr.Type())
+	}
+
+	dt, err := vr.ReadDateTime()
+	if err != nil {
+		return err
+	}
+
+	val.Set(reflect.ValueOf(primitive.DateTime(dt)))
+	return nil
+}
+
+// NullDecodeValue is the ValueDecoderFunc for Null.
+func (DefaultValueDecoders) NullDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tNull {
+		return ValueDecoderError{Name: "NullDecodeValue", Types: []reflect.Type{tNull}, Received: val}
+	}
+
+	if vr.Type() != bsontype.Null {
+		return fmt.Errorf("cannot decode %v into a Null", vr.Type())
+	}
+
+	val.Set(reflect.ValueOf(primitive.Null{}))
+	return vr.ReadNull()
+}
+
+// RegexDecodeValue is the ValueDecoderFunc for Regex.
+func (DefaultValueDecoders) RegexDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tRegex {
+		return ValueDecoderError{Name: "RegexDecodeValue", Types: []reflect.Type{tRegex}, Received: val}
+	}
+
+	if vr.Type() != bsontype.Regex {
+		return fmt.Errorf("cannot decode %v into a Regex", vr.Type())
+	}
+
+	pattern, options, err := vr.ReadRegex()
+	if err != nil {
+		return err
+	}
+
+	val.Set(reflect.ValueOf(primitive.Regex{Pattern: pattern, Options: options}))
+	return nil
+}
+
+// DBPointerDecodeValue is the ValueDecoderFunc for DBPointer.
+func (DefaultValueDecoders) DBPointerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tDBPointer {
+		return ValueDecoderError{Name: "DBPointerDecodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
+	}
+
+	if vr.Type() != bsontype.DBPointer {
+		return fmt.Errorf("cannot decode %v into a DBPointer", vr.Type())
+	}
+
+	ns, pointer, err := vr.ReadDBPointer()
+	if err != nil {
+		return err
+	}
+
+	val.Set(reflect.ValueOf(primitive.DBPointer{DB: ns, Pointer: pointer}))
+	return nil
+}
+
+// TimestampDecodeValue is the ValueDecoderFunc for Timestamp.
+func (DefaultValueDecoders) TimestampDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tTimestamp {
+		return ValueDecoderError{Name: "TimestampDecodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
+	}
+
+	if vr.Type() != bsontype.Timestamp {
+		return fmt.Errorf("cannot decode %v into a Timestamp", vr.Type())
+	}
+
+	t, incr, err := vr.ReadTimestamp()
+	if err != nil {
+		return err
+	}
+
+	val.Set(reflect.ValueOf(primitive.Timestamp{T: t, I: incr}))
+	return nil
+}
+
+// MinKeyDecodeValue is the ValueDecoderFunc for MinKey.
+func (DefaultValueDecoders) MinKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tMinKey {
+		return ValueDecoderError{Name: "MinKeyDecodeValue", Types: []reflect.Type{tMinKey}, Received: val}
+	}
+
+	if vr.Type() != bsontype.MinKey {
+		return fmt.Errorf("cannot decode %v into a MinKey", vr.Type())
+	}
+
+	val.Set(reflect.ValueOf(primitive.MinKey{}))
+	return vr.ReadMinKey()
+}
+
+// MaxKeyDecodeValue is the ValueDecoderFunc for MaxKey.
+func (DefaultValueDecoders) MaxKeyDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tMaxKey {
+		return ValueDecoderError{Name: "MaxKeyDecodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
+	}
+
+	if vr.Type() != bsontype.MaxKey {
+		return fmt.Errorf("cannot decode %v into a MaxKey", vr.Type())
+	}
+
+	val.Set(reflect.ValueOf(primitive.MaxKey{}))
+	return vr.ReadMaxKey()
+}
+
+// Decimal128DecodeValue is the ValueDecoderFunc for primitive.Decimal128.
+func (dvd DefaultValueDecoders) Decimal128DecodeValue(dctx DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if vr.Type() != bsontype.Decimal128 {
+		return fmt.Errorf("cannot decode %v into a primitive.Decimal128", vr.Type())
+	}
+
+	if !val.CanSet() || val.Type() != tDecimal {
+		return ValueDecoderError{Name: "Decimal128DecodeValue", Types: []reflect.Type{tDecimal}, Received: val}
+	}
+	d128, err := vr.ReadDecimal128()
+	val.Set(reflect.ValueOf(d128))
+	return err
+}
+
+// JSONNumberDecodeValue is the ValueDecoderFunc for json.Number. It accepts BSON Double, Int32, and Int64 elements and stores their decimal string form.
+func (dvd DefaultValueDecoders) JSONNumberDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tJSONNumber {
+		return ValueDecoderError{Name: "JSONNumberDecodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
+	}
+
+	switch vr.Type() {
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return err
+		}
+		val.Set(reflect.ValueOf(json.Number(strconv.FormatFloat(f64, 'g', -1, 64)))) // 'g'/-1 gives the shortest round-trippable representation
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return err
+		}
+		val.Set(reflect.ValueOf(json.Number(strconv.FormatInt(int64(i32), 10))))
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return err
+		}
+		val.Set(reflect.ValueOf(json.Number(strconv.FormatInt(i64, 10))))
+	default:
+		return fmt.Errorf("cannot decode %v into a json.Number", vr.Type())
+	}
+
+	return nil
+}
+
+// URLDecodeValue is the ValueDecoderFunc for url.URL. It reads a BSON string, parses it with url.Parse, and stores the resulting URL by value.
+func (dvd DefaultValueDecoders) URLDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if vr.Type() != bsontype.String {
+		return fmt.Errorf("cannot decode %v into a *url.URL", vr.Type())
+	}
+
+	str, err := vr.ReadString()
+	if err != nil {
+		return err
+	}
+
+	u, err := url.Parse(str)
+	if err != nil {
+		return err
+	}
+
+	if !val.CanSet() || val.Type() != tURL { // NOTE: val is validated only after the element has been read and parsed
+		return ValueDecoderError{Name: "URLDecodeValue", Types: []reflect.Type{tURL}, Received: val}
+	}
+
+	val.Set(reflect.ValueOf(u).Elem()) // url.Parse returns *url.URL; dereference to store the value
+	return nil
+}
+
+// TimeDecodeValue is the ValueDecoderFunc for time.Time. It converts a BSON DateTime (milliseconds since the Unix epoch) into a time.Time.
+func (dvd DefaultValueDecoders) TimeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if vr.Type() != bsontype.DateTime {
+		return fmt.Errorf("cannot decode %v into a time.Time", vr.Type())
+	}
+
+	dt, err := vr.ReadDateTime()
+	if err != nil {
+		return err
+	}
+
+	if !val.CanSet() || val.Type() != tTime {
+		return ValueDecoderError{Name: "TimeDecodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+
+	val.Set(reflect.ValueOf(time.Unix(dt/1000, dt%1000*1000000))) // split millis into whole seconds + remainder as nanoseconds
+	return nil
+}
+
+// ByteSliceDecodeValue is the ValueDecoderFunc for []byte. It accepts BSON Binary (generic subtype 0x00 only) and Null; Null decodes to a nil slice.
+func (dvd DefaultValueDecoders) ByteSliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if vr.Type() != bsontype.Binary && vr.Type() != bsontype.Null {
+		return fmt.Errorf("cannot decode %v into a []byte", vr.Type())
+	}
+
+	if !val.CanSet() || val.Type() != tByteSlice {
+		return ValueDecoderError{Name: "ByteSliceDecodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+	}
+
+	if vr.Type() == bsontype.Null {
+		val.Set(reflect.Zero(val.Type())) // nil []byte
+		return vr.ReadNull()
+	}
+
+	data, subtype, err := vr.ReadBinary()
+	if err != nil {
+		return err
+	}
+	if subtype != 0x00 { // non-generic binary subtypes must be decoded into primitive.Binary instead
+		return fmt.Errorf("ByteSliceDecodeValue can only be used to decode subtype 0x00 for %s, got %v", bsontype.Binary, subtype)
+	}
+
+	val.Set(reflect.ValueOf(data))
+	return nil
+}
+
+// MapDecodeValue is the ValueDecoderFunc for map[string]* types (any map whose key kind is string). Null decodes to a nil map; documents are decoded element by element.
+func (dvd DefaultValueDecoders) MapDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
+		return ValueDecoderError{Name: "MapDecodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	switch vr.Type() {
+	case bsontype.Type(0), bsontype.EmbeddedDocument: // Type(0) is the top-level document
+	case bsontype.Null:
+		val.Set(reflect.Zero(val.Type())) // nil map
+		return vr.ReadNull()
+	default:
+		return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type())
+	}
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeMap(val.Type()))
+	}
+
+	eType := val.Type().Elem()
+	decoder, err := dc.LookupDecoder(eType)
+	if err != nil {
+		return err
+	}
+
+	if eType == tEmpty { // remember this map type so nested interface{} documents decode into the same map type
+		dc.Ancestor = val.Type()
+	}
+
+	keyType := val.Type().Key()
+	for {
+		key, vr, err := dr.ReadElement()
+		if err == bsonrw.ErrEOD { // end of document
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		elem := reflect.New(eType).Elem()
+
+		err = decoder.DecodeValue(dc, vr, elem)
+		if err != nil {
+			return err
+		}
+
+		val.SetMapIndex(reflect.ValueOf(key).Convert(keyType), elem) // Convert handles named string key types
+	}
+	return nil
+}
+
+// ArrayDecodeValue is the ValueDecoderFunc for array types. BSON arrays decode element-wise; a document may decode into a [N]primitive.E. Errors if the source has more elements than the array can hold.
+func (dvd DefaultValueDecoders) ArrayDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Array {
+		return ValueDecoderError{Name: "ArrayDecodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
+	}
+
+	switch vr.Type() {
+	case bsontype.Array:
+	case bsontype.Type(0), bsontype.EmbeddedDocument: // documents are only valid for arrays of primitive.E
+		if val.Type().Elem() != tE {
+			return fmt.Errorf("cannot decode document into %s", val.Type())
+		}
+	default:
+		return fmt.Errorf("cannot decode %v into an array", vr.Type())
+	}
+
+	var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
+	switch val.Type().Elem() {
+	case tE:
+		elemsFunc = dvd.decodeD // decode document elements as primitive.E values
+	default:
+		elemsFunc = dvd.decodeDefault
+	}
+
+	elems, err := elemsFunc(dc, vr, val)
+	if err != nil {
+		return err
+	}
+
+	if len(elems) > val.Len() {
+		return fmt.Errorf("more elements returned in array than can fit inside %s", val.Type())
+	}
+
+	for idx, elem := range elems {
+		val.Index(idx).Set(elem) // trailing array slots beyond len(elems) keep their zero values
+	}
+
+	return nil
+}
+
+// SliceDecodeValue is the ValueDecoderFunc for slice types. Arrays decode element-wise, Null yields a nil slice, and documents decode into []primitive.E (i.e. primitive.D).
+func (dvd DefaultValueDecoders) SliceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Slice {
+		return ValueDecoderError{Name: "SliceDecodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+	}
+
+	switch vr.Type() {
+	case bsontype.Array:
+	case bsontype.Null:
+		val.Set(reflect.Zero(val.Type())) // nil slice
+		return vr.ReadNull()
+	case bsontype.Type(0), bsontype.EmbeddedDocument: // documents are only valid for slices of primitive.E
+		if val.Type().Elem() != tE {
+			return fmt.Errorf("cannot decode document into %s", val.Type())
+		}
+	default:
+		return fmt.Errorf("cannot decode %v into a slice", vr.Type())
+	}
+
+	var elemsFunc func(DecodeContext, bsonrw.ValueReader, reflect.Value) ([]reflect.Value, error)
+	switch val.Type().Elem() {
+	case tE:
+		dc.Ancestor = val.Type() // nested interface{} documents decode into this D-like type
+		elemsFunc = dvd.decodeD
+	default:
+		elemsFunc = dvd.decodeDefault
+	}
+
+	elems, err := elemsFunc(dc, vr, val)
+	if err != nil {
+		return err
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(val.Type(), 0, len(elems)))
+	}
+
+	val.SetLen(0) // reuse the existing backing array where possible
+	val.Set(reflect.Append(val, elems...))
+
+	return nil
+}
+
+// ValueUnmarshalerDecodeValue is the ValueDecoderFunc for ValueUnmarshaler implementations. It copies the raw BSON value and hands it to UnmarshalBSONValue via reflection.
+func (dvd DefaultValueDecoders) ValueUnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.IsValid() || (!val.Type().Implements(tValueUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tValueUnmarshaler)) {
+		return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+	}
+
+	if val.Kind() == reflect.Ptr && val.IsNil() { // allocate so the unmarshaler has something to write into
+		if !val.CanSet() {
+			return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+		}
+		val.Set(reflect.New(val.Type().Elem()))
+	}
+
+	if !val.Type().Implements(tValueUnmarshaler) {
+		if !val.CanAddr() {
+			return ValueDecoderError{Name: "ValueUnmarshalerDecodeValue", Types: []reflect.Type{tValueUnmarshaler}, Received: val}
+		}
+		val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
+	}
+
+	t, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) // raw element type + bytes for the unmarshaler
+	if err != nil {
+		return err
+	}
+
+	fn := val.Convert(tValueUnmarshaler).MethodByName("UnmarshalBSONValue")
+	errVal := fn.Call([]reflect.Value{reflect.ValueOf(t), reflect.ValueOf(src)})[0] // single return value: error
+	if !errVal.IsNil() {
+		return errVal.Interface().(error)
+	}
+	return nil
+}
+
+// UnmarshalerDecodeValue is the ValueDecoderFunc for Unmarshaler implementations. It copies the raw BSON bytes and hands them to UnmarshalBSON via reflection.
+func (dvd DefaultValueDecoders) UnmarshalerDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.IsValid() || (!val.Type().Implements(tUnmarshaler) && !reflect.PtrTo(val.Type()).Implements(tUnmarshaler)) {
+		return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+	}
+
+	if val.Kind() == reflect.Ptr && val.IsNil() { // allocate so the unmarshaler has something to write into
+		if !val.CanSet() {
+			return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+		}
+		val.Set(reflect.New(val.Type().Elem()))
+	}
+
+	if !val.Type().Implements(tUnmarshaler) {
+		if !val.CanAddr() {
+			return ValueDecoderError{Name: "UnmarshalerDecodeValue", Types: []reflect.Type{tUnmarshaler}, Received: val}
+		}
+		val = val.Addr() // If the type doesn't implement the interface, a pointer to it must.
+	}
+
+	_, src, err := bsonrw.Copier{}.CopyValueToBytes(vr) // element type is discarded; UnmarshalBSON takes bytes only
+	if err != nil {
+		return err
+	}
+
+	fn := val.Convert(tUnmarshaler).MethodByName("UnmarshalBSON")
+	errVal := fn.Call([]reflect.Value{reflect.ValueOf(src)})[0] // single return value: error
+	if !errVal.IsNil() {
+		return errVal.Interface().(error)
+	}
+	return nil
+}
+
+// EmptyInterfaceDecodeValue is the ValueDecoderFunc for interface{}. It maps the BSON element type to a Go type (via the type map or dc.Ancestor), decodes into that type, and stores the result.
+func (dvd DefaultValueDecoders) EmptyInterfaceDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tEmpty {
+		return ValueDecoderError{Name: "EmptyInterfaceDecodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+	}
+
+	rtype, err := dc.LookupTypeMapEntry(vr.Type())
+	if err != nil { // no explicit mapping; handle documents and null specially
+		switch vr.Type() {
+		case bsontype.EmbeddedDocument:
+			if dc.Ancestor != nil { // use the enclosing map/D type recorded by the container decoder
+				rtype = dc.Ancestor
+				break
+			}
+			rtype = tD // default document representation
+		case bsontype.Null:
+			val.Set(reflect.Zero(val.Type()))
+			return vr.ReadNull()
+		default:
+			return err
+		}
+	}
+
+	decoder, err := dc.LookupDecoder(rtype)
+	if err != nil {
+		return err
+	}
+
+	elem := reflect.New(rtype).Elem()
+	err = decoder.DecodeValue(dc, vr, elem)
+	if err != nil {
+		return err
+	}
+
+	val.Set(elem)
+	return nil
+}
+
+// CoreDocumentDecodeValue is the ValueDecoderFunc for bsoncore.Document. It copies the raw document bytes into val, reusing val's backing array when possible.
+func (DefaultValueDecoders) CoreDocumentDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tCoreDocument {
+		return ValueDecoderError{Name: "CoreDocumentDecodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(val.Type(), 0, 0))
+	}
+
+	val.SetLen(0) // keep capacity, discard old contents
+
+	cdoc, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(bsoncore.Document), vr)
+	val.Set(reflect.ValueOf(cdoc)) // set even on error so partial bytes are visible to the caller
+	return err
+}
+
+// decodeDefault reads a BSON array from vr and decodes each element into val's element
+// type using the registered decoder, returning the decoded elements in order.
+func (dvd DefaultValueDecoders) decodeDefault(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) ([]reflect.Value, error) {
+	elems := make([]reflect.Value, 0)
+
+	ar, err := vr.ReadArray()
+	if err != nil {
+		return nil, err
+	}
+
+	eType := val.Type().Elem()
+
+	decoder, err := dc.LookupDecoder(eType) // one lookup reused for every element
+	if err != nil {
+		return nil, err
+	}
+
+	for {
+		vr, err := ar.ReadValue()
+		if err == bsonrw.ErrEOA { // end of array
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		elem := reflect.New(eType).Elem()
+
+		err = decoder.DecodeValue(dc, vr, elem)
+		if err != nil {
+			return nil, err
+		}
+		elems = append(elems, elem)
+	}
+
+	return elems, nil
+}
+
+// CodeWithScopeDecodeValue is the ValueDecoderFunc for primitive.CodeWithScope. The scope document is decoded into a primitive.D.
+func (dvd DefaultValueDecoders) CodeWithScopeDecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tCodeWithScope {
+		return ValueDecoderError{Name: "CodeWithScopeDecodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}
+	}
+
+	if vr.Type() != bsontype.CodeWithScope {
+		return fmt.Errorf("cannot decode %v into a primitive.CodeWithScope", vr.Type())
+	}
+
+	code, dr, err := vr.ReadCodeWithScope() // code string plus a reader over the scope document
+	if err != nil {
+		return err
+	}
+
+	scope := reflect.New(tD).Elem()
+
+	elems, err := dvd.decodeElemsFromDocumentReader(dc, dr)
+	if err != nil {
+		return err
+	}
+
+	scope.Set(reflect.MakeSlice(tD, 0, len(elems)))
+	scope.Set(reflect.Append(scope, elems...))
+
+	val.Set(reflect.ValueOf(primitive.CodeWithScope{Code: primitive.JavaScript(code), Scope: scope.Interface().(primitive.D)}))
+	return nil
+}
+
+// decodeD reads a BSON document from vr and returns its elements as primitive.E
+// reflect.Values. The reflect.Value parameter is unused (kept for the elemsFunc signature).
+func (dvd DefaultValueDecoders) decodeD(dc DecodeContext, vr bsonrw.ValueReader, _ reflect.Value) ([]reflect.Value, error) {
+	switch vr.Type() {
+	case bsontype.Type(0), bsontype.EmbeddedDocument: // Type(0) is the top-level document
+	default:
+		return nil, fmt.Errorf("cannot decode %v into a D", vr.Type())
+	}
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return nil, err
+	}
+
+	return dvd.decodeElemsFromDocumentReader(dc, dr)
+}
+
+// decodeElemsFromDocumentReader drains dr, decoding each element's value into an
+// interface{} and wrapping it with its key as a primitive.E, preserving document order.
+func (DefaultValueDecoders) decodeElemsFromDocumentReader(dc DecodeContext, dr bsonrw.DocumentReader) ([]reflect.Value, error) {
+	decoder, err := dc.LookupDecoder(tEmpty) // values decode via the interface{} decoder
+	if err != nil {
+		return nil, err
+	}
+
+	elems := make([]reflect.Value, 0)
+	for {
+		key, vr, err := dr.ReadElement()
+		if err == bsonrw.ErrEOD { // end of document
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+
+		val := reflect.New(tEmpty).Elem()
+		err = decoder.DecodeValue(dc, vr, val)
+		if err != nil {
+			return nil, err
+		}
+
+		elems = append(elems, reflect.ValueOf(primitive.E{Key: key, Value: val.Interface()}))
+	}
+
+	return elems, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/default_value_encoders.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/default_value_encoders.go
new file mode 100644
index 0000000..61295bd
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/default_value_encoders.go
@@ -0,0 +1,648 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"sync"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+var defaultValueEncoders DefaultValueEncoders // shared stateless instance; DefaultValueEncoders has no fields
+
+var bvwPool = bsonrw.NewBSONValueWriterPool() // pooled BSON value writers reused across encode calls
+
+var sliceWriterPool = sync.Pool{ // pooled byte-slice writers; stores *SliceWriter so Get/Put avoid an extra allocation
+	New: func() interface{} {
+		sw := make(bsonrw.SliceWriter, 0, 0)
+		return &sw
+	},
+}
+
+// encodeElement writes a single primitive.E to dw: it opens the element with e.Key,
+// writes BSON null for a nil value, and otherwise encodes e.Value with its looked-up encoder.
+func encodeElement(ec EncodeContext, dw bsonrw.DocumentWriter, e primitive.E) error {
+	vw, err := dw.WriteDocumentElement(e.Key)
+	if err != nil {
+		return err
+	}
+
+	if e.Value == nil { // nil interface value encodes as BSON null
+		return vw.WriteNull()
+	}
+	encoder, err := ec.LookupEncoder(reflect.TypeOf(e.Value))
+	if err != nil {
+		return err
+	}
+
+	err = encoder.EncodeValue(ec, vw, reflect.ValueOf(e.Value))
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// DefaultValueEncoders is a namespace type for the default ValueEncoders used
+// when creating a registry. It is stateless; its methods are registered as ValueEncoderFuncs.
+type DefaultValueEncoders struct{}
+
+// RegisterDefaultEncoders will register the encoder methods attached to DefaultValueEncoders with
+// the provided RegistryBuilder. Type-specific encoders are registered first, then kind-based fallbacks. Panics if rb is nil.
+func (dve DefaultValueEncoders) RegisterDefaultEncoders(rb *RegistryBuilder) {
+	if rb == nil {
+		panic(errors.New("argument to RegisterDefaultEncoders must not be nil"))
+	}
+	rb.
+		RegisterEncoder(tByteSlice, ValueEncoderFunc(dve.ByteSliceEncodeValue)).
+		RegisterEncoder(tTime, ValueEncoderFunc(dve.TimeEncodeValue)).
+		RegisterEncoder(tEmpty, ValueEncoderFunc(dve.EmptyInterfaceEncodeValue)).
+		RegisterEncoder(tOID, ValueEncoderFunc(dve.ObjectIDEncodeValue)).
+		RegisterEncoder(tDecimal, ValueEncoderFunc(dve.Decimal128EncodeValue)).
+		RegisterEncoder(tJSONNumber, ValueEncoderFunc(dve.JSONNumberEncodeValue)).
+		RegisterEncoder(tURL, ValueEncoderFunc(dve.URLEncodeValue)).
+		RegisterEncoder(tValueMarshaler, ValueEncoderFunc(dve.ValueMarshalerEncodeValue)).
+		RegisterEncoder(tMarshaler, ValueEncoderFunc(dve.MarshalerEncodeValue)).
+		RegisterEncoder(tProxy, ValueEncoderFunc(dve.ProxyEncodeValue)).
+		RegisterEncoder(tJavaScript, ValueEncoderFunc(dve.JavaScriptEncodeValue)).
+		RegisterEncoder(tSymbol, ValueEncoderFunc(dve.SymbolEncodeValue)).
+		RegisterEncoder(tBinary, ValueEncoderFunc(dve.BinaryEncodeValue)).
+		RegisterEncoder(tUndefined, ValueEncoderFunc(dve.UndefinedEncodeValue)).
+		RegisterEncoder(tDateTime, ValueEncoderFunc(dve.DateTimeEncodeValue)).
+		RegisterEncoder(tNull, ValueEncoderFunc(dve.NullEncodeValue)).
+		RegisterEncoder(tRegex, ValueEncoderFunc(dve.RegexEncodeValue)).
+		RegisterEncoder(tDBPointer, ValueEncoderFunc(dve.DBPointerEncodeValue)).
+		RegisterEncoder(tTimestamp, ValueEncoderFunc(dve.TimestampEncodeValue)).
+		RegisterEncoder(tMinKey, ValueEncoderFunc(dve.MinKeyEncodeValue)).
+		RegisterEncoder(tMaxKey, ValueEncoderFunc(dve.MaxKeyEncodeValue)).
+		RegisterEncoder(tCoreDocument, ValueEncoderFunc(dve.CoreDocumentEncodeValue)).
+		RegisterEncoder(tCodeWithScope, ValueEncoderFunc(dve.CodeWithScopeEncodeValue)).
+		RegisterDefaultEncoder(reflect.Bool, ValueEncoderFunc(dve.BooleanEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int8, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int16, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int32, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Int64, ValueEncoderFunc(dve.IntEncodeValue)).
+		RegisterDefaultEncoder(reflect.Uint, ValueEncoderFunc(dve.UintEncodeValue)).
+		RegisterDefaultEncoder(reflect.Uint8, ValueEncoderFunc(dve.UintEncodeValue)).
+		RegisterDefaultEncoder(reflect.Uint16, ValueEncoderFunc(dve.UintEncodeValue)).
+		RegisterDefaultEncoder(reflect.Uint32, ValueEncoderFunc(dve.UintEncodeValue)).
+		RegisterDefaultEncoder(reflect.Uint64, ValueEncoderFunc(dve.UintEncodeValue)).
+		RegisterDefaultEncoder(reflect.Float32, ValueEncoderFunc(dve.FloatEncodeValue)).
+		RegisterDefaultEncoder(reflect.Float64, ValueEncoderFunc(dve.FloatEncodeValue)).
+		RegisterDefaultEncoder(reflect.Array, ValueEncoderFunc(dve.ArrayEncodeValue)).
+		RegisterDefaultEncoder(reflect.Map, ValueEncoderFunc(dve.MapEncodeValue)).
+		RegisterDefaultEncoder(reflect.Slice, ValueEncoderFunc(dve.SliceEncodeValue)).
+		RegisterDefaultEncoder(reflect.String, ValueEncoderFunc(dve.StringEncodeValue)).
+		RegisterDefaultEncoder(reflect.Struct, &StructCodec{cache: make(map[reflect.Type]*structDescription), parser: DefaultStructTagParser}).
+		RegisterDefaultEncoder(reflect.Ptr, NewPointerCodec())
+}
+
+// BooleanEncodeValue is the ValueEncoderFunc for bool types (including named bool types, since only the kind is checked).
+func (dve DefaultValueEncoders) BooleanEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Bool {
+		return ValueEncoderError{Name: "BooleanEncodeValue", Kinds: []reflect.Kind{reflect.Bool}, Received: val}
+	}
+	return vw.WriteBoolean(val.Bool())
+}
+
+// fitsIn32Bits reports whether i is representable as an int32.
+func fitsIn32Bits(i int64) bool {
+	return math.MinInt32 <= i && i <= math.MaxInt32
+}
+
+// IntEncodeValue is the ValueEncoderFunc for int types. int8/16/32 always write Int32; int writes Int32 when it fits; int64 writes Int32 only when MinSize is set and it fits.
+func (dve DefaultValueEncoders) IntEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	switch val.Kind() {
+	case reflect.Int8, reflect.Int16, reflect.Int32:
+		return vw.WriteInt32(int32(val.Int()))
+	case reflect.Int:
+		i64 := val.Int()
+		if fitsIn32Bits(i64) { // prefer the smaller wire type when lossless
+			return vw.WriteInt32(int32(i64))
+		}
+		return vw.WriteInt64(i64)
+	case reflect.Int64:
+		i64 := val.Int()
+		if ec.MinSize && fitsIn32Bits(i64) { // int64 shrinks only when the caller opted in via MinSize
+			return vw.WriteInt32(int32(i64))
+		}
+		return vw.WriteInt64(i64)
+	}
+
+	return ValueEncoderError{
+		Name:     "IntEncodeValue",
+		Kinds:    []reflect.Kind{reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int},
+		Received: val,
+	}
+}
+
+// UintEncodeValue is the ValueEncoderFunc for uint types. uint8/16 write Int32; larger kinds write Int64 (or Int32 with MinSize) and error when the value overflows int64.
+func (dve DefaultValueEncoders) UintEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	switch val.Kind() {
+	case reflect.Uint8, reflect.Uint16:
+		return vw.WriteInt32(int32(val.Uint()))
+	case reflect.Uint, reflect.Uint32, reflect.Uint64:
+		u64 := val.Uint()
+		if ec.MinSize && u64 <= math.MaxInt32 {
+			return vw.WriteInt32(int32(u64))
+		}
+		if u64 > math.MaxInt64 { // BSON has no unsigned 64-bit type
+			return fmt.Errorf("%d overflows int64", u64)
+		}
+		return vw.WriteInt64(int64(u64))
+	}
+
+	return ValueEncoderError{
+		Name:     "UintEncodeValue",
+		Kinds:    []reflect.Kind{reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint},
+		Received: val,
+	}
+}
+
+// FloatEncodeValue is the ValueEncoderFunc for float types. Both float32 and float64 write a BSON double.
+func (dve DefaultValueEncoders) FloatEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	switch val.Kind() {
+	case reflect.Float32, reflect.Float64:
+		return vw.WriteDouble(val.Float())
+	}
+
+	return ValueEncoderError{Name: "FloatEncodeValue", Kinds: []reflect.Kind{reflect.Float32, reflect.Float64}, Received: val}
+}
+
+// StringEncodeValue is the ValueEncoderFunc for string types (any value of string kind).
+func (dve DefaultValueEncoders) StringEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.Kind() != reflect.String {
+		return ValueEncoderError{
+			Name:     "StringEncodeValue",
+			Kinds:    []reflect.Kind{reflect.String},
+			Received: val,
+		}
+	}
+
+	return vw.WriteString(val.String())
+}
+
+// ObjectIDEncodeValue is the ValueEncoderFunc for primitive.ObjectID. The exact type must match tOID.
+func (dve DefaultValueEncoders) ObjectIDEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tOID {
+		return ValueEncoderError{Name: "ObjectIDEncodeValue", Types: []reflect.Type{tOID}, Received: val}
+	}
+	return vw.WriteObjectID(val.Interface().(primitive.ObjectID))
+}
+
+// Decimal128EncodeValue is the ValueEncoderFunc for primitive.Decimal128. The exact type must match tDecimal.
+func (dve DefaultValueEncoders) Decimal128EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDecimal {
+		return ValueEncoderError{Name: "Decimal128EncodeValue", Types: []reflect.Type{tDecimal}, Received: val}
+	}
+	return vw.WriteDecimal128(val.Interface().(primitive.Decimal128))
+}
+
+// JSONNumberEncodeValue is the ValueEncoderFunc for json.Number. The number is encoded as an integer when it parses as int64, otherwise as a double.
+func (dve DefaultValueEncoders) JSONNumberEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tJSONNumber {
+		return ValueEncoderError{Name: "JSONNumberEncodeValue", Types: []reflect.Type{tJSONNumber}, Received: val}
+	}
+	jsnum := val.Interface().(json.Number)
+
+	// Attempt int first, then float64
+	if i64, err := jsnum.Int64(); err == nil {
+		return dve.IntEncodeValue(ec, vw, reflect.ValueOf(i64)) // delegate so MinSize handling applies
+	}
+
+	f64, err := jsnum.Float64()
+	if err != nil {
+		return err
+	}
+
+	return dve.FloatEncodeValue(ec, vw, reflect.ValueOf(f64))
+}
+
+// URLEncodeValue is the ValueEncoderFunc for url.URL, encoded as its String() form.
+func (dve DefaultValueEncoders) URLEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tURL {
+		return ValueEncoderError{Name: "URLEncodeValue", Types: []reflect.Type{tURL}, Received: val}
+	}
+	u := val.Interface().(url.URL)
+	return vw.WriteString(u.String())
+}
+
+// TimeEncodeValue is the ValueEncoderFunc for time.Time, encoded as a BSON DateTime (milliseconds since the Unix epoch).
+func (dve DefaultValueEncoders) TimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTime {
+		return ValueEncoderError{Name: "TimeEncodeValue", Types: []reflect.Type{tTime}, Received: val}
+	}
+	tt := val.Interface().(time.Time)
+	return vw.WriteDateTime(tt.Unix()*1000 + int64(tt.Nanosecond()/1e6)) // seconds→millis plus sub-second millis; truncates below 1ms
+}
+
+// ByteSliceEncodeValue is the ValueEncoderFunc for []byte. A nil slice encodes as BSON null, otherwise as generic binary.
+func (dve DefaultValueEncoders) ByteSliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tByteSlice {
+		return ValueEncoderError{Name: "ByteSliceEncodeValue", Types: []reflect.Type{tByteSlice}, Received: val}
+	}
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	return vw.WriteBinary(val.Interface().([]byte))
+}
+
+// MapEncodeValue is the ValueEncoderFunc for map[string]* types. A nil map encodes as null when the writer allows it; otherwise it is written as an (empty) document.
+func (dve DefaultValueEncoders) MapEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Map || val.Type().Key().Kind() != reflect.String {
+		return ValueEncoderError{Name: "MapEncodeValue", Kinds: []reflect.Kind{reflect.Map}, Received: val}
+	}
+
+	if val.IsNil() {
+		// If we have a nil map but we can't WriteNull, that means we're probably trying to encode
+		// to a TopLevel document. We can't currently tell if this is what actually happened, but if
+		// there's a deeper underlying problem, the error will also be returned from WriteDocument,
+		// so just continue. The operations on a map reflection value are valid, so we can call
+		// MapKeys within mapEncodeValue without a problem.
+		err := vw.WriteNull()
+		if err == nil {
+			return nil
+		}
+	}
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return dve.mapEncodeValue(ec, dw, val, nil) // nil collisionFn: no inline-map key checks
+}
+
+// mapEncodeValue handles encoding of the values of a map. The collisionFn returns
+// true if the provided key exists, this is mainly used for inline maps in the
+// struct codec. Iteration order follows reflect's MapKeys and is not deterministic.
+func (dve DefaultValueEncoders) mapEncodeValue(ec EncodeContext, dw bsonrw.DocumentWriter, val reflect.Value, collisionFn func(string) bool) error {
+
+	encoder, err := ec.LookupEncoder(val.Type().Elem()) // one lookup reused for every entry
+	if err != nil {
+		return err
+	}
+
+	keys := val.MapKeys()
+	for _, key := range keys {
+		if collisionFn != nil && collisionFn(key.String()) {
+			return fmt.Errorf("Key %s of inlined map conflicts with a struct field name", key)
+		}
+		vw, err := dw.WriteDocumentElement(key.String())
+		if err != nil {
+			return err
+		}
+
+		if enc, ok := encoder.(ValueEncoder); ok {
+			err = enc.EncodeValue(ec, vw, val.MapIndex(key))
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		err = encoder.EncodeValue(ec, vw, val.MapIndex(key))
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// ArrayEncodeValue is the ValueEncoderFunc for array types. Arrays of primitive.E encode as a document; all other arrays encode as a BSON array.
+func (dve DefaultValueEncoders) ArrayEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Array {
+		return ValueEncoderError{Name: "ArrayEncodeValue", Kinds: []reflect.Kind{reflect.Array}, Received: val}
+	}
+
+	// If we have a []primitive.E we want to treat it as a document instead of as an array.
+	if val.Type().Elem() == tE {
+		dw, err := vw.WriteDocument()
+		if err != nil {
+			return err
+		}
+
+		for idx := 0; idx < val.Len(); idx++ {
+			e := val.Index(idx).Interface().(primitive.E)
+			err = encodeElement(ec, dw, e)
+			if err != nil {
+				return err
+			}
+		}
+
+		return dw.WriteDocumentEnd()
+	}
+
+	aw, err := vw.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	encoder, err := ec.LookupEncoder(val.Type().Elem()) // one lookup reused for every element
+	if err != nil {
+		return err
+	}
+
+	for idx := 0; idx < val.Len(); idx++ {
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		err = encoder.EncodeValue(ec, vw, val.Index(idx))
+		if err != nil {
+			return err
+		}
+	}
+	return aw.WriteArrayEnd()
+}
+
+// SliceEncodeValue is the ValueEncoderFunc for slice types. A nil slice encodes as null; D-convertible slices encode as a document; all other slices encode as a BSON array.
+func (dve DefaultValueEncoders) SliceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Slice {
+		return ValueEncoderError{Name: "SliceEncodeValue", Kinds: []reflect.Kind{reflect.Slice}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	// If we have a []primitive.E we want to treat it as a document instead of as an array.
+	if val.Type().ConvertibleTo(tD) {
+		d := val.Convert(tD).Interface().(primitive.D)
+
+		dw, err := vw.WriteDocument()
+		if err != nil {
+			return err
+		}
+
+		for _, e := range d {
+			err = encodeElement(ec, dw, e)
+			if err != nil {
+				return err
+			}
+		}
+
+		return dw.WriteDocumentEnd()
+	}
+
+	aw, err := vw.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	encoder, err := ec.LookupEncoder(val.Type().Elem()) // one lookup reused for every element
+	if err != nil {
+		return err
+	}
+
+	for idx := 0; idx < val.Len(); idx++ {
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		err = encoder.EncodeValue(ec, vw, val.Index(idx))
+		if err != nil {
+			return err
+		}
+	}
+	return aw.WriteArrayEnd()
+}
+
+// EmptyInterfaceEncodeValue is the ValueEncoderFunc for interface{}. A nil interface encodes as null; otherwise the dynamic type's encoder is used.
+func (dve DefaultValueEncoders) EmptyInterfaceEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tEmpty {
+		return ValueEncoderError{Name: "EmptyInterfaceEncodeValue", Types: []reflect.Type{tEmpty}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+	encoder, err := ec.LookupEncoder(val.Elem().Type()) // look up by the concrete dynamic type
+	if err != nil {
+		return err
+	}
+
+	return encoder.EncodeValue(ec, vw, val.Elem())
+}
+
+// ValueMarshalerEncodeValue is the ValueEncoderFunc for ValueMarshaler implementations. It calls MarshalBSONValue via reflection and copies the returned typed bytes to vw.
+func (dve DefaultValueEncoders) ValueMarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || !val.Type().Implements(tValueMarshaler) {
+		return ValueEncoderError{Name: "ValueMarshalerEncodeValue", Types: []reflect.Type{tValueMarshaler}, Received: val}
+	}
+
+	fn := val.Convert(tValueMarshaler).MethodByName("MarshalBSONValue")
+	returns := fn.Call(nil) // (bsontype.Type, []byte, error)
+	if !returns[2].IsNil() {
+		return returns[2].Interface().(error)
+	}
+	t, data := returns[0].Interface().(bsontype.Type), returns[1].Interface().([]byte)
+	return bsonrw.Copier{}.CopyValueFromBytes(vw, t, data)
+}
+
+// MarshalerEncodeValue is the ValueEncoderFunc for Marshaler implementations. It calls MarshalBSON via reflection; the result is copied to vw as an embedded document.
+func (dve DefaultValueEncoders) MarshalerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || !val.Type().Implements(tMarshaler) {
+		return ValueEncoderError{Name: "MarshalerEncodeValue", Types: []reflect.Type{tMarshaler}, Received: val}
+	}
+
+	fn := val.Convert(tMarshaler).MethodByName("MarshalBSON")
+	returns := fn.Call(nil) // ([]byte, error)
+	if !returns[1].IsNil() {
+		return returns[1].Interface().(error)
+	}
+	data := returns[0].Interface().([]byte)
+	return bsonrw.Copier{}.CopyValueFromBytes(vw, bsontype.EmbeddedDocument, data)
+}
+
+// ProxyEncodeValue is the ValueEncoderFunc for Proxy implementations. It calls ProxyBSON via reflection and encodes the proxied value with its own encoder.
+func (dve DefaultValueEncoders) ProxyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || !val.Type().Implements(tProxy) {
+		return ValueEncoderError{Name: "ProxyEncodeValue", Types: []reflect.Type{tProxy}, Received: val}
+	}
+
+	fn := val.Convert(tProxy).MethodByName("ProxyBSON")
+	returns := fn.Call(nil) // (interface{}, error)
+	if !returns[1].IsNil() {
+		return returns[1].Interface().(error)
+	}
+	data := returns[0]
+	var encoder ValueEncoder
+	var err error
+	if data.Elem().IsValid() {
+		encoder, err = ec.LookupEncoder(data.Elem().Type())
+	} else {
+		encoder, err = ec.LookupEncoder(nil) // nil proxied value: use the registry's encoder for a nil type
+	}
+	if err != nil {
+		return err
+	}
+	return encoder.EncodeValue(ec, vw, data.Elem())
+}
+
+// JavaScriptEncodeValue is the ValueEncoderFunc for the primitive.JavaScript type (a string kind written as BSON JavaScript code).
+func (DefaultValueEncoders) JavaScriptEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tJavaScript {
+		return ValueEncoderError{Name: "JavaScriptEncodeValue", Types: []reflect.Type{tJavaScript}, Received: val}
+	}
+
+	return vw.WriteJavascript(val.String())
+}
+
+// SymbolEncodeValue is the ValueEncoderFunc for the primitive.Symbol type (a string kind written as a BSON symbol).
+func (DefaultValueEncoders) SymbolEncodeValue(ectx EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tSymbol {
+		return ValueEncoderError{Name: "SymbolEncodeValue", Types: []reflect.Type{tSymbol}, Received: val}
+	}
+
+	return vw.WriteSymbol(val.String())
+}
+
+// BinaryEncodeValue is the ValueEncoderFunc for primitive.Binary, preserving the binary subtype.
+func (DefaultValueEncoders) BinaryEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tBinary {
+		return ValueEncoderError{Name: "BinaryEncodeValue", Types: []reflect.Type{tBinary}, Received: val}
+	}
+	b := val.Interface().(primitive.Binary)
+
+	return vw.WriteBinaryWithSubtype(b.Data, b.Subtype)
+}
+
+// UndefinedEncodeValue is the ValueEncoderFunc for primitive.Undefined (no payload).
+func (DefaultValueEncoders) UndefinedEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tUndefined {
+		return ValueEncoderError{Name: "UndefinedEncodeValue", Types: []reflect.Type{tUndefined}, Received: val}
+	}
+
+	return vw.WriteUndefined()
+}
+
+// DateTimeEncodeValue is the ValueEncoderFunc for primitive.DateTime (an int64 of milliseconds).
+func (DefaultValueEncoders) DateTimeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDateTime {
+		return ValueEncoderError{Name: "DateTimeEncodeValue", Types: []reflect.Type{tDateTime}, Received: val}
+	}
+
+	return vw.WriteDateTime(val.Int())
+}
+
+// NullEncodeValue is the ValueEncoderFunc for primitive.Null (no payload).
+func (DefaultValueEncoders) NullEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tNull {
+		return ValueEncoderError{Name: "NullEncodeValue", Types: []reflect.Type{tNull}, Received: val}
+	}
+
+	return vw.WriteNull()
+}
+
+// RegexEncodeValue is the ValueEncoderFunc for primitive.Regex (pattern + options strings).
+func (DefaultValueEncoders) RegexEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tRegex {
+		return ValueEncoderError{Name: "RegexEncodeValue", Types: []reflect.Type{tRegex}, Received: val}
+	}
+
+	regex := val.Interface().(primitive.Regex)
+
+	return vw.WriteRegex(regex.Pattern, regex.Options)
+}
+
+// DBPointerEncodeValue is the ValueEncoderFunc for primitive.DBPointer (namespace + ObjectID).
+func (DefaultValueEncoders) DBPointerEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDBPointer {
+		return ValueEncoderError{Name: "DBPointerEncodeValue", Types: []reflect.Type{tDBPointer}, Received: val}
+	}
+
+	dbp := val.Interface().(primitive.DBPointer)
+
+	return vw.WriteDBPointer(dbp.DB, dbp.Pointer)
+}
+
+// TimestampEncodeValue is the ValueEncoderFunc for Timestamp.
+func (DefaultValueEncoders) TimestampEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tTimestamp {
+		return ValueEncoderError{Name: "TimestampEncodeValue", Types: []reflect.Type{tTimestamp}, Received: val}
+	}
+
+	ts := val.Interface().(primitive.Timestamp)
+
+	return vw.WriteTimestamp(ts.T, ts.I)
+}
+
+// MinKeyEncodeValue is the ValueEncoderFunc for MinKey.
+func (DefaultValueEncoders) MinKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tMinKey {
+		return ValueEncoderError{Name: "MinKeyEncodeValue", Types: []reflect.Type{tMinKey}, Received: val}
+	}
+
+	return vw.WriteMinKey()
+}
+
+// MaxKeyEncodeValue is the ValueEncoderFunc for MaxKey.
+func (DefaultValueEncoders) MaxKeyEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tMaxKey {
+		return ValueEncoderError{Name: "MaxKeyEncodeValue", Types: []reflect.Type{tMaxKey}, Received: val}
+	}
+
+	return vw.WriteMaxKey()
+}
+
+// CoreDocumentEncodeValue is the ValueEncoderFunc for bsoncore.Document.
+func (DefaultValueEncoders) CoreDocumentEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tCoreDocument {
+		return ValueEncoderError{Name: "CoreDocumentEncodeValue", Types: []reflect.Type{tCoreDocument}, Received: val}
+	}
+
+	cdoc := val.Interface().(bsoncore.Document)
+
+	return bsonrw.Copier{}.CopyDocumentFromBytes(vw, cdoc)
+}
+
+// CodeWithScopeEncodeValue is the ValueEncoderFunc for CodeWithScope.
+func (dve DefaultValueEncoders) CodeWithScopeEncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tCodeWithScope {
+		return ValueEncoderError{Name: "CodeWithScopeEncodeValue", Types: []reflect.Type{tCodeWithScope}, Received: val}
+	}
+
+	cws := val.Interface().(primitive.CodeWithScope)
+
+	dw, err := vw.WriteCodeWithScope(string(cws.Code))
+	if err != nil {
+		return err
+	}
+
+	sw := sliceWriterPool.Get().(*bsonrw.SliceWriter)
+	defer sliceWriterPool.Put(sw)
+	*sw = (*sw)[:0]
+
+	scopeVW := bvwPool.Get(sw)
+	defer bvwPool.Put(scopeVW)
+
+	encoder, err := ec.LookupEncoder(reflect.TypeOf(cws.Scope))
+	if err != nil {
+		return err
+	}
+
+	err = encoder.EncodeValue(ec, scopeVW, reflect.ValueOf(cws.Scope))
+	if err != nil {
+		return err
+	}
+
+	err = bsonrw.Copier{}.CopyBytesToDocumentWriter(dw, *sw)
+	if err != nil {
+		return err
+	}
+	return dw.WriteDocumentEnd()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/doc.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/doc.go
new file mode 100644
index 0000000..978511c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/doc.go
@@ -0,0 +1,61 @@
+// Package bsoncodec provides a system for encoding values to BSON representations and decoding
+// values from BSON representations. This package considers both binary BSON and ExtendedJSON as
+// BSON representations. The types in this package enable a flexible system for handling this
+// encoding and decoding.
+//
+// The codec system is composed of two parts:
+//
+// 1) ValueEncoders and ValueDecoders that handle encoding and decoding Go values to and from BSON
+// representations.
+//
+// 2) A Registry that holds these ValueEncoders and ValueDecoders and provides methods for
+// retrieving them.
+//
+// ValueEncoders and ValueDecoders
+//
+// The ValueEncoder interface is implemented by types that can encode a provided Go type to BSON.
+// The value to encode is provided as a reflect.Value and a bsonrw.ValueWriter is used within the
+// EncodeValue method to actually create the BSON representation. For convenience, ValueEncoderFunc
+// is provided to allow use of a function with the correct signature as a ValueEncoder. An
+// EncodeContext instance is provided to allow implementations to lookup further ValueEncoders and
+// to provide configuration information.
+//
+// The ValueDecoder interface is the inverse of the ValueEncoder. Implementations should ensure that
+// the value they receive is settable. Similar to ValueEncoderFunc, ValueDecoderFunc is provided to
+// allow the use of a function with the correct signature as a ValueDecoder. A DecodeContext
+// instance is provided and serves similar functionality to the EncodeContext.
+//
+// Registry and RegistryBuilder
+//
+// A Registry is an immutable store for ValueEncoders, ValueDecoders, and a type map. For looking up
+// ValueEncoders and Decoders the Registry first attempts to find a ValueEncoder or ValueDecoder for
+// the type provided; if one cannot be found it then checks to see if a registered ValueEncoder or
+// ValueDecoder exists for an interface the type implements. Finally, the reflect.Kind of the type
+// is used to lookup a default ValueEncoder or ValueDecoder for that kind. If no ValueEncoder or
+// ValueDecoder can be found, an error is returned.
+//
+// The Registry also holds a type map. This allows users to retrieve the Go type that should be used
+// when decoding a BSON value into an empty interface. This is primarily only used for the empty
+// interface ValueDecoder.
+//
+// A RegistryBuilder is used to construct a Registry. The Register methods are used to associate
+// either a reflect.Type or a reflect.Kind with a ValueEncoder or ValueDecoder. A RegistryBuilder
+// returned from NewRegistryBuilder contains no registered ValueEncoders nor ValueDecoders and
+// contains an empty type map.
+//
+// The RegisterTypeMapEntry method handles associating a BSON type with a Go type. For example, if
+// you want to decode BSON int64 and int32 values into Go int instances, you would do the following:
+//
+//  var regbuilder *RegistryBuilder = ... intType := reflect.TypeOf(int(0))
+//  regbuilder.RegisterTypeMapEntry(bsontype.Int64, intType).RegisterTypeMapEntry(bsontype.Int32,
+//  intType)
+//
+// DefaultValueEncoders and DefaultValueDecoders
+//
+// The DefaultValueEncoders and DefaultValueDecoders types provide a full set of ValueEncoders and
+// ValueDecoders for handling a wide range of Go types, including all of the types within the
+// primitive package. To make registering these codecs easier, a helper method on each type is
+// provided. For the DefaultValueEncoders type the method is called RegisterDefaultEncoders and for
+// the DefaultValueDecoders type the method is called RegisterDefaultDecoders, this method also
+// handles registering type map entries for each BSON type.
+package bsoncodec
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/mode.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/mode.go
new file mode 100644
index 0000000..fbd9f0a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/mode.go
@@ -0,0 +1,65 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import "fmt"
+
+type mode int
+
+const (
+	_ mode = iota
+	mTopLevel
+	mDocument
+	mArray
+	mValue
+	mElement
+	mCodeWithScope
+	mSpacer
+)
+
+func (m mode) String() string {
+	var str string
+
+	switch m {
+	case mTopLevel:
+		str = "TopLevel"
+	case mDocument:
+		str = "DocumentMode"
+	case mArray:
+		str = "ArrayMode"
+	case mValue:
+		str = "ValueMode"
+	case mElement:
+		str = "ElementMode"
+	case mCodeWithScope:
+		str = "CodeWithScopeMode"
+	case mSpacer:
+		str = "CodeWithScopeSpacerFrame"
+	default:
+		str = "UnknownMode"
+	}
+
+	return str
+}
+
+// TransitionError is an error returned when an invalid state transition of a
+// ValueReader or ValueWriter state machine occurs.
+type TransitionError struct {
+	parent      mode
+	current     mode
+	destination mode
+}
+
+func (te TransitionError) Error() string {
+	if te.destination == mode(0) {
+		return fmt.Sprintf("invalid state transition: cannot read/write value while in %s", te.current)
+	}
+	if te.parent == mode(0) {
+		return fmt.Sprintf("invalid state transition: %s -> %s", te.current, te.destination)
+	}
+	return fmt.Sprintf("invalid state transition: %s -> %s; parent %s", te.current, te.destination, te.parent)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/pointer_codec.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/pointer_codec.go
new file mode 100644
index 0000000..b78ec0a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/pointer_codec.go
@@ -0,0 +1,110 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+var defaultPointerCodec = &PointerCodec{
+	ecache: make(map[reflect.Type]ValueEncoder),
+	dcache: make(map[reflect.Type]ValueDecoder),
+}
+
+var _ ValueEncoder = &PointerCodec{}
+var _ ValueDecoder = &PointerCodec{}
+
+// PointerCodec is the Codec used for pointers.
+type PointerCodec struct {
+	ecache map[reflect.Type]ValueEncoder
+	dcache map[reflect.Type]ValueDecoder
+	l      sync.RWMutex
+}
+
+// NewPointerCodec returns a PointerCodec that has been initialized.
+func NewPointerCodec() *PointerCodec {
+	return &PointerCodec{
+		ecache: make(map[reflect.Type]ValueEncoder),
+		dcache: make(map[reflect.Type]ValueDecoder),
+	}
+}
+
+// EncodeValue handles encoding a pointer by either encoding it to BSON Null if the pointer is nil
+// or looking up an encoder for the type of value the pointer points to.
+func (pc *PointerCodec) EncodeValue(ec EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if val.Kind() != reflect.Ptr {
+		if !val.IsValid() {
+			return vw.WriteNull()
+		}
+		return ValueEncoderError{Name: "PointerCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	pc.l.RLock()
+	enc, ok := pc.ecache[val.Type()]
+	pc.l.RUnlock()
+	if ok {
+		if enc == nil {
+			return ErrNoEncoder{Type: val.Type()}
+		}
+		return enc.EncodeValue(ec, vw, val.Elem())
+	}
+
+	enc, err := ec.LookupEncoder(val.Type().Elem())
+	pc.l.Lock()
+	pc.ecache[val.Type()] = enc
+	pc.l.Unlock()
+	if err != nil {
+		return err
+	}
+
+	return enc.EncodeValue(ec, vw, val.Elem())
+}
+
+// DecodeValue handles decoding a pointer by looking up a decoder for the type it points to and
+// using that to decode. If the BSON value is Null, this method will set the pointer to nil.
+func (pc *PointerCodec) DecodeValue(dc DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Ptr {
+		return ValueDecoderError{Name: "PointerCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Ptr}, Received: val}
+	}
+
+	if vr.Type() == bsontype.Null {
+		val.Set(reflect.Zero(val.Type()))
+		return vr.ReadNull()
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.New(val.Type().Elem()))
+	}
+
+	pc.l.RLock()
+	dec, ok := pc.dcache[val.Type()]
+	pc.l.RUnlock()
+	if ok {
+		if dec == nil {
+			return ErrNoDecoder{Type: val.Type()}
+		}
+		return dec.DecodeValue(dc, vr, val.Elem())
+	}
+
+	dec, err := dc.LookupDecoder(val.Type().Elem())
+	pc.l.Lock()
+	pc.dcache[val.Type()] = dec
+	pc.l.Unlock()
+	if err != nil {
+		return err
+	}
+
+	return dec.DecodeValue(dc, vr, val.Elem())
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/proxy.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/proxy.go
new file mode 100644
index 0000000..4cf2b01
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/proxy.go
@@ -0,0 +1,14 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+// Proxy is an interface implemented by types that cannot themselves be directly encoded. Types
+// that implement this interface will have ProxyBSON called during the encoding process and that
+// value will be encoded in place for the implementer.
+type Proxy interface {
+	ProxyBSON() (interface{}, error)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/registry.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/registry.go
new file mode 100644
index 0000000..741deb8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/registry.go
@@ -0,0 +1,384 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"reflect"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+// ErrNilType is returned when nil is passed to either LookupEncoder or LookupDecoder.
+var ErrNilType = errors.New("cannot perform a decoder lookup on <nil>")
+
+// ErrNotPointer is returned when a non-pointer type is provided to LookupDecoder.
+var ErrNotPointer = errors.New("non-pointer provided to LookupDecoder")
+
+// ErrNoEncoder is returned when there wasn't an encoder available for a type.
+type ErrNoEncoder struct {
+	Type reflect.Type
+}
+
+func (ene ErrNoEncoder) Error() string {
+	if ene.Type == nil {
+		return "no encoder found for <nil>"
+	}
+	return "no encoder found for " + ene.Type.String()
+}
+
+// ErrNoDecoder is returned when there wasn't a decoder available for a type.
+type ErrNoDecoder struct {
+	Type reflect.Type
+}
+
+func (end ErrNoDecoder) Error() string {
+	return "no decoder found for " + end.Type.String()
+}
+
+// ErrNoTypeMapEntry is returned when there wasn't a type available for the provided BSON type.
+type ErrNoTypeMapEntry struct {
+	Type bsontype.Type
+}
+
+func (entme ErrNoTypeMapEntry) Error() string {
+	return "no type map entry found for " + entme.Type.String()
+}
+
+// ErrNotInterface is returned when the provided type is not an interface.
+var ErrNotInterface = errors.New("The provided type is not an interface")
+
+var defaultRegistry *Registry
+
+func init() {
+	defaultRegistry = buildDefaultRegistry()
+}
+
+// A RegistryBuilder is used to build a Registry. This type is not goroutine
+// safe.
+type RegistryBuilder struct {
+	typeEncoders      map[reflect.Type]ValueEncoder
+	interfaceEncoders []interfaceValueEncoder
+	kindEncoders      map[reflect.Kind]ValueEncoder
+
+	typeDecoders      map[reflect.Type]ValueDecoder
+	interfaceDecoders []interfaceValueDecoder
+	kindDecoders      map[reflect.Kind]ValueDecoder
+
+	typeMap map[bsontype.Type]reflect.Type
+}
+
+// A Registry is used to store and retrieve codecs for types and interfaces. This type is the main
+// type passed around, and Encoders and Decoders are constructed from it.
+type Registry struct {
+	typeEncoders map[reflect.Type]ValueEncoder
+	typeDecoders map[reflect.Type]ValueDecoder
+
+	interfaceEncoders []interfaceValueEncoder
+	interfaceDecoders []interfaceValueDecoder
+
+	kindEncoders map[reflect.Kind]ValueEncoder
+	kindDecoders map[reflect.Kind]ValueDecoder
+
+	typeMap map[bsontype.Type]reflect.Type
+
+	mu sync.RWMutex
+}
+
+// NewRegistryBuilder creates a new empty RegistryBuilder.
+func NewRegistryBuilder() *RegistryBuilder {
+	return &RegistryBuilder{
+		typeEncoders: make(map[reflect.Type]ValueEncoder),
+		typeDecoders: make(map[reflect.Type]ValueDecoder),
+
+		interfaceEncoders: make([]interfaceValueEncoder, 0),
+		interfaceDecoders: make([]interfaceValueDecoder, 0),
+
+		kindEncoders: make(map[reflect.Kind]ValueEncoder),
+		kindDecoders: make(map[reflect.Kind]ValueDecoder),
+
+		typeMap: make(map[bsontype.Type]reflect.Type),
+	}
+}
+
+func buildDefaultRegistry() *Registry {
+	rb := NewRegistryBuilder()
+	defaultValueEncoders.RegisterDefaultEncoders(rb)
+	defaultValueDecoders.RegisterDefaultDecoders(rb)
+	return rb.Build()
+}
+
+// RegisterCodec will register the provided ValueCodec for the provided type.
+func (rb *RegistryBuilder) RegisterCodec(t reflect.Type, codec ValueCodec) *RegistryBuilder {
+	rb.RegisterEncoder(t, codec)
+	rb.RegisterDecoder(t, codec)
+	return rb
+}
+
+// RegisterEncoder will register the provided ValueEncoder to the provided type.
+//
+// The type registered will be used directly, so an encoder can be registered for a type and a
+// different encoder can be registered for a pointer to that type.
+func (rb *RegistryBuilder) RegisterEncoder(t reflect.Type, enc ValueEncoder) *RegistryBuilder {
+	if t == tEmpty {
+		rb.typeEncoders[t] = enc
+		return rb
+	}
+	switch t.Kind() {
+	case reflect.Interface:
+		for idx, ir := range rb.interfaceEncoders {
+			if ir.i == t {
+				rb.interfaceEncoders[idx].ve = enc
+				return rb
+			}
+		}
+
+		rb.interfaceEncoders = append(rb.interfaceEncoders, interfaceValueEncoder{i: t, ve: enc})
+	default:
+		rb.typeEncoders[t] = enc
+	}
+	return rb
+}
+
+// RegisterDecoder will register the provided ValueDecoder to the provided type.
+//
+// The type registered will be used directly, so a decoder can be registered for a type and a
+// different decoder can be registered for a pointer to that type.
+func (rb *RegistryBuilder) RegisterDecoder(t reflect.Type, dec ValueDecoder) *RegistryBuilder {
+	if t == nil {
+		rb.typeDecoders[nil] = dec
+		return rb
+	}
+	if t == tEmpty {
+		rb.typeDecoders[t] = dec
+		return rb
+	}
+	switch t.Kind() {
+	case reflect.Interface:
+		for idx, ir := range rb.interfaceDecoders {
+			if ir.i == t {
+				rb.interfaceDecoders[idx].vd = dec
+				return rb
+			}
+		}
+
+		rb.interfaceDecoders = append(rb.interfaceDecoders, interfaceValueDecoder{i: t, vd: dec})
+	default:
+		rb.typeDecoders[t] = dec
+	}
+	return rb
+}
+
+// RegisterDefaultEncoder will register the provided ValueEncoder to the provided
+// kind.
+func (rb *RegistryBuilder) RegisterDefaultEncoder(kind reflect.Kind, enc ValueEncoder) *RegistryBuilder {
+	rb.kindEncoders[kind] = enc
+	return rb
+}
+
+// RegisterDefaultDecoder will register the provided ValueDecoder to the
+// provided kind.
+func (rb *RegistryBuilder) RegisterDefaultDecoder(kind reflect.Kind, dec ValueDecoder) *RegistryBuilder {
+	rb.kindDecoders[kind] = dec
+	return rb
+}
+
+// RegisterTypeMapEntry will register the provided type to the BSON type. The primary usage for this
+// mapping is decoding situations where an empty interface is used and a default type needs to be
+// created and decoded into.
+//
+// NOTE: It is unlikely that registering a type for BSON Embedded Document is actually desired. By
+// registering a type map entry for BSON Embedded Document the type registered will be used in any
+// case where a BSON Embedded Document will be decoded into an empty interface. For example, if you
+// register primitive.M, the EmptyInterface decoder will always use primitive.M, even if an ancestor
+// was a primitive.D.
+func (rb *RegistryBuilder) RegisterTypeMapEntry(bt bsontype.Type, rt reflect.Type) *RegistryBuilder {
+	rb.typeMap[bt] = rt
+	return rb
+}
+
+// Build creates a Registry from the current state of this RegistryBuilder.
+func (rb *RegistryBuilder) Build() *Registry {
+	registry := new(Registry)
+
+	registry.typeEncoders = make(map[reflect.Type]ValueEncoder)
+	for t, enc := range rb.typeEncoders {
+		registry.typeEncoders[t] = enc
+	}
+
+	registry.typeDecoders = make(map[reflect.Type]ValueDecoder)
+	for t, dec := range rb.typeDecoders {
+		registry.typeDecoders[t] = dec
+	}
+
+	registry.interfaceEncoders = make([]interfaceValueEncoder, len(rb.interfaceEncoders))
+	copy(registry.interfaceEncoders, rb.interfaceEncoders)
+
+	registry.interfaceDecoders = make([]interfaceValueDecoder, len(rb.interfaceDecoders))
+	copy(registry.interfaceDecoders, rb.interfaceDecoders)
+
+	registry.kindEncoders = make(map[reflect.Kind]ValueEncoder)
+	for kind, enc := range rb.kindEncoders {
+		registry.kindEncoders[kind] = enc
+	}
+
+	registry.kindDecoders = make(map[reflect.Kind]ValueDecoder)
+	for kind, dec := range rb.kindDecoders {
+		registry.kindDecoders[kind] = dec
+	}
+
+	registry.typeMap = make(map[bsontype.Type]reflect.Type)
+	for bt, rt := range rb.typeMap {
+		registry.typeMap[bt] = rt
+	}
+
+	return registry
+}
+
+// LookupEncoder will inspect the registry for an encoder that satisfies the
+// type provided. An encoder registered for a specific type will take
+// precedence over an encoder registered for an interface the type satisfies,
+// which takes precedence over an encoder for the reflect.Kind of the value. If
+// no encoder can be found, an error is returned.
+func (r *Registry) LookupEncoder(t reflect.Type) (ValueEncoder, error) {
+	encodererr := ErrNoEncoder{Type: t}
+	r.mu.RLock()
+	enc, found := r.lookupTypeEncoder(t)
+	r.mu.RUnlock()
+	if found {
+		if enc == nil {
+			return nil, ErrNoEncoder{Type: t}
+		}
+		return enc, nil
+	}
+
+	enc, found = r.lookupInterfaceEncoder(t)
+	if found {
+		r.mu.Lock()
+		r.typeEncoders[t] = enc
+		r.mu.Unlock()
+		return enc, nil
+	}
+
+	if t == nil {
+		r.mu.Lock()
+		r.typeEncoders[t] = nil
+		r.mu.Unlock()
+		return nil, encodererr
+	}
+
+	enc, found = r.kindEncoders[t.Kind()]
+	if !found {
+		r.mu.Lock()
+		r.typeEncoders[t] = nil
+		r.mu.Unlock()
+		return nil, encodererr
+	}
+
+	r.mu.Lock()
+	r.typeEncoders[t] = enc
+	r.mu.Unlock()
+	return enc, nil
+}
+
+func (r *Registry) lookupTypeEncoder(t reflect.Type) (ValueEncoder, bool) {
+	enc, found := r.typeEncoders[t]
+	return enc, found
+}
+
+func (r *Registry) lookupInterfaceEncoder(t reflect.Type) (ValueEncoder, bool) {
+	if t == nil {
+		return nil, false
+	}
+	for _, ienc := range r.interfaceEncoders {
+		if !t.Implements(ienc.i) {
+			continue
+		}
+
+		return ienc.ve, true
+	}
+	return nil, false
+}
+
+// LookupDecoder will inspect the registry for a decoder that satisfies the
+// type provided. A decoder registered for a specific type will take
+// precedence over a decoder registered for an interface the type satisfies,
+// which takes precedence over a decoder for the reflect.Kind of the value. If
+// no decoder can be found, an error is returned.
+func (r *Registry) LookupDecoder(t reflect.Type) (ValueDecoder, error) {
+	if t == nil {
+		return nil, ErrNilType
+	}
+	decodererr := ErrNoDecoder{Type: t}
+	r.mu.RLock()
+	dec, found := r.lookupTypeDecoder(t)
+	r.mu.RUnlock()
+	if found {
+		if dec == nil {
+			return nil, ErrNoDecoder{Type: t}
+		}
+		return dec, nil
+	}
+
+	dec, found = r.lookupInterfaceDecoder(t)
+	if found {
+		r.mu.Lock()
+		r.typeDecoders[t] = dec
+		r.mu.Unlock()
+		return dec, nil
+	}
+
+	dec, found = r.kindDecoders[t.Kind()]
+	if !found {
+		r.mu.Lock()
+		r.typeDecoders[t] = nil
+		r.mu.Unlock()
+		return nil, decodererr
+	}
+
+	r.mu.Lock()
+	r.typeDecoders[t] = dec
+	r.mu.Unlock()
+	return dec, nil
+}
+
+func (r *Registry) lookupTypeDecoder(t reflect.Type) (ValueDecoder, bool) {
+	dec, found := r.typeDecoders[t]
+	return dec, found
+}
+
+func (r *Registry) lookupInterfaceDecoder(t reflect.Type) (ValueDecoder, bool) {
+	for _, idec := range r.interfaceDecoders {
+		if !t.Implements(idec.i) && !reflect.PtrTo(t).Implements(idec.i) {
+			continue
+		}
+
+		return idec.vd, true
+	}
+	return nil, false
+}
+
+// LookupTypeMapEntry inspects the registry's type map for a Go type for the corresponding BSON
+// type. If no type is found, ErrNoTypeMapEntry is returned.
+func (r *Registry) LookupTypeMapEntry(bt bsontype.Type) (reflect.Type, error) {
+	t, ok := r.typeMap[bt]
+	if !ok || t == nil {
+		return nil, ErrNoTypeMapEntry{Type: bt}
+	}
+	return t, nil
+}
+
+type interfaceValueEncoder struct {
+	i  reflect.Type
+	ve ValueEncoder
+}
+
+type interfaceValueDecoder struct {
+	i  reflect.Type
+	vd ValueDecoder
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/struct_codec.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/struct_codec.go
new file mode 100644
index 0000000..34f3b4c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/struct_codec.go
@@ -0,0 +1,357 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+var defaultStructCodec = &StructCodec{
+	cache:  make(map[reflect.Type]*structDescription),
+	parser: DefaultStructTagParser,
+}
+
+// Zeroer allows custom struct types to implement a report of zero
+// state. All struct types that don't implement Zeroer or where IsZero
+// returns false are considered to be not zero.
+type Zeroer interface {
+	IsZero() bool
+}
+
+// StructCodec is the Codec used for struct values.
+type StructCodec struct {
+	cache  map[reflect.Type]*structDescription
+	l      sync.RWMutex
+	parser StructTagParser
+}
+
+var _ ValueEncoder = &StructCodec{}
+var _ ValueDecoder = &StructCodec{}
+
+// NewStructCodec returns a StructCodec that uses p for struct tag parsing.
+func NewStructCodec(p StructTagParser) (*StructCodec, error) {
+	if p == nil {
+		return nil, errors.New("a StructTagParser must be provided to NewStructCodec")
+	}
+
+	return &StructCodec{
+		cache:  make(map[reflect.Type]*structDescription),
+		parser: p,
+	}, nil
+}
+
+// EncodeValue handles encoding generic struct types.
+func (sc *StructCodec) EncodeValue(r EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Kind() != reflect.Struct {
+		return ValueEncoderError{Name: "StructCodec.EncodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
+	}
+
+	sd, err := sc.describeStruct(r.Registry, val.Type())
+	if err != nil {
+		return err
+	}
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+	var rv reflect.Value
+	for _, desc := range sd.fl {
+		if desc.inline == nil {
+			rv = val.Field(desc.idx)
+		} else {
+			rv = val.FieldByIndex(desc.inline)
+		}
+
+		if desc.encoder == nil {
+			return ErrNoEncoder{Type: rv.Type()}
+		}
+
+		encoder := desc.encoder
+
+		iszero := sc.isZero
+		if iz, ok := encoder.(CodecZeroer); ok {
+			iszero = iz.IsTypeZero
+		}
+
+		if desc.omitEmpty && iszero(rv.Interface()) {
+			continue
+		}
+
+		vw2, err := dw.WriteDocumentElement(desc.name)
+		if err != nil {
+			return err
+		}
+
+		ectx := EncodeContext{Registry: r.Registry, MinSize: desc.minSize}
+		err = encoder.EncodeValue(ectx, vw2, rv)
+		if err != nil {
+			return err
+		}
+	}
+
+	if sd.inlineMap >= 0 {
+		rv := val.Field(sd.inlineMap)
+		collisionFn := func(key string) bool {
+			_, exists := sd.fm[key]
+			return exists
+		}
+
+		return defaultValueEncoders.mapEncodeValue(r, dw, rv, collisionFn)
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// DecodeValue implements the Codec interface.
+func (sc *StructCodec) DecodeValue(r DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Kind() != reflect.Struct {
+		return ValueDecoderError{Name: "StructCodec.DecodeValue", Kinds: []reflect.Kind{reflect.Struct}, Received: val}
+	}
+
+	switch vr.Type() {
+	case bsontype.Type(0), bsontype.EmbeddedDocument:
+	default:
+		return fmt.Errorf("cannot decode %v into a %s", vr.Type(), val.Type())
+	}
+
+	sd, err := sc.describeStruct(r.Registry, val.Type())
+	if err != nil {
+		return err
+	}
+
+	var decoder ValueDecoder
+	var inlineMap reflect.Value
+	if sd.inlineMap >= 0 {
+		inlineMap = val.Field(sd.inlineMap)
+		if inlineMap.IsNil() {
+			inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+		}
+		decoder, err = r.LookupDecoder(inlineMap.Type().Elem())
+		if err != nil {
+			return err
+		}
+	}
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	for {
+		name, vr, err := dr.ReadElement()
+		if err == bsonrw.ErrEOD {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		fd, exists := sd.fm[name]
+		if !exists {
+			if sd.inlineMap < 0 {
+				// The encoding/json package requires a flag to return on error for non-existent fields.
+				// This functionality seems appropriate for the struct codec.
+				err = vr.Skip()
+				if err != nil {
+					return err
+				}
+				continue
+			}
+
+			elem := reflect.New(inlineMap.Type().Elem()).Elem()
+			err = decoder.DecodeValue(r, vr, elem)
+			if err != nil {
+				return err
+			}
+			inlineMap.SetMapIndex(reflect.ValueOf(name), elem)
+			continue
+		}
+
+		var field reflect.Value
+		if fd.inline == nil {
+			field = val.Field(fd.idx)
+		} else {
+			field = val.FieldByIndex(fd.inline)
+		}
+
+		if !field.CanSet() { // Being settable is a super set of being addressable.
+			return fmt.Errorf("cannot decode element '%s' into field %v; it is not settable", name, field)
+		}
+		if field.Kind() == reflect.Ptr && field.IsNil() {
+			field.Set(reflect.New(field.Type().Elem()))
+		}
+		field = field.Addr()
+
+		dctx := DecodeContext{Registry: r.Registry, Truncate: fd.truncate}
+		if fd.decoder == nil {
+			return ErrNoDecoder{Type: field.Elem().Type()}
+		}
+
+		if decoder, ok := fd.decoder.(ValueDecoder); ok {
+			err = decoder.DecodeValue(dctx, vr, field.Elem())
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		err = fd.decoder.DecodeValue(dctx, vr, field)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (sc *StructCodec) isZero(i interface{}) bool {
+	v := reflect.ValueOf(i)
+
+	// check the value validity
+	if !v.IsValid() {
+		return true
+	}
+
+	if z, ok := v.Interface().(Zeroer); ok && (v.Kind() != reflect.Ptr || !v.IsNil()) {
+		return z.IsZero()
+	}
+
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+
+	return false
+}
+
+type structDescription struct {
+	fm        map[string]fieldDescription
+	fl        []fieldDescription
+	inlineMap int
+}
+
+type fieldDescription struct {
+	name      string
+	idx       int
+	omitEmpty bool
+	minSize   bool
+	truncate  bool
+	inline    []int
+	encoder   ValueEncoder
+	decoder   ValueDecoder
+}
+
+func (sc *StructCodec) describeStruct(r *Registry, t reflect.Type) (*structDescription, error) {
+	// We need to analyze the struct, including getting the tags, collecting
+	// information about inlining, and create a map of the field name to the field.
+	sc.l.RLock()
+	ds, exists := sc.cache[t]
+	sc.l.RUnlock()
+	if exists {
+		return ds, nil
+	}
+
+	numFields := t.NumField()
+	sd := &structDescription{
+		fm:        make(map[string]fieldDescription, numFields),
+		fl:        make([]fieldDescription, 0, numFields),
+		inlineMap: -1,
+	}
+
+	for i := 0; i < numFields; i++ {
+		sf := t.Field(i)
+		if sf.PkgPath != "" {
+			// unexported, ignore
+			continue
+		}
+
+		encoder, err := r.LookupEncoder(sf.Type)
+		if err != nil {
+			encoder = nil
+		}
+		decoder, err := r.LookupDecoder(sf.Type)
+		if err != nil {
+			decoder = nil
+		}
+
+		description := fieldDescription{idx: i, encoder: encoder, decoder: decoder}
+
+		stags, err := sc.parser.ParseStructTags(sf)
+		if err != nil {
+			return nil, err
+		}
+		if stags.Skip {
+			continue
+		}
+		description.name = stags.Name
+		description.omitEmpty = stags.OmitEmpty
+		description.minSize = stags.MinSize
+		description.truncate = stags.Truncate
+
+		if stags.Inline {
+			switch sf.Type.Kind() {
+			case reflect.Map:
+				if sd.inlineMap >= 0 {
+					return nil, errors.New("(struct " + t.String() + ") multiple inline maps")
+				}
+				if sf.Type.Key() != tString {
+					return nil, errors.New("(struct " + t.String() + ") inline map must have a string keys")
+				}
+				sd.inlineMap = description.idx
+			case reflect.Struct:
+				inlinesf, err := sc.describeStruct(r, sf.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, fd := range inlinesf.fl {
+					if _, exists := sd.fm[fd.name]; exists {
+						return nil, fmt.Errorf("(struct %s) duplicated key %s", t.String(), fd.name)
+					}
+					if fd.inline == nil {
+						fd.inline = []int{i, fd.idx}
+					} else {
+						fd.inline = append([]int{i}, fd.inline...)
+					}
+					sd.fm[fd.name] = fd
+					sd.fl = append(sd.fl, fd)
+				}
+			default:
+				return nil, fmt.Errorf("(struct %s) inline fields must be either a struct or a map", t.String())
+			}
+			continue
+		}
+
+		if _, exists := sd.fm[description.name]; exists {
+			return nil, fmt.Errorf("struct %s) duplicated key %s", t.String(), description.name)
+		}
+
+		sd.fm[description.name] = description
+		sd.fl = append(sd.fl, description)
+	}
+
+	sc.l.Lock()
+	sc.cache[t] = sd
+	sc.l.Unlock()
+
+	return sd, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/struct_tag_parser.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/struct_tag_parser.go
new file mode 100644
index 0000000..69d0ae4
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/struct_tag_parser.go
@@ -0,0 +1,119 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"reflect"
+	"strings"
+)
+
// StructTagParser returns the struct tags for a given struct field.
type StructTagParser interface {
	ParseStructTags(reflect.StructField) (StructTags, error)
}

// StructTagParserFunc is an adapter that allows a generic function to be used
// as a StructTagParser (mirroring the http.HandlerFunc pattern).
type StructTagParserFunc func(reflect.StructField) (StructTags, error)

// ParseStructTags implements the StructTagParser interface by invoking the
// function itself.
func (stpf StructTagParserFunc) ParseStructTags(sf reflect.StructField) (StructTags, error) {
	return stpf(sf)
}
+
// StructTags represents the struct tag fields that the StructCodec uses during
// the encoding and decoding process.
//
// In the case of a struct, the lowercased field name is used as the key for each exported
// field but this behavior may be changed using a struct tag. The tag may also contain flags to
// adjust the marshaling behavior for the field.
//
// The properties are defined below:
//
//     OmitEmpty  Only include the field if it's not set to the zero value for the type or to
//                empty slices or maps.
//
//     MinSize    Marshal an integer of a type larger than 32 bits value as an int32, if that's
//                feasible while preserving the numeric value.
//
//     Truncate   When unmarshaling a BSON double, it is permitted to lose precision to fit within
//                a float32.
//
//     Inline     Inline the field, which must be a struct or a map, causing all of its fields
//                or keys to be processed as if they were part of the outer struct. For maps,
//                keys must not conflict with the bson keys of other struct fields.
//
//     Skip       This struct field should be skipped. This is usually denoted by parsing a "-"
//                for the name.
//
// TODO(skriptble): Add tags for undefined as nil and for null as nil.
type StructTags struct {
	Name      string
	OmitEmpty bool
	MinSize   bool
	Truncate  bool
	Inline    bool
	Skip      bool
}
+
+// DefaultStructTagParser is the StructTagParser used by the StructCodec by default.
+// It will handle the bson struct tag. See the documentation for StructTags to see
+// what each of the returned fields means.
+//
+// If there is no name in the struct tag fields, the struct field name is lowercased.
+// The tag formats accepted are:
+//
+//     "[<key>][,<flag1>[,<flag2>]]"
+//
+//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// An example:
+//
+//     type T struct {
+//         A bool
+//         B int    "myb"
+//         C string "myc,omitempty"
+//         D string `bson:",omitempty" json:"jsonkey"`
+//         E int64  ",minsize"
+//         F int64  "myf,omitempty,minsize"
+//     }
+//
+// A struct tag either consisting entirely of '-' or with a bson key with a
+// value consisting entirely of '-' will return a StructTags with Skip true and
+// the remaining fields will be their default values.
+var DefaultStructTagParser StructTagParserFunc = func(sf reflect.StructField) (StructTags, error) {
+	key := strings.ToLower(sf.Name)
+	tag, ok := sf.Tag.Lookup("bson")
+	if !ok && !strings.Contains(string(sf.Tag), ":") && len(sf.Tag) > 0 {
+		tag = string(sf.Tag)
+	}
+	var st StructTags
+	if tag == "-" {
+		st.Skip = true
+		return st, nil
+	}
+
+	for idx, str := range strings.Split(tag, ",") {
+		if idx == 0 && str != "" {
+			key = str
+		}
+		switch str {
+		case "omitempty":
+			st.OmitEmpty = true
+		case "minsize":
+			st.MinSize = true
+		case "truncate":
+			st.Truncate = true
+		case "inline":
+			st.Inline = true
+		}
+	}
+
+	st.Name = key
+
+	return st, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/types.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/types.go
new file mode 100644
index 0000000..c349a94
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsoncodec/types.go
@@ -0,0 +1,80 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncodec
+
+import (
+	"encoding/json"
+	"net/url"
+	"reflect"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
// Pointer-to-primitive reflect.Types, used when codecs need the *T form of a
// built-in type.
var ptBool = reflect.TypeOf((*bool)(nil))
var ptInt8 = reflect.TypeOf((*int8)(nil))
var ptInt16 = reflect.TypeOf((*int16)(nil))
var ptInt32 = reflect.TypeOf((*int32)(nil))
var ptInt64 = reflect.TypeOf((*int64)(nil))
var ptInt = reflect.TypeOf((*int)(nil))
var ptUint8 = reflect.TypeOf((*uint8)(nil))
var ptUint16 = reflect.TypeOf((*uint16)(nil))
var ptUint32 = reflect.TypeOf((*uint32)(nil))
var ptUint64 = reflect.TypeOf((*uint64)(nil))
var ptUint = reflect.TypeOf((*uint)(nil))
var ptFloat32 = reflect.TypeOf((*float32)(nil))
var ptFloat64 = reflect.TypeOf((*float64)(nil))
var ptString = reflect.TypeOf((*string)(nil))

// reflect.Types for Go's built-in value types.
var tBool = reflect.TypeOf(false)
var tFloat32 = reflect.TypeOf(float32(0))
var tFloat64 = reflect.TypeOf(float64(0))
var tInt = reflect.TypeOf(int(0))
var tInt8 = reflect.TypeOf(int8(0))
var tInt16 = reflect.TypeOf(int16(0))
var tInt32 = reflect.TypeOf(int32(0))
var tInt64 = reflect.TypeOf(int64(0))
var tString = reflect.TypeOf("")
var tTime = reflect.TypeOf(time.Time{})
var tUint = reflect.TypeOf(uint(0))
var tUint8 = reflect.TypeOf(uint8(0))
var tUint16 = reflect.TypeOf(uint16(0))
var tUint32 = reflect.TypeOf(uint32(0))
var tUint64 = reflect.TypeOf(uint64(0))

// reflect.Types for common composite/standard-library types handled specially
// by the codecs.
var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem()
var tByteSlice = reflect.TypeOf([]byte(nil))
var tByte = reflect.TypeOf(byte(0x00))
var tURL = reflect.TypeOf(url.URL{})
var tJSONNumber = reflect.TypeOf(json.Number(""))

// Interface types implemented by user values to customize (un)marshaling.
var tValueMarshaler = reflect.TypeOf((*ValueMarshaler)(nil)).Elem()
var tValueUnmarshaler = reflect.TypeOf((*ValueUnmarshaler)(nil)).Elem()
var tMarshaler = reflect.TypeOf((*Marshaler)(nil)).Elem()
var tUnmarshaler = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
var tProxy = reflect.TypeOf((*Proxy)(nil)).Elem()

// reflect.Types for the BSON primitive types.
var tBinary = reflect.TypeOf(primitive.Binary{})
var tUndefined = reflect.TypeOf(primitive.Undefined{})
var tOID = reflect.TypeOf(primitive.ObjectID{})
var tDateTime = reflect.TypeOf(primitive.DateTime(0))
var tNull = reflect.TypeOf(primitive.Null{})
var tRegex = reflect.TypeOf(primitive.Regex{})
var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{})
var tDBPointer = reflect.TypeOf(primitive.DBPointer{})
var tJavaScript = reflect.TypeOf(primitive.JavaScript(""))
var tSymbol = reflect.TypeOf(primitive.Symbol(""))
var tTimestamp = reflect.TypeOf(primitive.Timestamp{})
var tDecimal = reflect.TypeOf(primitive.Decimal128{})
var tMinKey = reflect.TypeOf(primitive.MinKey{})
var tMaxKey = reflect.TypeOf(primitive.MaxKey{})
var tD = reflect.TypeOf(primitive.D{})
var tA = reflect.TypeOf(primitive.A{})
var tE = reflect.TypeOf(primitive.E{})

var tCoreDocument = reflect.TypeOf(bsoncore.Document{})
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/copier.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/copier.go
new file mode 100644
index 0000000..e01a650
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/copier.go
@@ -0,0 +1,389 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"fmt"
+	"io"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
// Copier is a type that allows copying between ValueReaders, ValueWriters, and
// []byte values.
type Copier struct{}

// NewCopier returns a ready-to-use Copier. Copier is stateless, so the zero
// value is equally usable.
func NewCopier() Copier {
	var c Copier
	return c
}
+
+// CopyDocument handles copying a document from src to dst.
+func CopyDocument(dst ValueWriter, src ValueReader) error {
+	return Copier{}.CopyDocument(dst, src)
+}
+
+// CopyDocument handles copying one document from the src to the dst.
+func (c Copier) CopyDocument(dst ValueWriter, src ValueReader) error {
+	dr, err := src.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	dw, err := dst.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return c.copyDocumentCore(dw, dr)
+}
+
+// CopyDocumentFromBytes copies the values from a BSON document represented as a
+// []byte to a ValueWriter.
+func (c Copier) CopyDocumentFromBytes(dst ValueWriter, src []byte) error {
+	dw, err := dst.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	err = c.CopyBytesToDocumentWriter(dw, src)
+	if err != nil {
+		return err
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// CopyBytesToDocumentWriter copies the values from a BSON document represented as a []byte to a
+// DocumentWriter.
+func (c Copier) CopyBytesToDocumentWriter(dst DocumentWriter, src []byte) error {
+	// TODO(skriptble): Create errors types here. Anything thats a tag should be a property.
+	length, rem, ok := bsoncore.ReadLength(src)
+	if !ok {
+		return fmt.Errorf("couldn't read length from src, not enough bytes. length=%d", len(src))
+	}
+	if len(src) < int(length) {
+		return fmt.Errorf("length read exceeds number of bytes available. length=%d bytes=%d", len(src), length)
+	}
+	rem = rem[:length-4]
+
+	var t bsontype.Type
+	var key string
+	var val bsoncore.Value
+	for {
+		t, rem, ok = bsoncore.ReadType(rem)
+		if !ok {
+			return io.EOF
+		}
+		if t == bsontype.Type(0) {
+			if len(rem) != 0 {
+				return fmt.Errorf("document end byte found before end of document. remaining bytes=%v", rem)
+			}
+			break
+		}
+
+		key, rem, ok = bsoncore.ReadKey(rem)
+		if !ok {
+			return fmt.Errorf("invalid key found. remaining bytes=%v", rem)
+		}
+		dvw, err := dst.WriteDocumentElement(key)
+		if err != nil {
+			return err
+		}
+		val, rem, ok = bsoncore.ReadValue(rem, t)
+		if !ok {
+			return fmt.Errorf("not enough bytes available to read type. bytes=%d type=%s", len(rem), t)
+		}
+		err = c.CopyValueFromBytes(dvw, t, val.Data)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CopyDocumentToBytes copies an entire document from the ValueReader and
+// returns it as bytes.
+func (c Copier) CopyDocumentToBytes(src ValueReader) ([]byte, error) {
+	return c.AppendDocumentBytes(nil, src)
+}
+
+// AppendDocumentBytes functions the same as CopyDocumentToBytes, but will
+// append the result to dst.
+func (c Copier) AppendDocumentBytes(dst []byte, src ValueReader) ([]byte, error) {
+	if br, ok := src.(BytesReader); ok {
+		_, dst, err := br.ReadValueBytes(dst)
+		return dst, err
+	}
+
+	vw := vwPool.Get().(*valueWriter)
+	defer vwPool.Put(vw)
+
+	vw.reset(dst)
+
+	err := c.CopyDocument(vw, src)
+	dst = vw.buf
+	return dst, err
+}
+
+// CopyValueFromBytes will write the value represtend by t and src to dst.
+func (c Copier) CopyValueFromBytes(dst ValueWriter, t bsontype.Type, src []byte) error {
+	if wvb, ok := dst.(BytesWriter); ok {
+		return wvb.WriteValueBytes(t, src)
+	}
+
+	vr := vrPool.Get().(*valueReader)
+	defer vrPool.Put(vr)
+
+	vr.reset(src)
+	vr.pushElement(t)
+
+	return c.CopyValue(dst, vr)
+}
+
+// CopyValueToBytes copies a value from src and returns it as a bsontype.Type and a
+// []byte.
+func (c Copier) CopyValueToBytes(src ValueReader) (bsontype.Type, []byte, error) {
+	return c.AppendValueBytes(nil, src)
+}
+
+// AppendValueBytes functions the same as CopyValueToBytes, but will append the
+// result to dst.
+func (c Copier) AppendValueBytes(dst []byte, src ValueReader) (bsontype.Type, []byte, error) {
+	if br, ok := src.(BytesReader); ok {
+		return br.ReadValueBytes(dst)
+	}
+
+	vw := vwPool.Get().(*valueWriter)
+	defer vwPool.Put(vw)
+
+	start := len(dst)
+
+	vw.reset(dst)
+	vw.push(mElement)
+
+	err := c.CopyValue(vw, src)
+	if err != nil {
+		return 0, dst, err
+	}
+
+	return bsontype.Type(vw.buf[start]), vw.buf[start+2:], nil
+}
+
// CopyValue will copy a single value from src to dst.
//
// It dispatches on src.Type(): for each BSON type it reads the value from src
// and writes the equivalent value to dst. Embedded documents, arrays, and
// code-with-scope recurse through the copier so nested values are copied too.
// The first error encountered (read or write) is returned.
func (c Copier) CopyValue(dst ValueWriter, src ValueReader) error {
	var err error
	switch src.Type() {
	case bsontype.Double:
		var f64 float64
		f64, err = src.ReadDouble()
		if err != nil {
			break
		}
		err = dst.WriteDouble(f64)
	case bsontype.String:
		var str string
		str, err = src.ReadString()
		if err != nil {
			return err
		}
		err = dst.WriteString(str)
	case bsontype.EmbeddedDocument:
		err = c.CopyDocument(dst, src)
	case bsontype.Array:
		err = c.copyArray(dst, src)
	case bsontype.Binary:
		var data []byte
		var subtype byte
		data, subtype, err = src.ReadBinary()
		if err != nil {
			break
		}
		err = dst.WriteBinaryWithSubtype(data, subtype)
	case bsontype.Undefined:
		err = src.ReadUndefined()
		if err != nil {
			break
		}
		err = dst.WriteUndefined()
	case bsontype.ObjectID:
		var oid primitive.ObjectID
		oid, err = src.ReadObjectID()
		if err != nil {
			break
		}
		err = dst.WriteObjectID(oid)
	case bsontype.Boolean:
		var b bool
		b, err = src.ReadBoolean()
		if err != nil {
			break
		}
		err = dst.WriteBoolean(b)
	case bsontype.DateTime:
		var dt int64
		dt, err = src.ReadDateTime()
		if err != nil {
			break
		}
		err = dst.WriteDateTime(dt)
	case bsontype.Null:
		err = src.ReadNull()
		if err != nil {
			break
		}
		err = dst.WriteNull()
	case bsontype.Regex:
		var pattern, options string
		pattern, options, err = src.ReadRegex()
		if err != nil {
			break
		}
		err = dst.WriteRegex(pattern, options)
	case bsontype.DBPointer:
		var ns string
		var pointer primitive.ObjectID
		ns, pointer, err = src.ReadDBPointer()
		if err != nil {
			break
		}
		err = dst.WriteDBPointer(ns, pointer)
	case bsontype.JavaScript:
		var js string
		js, err = src.ReadJavascript()
		if err != nil {
			break
		}
		err = dst.WriteJavascript(js)
	case bsontype.Symbol:
		var symbol string
		symbol, err = src.ReadSymbol()
		if err != nil {
			break
		}
		err = dst.WriteSymbol(symbol)
	case bsontype.CodeWithScope:
		var code string
		var srcScope DocumentReader
		code, srcScope, err = src.ReadCodeWithScope()
		if err != nil {
			break
		}

		// The scope is itself a document; copy it element by element.
		var dstScope DocumentWriter
		dstScope, err = dst.WriteCodeWithScope(code)
		if err != nil {
			break
		}
		err = c.copyDocumentCore(dstScope, srcScope)
	case bsontype.Int32:
		var i32 int32
		i32, err = src.ReadInt32()
		if err != nil {
			break
		}
		err = dst.WriteInt32(i32)
	case bsontype.Timestamp:
		var t, i uint32
		t, i, err = src.ReadTimestamp()
		if err != nil {
			break
		}
		err = dst.WriteTimestamp(t, i)
	case bsontype.Int64:
		var i64 int64
		i64, err = src.ReadInt64()
		if err != nil {
			break
		}
		err = dst.WriteInt64(i64)
	case bsontype.Decimal128:
		var d128 primitive.Decimal128
		d128, err = src.ReadDecimal128()
		if err != nil {
			break
		}
		err = dst.WriteDecimal128(d128)
	case bsontype.MinKey:
		err = src.ReadMinKey()
		if err != nil {
			break
		}
		err = dst.WriteMinKey()
	case bsontype.MaxKey:
		err = src.ReadMaxKey()
		if err != nil {
			break
		}
		err = dst.WriteMaxKey()
	default:
		// NOTE(review): error string is capitalized, contrary to Go
		// convention; left unchanged since callers may match on the text.
		err = fmt.Errorf("Cannot copy unknown BSON type %s", src.Type())
	}

	return err
}
+
+func (c Copier) copyArray(dst ValueWriter, src ValueReader) error {
+	ar, err := src.ReadArray()
+	if err != nil {
+		return err
+	}
+
+	aw, err := dst.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	for {
+		vr, err := ar.ReadValue()
+		if err == ErrEOA {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		vw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		err = c.CopyValue(vw, vr)
+		if err != nil {
+			return err
+		}
+	}
+
+	return aw.WriteArrayEnd()
+}
+
+func (c Copier) copyDocumentCore(dw DocumentWriter, dr DocumentReader) error {
+	for {
+		key, vr, err := dr.ReadElement()
+		if err == ErrEOD {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		vw, err := dw.WriteDocumentElement(key)
+		if err != nil {
+			return err
+		}
+
+		err = c.CopyValue(vw, vr)
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/doc.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/doc.go
new file mode 100644
index 0000000..21f24b0
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/doc.go
@@ -0,0 +1,9 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsonrw contains abstractions for reading and writing
+// BSON and BSON like types from sources.
+package bsonrw
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_parser.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_parser.go
new file mode 100644
index 0000000..689982c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_parser.go
@@ -0,0 +1,687 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"errors"
+	"fmt"
+	"io"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
// maxNestingDepth bounds how deeply documents/arrays may nest before the
// parser reports an error.
const maxNestingDepth = 200

// ErrInvalidJSON indicates the JSON input is invalid
var ErrInvalidJSON = errors.New("invalid JSON input")

// jsonParseState describes what the parser most recently observed.
type jsonParseState byte

const (
	jpsStartState jsonParseState = iota
	jpsSawBeginObject
	jpsSawEndObject
	jpsSawBeginArray
	jpsSawEndArray
	jpsSawColon
	jpsSawComma
	jpsSawKey
	jpsSawValue
	jpsDoneState
	jpsInvalidState
)

// jsonParseMode records whether the parser is inside an object or an array;
// a stack of these tracks nesting.
type jsonParseMode byte

const (
	jpmInvalidMode jsonParseMode = iota
	jpmObjectMode
	jpmArrayMode
)

// extJSONValue is a scalar JSON value tagged with the BSON type it maps to.
type extJSONValue struct {
	t bsontype.Type
	v interface{}
}

// extJSONObject is a small parsed object: parallel slices of keys and values.
type extJSONObject struct {
	keys   []string
	values []*extJSONValue
}

// extJSONParser is a state machine over a jsonScanner's token stream.
type extJSONParser struct {
	js *jsonScanner    // token source
	s  jsonParseState  // current state
	m  []jsonParseMode // mode stack, one entry per open object/array
	k  string          // most recently read key
	v  *extJSONValue   // most recently read value

	err       error // sticky error once s == jpsInvalidState
	canonical bool  // strict (canonical) vs. relaxed extended JSON
	depth     int   // current object nesting depth
	maxDepth  int   // depth limit (maxNestingDepth)

	emptyObject bool // set when peekType consumed "{}" entirely
}
+
// newExtJSONParser returns a new extended JSON parser, ready to begin
// parsing from the first character of the argued json input. It will not
// perform any read-ahead and will therefore not report any errors about
// malformed JSON at this point.
func newExtJSONParser(r io.Reader, canonical bool) *extJSONParser {
	return &extJSONParser{
		js:        &jsonScanner{r: r},
		s:         jpsStartState,
		m:         []jsonParseMode{},
		canonical: canonical,
		maxDepth:  maxNestingDepth,
	}
}
+
// peekType examines the next value and returns its BSON Type.
//
// It advances the underlying state machine far enough to classify the value:
// plain scalars map directly, '[' means Array, and '{' requires peeking at the
// first key to distinguish wrapper types (e.g. {"$oid": ...}) from plain
// embedded documents.
func (ejp *extJSONParser) peekType() (bsontype.Type, error) {
	var t bsontype.Type
	var err error

	ejp.advanceState()
	switch ejp.s {
	case jpsSawValue:
		t = ejp.v.t
	case jpsSawBeginArray:
		t = bsontype.Array
	case jpsInvalidState:
		err = ejp.err
	case jpsSawComma:
		// in array mode, seeing a comma means we need to progress again to actually observe a type
		if ejp.peekMode() == jpmArrayMode {
			return ejp.peekType()
		}
	case jpsSawEndArray:
		// this would only be a valid state if we were in array mode, so return end-of-array error
		err = ErrEOA
	case jpsSawBeginObject:
		// peek key to determine type
		ejp.advanceState()
		switch ejp.s {
		case jpsSawEndObject: // empty embedded document
			t = bsontype.EmbeddedDocument
			// Remember that "{}" was fully consumed so readKey can report EOD.
			ejp.emptyObject = true
		case jpsInvalidState:
			err = ejp.err
		case jpsSawKey:
			t = wrapperKeyBSONType(ejp.k)

			if t == bsontype.JavaScript {
				// just saw $code, need to check for $scope at same level
				_, err := ejp.readValue(bsontype.JavaScript)

				if err != nil {
					break
				}

				switch ejp.s {
				case jpsSawEndObject: // type is TypeJavaScript
				case jpsSawComma:
					ejp.advanceState()
					if ejp.s == jpsSawKey && ejp.k == "$scope" {
						t = bsontype.CodeWithScope
					} else {
						err = fmt.Errorf("invalid extended JSON: unexpected key %s in CodeWithScope object", ejp.k)
					}
				case jpsInvalidState:
					err = ejp.err
				default:
					err = ErrInvalidJSON
				}
			}
		}
	}

	return t, err
}
+
// readKey parses the next key and its type and returns them.
//
// It returns ErrEOD at the end of a document, io.EOF at the end of input, and
// ErrInvalidJSON/invalidRequestError for malformed or out-of-order input.
func (ejp *extJSONParser) readKey() (string, bsontype.Type, error) {
	// peekType may have consumed an entire "{}": report end-of-document once.
	if ejp.emptyObject {
		ejp.emptyObject = false
		return "", 0, ErrEOD
	}

	// advance to key (or return with error)
	switch ejp.s {
	case jpsStartState:
		ejp.advanceState()
		if ejp.s == jpsSawBeginObject {
			ejp.advanceState()
		}
	case jpsSawBeginObject:
		ejp.advanceState()
	case jpsSawValue, jpsSawEndObject, jpsSawEndArray:
		ejp.advanceState()
		switch ejp.s {
		case jpsSawBeginObject, jpsSawComma:
			ejp.advanceState()
		case jpsSawEndObject:
			return "", 0, ErrEOD
		case jpsDoneState:
			return "", 0, io.EOF
		case jpsInvalidState:
			return "", 0, ejp.err
		default:
			return "", 0, ErrInvalidJSON
		}
	case jpsSawKey: // do nothing (key was peeked before)
	default:
		return "", 0, invalidRequestError("key")
	}

	// read key
	var key string

	switch ejp.s {
	case jpsSawKey:
		key = ejp.k
	case jpsSawEndObject:
		return "", 0, ErrEOD
	case jpsInvalidState:
		return "", 0, ejp.err
	default:
		return "", 0, invalidRequestError("key")
	}

	// check for colon
	ejp.advanceState()
	if err := ensureColon(ejp.s, key); err != nil {
		return "", 0, err
	}

	// peek at the value to determine type
	t, err := ejp.peekType()
	if err != nil {
		return "", 0, err
	}

	return key, t, nil
}
+
+// readValue returns the value corresponding to the Type returned by peekType
+func (ejp *extJSONParser) readValue(t bsontype.Type) (*extJSONValue, error) {
+	if ejp.s == jpsInvalidState {
+		return nil, ejp.err
+	}
+
+	var v *extJSONValue
+
+	switch t {
+	case bsontype.Null, bsontype.Boolean, bsontype.String:
+		if ejp.s != jpsSawValue {
+			return nil, invalidRequestError(t.String())
+		}
+		v = ejp.v
+	case bsontype.Int32, bsontype.Int64, bsontype.Double:
+		// relaxed version allows these to be literal number values
+		if ejp.s == jpsSawValue {
+			v = ejp.v
+			break
+		}
+		fallthrough
+	case bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID, bsontype.MinKey, bsontype.MaxKey, bsontype.Undefined:
+		switch ejp.s {
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read value
+			ejp.advanceState()
+			if ejp.s != jpsSawValue || !ejp.ensureExtValueType(t) {
+				return nil, invalidJSONErrorForType("value", t)
+			}
+
+			v = ejp.v
+
+			// read end object
+			ejp.advanceState()
+			if ejp.s != jpsSawEndObject {
+				return nil, invalidJSONErrorForType("} after value", t)
+			}
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.Binary, bsontype.Regex, bsontype.Timestamp, bsontype.DBPointer:
+		if ejp.s != jpsSawKey {
+			return nil, invalidRequestError(t.String())
+		}
+		// read colon
+		ejp.advanceState()
+		if err := ensureColon(ejp.s, ejp.k); err != nil {
+			return nil, err
+		}
+
+		// read KV pairs
+		keys, vals, err := ejp.readObject(2, false)
+		if err != nil {
+			return nil, err
+		}
+
+		ejp.advanceState()
+		if ejp.s != jpsSawEndObject {
+			return nil, invalidJSONErrorForType("2 key-value pairs and then }", t)
+		}
+
+		v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
+	case bsontype.DateTime:
+		switch ejp.s {
+		case jpsSawValue:
+			v = ejp.v
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			ejp.advanceState()
+			switch ejp.s {
+			case jpsSawBeginObject:
+				keys, vals, err := ejp.readObject(1, true)
+				if err != nil {
+					return nil, err
+				}
+				v = &extJSONValue{t: bsontype.EmbeddedDocument, v: &extJSONObject{keys: keys, values: vals}}
+			case jpsSawValue:
+				if ejp.canonical {
+					return nil, invalidJSONError("{")
+				}
+				v = ejp.v
+			default:
+				if ejp.canonical {
+					return nil, invalidJSONErrorForType("object", t)
+				}
+				return nil, invalidJSONErrorForType("ISO-8601 Internet Date/Time Format as decribed in RFC-3339", t)
+			}
+
+			ejp.advanceState()
+			if ejp.s != jpsSawEndObject {
+				return nil, invalidJSONErrorForType("value and then }", t)
+			}
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.JavaScript:
+		switch ejp.s {
+		case jpsSawKey:
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read value
+			ejp.advanceState()
+			if ejp.s != jpsSawValue {
+				return nil, invalidJSONErrorForType("value", t)
+			}
+			v = ejp.v
+
+			// read end object or comma and just return
+			ejp.advanceState()
+		case jpsSawEndObject:
+			v = ejp.v
+		default:
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.CodeWithScope:
+		if ejp.s == jpsSawKey && ejp.k == "$scope" {
+			v = ejp.v // this is the $code string from earlier
+
+			// read colon
+			ejp.advanceState()
+			if err := ensureColon(ejp.s, ejp.k); err != nil {
+				return nil, err
+			}
+
+			// read {
+			ejp.advanceState()
+			if ejp.s != jpsSawBeginObject {
+				return nil, invalidJSONError("$scope to be embedded document")
+			}
+		} else {
+			return nil, invalidRequestError(t.String())
+		}
+	case bsontype.EmbeddedDocument, bsontype.Array:
+		return nil, invalidRequestError(t.String())
+	}
+
+	return v, nil
+}
+
// readObject is a utility method for reading full objects of known (or expected) size
// it is useful for extended JSON types such as binary, datetime, regex, and timestamp
//
// started indicates whether the opening '{' has already been consumed by the
// caller; when false, readObject consumes it itself. Exactly numKeys pairs
// and a closing '}' are required.
func (ejp *extJSONParser) readObject(numKeys int, started bool) ([]string, []*extJSONValue, error) {
	keys := make([]string, numKeys)
	vals := make([]*extJSONValue, numKeys)

	if !started {
		ejp.advanceState()
		if ejp.s != jpsSawBeginObject {
			return nil, nil, invalidJSONError("{")
		}
	}

	for i := 0; i < numKeys; i++ {
		key, t, err := ejp.readKey()
		if err != nil {
			return nil, nil, err
		}

		switch ejp.s {
		case jpsSawKey:
			// Wrapper-style value: delegate to readValue for type t.
			v, err := ejp.readValue(t)
			if err != nil {
				return nil, nil, err
			}

			keys[i] = key
			vals[i] = v
		case jpsSawValue:
			// Plain scalar: peekType already captured it in ejp.v.
			keys[i] = key
			vals[i] = ejp.v
		default:
			return nil, nil, invalidJSONError("value")
		}
	}

	ejp.advanceState()
	if ejp.s != jpsSawEndObject {
		return nil, nil, invalidJSONError("}")
	}

	return keys, vals, nil
}
+
// advanceState reads the next JSON token from the scanner and transitions
// from the current state based on that token's type.
//
// On any scanner error or invalid transition the parser enters
// jpsInvalidState with ejp.err set; both jpsDoneState and jpsInvalidState are
// terminal and further calls are no-ops.
func (ejp *extJSONParser) advanceState() {
	if ejp.s == jpsDoneState || ejp.s == jpsInvalidState {
		return
	}

	jt, err := ejp.js.nextToken()

	if err != nil {
		ejp.err = err
		ejp.s = jpsInvalidState
		return
	}

	valid := ejp.validateToken(jt.t)
	if !valid {
		ejp.err = unexpectedTokenError(jt)
		ejp.s = jpsInvalidState
		return
	}

	switch jt.t {
	case jttBeginObject:
		ejp.s = jpsSawBeginObject
		ejp.pushMode(jpmObjectMode)
		ejp.depth++

		// Enforce the nesting limit as objects open.
		if ejp.depth > ejp.maxDepth {
			ejp.err = nestingDepthError(jt.p, ejp.depth)
			ejp.s = jpsInvalidState
		}
	case jttEndObject:
		ejp.s = jpsSawEndObject
		ejp.depth--

		// A '}' must close an object, not an array.
		if ejp.popMode() != jpmObjectMode {
			ejp.err = unexpectedTokenError(jt)
			ejp.s = jpsInvalidState
		}
	case jttBeginArray:
		ejp.s = jpsSawBeginArray
		ejp.pushMode(jpmArrayMode)
	case jttEndArray:
		ejp.s = jpsSawEndArray

		// A ']' must close an array, not an object.
		if ejp.popMode() != jpmArrayMode {
			ejp.err = unexpectedTokenError(jt)
			ejp.s = jpsInvalidState
		}
	case jttColon:
		ejp.s = jpsSawColon
	case jttComma:
		ejp.s = jpsSawComma
	case jttEOF:
		ejp.s = jpsDoneState
		// EOF with unclosed objects/arrays is an error.
		if len(ejp.m) != 0 {
			ejp.err = unexpectedTokenError(jt)
			ejp.s = jpsInvalidState
		}
	case jttString:
		switch ejp.s {
		case jpsSawComma:
			// Inside an array a string after ',' is a value, not a key.
			if ejp.peekMode() == jpmArrayMode {
				ejp.s = jpsSawValue
				ejp.v = extendJSONToken(jt)
				return
			}
			fallthrough
		case jpsSawBeginObject:
			// Inside an object a string in these positions is a key.
			ejp.s = jpsSawKey
			ejp.k = jt.v.(string)
			return
		}
		fallthrough
	default:
		ejp.s = jpsSawValue
		ejp.v = extendJSONToken(jt)
	}
}
+
// jpsValidTransitionTokens maps each parse state to the set of token types
// that may legally follow it. validateToken consults this table (after a few
// context-dependent special cases) to reject malformed input early.
var jpsValidTransitionTokens = map[jsonParseState]map[jsonTokenType]bool{
	jpsStartState: {
		jttBeginObject: true,
		jttBeginArray:  true,
		jttInt32:       true,
		jttInt64:       true,
		jttDouble:      true,
		jttString:      true,
		jttBool:        true,
		jttNull:        true,
		jttEOF:         true,
	},
	jpsSawBeginObject: {
		jttEndObject: true,
		jttString:    true,
	},
	jpsSawEndObject: {
		jttEndObject: true,
		jttEndArray:  true,
		jttComma:     true,
		jttEOF:       true,
	},
	jpsSawBeginArray: {
		jttBeginObject: true,
		jttBeginArray:  true,
		jttEndArray:    true,
		jttInt32:       true,
		jttInt64:       true,
		jttDouble:      true,
		jttString:      true,
		jttBool:        true,
		jttNull:        true,
	},
	jpsSawEndArray: {
		jttEndObject: true,
		jttEndArray:  true,
		jttComma:     true,
		jttEOF:       true,
	},
	jpsSawColon: {
		jttBeginObject: true,
		jttBeginArray:  true,
		jttInt32:       true,
		jttInt64:       true,
		jttDouble:      true,
		jttString:      true,
		jttBool:        true,
		jttNull:        true,
	},
	jpsSawComma: {
		jttBeginObject: true,
		jttBeginArray:  true,
		jttInt32:       true,
		jttInt64:       true,
		jttDouble:      true,
		jttString:      true,
		jttBool:        true,
		jttNull:        true,
	},
	jpsSawKey: {
		jttColon: true,
	},
	jpsSawValue: {
		jttEndObject: true,
		jttEndArray:  true,
		jttComma:     true,
		jttEOF:       true,
	},
	jpsDoneState:    {},
	jpsInvalidState: {},
}
+
// validateToken reports whether token type jtt is a legal continuation from
// the parser's current state. Two context-sensitive cases are handled before
// falling back to the jpsValidTransitionTokens table.
func (ejp *extJSONParser) validateToken(jtt jsonTokenType) bool {
	switch ejp.s {
	case jpsSawEndObject:
		// if we are at depth zero and the next token is a '{',
		// we can consider it valid only if we are not in array mode.
		if jtt == jttBeginObject && ejp.depth == 0 {
			return ejp.peekMode() != jpmArrayMode
		}
	case jpsSawComma:
		switch ejp.peekMode() {
		// the only valid next token after a comma inside a document is a string (a key)
		case jpmObjectMode:
			return jtt == jttString
		case jpmInvalidMode:
			// Comma outside any object/array is never valid.
			return false
		}
	}

	_, ok := jpsValidTransitionTokens[ejp.s][jtt]
	return ok
}
+
+// ensureExtValueType returns true if the current value has the expected
+// value type for single-key extended JSON types. For example,
+// {"$numberInt": v} v must be TypeString
+func (ejp *extJSONParser) ensureExtValueType(t bsontype.Type) bool {
+	switch t {
+	case bsontype.MinKey, bsontype.MaxKey:
+		return ejp.v.t == bsontype.Int32
+	case bsontype.Undefined:
+		return ejp.v.t == bsontype.Boolean
+	case bsontype.Int32, bsontype.Int64, bsontype.Double, bsontype.Decimal128, bsontype.Symbol, bsontype.ObjectID:
+		return ejp.v.t == bsontype.String
+	default:
+		return false
+	}
+}
+
// pushMode pushes m onto the parser's mode stack (entering an object/array).
func (ejp *extJSONParser) pushMode(m jsonParseMode) {
	ejp.m = append(ejp.m, m)
}
+
+func (ejp *extJSONParser) popMode() jsonParseMode {
+	l := len(ejp.m)
+	if l == 0 {
+		return jpmInvalidMode
+	}
+
+	m := ejp.m[l-1]
+	ejp.m = ejp.m[:l-1]
+
+	return m
+}
+
+func (ejp *extJSONParser) peekMode() jsonParseMode {
+	l := len(ejp.m)
+	if l == 0 {
+		return jpmInvalidMode
+	}
+
+	return ejp.m[l-1]
+}
+
+func extendJSONToken(jt *jsonToken) *extJSONValue {
+	var t bsontype.Type
+
+	switch jt.t {
+	case jttInt32:
+		t = bsontype.Int32
+	case jttInt64:
+		t = bsontype.Int64
+	case jttDouble:
+		t = bsontype.Double
+	case jttString:
+		t = bsontype.String
+	case jttBool:
+		t = bsontype.Boolean
+	case jttNull:
+		t = bsontype.Null
+	default:
+		return nil
+	}
+
+	return &extJSONValue{t: t, v: jt.v}
+}
+
+func ensureColon(s jsonParseState, key string) error {
+	if s != jpsSawColon {
+		return fmt.Errorf("invalid JSON input: missing colon after key \"%s\"", key)
+	}
+
+	return nil
+}
+
// invalidRequestError reports that the caller asked to read construct s while
// the parser is not positioned on one.
func invalidRequestError(s string) error {
	return fmt.Errorf("invalid request to read %s", s)
}
+
// invalidJSONError reports malformed input, naming the construct expected.
func invalidJSONError(expected string) error {
	return fmt.Errorf("invalid JSON input; expected %s", expected)
}
+
// invalidJSONErrorForType reports malformed input while reading a value of
// BSON type t, naming the construct expected.
func invalidJSONErrorForType(expected string, t bsontype.Type) error {
	return fmt.Errorf("invalid JSON input; expected %s for %s", expected, t)
}
+
+func unexpectedTokenError(jt *jsonToken) error {
+	switch jt.t {
+	case jttInt32, jttInt64, jttDouble:
+		return fmt.Errorf("invalid JSON input; unexpected number (%v) at position %d", jt.v, jt.p)
+	case jttString:
+		return fmt.Errorf("invalid JSON input; unexpected string (\"%v\") at position %d", jt.v, jt.p)
+	case jttBool:
+		return fmt.Errorf("invalid JSON input; unexpected boolean literal (%v) at position %d", jt.v, jt.p)
+	case jttNull:
+		return fmt.Errorf("invalid JSON input; unexpected null literal at position %d", jt.p)
+	case jttEOF:
+		return fmt.Errorf("invalid JSON input; unexpected end of input at position %d", jt.p)
+	default:
+		return fmt.Errorf("invalid JSON input; unexpected %c at position %d", jt.v.(byte), jt.p)
+	}
+}
+
// nestingDepthError reports that the document at position p exceeded the
// parser's maximum nesting depth.
func nestingDepthError(p, depth int) error {
	const format = "invalid JSON input; nesting too deep (%d levels) at position %d"
	return fmt.Errorf(format, depth, p)
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_reader.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_reader.go
new file mode 100644
index 0000000..eebd56e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_reader.go
@@ -0,0 +1,659 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"fmt"
+	"io"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// ExtJSONValueReaderPool is a pool for ValueReaders that read ExtJSON.
+type ExtJSONValueReaderPool struct {
+	pool sync.Pool
+}
+
+// NewExtJSONValueReaderPool instantiates a new ExtJSONValueReaderPool.
+func NewExtJSONValueReaderPool() *ExtJSONValueReaderPool {
+	return &ExtJSONValueReaderPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(extJSONValueReader)
+			},
+		},
+	}
+}
+
+// Get retrieves a ValueReader from the pool and uses src as the underlying ExtJSON.
+func (bvrp *ExtJSONValueReaderPool) Get(r io.Reader, canonical bool) (ValueReader, error) {
+	vr := bvrp.pool.Get().(*extJSONValueReader)
+	return vr.reset(r, canonical)
+}
+
+// Put inserts a ValueReader into the pool. If the ValueReader is not a ExtJSON ValueReader nothing
+// is inserted into the pool and ok will be false.
+func (bvrp *ExtJSONValueReaderPool) Put(vr ValueReader) (ok bool) {
+	bvr, ok := vr.(*extJSONValueReader)
+	if !ok {
+		return false
+	}
+
+	// NOTE(review): reset with a nil reader appears to always fail, leaving
+	// bvr nil, so a typed-nil *extJSONValueReader is stored in the pool.
+	// Get still works because reset allocates a fresh reader instead of
+	// dereferencing its receiver — confirm before changing reset.
+	bvr, _ = bvr.reset(nil, false)
+	bvrp.pool.Put(bvr)
+	return true
+}
+
+// ejvrState is a single frame of the reader's mode stack.
+type ejvrState struct {
+	mode  mode          // read mode for this frame (document, array, element, ...)
+	vType bsontype.Type // BSON type of the value at this frame
+	depth int           // parser nesting depth recorded when the frame was pushed
+}
+
+// extJSONValueReader is for reading extended JSON.
+type extJSONValueReader struct {
+	p *extJSONParser
+
+	stack []ejvrState // mode stack; stack[frame] is the active state
+	frame int         // index of the active frame within stack
+}
+
+// NewExtJSONValueReader creates a new ValueReader from a given io.Reader.
+// It will interpret the JSON of r as canonical or relaxed according to the
+// given canonical flag.
+func NewExtJSONValueReader(r io.Reader, canonical bool) (ValueReader, error) {
+	return newExtJSONValueReader(r, canonical)
+}
+
+// newExtJSONValueReader is the concrete-type constructor backing
+// NewExtJSONValueReader.
+func newExtJSONValueReader(r io.Reader, canonical bool) (*extJSONValueReader, error) {
+	ejvr := new(extJSONValueReader)
+	return ejvr.reset(r, canonical)
+}
+
+// reset initializes a reader to consume r, peeking the first token to decide
+// whether the top level is a document, an array, or a bare value. It never
+// dereferences its receiver, so it is safe to call on a nil reader.
+func (ejvr *extJSONValueReader) reset(r io.Reader, canonical bool) (*extJSONValueReader, error) {
+	p := newExtJSONParser(r, canonical)
+	typ, err := p.peekType()
+
+	if err != nil {
+		return nil, ErrInvalidJSON
+	}
+
+	// Seed the initial mode from the top-level JSON construct.
+	var m mode
+	switch typ {
+	case bsontype.EmbeddedDocument:
+		m = mTopLevel
+	case bsontype.Array:
+		m = mArray
+	default:
+		m = mValue
+	}
+
+	stack := make([]ejvrState, 1, 5)
+	stack[0] = ejvrState{
+		mode:  m,
+		vType: typ,
+	}
+	// NOTE(review): a fresh reader is returned rather than reusing ejvr and
+	// its stack, so pooled readers get essentially no reuse — confirm intent
+	// before relying on the pool for performance.
+	return &extJSONValueReader{
+		p:     p,
+		stack: stack,
+	}, nil
+}
+
+// advanceFrame makes the next stack slot the current frame, growing the
+// backing array (roughly doubling) when appending would reach capacity, and
+// zeroes the new frame.
+func (ejvr *extJSONValueReader) advanceFrame() {
+	if ejvr.frame+1 >= len(ejvr.stack) { // We need to grow the stack
+		length := len(ejvr.stack)
+		if length+1 >= cap(ejvr.stack) {
+			// double it
+			buf := make([]ejvrState, 2*cap(ejvr.stack)+1)
+			copy(buf, ejvr.stack)
+			ejvr.stack = buf
+		}
+		ejvr.stack = ejvr.stack[:length+1]
+	}
+	ejvr.frame++
+
+	// Clean the stack
+	ejvr.stack[ejvr.frame].mode = 0
+	ejvr.stack[ejvr.frame].vType = 0
+	ejvr.stack[ejvr.frame].depth = 0
+}
+
+// pushDocument enters document mode, recording the parser's current nesting
+// depth so the frame corresponds to the parser's position.
+func (ejvr *extJSONValueReader) pushDocument() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mDocument
+	ejvr.stack[ejvr.frame].depth = ejvr.p.depth
+}
+
+// pushCodeWithScope enters code-with-scope mode for reading a $scope document.
+func (ejvr *extJSONValueReader) pushCodeWithScope() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mCodeWithScope
+}
+
+// pushArray enters array mode.
+func (ejvr *extJSONValueReader) pushArray() {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = mArray
+}
+
+// push enters mode m for a value of BSON type t (used for document elements
+// and array values).
+func (ejvr *extJSONValueReader) push(m mode, t bsontype.Type) {
+	ejvr.advanceFrame()
+
+	ejvr.stack[ejvr.frame].mode = m
+	ejvr.stack[ejvr.frame].vType = t
+}
+
+// pop unwinds the stack after the current value has been consumed.
+func (ejvr *extJSONValueReader) pop() {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+		ejvr.frame--
+	case mDocument, mArray, mCodeWithScope:
+		ejvr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc...
+	}
+}
+
+// skipDocument consumes the remainder of the current document and returns the
+// terminating error: ErrEOD when the document was fully consumed, otherwise
+// the first real error encountered.
+func (ejvr *extJSONValueReader) skipDocument() error {
+	// read entire document until ErrEOD (using readKey and readValue)
+	_, typ, err := ejvr.p.readKey()
+	for err == nil {
+		_, err = ejvr.p.readValue(typ)
+		if err != nil {
+			break
+		}
+
+		_, typ, err = ejvr.p.readKey()
+	}
+
+	return err
+}
+
+// skipArray consumes the remainder of the current array and returns the
+// terminating error: ErrEOA on a fully consumed array, otherwise the first
+// real error encountered.
+func (ejvr *extJSONValueReader) skipArray() error {
+	// read entire array until ErrEOA (using peekType)
+	_, err := ejvr.p.peekType()
+	for err == nil {
+		_, err = ejvr.p.peekType()
+	}
+
+	return err
+}
+
+// invalidTransitionErr builds a TransitionError describing an illegal attempt
+// to move to destination from the current mode; modes lists the modes from
+// which the transition would have been legal.
+func (ejvr *extJSONValueReader) invalidTransitionErr(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     ejvr.stack[ejvr.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "read",
+	}
+	if ejvr.frame != 0 {
+		te.parent = ejvr.stack[ejvr.frame-1].mode
+	}
+	return te
+}
+
+// typeError reports an attempt to read type t while positioned on a value of
+// a different BSON type.
+func (ejvr *extJSONValueReader) typeError(t bsontype.Type) error {
+	return fmt.Errorf("positioned on %s, but attempted to read %s", ejvr.stack[ejvr.frame].vType, t)
+}
+
+// ensureElementValue verifies the reader is positioned on an element or value
+// of type t. addModes lists additional modes to mention in the transition
+// error produced when the current mode is not element/value.
+func (ejvr *extJSONValueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string, addModes ...mode) error {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+		if ejvr.stack[ejvr.frame].vType != t {
+			return ejvr.typeError(t)
+		}
+	default:
+		modes := []mode{mElement, mValue}
+		if addModes != nil {
+			modes = append(modes, addModes...)
+		}
+		return ejvr.invalidTransitionErr(destination, callerName, modes)
+	}
+
+	return nil
+}
+
+// Type returns the BSON type of the value the reader is positioned on.
+func (ejvr *extJSONValueReader) Type() bsontype.Type {
+	return ejvr.stack[ejvr.frame].vType
+}
+
+// Skip consumes and discards the current element or array value, whatever its
+// type. The current frame is popped regardless of outcome (deferred).
+func (ejvr *extJSONValueReader) Skip() error {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mElement, mValue:
+	default:
+		return ejvr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue})
+	}
+
+	defer ejvr.pop()
+
+	t := ejvr.stack[ejvr.frame].vType
+	switch t {
+	case bsontype.Array:
+		// read entire array until ErrEOA
+		err := ejvr.skipArray()
+		if err != ErrEOA {
+			return err
+		}
+	case bsontype.EmbeddedDocument:
+		// read entire doc until ErrEOD
+		err := ejvr.skipDocument()
+		if err != ErrEOD {
+			return err
+		}
+	case bsontype.CodeWithScope:
+		// read the code portion and set up parser in document mode
+		_, err := ejvr.p.readValue(t)
+		if err != nil {
+			return err
+		}
+
+		// read until ErrEOD
+		err = ejvr.skipDocument()
+		if err != ErrEOD {
+			return err
+		}
+	default:
+		// Scalar value: a single readValue discards it.
+		_, err := ejvr.p.readValue(t)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ReadArray begins reading an array. From the top level or from an
+// element/value of array type a new array frame is pushed; when already in
+// array mode the reader is returned unchanged.
+func (ejvr *extJSONValueReader) ReadArray() (ArrayReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mTopLevel: // allow reading array from top level
+	case mArray:
+		return ejvr, nil
+	default:
+		if err := ejvr.ensureElementValue(bsontype.Array, mArray, "ReadArray", mTopLevel, mArray); err != nil {
+			return nil, err
+		}
+	}
+
+	ejvr.pushArray()
+
+	return ejvr, nil
+}
+
+// ReadBinary reads a $binary wrapper and returns its decoded bytes and
+// subtype; the element frame is popped afterwards.
+func (ejvr *extJSONValueReader) ReadBinary() (b []byte, btype byte, err error) {
+	if err := ejvr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil {
+		return nil, 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Binary)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	b, btype, err = v.parseBinary()
+
+	ejvr.pop()
+	return b, btype, err
+}
+
+// ReadBoolean reads a JSON boolean literal.
+func (ejvr *extJSONValueReader) ReadBoolean() (bool, error) {
+	if err := ejvr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil {
+		return false, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Boolean)
+	if err != nil {
+		return false, err
+	}
+
+	if v.t != bsontype.Boolean {
+		return false, fmt.Errorf("expected type bool, but got type %s", v.t)
+	}
+
+	ejvr.pop()
+	return v.v.(bool), nil
+}
+
+// ReadDocument begins reading the top-level document or an embedded document
+// and returns a DocumentReader positioned on it.
+func (ejvr *extJSONValueReader) ReadDocument() (DocumentReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mTopLevel:
+		return ejvr, nil
+	case mElement, mValue:
+		if ejvr.stack[ejvr.frame].vType != bsontype.EmbeddedDocument {
+			return nil, ejvr.typeError(bsontype.EmbeddedDocument)
+		}
+
+		ejvr.pushDocument()
+		return ejvr, nil
+	default:
+		return nil, ejvr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue})
+	}
+}
+
+// ReadCodeWithScope reads the code portion of a $code/$scope pair and leaves
+// the reader positioned to read the scope document.
+func (ejvr *extJSONValueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) {
+	if err = ejvr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil {
+		return "", nil, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.CodeWithScope)
+	if err != nil {
+		return "", nil, err
+	}
+
+	code, err = v.parseJavascript()
+
+	ejvr.pushCodeWithScope()
+	return code, ejvr, err
+}
+
+// ReadDBPointer reads a $dbPointer wrapper and returns its namespace and
+// ObjectID.
+func (ejvr *extJSONValueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) {
+	if err = ejvr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil {
+		return "", primitive.NilObjectID, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.DBPointer)
+	if err != nil {
+		return "", primitive.NilObjectID, err
+	}
+
+	ns, oid, err = v.parseDBPointer()
+
+	ejvr.pop()
+	return ns, oid, err
+}
+
+// ReadDateTime reads a $date wrapper as milliseconds since the Unix epoch.
+func (ejvr *extJSONValueReader) ReadDateTime() (int64, error) {
+	if err := ejvr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.DateTime)
+	if err != nil {
+		return 0, err
+	}
+
+	d, err := v.parseDateTime()
+
+	ejvr.pop()
+	return d, err
+}
+
+// ReadDecimal128 reads a $numberDecimal wrapper.
+func (ejvr *extJSONValueReader) ReadDecimal128() (primitive.Decimal128, error) {
+	if err := ejvr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Decimal128)
+	if err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	d, err := v.parseDecimal128()
+
+	ejvr.pop()
+	return d, err
+}
+
+// ReadDouble reads a double, either a plain JSON number or a $numberDouble
+// wrapper.
+func (ejvr *extJSONValueReader) ReadDouble() (float64, error) {
+	if err := ejvr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Double)
+	if err != nil {
+		return 0, err
+	}
+
+	d, err := v.parseDouble()
+
+	ejvr.pop()
+	return d, err
+}
+
+// ReadInt32 reads an int32, either a plain JSON number or a $numberInt
+// wrapper.
+func (ejvr *extJSONValueReader) ReadInt32() (int32, error) {
+	if err := ejvr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Int32)
+	if err != nil {
+		return 0, err
+	}
+
+	i, err := v.parseInt32()
+
+	ejvr.pop()
+	return i, err
+}
+
+// ReadInt64 reads an int64, either a plain JSON number or a $numberLong
+// wrapper.
+func (ejvr *extJSONValueReader) ReadInt64() (int64, error) {
+	if err := ejvr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil {
+		return 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Int64)
+	if err != nil {
+		return 0, err
+	}
+
+	i, err := v.parseInt64()
+
+	ejvr.pop()
+	return i, err
+}
+
+// ReadJavascript reads a $code wrapper (without scope).
+func (ejvr *extJSONValueReader) ReadJavascript() (code string, err error) {
+	if err = ejvr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil {
+		return "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.JavaScript)
+	if err != nil {
+		return "", err
+	}
+
+	code, err = v.parseJavascript()
+
+	ejvr.pop()
+	return code, err
+}
+
+// ReadMaxKey reads and validates a $maxKey wrapper.
+func (ejvr *extJSONValueReader) ReadMaxKey() error {
+	if err := ejvr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.MaxKey)
+	if err != nil {
+		return err
+	}
+
+	err = v.parseMinMaxKey("max")
+
+	ejvr.pop()
+	return err
+}
+
+// ReadMinKey reads and validates a $minKey wrapper.
+func (ejvr *extJSONValueReader) ReadMinKey() error {
+	if err := ejvr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.MinKey)
+	if err != nil {
+		return err
+	}
+
+	err = v.parseMinMaxKey("min")
+
+	ejvr.pop()
+	return err
+}
+
+// ReadNull reads a JSON null literal.
+func (ejvr *extJSONValueReader) ReadNull() error {
+	if err := ejvr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Null)
+	if err != nil {
+		return err
+	}
+
+	if v.t != bsontype.Null {
+		return fmt.Errorf("expected type null but got type %s", v.t)
+	}
+
+	ejvr.pop()
+	return nil
+}
+
+// ReadObjectID reads an $oid wrapper.
+func (ejvr *extJSONValueReader) ReadObjectID() (primitive.ObjectID, error) {
+	if err := ejvr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.ObjectID)
+	if err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	oid, err := v.parseObjectID()
+
+	ejvr.pop()
+	return oid, err
+}
+
+// ReadRegex reads a $regularExpression wrapper and returns its pattern and
+// options strings.
+func (ejvr *extJSONValueReader) ReadRegex() (pattern string, options string, err error) {
+	if err = ejvr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil {
+		return "", "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Regex)
+	if err != nil {
+		return "", "", err
+	}
+
+	pattern, options, err = v.parseRegex()
+
+	ejvr.pop()
+	return pattern, options, err
+}
+
+// ReadString reads a plain JSON string.
+func (ejvr *extJSONValueReader) ReadString() (string, error) {
+	if err := ejvr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil {
+		return "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.String)
+	if err != nil {
+		return "", err
+	}
+
+	if v.t != bsontype.String {
+		return "", fmt.Errorf("expected type string but got type %s", v.t)
+	}
+
+	ejvr.pop()
+	return v.v.(string), nil
+}
+
+// ReadSymbol reads a $symbol wrapper.
+func (ejvr *extJSONValueReader) ReadSymbol() (symbol string, err error) {
+	if err = ejvr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil {
+		return "", err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Symbol)
+	if err != nil {
+		return "", err
+	}
+
+	symbol, err = v.parseSymbol()
+
+	ejvr.pop()
+	return symbol, err
+}
+
+// ReadTimestamp reads a $timestamp wrapper and returns its seconds (t) and
+// increment (i) components.
+func (ejvr *extJSONValueReader) ReadTimestamp() (t uint32, i uint32, err error) {
+	if err = ejvr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil {
+		return 0, 0, err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Timestamp)
+	if err != nil {
+		return 0, 0, err
+	}
+
+	t, i, err = v.parseTimestamp()
+
+	ejvr.pop()
+	return t, i, err
+}
+
+// ReadUndefined reads and validates an $undefined wrapper.
+func (ejvr *extJSONValueReader) ReadUndefined() error {
+	if err := ejvr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil {
+		return err
+	}
+
+	v, err := ejvr.p.readValue(bsontype.Undefined)
+	if err != nil {
+		return err
+	}
+
+	err = v.parseUndefined()
+
+	ejvr.pop()
+	return err
+}
+
+// ReadElement reads the next key/type pair of the current document and
+// positions the reader on that element's value. When the document is
+// exhausted, ErrEOD is returned and the document frame is popped.
+func (ejvr *extJSONValueReader) ReadElement() (string, ValueReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mTopLevel, mDocument, mCodeWithScope:
+	default:
+		return "", nil, ejvr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope})
+	}
+
+	name, t, err := ejvr.p.readKey()
+
+	if err != nil {
+		if err == ErrEOD {
+			if ejvr.stack[ejvr.frame].mode == mCodeWithScope {
+				// NOTE(review): this extra peek appears to let the parser
+				// validate/consume the end of the enclosing $code/$scope
+				// object — confirm against the parser implementation.
+				_, err := ejvr.p.peekType()
+				if err != nil {
+					return "", nil, err
+				}
+			}
+
+			ejvr.pop()
+		}
+
+		return "", nil, err
+	}
+
+	ejvr.push(mElement, t)
+	return name, ejvr, nil
+}
+
+// ReadValue positions the reader on the next array value. When the array is
+// exhausted, ErrEOA is returned and the array frame is popped.
+func (ejvr *extJSONValueReader) ReadValue() (ValueReader, error) {
+	switch ejvr.stack[ejvr.frame].mode {
+	case mArray:
+	default:
+		return nil, ejvr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray})
+	}
+
+	t, err := ejvr.p.peekType()
+	if err != nil {
+		if err == ErrEOA {
+			ejvr.pop()
+		}
+
+		return nil, err
+	}
+
+	ejvr.push(mValue, t)
+	return ejvr, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_tables.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_tables.go
new file mode 100644
index 0000000..ba39c96
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_tables.go
@@ -0,0 +1,223 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/golang/go by The Go Authors
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package bsonrw
+
+import "unicode/utf8"
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\").
+var safeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML <script> tags, without any additional escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), the backslash character ("\"), HTML opening and closing
+// tags ("<" and ">"), and the ampersand ("&").
+var htmlSafeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      false,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      false,
+	'=':      true,
+	'>':      false,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_wrappers.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_wrappers.go
new file mode 100644
index 0000000..196662c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_wrappers.go
@@ -0,0 +1,481 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"math"
+	"strconv"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// wrapperKeyBSONType maps an extended JSON wrapper key (e.g. "$numberInt") to
+// the BSON type it denotes. The DBRef keys ($ref, $id, $db) and any
+// unrecognized key map to EmbeddedDocument, since they do not wrap a single
+// BSON value.
+//
+// Fixes: removed the redundant string(key) conversion and replaced the
+// fallthrough chain plus trailing return with a combined case and a default
+// branch (behavior unchanged).
+func wrapperKeyBSONType(key string) bsontype.Type {
+	switch key {
+	case "$numberInt":
+		return bsontype.Int32
+	case "$numberLong":
+		return bsontype.Int64
+	case "$oid":
+		return bsontype.ObjectID
+	case "$symbol":
+		return bsontype.Symbol
+	case "$numberDouble":
+		return bsontype.Double
+	case "$numberDecimal":
+		return bsontype.Decimal128
+	case "$binary":
+		return bsontype.Binary
+	case "$code":
+		return bsontype.JavaScript
+	case "$scope":
+		return bsontype.CodeWithScope
+	case "$timestamp":
+		return bsontype.Timestamp
+	case "$regularExpression":
+		return bsontype.Regex
+	case "$dbPointer":
+		return bsontype.DBPointer
+	case "$date":
+		return bsontype.DateTime
+	case "$ref", "$id", "$db":
+		return bsontype.EmbeddedDocument // dbrefs aren't bson types
+	case "$minKey":
+		return bsontype.MinKey
+	case "$maxKey":
+		return bsontype.MaxKey
+	case "$undefined":
+		return bsontype.Undefined
+	default:
+		return bsontype.EmbeddedDocument
+	}
+}
+
+// parseBinary converts a $binary wrapper object ({"base64": ..., "subType": ...})
+// into its raw bytes and subtype byte, rejecting duplicate, missing, or
+// unknown keys.
+func (ejv *extJSONValue) parseBinary() (b []byte, subType byte, err error) {
+	if ejv.t != bsontype.EmbeddedDocument {
+		return nil, 0, fmt.Errorf("$binary value should be object, but instead is %s", ejv.t)
+	}
+
+	binObj := ejv.v.(*extJSONObject)
+	bFound := false
+	stFound := false
+
+	for i, key := range binObj.keys {
+		val := binObj.values[i]
+
+		switch key {
+		case "base64":
+			if bFound {
+				return nil, 0, errors.New("duplicate base64 key in $binary")
+			}
+
+			if val.t != bsontype.String {
+				return nil, 0, fmt.Errorf("$binary base64 value should be string, but instead is %s", val.t)
+			}
+
+			base64Bytes, err := base64.StdEncoding.DecodeString(val.v.(string))
+			if err != nil {
+				return nil, 0, fmt.Errorf("invalid $binary base64 string: %s", val.v.(string))
+			}
+
+			b = base64Bytes
+			bFound = true
+		case "subType":
+			if stFound {
+				return nil, 0, errors.New("duplicate subType key in $binary")
+			}
+
+			if val.t != bsontype.String {
+				return nil, 0, fmt.Errorf("$binary subType value should be string, but instead is %s", val.t)
+			}
+
+			// The subtype is transmitted as a hexadecimal string (e.g. "00").
+			i, err := strconv.ParseInt(val.v.(string), 16, 64)
+			if err != nil {
+				return nil, 0, fmt.Errorf("invalid $binary subType string: %s", val.v.(string))
+			}
+
+			subType = byte(i)
+			stFound = true
+		default:
+			return nil, 0, fmt.Errorf("invalid key in $binary object: %s", key)
+		}
+	}
+
+	if !bFound {
+		return nil, 0, errors.New("missing base64 field in $binary object")
+	}
+
+	if !stFound {
+		return nil, 0, errors.New("missing subType field in $binary object")
+
+	}
+
+	return b, subType, nil
+}
+
+// parseDBPointer converts a $dbPointer wrapper object ({"$ref": ..., "$id": ...})
+// into its namespace string and ObjectID, rejecting duplicate, missing, or
+// unknown keys.
+func (ejv *extJSONValue) parseDBPointer() (ns string, oid primitive.ObjectID, err error) {
+	if ejv.t != bsontype.EmbeddedDocument {
+		return "", primitive.NilObjectID, fmt.Errorf("$dbPointer value should be object, but instead is %s", ejv.t)
+	}
+
+	dbpObj := ejv.v.(*extJSONObject)
+	oidFound := false
+	nsFound := false
+
+	for i, key := range dbpObj.keys {
+		val := dbpObj.values[i]
+
+		switch key {
+		case "$ref":
+			if nsFound {
+				return "", primitive.NilObjectID, errors.New("duplicate $ref key in $dbPointer")
+			}
+
+			if val.t != bsontype.String {
+				return "", primitive.NilObjectID, fmt.Errorf("$dbPointer $ref value should be string, but instead is %s", val.t)
+			}
+
+			ns = val.v.(string)
+			nsFound = true
+		case "$id":
+			if oidFound {
+				return "", primitive.NilObjectID, errors.New("duplicate $id key in $dbPointer")
+			}
+
+			if val.t != bsontype.String {
+				return "", primitive.NilObjectID, fmt.Errorf("$dbPointer $id value should be string, but instead is %s", val.t)
+			}
+
+			oid, err = primitive.ObjectIDFromHex(val.v.(string))
+			if err != nil {
+				return "", primitive.NilObjectID, err
+			}
+
+			oidFound = true
+		default:
+			return "", primitive.NilObjectID, fmt.Errorf("invalid key in $dbPointer object: %s", key)
+		}
+	}
+
+	if !nsFound {
+		return "", oid, errors.New("missing $ref field in $dbPointer object")
+	}
+
+	if !oidFound {
+		return "", oid, errors.New("missing $id field in $dbPointer object")
+	}
+
+	return ns, oid, nil
+}
+
+// rfc3339Milli is RFC 3339 with optional fractional seconds to millisecond
+// precision, used to parse $date strings.
+const rfc3339Milli = "2006-01-02T15:04:05.999Z07:00"
+
+// parseDateTime converts a $date value (numeric, ISO-8601 string, or a
+// {"$numberLong": ...} object) to milliseconds since the Unix epoch.
+func (ejv *extJSONValue) parseDateTime() (int64, error) {
+	switch ejv.t {
+	case bsontype.Int32:
+		return int64(ejv.v.(int32)), nil
+	case bsontype.Int64:
+		return ejv.v.(int64), nil
+	case bsontype.String:
+		return parseDatetimeString(ejv.v.(string))
+	case bsontype.EmbeddedDocument:
+		return parseDatetimeObject(ejv.v.(*extJSONObject))
+	default:
+		return 0, fmt.Errorf("$date value should be string or object, but instead is %s", ejv.t)
+	}
+}
+
+// parseDatetimeString parses an RFC 3339 (millisecond precision) string into
+// milliseconds since the Unix epoch. Nanosecond remainders are truncated by
+// the division.
+func parseDatetimeString(data string) (int64, error) {
+	t, err := time.Parse(rfc3339Milli, data)
+	if err != nil {
+		return 0, fmt.Errorf("invalid $date value string: %s", data)
+	}
+
+	return t.UnixNano() / 1e6, nil
+}
+
+// parseDatetimeObject extracts the millisecond timestamp from a canonical
+// $date object of the form {"$numberLong": "<ms>"}.
+func parseDatetimeObject(data *extJSONObject) (d int64, err error) {
+	dFound := false
+
+	for i, key := range data.keys {
+		val := data.values[i]
+
+		switch key {
+		case "$numberLong":
+			if dFound {
+				return 0, errors.New("duplicate $numberLong key in $date")
+			}
+
+			if val.t != bsontype.String {
+				return 0, fmt.Errorf("$date $numberLong field should be string, but instead is %s", val.t)
+			}
+
+			d, err = val.parseInt64()
+			if err != nil {
+				return 0, err
+			}
+			dFound = true
+		default:
+			return 0, fmt.Errorf("invalid key in $date object: %s", key)
+		}
+	}
+
+	if !dFound {
+		return 0, errors.New("missing $numberLong field in $date object")
+	}
+
+	return d, nil
+}
+
+// parseDecimal128 converts a $numberDecimal wrapper value (a string) into a
+// primitive.Decimal128.
+func (ejv *extJSONValue) parseDecimal128() (primitive.Decimal128, error) {
+	if ejv.t != bsontype.String {
+		return primitive.Decimal128{}, fmt.Errorf("$numberDecimal value should be string, but instead is %s", ejv.t)
+	}
+
+	d, err := primitive.ParseDecimal128(ejv.v.(string))
+	if err != nil {
+		// Fixed error text: previously read "$invalid $numberDecimal string"
+		// (stray leading "$").
+		return primitive.Decimal128{}, fmt.Errorf("invalid $numberDecimal string: %s", ejv.v.(string))
+	}
+
+	return d, nil
+}
+
+// parseDouble converts a $numberDouble wrapper into a float64. A value that
+// is already a double is returned as-is; otherwise the value must be a
+// string, with "Infinity", "-Infinity", and "NaN" handled specially.
+//
+// Fixes: removed the redundant string(...) conversion of an expression that
+// is already a string (flagged by go vet/staticcheck) and hoisted the
+// repeated type assertion into a local.
+func (ejv *extJSONValue) parseDouble() (float64, error) {
+	if ejv.t == bsontype.Double {
+		return ejv.v.(float64), nil
+	}
+
+	if ejv.t != bsontype.String {
+		return 0, fmt.Errorf("$numberDouble value should be string, but instead is %s", ejv.t)
+	}
+
+	s := ejv.v.(string)
+	switch s {
+	case "Infinity":
+		return math.Inf(1), nil
+	case "-Infinity":
+		return math.Inf(-1), nil
+	case "NaN":
+		return math.NaN(), nil
+	}
+
+	f, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		// Return 0 (not ParseFloat's partial value) on error, as before.
+		return 0, err
+	}
+
+	return f, nil
+}
+
+// parseInt32 converts a $numberInt wrapper into an int32. A value that is
+// already an int32 is returned as-is; otherwise the value must be a decimal
+// string, and out-of-range values are rejected.
+func (ejv *extJSONValue) parseInt32() (int32, error) {
+	if ejv.t == bsontype.Int32 {
+		return ejv.v.(int32), nil
+	}
+
+	if ejv.t != bsontype.String {
+		return 0, fmt.Errorf("$numberInt value should be string, but instead is %s", ejv.t)
+	}
+
+	// Parse at 64-bit width first so an out-of-range value yields the custom
+	// error below rather than strconv's range error.
+	i, err := strconv.ParseInt(ejv.v.(string), 10, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	if i < math.MinInt32 || i > math.MaxInt32 {
+		return 0, fmt.Errorf("$numberInt value should be int32 but instead is int64: %d", i)
+	}
+
+	return int32(i), nil
+}
+
+// parseInt64 converts a $numberLong wrapper into an int64. A value that is
+// already an int64 is returned as-is; otherwise the value must be a decimal
+// string.
+func (ejv *extJSONValue) parseInt64() (int64, error) {
+	if ejv.t == bsontype.Int64 {
+		return ejv.v.(int64), nil
+	}
+
+	if ejv.t != bsontype.String {
+		return 0, fmt.Errorf("$numberLong value should be string, but instead is %s", ejv.t)
+	}
+
+	i, err := strconv.ParseInt(ejv.v.(string), 10, 64)
+	if err != nil {
+		return 0, err
+	}
+
+	return i, nil
+}
+
+// parseJavascript extracts the code string from a $code wrapper value.
+func (ejv *extJSONValue) parseJavascript() (code string, err error) {
+	if ejv.t != bsontype.String {
+		return "", fmt.Errorf("$code value should be string, but instead is %s", ejv.t)
+	}
+
+	return ejv.v.(string), nil
+}
+
+// parseMinMaxKey validates a $minKey/$maxKey wrapper, whose value must be
+// the int32 literal 1. minmax is "min" or "max" and is used only in error
+// messages.
+func (ejv *extJSONValue) parseMinMaxKey(minmax string) error {
+	if ejv.t != bsontype.Int32 {
+		return fmt.Errorf("$%sKey value should be int32, but instead is %s", minmax, ejv.t)
+	}
+
+	if ejv.v.(int32) != 1 {
+		return fmt.Errorf("$%sKey value must be 1, but instead is %d", minmax, ejv.v.(int32))
+	}
+
+	return nil
+}
+
+// parseObjectID converts an $oid wrapper (a 24-character hex string) into a
+// primitive.ObjectID.
+func (ejv *extJSONValue) parseObjectID() (primitive.ObjectID, error) {
+	if ejv.t != bsontype.String {
+		return primitive.NilObjectID, fmt.Errorf("$oid value should be string, but instead is %s", ejv.t)
+	}
+
+	return primitive.ObjectIDFromHex(ejv.v.(string))
+}
+
+// parseRegex converts a $regularExpression wrapper object
+// ({"pattern": ..., "options": ...}) into its pattern and options strings,
+// rejecting duplicate, missing, or unknown keys.
+//
+// Fixes: removed the redundant string(key) conversion (key is already a
+// string; flagged by go vet/staticcheck) and a stray blank line.
+func (ejv *extJSONValue) parseRegex() (pattern, options string, err error) {
+	if ejv.t != bsontype.EmbeddedDocument {
+		return "", "", fmt.Errorf("$regularExpression value should be object, but instead is %s", ejv.t)
+	}
+
+	regexObj := ejv.v.(*extJSONObject)
+	patFound := false
+	optFound := false
+
+	for i, key := range regexObj.keys {
+		val := regexObj.values[i]
+
+		switch key {
+		case "pattern":
+			if patFound {
+				return "", "", errors.New("duplicate pattern key in $regularExpression")
+			}
+
+			if val.t != bsontype.String {
+				return "", "", fmt.Errorf("$regularExpression pattern value should be string, but instead is %s", val.t)
+			}
+
+			pattern = val.v.(string)
+			patFound = true
+		case "options":
+			if optFound {
+				return "", "", errors.New("duplicate options key in $regularExpression")
+			}
+
+			if val.t != bsontype.String {
+				return "", "", fmt.Errorf("$regularExpression options value should be string, but instead is %s", val.t)
+			}
+
+			options = val.v.(string)
+			optFound = true
+		default:
+			return "", "", fmt.Errorf("invalid key in $regularExpression object: %s", key)
+		}
+	}
+
+	if !patFound {
+		return "", "", errors.New("missing pattern field in $regularExpression object")
+	}
+
+	if !optFound {
+		return "", "", errors.New("missing options field in $regularExpression object")
+	}
+
+	return pattern, options, nil
+}
+
+// parseSymbol extracts the string from a $symbol wrapper value.
+func (ejv *extJSONValue) parseSymbol() (string, error) {
+	if ejv.t != bsontype.String {
+		return "", fmt.Errorf("$symbol value should be string, but instead is %s", ejv.t)
+	}
+
+	return ejv.v.(string), nil
+}
+
+// parseTimestamp converts a $timestamp wrapper object ({"t": ..., "i": ...})
+// into its seconds (t) and increment (i) components, rejecting duplicate,
+// missing, negative, out-of-range, or unknown fields.
+func (ejv *extJSONValue) parseTimestamp() (t, i uint32, err error) {
+	if ejv.t != bsontype.EmbeddedDocument {
+		return 0, 0, fmt.Errorf("$timestamp value should be object, but instead is %s", ejv.t)
+	}
+
+	// handleKey validates and converts a single t/i field; flag reports
+	// whether the field was already seen (duplicate key).
+	handleKey := func(key string, val *extJSONValue, flag bool) (uint32, error) {
+		if flag {
+			return 0, fmt.Errorf("duplicate %s key in $timestamp", key)
+		}
+
+		switch val.t {
+		case bsontype.Int32:
+			if val.v.(int32) < 0 {
+				// BUG FIX: previously formatted with string(int32), which
+				// produces a rune, not a decimal string (go vet error).
+				return 0, fmt.Errorf("$timestamp %s number should be uint32: %d", key, val.v.(int32))
+			}
+
+			return uint32(val.v.(int32)), nil
+		case bsontype.Int64:
+			// BUG FIX: the range check previously truncated to uint32 before
+			// comparing against math.MaxUint32 (always false), and the error
+			// path asserted int32 on an int64 value, which would panic.
+			if val.v.(int64) < 0 || val.v.(int64) > math.MaxUint32 {
+				return 0, fmt.Errorf("$timestamp %s number should be uint32: %d", key, val.v.(int64))
+			}
+
+			return uint32(val.v.(int64)), nil
+		default:
+			return 0, fmt.Errorf("$timestamp %s value should be uint32, but instead is %s", key, val.t)
+		}
+	}
+
+	tsObj := ejv.v.(*extJSONObject)
+	tFound := false
+	iFound := false
+
+	for j, key := range tsObj.keys {
+		val := tsObj.values[j]
+
+		switch key {
+		case "t":
+			if t, err = handleKey(key, val, tFound); err != nil {
+				return 0, 0, err
+			}
+
+			tFound = true
+		case "i":
+			if i, err = handleKey(key, val, iFound); err != nil {
+				return 0, 0, err
+			}
+
+			iFound = true
+		default:
+			return 0, 0, fmt.Errorf("invalid key in $timestamp object: %s", key)
+		}
+	}
+
+	if !tFound {
+		return 0, 0, errors.New("missing t field in $timestamp object")
+	}
+
+	if !iFound {
+		return 0, 0, errors.New("missing i field in $timestamp object")
+	}
+
+	return t, i, nil
+}
+
+// parseUndefined validates an $undefined wrapper, whose value must be the
+// boolean literal true.
+func (ejv *extJSONValue) parseUndefined() error {
+	if ejv.t != bsontype.Boolean {
+		return fmt.Errorf("undefined value should be boolean, but instead is %s", ejv.t)
+	}
+
+	if !ejv.v.(bool) {
+		// Fixed error text: previously misspelled "balue".
+		return fmt.Errorf("$undefined value boolean should be true, but instead is %v", ejv.v.(bool))
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_writer.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_writer.go
new file mode 100644
index 0000000..04fbb56
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/extjson_writer.go
@@ -0,0 +1,734 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"io"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+)
+
+var ejvwPool = sync.Pool{
+	New: func() interface{} {
+		return new(extJSONValueWriter)
+	},
+}
+
+// ExtJSONValueWriterPool is a pool for ExtJSON ValueWriters.
+type ExtJSONValueWriterPool struct {
+	pool sync.Pool
+}
+
+// NewExtJSONValueWriterPool creates a new pool for ValueWriter instances that write to ExtJSON.
+func NewExtJSONValueWriterPool() *ExtJSONValueWriterPool {
+	return &ExtJSONValueWriterPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(extJSONValueWriter)
+			},
+		},
+	}
+}
+
+// Get retrieves an ExtJSON ValueWriter from the pool and resets it to use w as the destination.
+func (bvwp *ExtJSONValueWriterPool) Get(w io.Writer, canonical, escapeHTML bool) ValueWriter {
+	vw := bvwp.pool.Get().(*extJSONValueWriter)
+	if writer, ok := w.(*SliceWriter); ok {
+		vw.reset(*writer, canonical, escapeHTML)
+		vw.w = writer
+		return vw
+	}
+	vw.buf = vw.buf[:0]
+	vw.w = w
+	return vw
+}
+
+// Put inserts a ValueWriter into the pool. If the ValueWriter is not an ExtJSON ValueWriter,
+// nothing happens and ok will be false.
+func (bvwp *ExtJSONValueWriterPool) Put(vw ValueWriter) (ok bool) {
+	bvw, ok := vw.(*extJSONValueWriter)
+	if !ok {
+		return false
+	}
+
+	if _, ok := bvw.w.(*SliceWriter); ok {
+		bvw.buf = nil
+	}
+	bvw.w = nil
+
+	bvwp.pool.Put(bvw)
+	return true
+}
+
+type ejvwState struct {
+	mode mode
+}
+
+type extJSONValueWriter struct {
+	w   io.Writer
+	buf []byte
+
+	stack      []ejvwState
+	frame      int64
+	canonical  bool
+	escapeHTML bool
+}
+
+// NewExtJSONValueWriter creates a ValueWriter that writes Extended JSON to w.
+func NewExtJSONValueWriter(w io.Writer, canonical, escapeHTML bool) (ValueWriter, error) {
+	if w == nil {
+		return nil, errNilWriter
+	}
+
+	return newExtJSONWriter(w, canonical, escapeHTML), nil
+}
+
+func newExtJSONWriter(w io.Writer, canonical, escapeHTML bool) *extJSONValueWriter {
+	stack := make([]ejvwState, 1, 5)
+	stack[0] = ejvwState{mode: mTopLevel}
+
+	return &extJSONValueWriter{
+		w:          w,
+		buf:        []byte{},
+		stack:      stack,
+		canonical:  canonical,
+		escapeHTML: escapeHTML,
+	}
+}
+
+func newExtJSONWriterFromSlice(buf []byte, canonical, escapeHTML bool) *extJSONValueWriter {
+	stack := make([]ejvwState, 1, 5)
+	stack[0] = ejvwState{mode: mTopLevel}
+
+	return &extJSONValueWriter{
+		buf:        buf,
+		stack:      stack,
+		canonical:  canonical,
+		escapeHTML: escapeHTML,
+	}
+}
+
+func (ejvw *extJSONValueWriter) reset(buf []byte, canonical, escapeHTML bool) {
+	if ejvw.stack == nil {
+		ejvw.stack = make([]ejvwState, 1, 5)
+	}
+
+	ejvw.stack = ejvw.stack[:1]
+	ejvw.stack[0] = ejvwState{mode: mTopLevel}
+	ejvw.canonical = canonical
+	ejvw.escapeHTML = escapeHTML
+	ejvw.frame = 0
+	ejvw.buf = buf
+	ejvw.w = nil
+}
+
+func (ejvw *extJSONValueWriter) advanceFrame() {
+	if ejvw.frame+1 >= int64(len(ejvw.stack)) { // We need to grow the stack
+		length := len(ejvw.stack)
+		if length+1 >= cap(ejvw.stack) {
+			// double it
+			buf := make([]ejvwState, 2*cap(ejvw.stack)+1)
+			copy(buf, ejvw.stack)
+			ejvw.stack = buf
+		}
+		ejvw.stack = ejvw.stack[:length+1]
+	}
+	ejvw.frame++
+}
+
+func (ejvw *extJSONValueWriter) push(m mode) {
+	ejvw.advanceFrame()
+
+	ejvw.stack[ejvw.frame].mode = m
+}
+
+func (ejvw *extJSONValueWriter) pop() {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mElement, mValue:
+		ejvw.frame--
+	case mDocument, mArray, mCodeWithScope:
+		ejvw.frame -= 2 // we pop twice to jump over the mElement: mDocument -> mElement -> mDocument/mTopLevel/etc...
+	}
+}
+
+func (ejvw *extJSONValueWriter) invalidTransitionErr(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     ejvw.stack[ejvw.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "write",
+	}
+	if ejvw.frame != 0 {
+		te.parent = ejvw.stack[ejvw.frame-1].mode
+	}
+	return te
+}
+
+func (ejvw *extJSONValueWriter) ensureElementValue(destination mode, callerName string, addmodes ...mode) error {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mElement, mValue:
+	default:
+		modes := []mode{mElement, mValue}
+		if addmodes != nil {
+			modes = append(modes, addmodes...)
+		}
+		return ejvw.invalidTransitionErr(destination, callerName, modes)
+	}
+
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) writeExtendedSingleValue(key string, value string, quotes bool) {
+	var s string
+	if quotes {
+		s = fmt.Sprintf(`{"$%s":"%s"}`, key, value)
+	} else {
+		s = fmt.Sprintf(`{"$%s":%s}`, key, value)
+	}
+
+	ejvw.buf = append(ejvw.buf, []byte(s)...)
+}
+
+func (ejvw *extJSONValueWriter) WriteArray() (ArrayWriter, error) {
+	if err := ejvw.ensureElementValue(mArray, "WriteArray"); err != nil {
+		return nil, err
+	}
+
+	ejvw.buf = append(ejvw.buf, '[')
+
+	ejvw.push(mArray)
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteBinary(b []byte) error {
+	return ejvw.WriteBinaryWithSubtype(b, 0x00)
+}
+
+func (ejvw *extJSONValueWriter) WriteBinaryWithSubtype(b []byte, btype byte) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteBinaryWithSubtype"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$binary":{"base64":"`)
+	buf.WriteString(base64.StdEncoding.EncodeToString(b))
+	buf.WriteString(fmt.Sprintf(`","subType":"%02x"}},`, btype))
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteBoolean(b bool) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteBoolean"); err != nil {
+		return err
+	}
+
+	ejvw.buf = append(ejvw.buf, []byte(strconv.FormatBool(b))...)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteCodeWithScope(code string) (DocumentWriter, error) {
+	if err := ejvw.ensureElementValue(mCodeWithScope, "WriteCodeWithScope"); err != nil {
+		return nil, err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$code":`)
+	writeStringWithEscapes(code, &buf, ejvw.escapeHTML)
+	buf.WriteString(`,"$scope":{`)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.push(mCodeWithScope)
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDBPointer(ns string, oid primitive.ObjectID) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteDBPointer"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$dbPointer":{"$ref":"`)
+	buf.WriteString(ns)
+	buf.WriteString(`","$id":{"$oid":"`)
+	buf.WriteString(oid.Hex())
+	buf.WriteString(`"}}},`)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDateTime(dt int64) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteDateTime"); err != nil {
+		return err
+	}
+
+	t := time.Unix(dt/1e3, dt%1e3*1e6).UTC()
+
+	if ejvw.canonical || t.Year() < 1970 || t.Year() > 9999 {
+		s := fmt.Sprintf(`{"$numberLong":"%d"}`, dt)
+		ejvw.writeExtendedSingleValue("date", s, false)
+	} else {
+		ejvw.writeExtendedSingleValue("date", t.Format(rfc3339Milli), true)
+	}
+
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDecimal128(d primitive.Decimal128) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteDecimal128"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("numberDecimal", d.String(), true)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDocument() (DocumentWriter, error) {
+	if ejvw.stack[ejvw.frame].mode == mTopLevel {
+		ejvw.buf = append(ejvw.buf, '{')
+		return ejvw, nil
+	}
+
+	if err := ejvw.ensureElementValue(mDocument, "WriteDocument", mTopLevel); err != nil {
+		return nil, err
+	}
+
+	ejvw.buf = append(ejvw.buf, '{')
+	ejvw.push(mDocument)
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDouble(f float64) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteDouble"); err != nil {
+		return err
+	}
+
+	s := formatDouble(f)
+
+	if ejvw.canonical {
+		ejvw.writeExtendedSingleValue("numberDouble", s, true)
+	} else {
+		switch s {
+		case "Infinity":
+			fallthrough
+		case "-Infinity":
+			fallthrough
+		case "NaN":
+			s = fmt.Sprintf(`{"$numberDouble":"%s"}`, s)
+		}
+		ejvw.buf = append(ejvw.buf, []byte(s)...)
+	}
+
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteInt32(i int32) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteInt32"); err != nil {
+		return err
+	}
+
+	s := strconv.FormatInt(int64(i), 10)
+
+	if ejvw.canonical {
+		ejvw.writeExtendedSingleValue("numberInt", s, true)
+	} else {
+		ejvw.buf = append(ejvw.buf, []byte(s)...)
+	}
+
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteInt64(i int64) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteInt64"); err != nil {
+		return err
+	}
+
+	s := strconv.FormatInt(i, 10)
+
+	if ejvw.canonical {
+		ejvw.writeExtendedSingleValue("numberLong", s, true)
+	} else {
+		ejvw.buf = append(ejvw.buf, []byte(s)...)
+	}
+
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteJavascript(code string) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteJavascript"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	writeStringWithEscapes(code, &buf, ejvw.escapeHTML)
+
+	ejvw.writeExtendedSingleValue("code", buf.String(), false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteMaxKey() error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteMaxKey"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("maxKey", "1", false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteMinKey() error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteMinKey"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("minKey", "1", false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteNull() error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteNull"); err != nil {
+		return err
+	}
+
+	ejvw.buf = append(ejvw.buf, []byte("null")...)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteObjectID(oid primitive.ObjectID) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteObjectID"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("oid", oid.Hex(), true)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteRegex(pattern string, options string) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteRegex"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$regularExpression":{"pattern":`)
+	writeStringWithEscapes(pattern, &buf, ejvw.escapeHTML)
+	buf.WriteString(`,"options":"`)
+	buf.WriteString(sortStringAlphebeticAscending(options))
+	buf.WriteString(`"}},`)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteString(s string) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteString"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	writeStringWithEscapes(s, &buf, ejvw.escapeHTML)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteSymbol(symbol string) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteSymbol"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	writeStringWithEscapes(symbol, &buf, ejvw.escapeHTML)
+
+	ejvw.writeExtendedSingleValue("symbol", buf.String(), false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteTimestamp(t uint32, i uint32) error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteTimestamp"); err != nil {
+		return err
+	}
+
+	var buf bytes.Buffer
+	buf.WriteString(`{"$timestamp":{"t":`)
+	buf.WriteString(strconv.FormatUint(uint64(t), 10))
+	buf.WriteString(`,"i":`)
+	buf.WriteString(strconv.FormatUint(uint64(i), 10))
+	buf.WriteString(`}},`)
+
+	ejvw.buf = append(ejvw.buf, buf.Bytes()...)
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteUndefined() error {
+	if err := ejvw.ensureElementValue(mode(0), "WriteUndefined"); err != nil {
+		return err
+	}
+
+	ejvw.writeExtendedSingleValue("undefined", "true", false)
+	ejvw.buf = append(ejvw.buf, ',')
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDocumentElement(key string) (ValueWriter, error) {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mDocument, mTopLevel, mCodeWithScope:
+		ejvw.buf = append(ejvw.buf, []byte(fmt.Sprintf(`"%s":`, key))...)
+		ejvw.push(mElement)
+	default:
+		return nil, ejvw.invalidTransitionErr(mElement, "WriteDocumentElement", []mode{mDocument, mTopLevel, mCodeWithScope})
+	}
+
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteDocumentEnd() error {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mDocument, mTopLevel, mCodeWithScope:
+	default:
+		return fmt.Errorf("incorrect mode to end document: %s", ejvw.stack[ejvw.frame].mode)
+	}
+
+	// close the document
+	if ejvw.buf[len(ejvw.buf)-1] == ',' {
+		ejvw.buf[len(ejvw.buf)-1] = '}'
+	} else {
+		ejvw.buf = append(ejvw.buf, '}')
+	}
+
+	switch ejvw.stack[ejvw.frame].mode {
+	case mCodeWithScope:
+		ejvw.buf = append(ejvw.buf, '}')
+		fallthrough
+	case mDocument:
+		ejvw.buf = append(ejvw.buf, ',')
+	case mTopLevel:
+		if ejvw.w != nil {
+			if _, err := ejvw.w.Write(ejvw.buf); err != nil {
+				return err
+			}
+			ejvw.buf = ejvw.buf[:0]
+		}
+	}
+
+	ejvw.pop()
+	return nil
+}
+
+func (ejvw *extJSONValueWriter) WriteArrayElement() (ValueWriter, error) {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mArray:
+		ejvw.push(mValue)
+	default:
+		return nil, ejvw.invalidTransitionErr(mValue, "WriteArrayElement", []mode{mArray})
+	}
+
+	return ejvw, nil
+}
+
+func (ejvw *extJSONValueWriter) WriteArrayEnd() error {
+	switch ejvw.stack[ejvw.frame].mode {
+	case mArray:
+		// close the array
+		if ejvw.buf[len(ejvw.buf)-1] == ',' {
+			ejvw.buf[len(ejvw.buf)-1] = ']'
+		} else {
+			ejvw.buf = append(ejvw.buf, ']')
+		}
+
+		ejvw.buf = append(ejvw.buf, ',')
+
+		ejvw.pop()
+	default:
+		return fmt.Errorf("incorrect mode to end array: %s", ejvw.stack[ejvw.frame].mode)
+	}
+
+	return nil
+}
+
+func formatDouble(f float64) string {
+	var s string
+	if math.IsInf(f, 1) {
+		s = "Infinity"
+	} else if math.IsInf(f, -1) {
+		s = "-Infinity"
+	} else if math.IsNaN(f) {
+		s = "NaN"
+	} else {
+		// Print exactly one decimal place for integers; otherwise, print as many
+		// digits as are necessary to represent the value exactly.
+		s = strconv.FormatFloat(f, 'G', -1, 64)
+		if !strings.ContainsRune(s, 'E') && !strings.ContainsRune(s, '.') {
+			s += ".0"
+		}
+	}
+
+	return s
+}
+
+var hexChars = "0123456789abcdef"
+
+func writeStringWithEscapes(s string, buf *bytes.Buffer, escapeHTML bool) {
+	buf.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
+				i++
+				continue
+			}
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				buf.WriteByte('\\')
+				buf.WriteByte(b)
+			case '\n':
+				buf.WriteByte('\\')
+				buf.WriteByte('n')
+			case '\r':
+				buf.WriteByte('\\')
+				buf.WriteByte('r')
+			case '\t':
+				buf.WriteByte('\\')
+				buf.WriteByte('t')
+			case '\b':
+				buf.WriteByte('\\')
+				buf.WriteByte('b')
+			case '\f':
+				buf.WriteByte('\\')
+				buf.WriteByte('f')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				buf.WriteString(`\u00`)
+				buf.WriteByte(hexChars[b>>4])
+				buf.WriteByte(hexChars[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				buf.WriteString(s[start:i])
+			}
+			buf.WriteString(`\u202`)
+			buf.WriteByte(hexChars[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		buf.WriteString(s[start:])
+	}
+	buf.WriteByte('"')
+}
+
+type sortableString []rune
+
+func (ss sortableString) Len() int {
+	return len(ss)
+}
+
+func (ss sortableString) Less(i, j int) bool {
+	return ss[i] < ss[j]
+}
+
+func (ss sortableString) Swap(i, j int) {
+	oldI := ss[i]
+	ss[i] = ss[j]
+	ss[j] = oldI
+}
+
+func sortStringAlphebeticAscending(s string) string {
+	ss := sortableString([]rune(s))
+	sort.Sort(ss)
+	return string([]rune(ss))
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/json_scanner.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/json_scanner.go
new file mode 100644
index 0000000..03aabf5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/json_scanner.go
@@ -0,0 +1,439 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"strings"
+	"unicode"
+)
+
+type jsonTokenType byte
+
+const (
+	jttBeginObject jsonTokenType = iota
+	jttEndObject
+	jttBeginArray
+	jttEndArray
+	jttColon
+	jttComma
+	jttInt32
+	jttInt64
+	jttDouble
+	jttString
+	jttBool
+	jttNull
+	jttEOF
+)
+
+type jsonToken struct {
+	t jsonTokenType
+	v interface{}
+	p int
+}
+
+type jsonScanner struct {
+	r           io.Reader
+	buf         []byte
+	pos         int
+	lastReadErr error
+}
+
+// nextToken returns the next JSON token if one exists. A token is a character
+// of the JSON grammar, a number, a string, or a literal.
+func (js *jsonScanner) nextToken() (*jsonToken, error) {
+	c, err := js.readNextByte()
+
+	// keep reading until a non-space is encountered (break on read error or EOF)
+	for isWhiteSpace(c) && err == nil {
+		c, err = js.readNextByte()
+	}
+
+	if err == io.EOF {
+		return &jsonToken{t: jttEOF}, nil
+	} else if err != nil {
+		return nil, err
+	}
+
+	// switch on the character
+	switch c {
+	case '{':
+		return &jsonToken{t: jttBeginObject, v: byte('{'), p: js.pos - 1}, nil
+	case '}':
+		return &jsonToken{t: jttEndObject, v: byte('}'), p: js.pos - 1}, nil
+	case '[':
+		return &jsonToken{t: jttBeginArray, v: byte('['), p: js.pos - 1}, nil
+	case ']':
+		return &jsonToken{t: jttEndArray, v: byte(']'), p: js.pos - 1}, nil
+	case ':':
+		return &jsonToken{t: jttColon, v: byte(':'), p: js.pos - 1}, nil
+	case ',':
+		return &jsonToken{t: jttComma, v: byte(','), p: js.pos - 1}, nil
+	case '"': // RFC-8259 only allows for double quotes (") not single (')
+		return js.scanString()
+	default:
+		// check if it's a number
+		if c == '-' || isDigit(c) {
+			return js.scanNumber(c)
+		} else if c == 't' || c == 'f' || c == 'n' {
+			// maybe a literal
+			return js.scanLiteral(c)
+		} else {
+			return nil, fmt.Errorf("invalid JSON input. Position: %d. Character: %c", js.pos-1, c)
+		}
+	}
+}
+
+// readNextByte attempts to read the next byte from the buffer. If the buffer
+// has been exhausted, this function calls readIntoBuf, thus refilling the
+// buffer and resetting the read position to 0
+func (js *jsonScanner) readNextByte() (byte, error) {
+	if js.pos >= len(js.buf) {
+		err := js.readIntoBuf()
+
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	b := js.buf[js.pos]
+	js.pos++
+
+	return b, nil
+}
+
+// readNNextBytes reads n bytes into dst, starting at offset
+func (js *jsonScanner) readNNextBytes(dst []byte, n, offset int) error {
+	var err error
+
+	for i := 0; i < n; i++ {
+		dst[i+offset], err = js.readNextByte()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// readIntoBuf reads up to 512 bytes from the scanner's io.Reader into the buffer
+func (js *jsonScanner) readIntoBuf() error {
+	if js.lastReadErr != nil {
+		js.buf = js.buf[:0]
+		js.pos = 0
+		return js.lastReadErr
+	}
+
+	if cap(js.buf) == 0 {
+		js.buf = make([]byte, 0, 512)
+	}
+
+	n, err := js.r.Read(js.buf[:cap(js.buf)])
+	if err != nil {
+		js.lastReadErr = err
+		if n > 0 {
+			err = nil
+		}
+	}
+	js.buf = js.buf[:n]
+	js.pos = 0
+
+	return err
+}
+
+func isWhiteSpace(c byte) bool {
+	return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+func isDigit(c byte) bool {
+	return unicode.IsDigit(rune(c))
+}
+
+func isValueTerminator(c byte) bool {
+	return c == ',' || c == '}' || c == ']' || isWhiteSpace(c)
+}
+
+// scanString reads from an opening '"' to a closing '"' and handles escaped characters
+func (js *jsonScanner) scanString() (*jsonToken, error) {
+	var b bytes.Buffer
+	var c byte
+	var err error
+
+	p := js.pos - 1
+
+	for {
+		c, err = js.readNextByte()
+		if err != nil {
+			if err == io.EOF {
+				return nil, errors.New("end of input in JSON string")
+			}
+			return nil, err
+		}
+
+		switch c {
+		case '\\':
+			c, err = js.readNextByte()
+			switch c {
+			case '"', '\\', '/', '\'':
+				b.WriteByte(c)
+			case 'b':
+				b.WriteByte('\b')
+			case 'f':
+				b.WriteByte('\f')
+			case 'n':
+				b.WriteByte('\n')
+			case 'r':
+				b.WriteByte('\r')
+			case 't':
+				b.WriteByte('\t')
+			case 'u':
+				us := make([]byte, 4)
+				err = js.readNNextBytes(us, 4, 0)
+				if err != nil {
+					return nil, fmt.Errorf("invalid unicode sequence in JSON string: %s", us)
+				}
+
+				s := fmt.Sprintf(`\u%s`, us)
+				s, err = strconv.Unquote(strings.Replace(strconv.Quote(s), `\\u`, `\u`, 1))
+				if err != nil {
+					return nil, err
+				}
+
+				b.WriteString(s)
+			default:
+				return nil, fmt.Errorf("invalid escape sequence in JSON string '\\%c'", c)
+			}
+		case '"':
+			return &jsonToken{t: jttString, v: b.String(), p: p}, nil
+		default:
+			b.WriteByte(c)
+		}
+	}
+}
+
+// scanLiteral reads an unquoted sequence of characters and determines if it is one of
+// three valid JSON literals (true, false, null); if so, it returns the appropriate
+// jsonToken; otherwise, it returns an error
+func (js *jsonScanner) scanLiteral(first byte) (*jsonToken, error) {
+	p := js.pos - 1
+
+	lit := make([]byte, 4)
+	lit[0] = first
+
+	err := js.readNNextBytes(lit, 3, 1)
+	if err != nil {
+		return nil, err
+	}
+
+	c5, err := js.readNextByte()
+
+	if bytes.Equal([]byte("true"), lit) && (isValueTerminator(c5) || err == io.EOF) {
+		js.pos = int(math.Max(0, float64(js.pos-1)))
+		return &jsonToken{t: jttBool, v: true, p: p}, nil
+	} else if bytes.Equal([]byte("null"), lit) && (isValueTerminator(c5) || err == io.EOF) {
+		js.pos = int(math.Max(0, float64(js.pos-1)))
+		return &jsonToken{t: jttNull, v: nil, p: p}, nil
+	} else if bytes.Equal([]byte("fals"), lit) {
+		if c5 == 'e' {
+			c5, err = js.readNextByte()
+
+			if isValueTerminator(c5) || err == io.EOF {
+				js.pos = int(math.Max(0, float64(js.pos-1)))
+				return &jsonToken{t: jttBool, v: false, p: p}, nil
+			}
+		}
+	}
+
+	return nil, fmt.Errorf("invalid JSON literal. Position: %d, literal: %s", p, lit)
+}
+
+type numberScanState byte
+
+const (
+	nssSawLeadingMinus numberScanState = iota
+	nssSawLeadingZero
+	nssSawIntegerDigits
+	nssSawDecimalPoint
+	nssSawFractionDigits
+	nssSawExponentLetter
+	nssSawExponentSign
+	nssSawExponentDigits
+	nssDone
+	nssInvalid
+)
+
+// scanNumber reads a JSON number (according to RFC-8259)
+func (js *jsonScanner) scanNumber(first byte) (*jsonToken, error) {
+	var b bytes.Buffer
+	var s numberScanState
+	var c byte
+	var err error
+
+	t := jttInt64 // assume it's an int64 until the type can be determined
+	start := js.pos - 1
+
+	b.WriteByte(first)
+
+	switch first {
+	case '-':
+		s = nssSawLeadingMinus
+	case '0':
+		s = nssSawLeadingZero
+	default:
+		s = nssSawIntegerDigits
+	}
+
+	for {
+		c, err = js.readNextByte()
+
+		if err != nil && err != io.EOF {
+			return nil, err
+		}
+
+		switch s {
+		case nssSawLeadingMinus:
+			switch c {
+			case '0':
+				s = nssSawLeadingZero
+				b.WriteByte(c)
+			default:
+				if isDigit(c) {
+					s = nssSawIntegerDigits
+					b.WriteByte(c)
+				} else {
+					s = nssInvalid
+				}
+			}
+		case nssSawLeadingZero:
+			switch c {
+			case '.':
+				s = nssSawDecimalPoint
+				b.WriteByte(c)
+			case 'e', 'E':
+				s = nssSawExponentLetter
+				b.WriteByte(c)
+			case '}', ']', ',':
+				s = nssDone
+			default:
+				if isWhiteSpace(c) || err == io.EOF {
+					s = nssDone
+				} else {
+					s = nssInvalid
+				}
+			}
+		case nssSawIntegerDigits:
+			switch c {
+			case '.':
+				s = nssSawDecimalPoint
+				b.WriteByte(c)
+			case 'e', 'E':
+				s = nssSawExponentLetter
+				b.WriteByte(c)
+			case '}', ']', ',':
+				s = nssDone
+			default:
+				if isWhiteSpace(c) || err == io.EOF {
+					s = nssDone
+				} else if isDigit(c) {
+					s = nssSawIntegerDigits
+					b.WriteByte(c)
+				} else {
+					s = nssInvalid
+				}
+			}
+		case nssSawDecimalPoint:
+			t = jttDouble
+			if isDigit(c) {
+				s = nssSawFractionDigits
+				b.WriteByte(c)
+			} else {
+				s = nssInvalid
+			}
+		case nssSawFractionDigits:
+			switch c {
+			case 'e', 'E':
+				s = nssSawExponentLetter
+				b.WriteByte(c)
+			case '}', ']', ',':
+				s = nssDone
+			default:
+				if isWhiteSpace(c) || err == io.EOF {
+					s = nssDone
+				} else if isDigit(c) {
+					s = nssSawFractionDigits
+					b.WriteByte(c)
+				} else {
+					s = nssInvalid
+				}
+			}
+		case nssSawExponentLetter:
+			t = jttDouble
+			switch c {
+			case '+', '-':
+				s = nssSawExponentSign
+				b.WriteByte(c)
+			default:
+				if isDigit(c) {
+					s = nssSawExponentDigits
+					b.WriteByte(c)
+				} else {
+					s = nssInvalid
+				}
+			}
+		case nssSawExponentSign:
+			if isDigit(c) {
+				s = nssSawExponentDigits
+				b.WriteByte(c)
+			} else {
+				s = nssInvalid
+			}
+		case nssSawExponentDigits:
+			switch c {
+			case '}', ']', ',':
+				s = nssDone
+			default:
+				if isWhiteSpace(c) || err == io.EOF {
+					s = nssDone
+				} else if isDigit(c) {
+					s = nssSawExponentDigits
+					b.WriteByte(c)
+				} else {
+					s = nssInvalid
+				}
+			}
+		}
+
+		switch s {
+		case nssInvalid:
+			return nil, fmt.Errorf("invalid JSON number. Position: %d", start)
+		case nssDone:
+			js.pos = int(math.Max(0, float64(js.pos-1)))
+			if t != jttDouble {
+				v, err := strconv.ParseInt(b.String(), 10, 64)
+				if err == nil {
+					if v < math.MinInt32 || v > math.MaxInt32 {
+						return &jsonToken{t: jttInt64, v: v, p: start}, nil
+					}
+
+					return &jsonToken{t: jttInt32, v: int32(v), p: start}, nil
+				}
+			}
+
+			v, err := strconv.ParseFloat(b.String(), 64)
+			if err != nil {
+				return nil, err
+			}
+
+			return &jsonToken{t: jttDouble, v: v, p: start}, nil
+		}
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/mode.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/mode.go
new file mode 100644
index 0000000..617b5e2
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/mode.go
@@ -0,0 +1,108 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"fmt"
+)
+
+type mode int
+
+const (
+	_ mode = iota
+	mTopLevel
+	mDocument
+	mArray
+	mValue
+	mElement
+	mCodeWithScope
+	mSpacer
+)
+
+func (m mode) String() string {
+	var str string
+
+	switch m {
+	case mTopLevel:
+		str = "TopLevel"
+	case mDocument:
+		str = "DocumentMode"
+	case mArray:
+		str = "ArrayMode"
+	case mValue:
+		str = "ValueMode"
+	case mElement:
+		str = "ElementMode"
+	case mCodeWithScope:
+		str = "CodeWithScopeMode"
+	case mSpacer:
+		str = "CodeWithScopeSpacerFrame"
+	default:
+		str = "UnknownMode"
+	}
+
+	return str
+}
+
+func (m mode) TypeString() string {
+	var str string
+
+	switch m {
+	case mTopLevel:
+		str = "TopLevel"
+	case mDocument:
+		str = "Document"
+	case mArray:
+		str = "Array"
+	case mValue:
+		str = "Value"
+	case mElement:
+		str = "Element"
+	case mCodeWithScope:
+		str = "CodeWithScope"
+	case mSpacer:
+		str = "CodeWithScopeSpacer"
+	default:
+		str = "Unknown"
+	}
+
+	return str
+}
+
+// TransitionError is an error returned when an invalid transition of a
+// ValueReader or ValueWriter state machine is attempted.
+// If read is false, the error is for writing.
+type TransitionError struct {
+	name        string
+	parent      mode
+	current     mode
+	destination mode
+	modes       []mode
+	action      string
+}
+
+func (te TransitionError) Error() string {
+	errString := fmt.Sprintf("%s can only %s", te.name, te.action)
+	if te.destination != mode(0) {
+		errString = fmt.Sprintf("%s a %s", errString, te.destination.TypeString())
+	}
+	errString = fmt.Sprintf("%s while positioned on a", errString)
+	for ind, m := range te.modes {
+		if ind != 0 && len(te.modes) > 2 {
+			errString = fmt.Sprintf("%s,", errString)
+		}
+		if ind == len(te.modes)-1 && len(te.modes) > 1 {
+			errString = fmt.Sprintf("%s or", errString)
+		}
+		errString = fmt.Sprintf("%s %s", errString, m.TypeString())
+	}
+	errString = fmt.Sprintf("%s but is positioned on a %s", errString, te.current.TypeString())
+	if te.parent != mode(0) {
+		errString = fmt.Sprintf("%s with parent %s", errString, te.parent.TypeString())
+	}
+	return errString
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/reader.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/reader.go
new file mode 100644
index 0000000..ecae739
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/reader.go
@@ -0,0 +1,63 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// ArrayReader is implemented by types that allow reading values from a BSON
+// array.
+type ArrayReader interface {
+	ReadValue() (ValueReader, error)
+}
+
+// DocumentReader is implemented by types that allow reading elements from a
+// BSON document.
+type DocumentReader interface {
+	ReadElement() (string, ValueReader, error)
+}
+
+// ValueReader is a generic interface used to read values from BSON. This type
+// is implemented by several types with different underlying representations of
+// BSON, such as a bson.Document, raw BSON bytes, or extended JSON.
+type ValueReader interface {
+	Type() bsontype.Type
+	Skip() error
+
+	ReadArray() (ArrayReader, error)
+	ReadBinary() (b []byte, btype byte, err error)
+	ReadBoolean() (bool, error)
+	ReadDocument() (DocumentReader, error)
+	ReadCodeWithScope() (code string, dr DocumentReader, err error)
+	ReadDBPointer() (ns string, oid primitive.ObjectID, err error)
+	ReadDateTime() (int64, error)
+	ReadDecimal128() (primitive.Decimal128, error)
+	ReadDouble() (float64, error)
+	ReadInt32() (int32, error)
+	ReadInt64() (int64, error)
+	ReadJavascript() (code string, err error)
+	ReadMaxKey() error
+	ReadMinKey() error
+	ReadNull() error
+	ReadObjectID() (primitive.ObjectID, error)
+	ReadRegex() (pattern, options string, err error)
+	ReadString() (string, error)
+	ReadSymbol() (symbol string, err error)
+	ReadTimestamp() (t, i uint32, err error)
+	ReadUndefined() error
+}
+
+// BytesReader is a generic interface used to read BSON bytes from a
+// ValueReader. This imterface is meant to be a superset of ValueReader, so that
+// types that implement ValueReader may also implement this interface.
+//
+// The bytes of the value will be appended to dst.
+type BytesReader interface {
+	ReadValueBytes(dst []byte) (bsontype.Type, []byte, error)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/value_reader.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/value_reader.go
new file mode 100644
index 0000000..5fc0d8a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/value_reader.go
@@ -0,0 +1,882 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"sync"
+	"unicode"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+var _ ValueReader = (*valueReader)(nil)
+
+var vrPool = sync.Pool{
+	New: func() interface{} {
+		return new(valueReader)
+	},
+}
+
+// BSONValueReaderPool is a pool for ValueReaders that read BSON.
+type BSONValueReaderPool struct {
+	pool sync.Pool
+}
+
+// NewBSONValueReaderPool instantiates a new BSONValueReaderPool.
+func NewBSONValueReaderPool() *BSONValueReaderPool {
+	return &BSONValueReaderPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(valueReader)
+			},
+		},
+	}
+}
+
+// Get retrieves a ValueReader from the pool and uses src as the underlying BSON.
+func (bvrp *BSONValueReaderPool) Get(src []byte) ValueReader {
+	vr := bvrp.pool.Get().(*valueReader)
+	vr.reset(src)
+	return vr
+}
+
+// Put inserts a ValueReader into the pool. If the ValueReader is not a BSON ValueReader nothing
+// is inserted into the pool and ok will be false.
+func (bvrp *BSONValueReaderPool) Put(vr ValueReader) (ok bool) {
+	bvr, ok := vr.(*valueReader)
+	if !ok {
+		return false
+	}
+
+	bvr.reset(nil)
+	bvrp.pool.Put(bvr)
+	return true
+}
+
+// ErrEOA is the error returned when the end of a BSON array has been reached.
+var ErrEOA = errors.New("end of array")
+
+// ErrEOD is the error returned when the end of a BSON document has been reached.
+var ErrEOD = errors.New("end of document")
+
+type vrState struct {
+	mode  mode
+	vType bsontype.Type
+	end   int64
+}
+
+// valueReader is for reading BSON values.
+type valueReader struct {
+	offset int64
+	d      []byte
+
+	stack []vrState
+	frame int64
+}
+
+// NewBSONDocumentReader returns a ValueReader using b for the underlying BSON
+// representation. Parameter b must be a BSON Document.
+//
+// TODO(skriptble): There's a lack of symmetry between the reader and writer, since the reader takes
+// a []byte while the writer takes an io.Writer. We should have two versions of each, one that takes
+// a []byte and one that takes an io.Reader or io.Writer. The []byte version will need to return a
+// thing that can return the finished []byte since it might be reallocated when appended to.
+func NewBSONDocumentReader(b []byte) ValueReader {
+	return newValueReader(b)
+}
+
+// NewBSONValueReader returns a ValueReader that starts in the Value mode instead of in top
+// level document mode. This enables the creation of a ValueReader for a single BSON value.
+func NewBSONValueReader(t bsontype.Type, val []byte) ValueReader {
+	stack := make([]vrState, 1, 5)
+	stack[0] = vrState{
+		mode:  mValue,
+		vType: t,
+	}
+	return &valueReader{
+		d:     val,
+		stack: stack,
+	}
+}
+
+func newValueReader(b []byte) *valueReader {
+	stack := make([]vrState, 1, 5)
+	stack[0] = vrState{
+		mode: mTopLevel,
+	}
+	return &valueReader{
+		d:     b,
+		stack: stack,
+	}
+}
+
+func (vr *valueReader) reset(b []byte) {
+	if vr.stack == nil {
+		vr.stack = make([]vrState, 1, 5)
+	}
+	vr.stack = vr.stack[:1]
+	vr.stack[0] = vrState{mode: mTopLevel}
+	vr.d = b
+	vr.offset = 0
+	vr.frame = 0
+}
+
+func (vr *valueReader) advanceFrame() {
+	if vr.frame+1 >= int64(len(vr.stack)) { // We need to grow the stack
+		length := len(vr.stack)
+		if length+1 >= cap(vr.stack) {
+			// double it
+			buf := make([]vrState, 2*cap(vr.stack)+1)
+			copy(buf, vr.stack)
+			vr.stack = buf
+		}
+		vr.stack = vr.stack[:length+1]
+	}
+	vr.frame++
+
+	// Clean the stack
+	vr.stack[vr.frame].mode = 0
+	vr.stack[vr.frame].vType = 0
+	vr.stack[vr.frame].end = 0
+}
+
+func (vr *valueReader) pushDocument() error {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mDocument
+
+	size, err := vr.readLength()
+	if err != nil {
+		return err
+	}
+	vr.stack[vr.frame].end = int64(size) + vr.offset - 4
+
+	return nil
+}
+
+func (vr *valueReader) pushArray() error {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mArray
+
+	size, err := vr.readLength()
+	if err != nil {
+		return err
+	}
+	vr.stack[vr.frame].end = int64(size) + vr.offset - 4
+
+	return nil
+}
+
+func (vr *valueReader) pushElement(t bsontype.Type) {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mElement
+	vr.stack[vr.frame].vType = t
+}
+
+func (vr *valueReader) pushValue(t bsontype.Type) {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mValue
+	vr.stack[vr.frame].vType = t
+}
+
+func (vr *valueReader) pushCodeWithScope() (int64, error) {
+	vr.advanceFrame()
+
+	vr.stack[vr.frame].mode = mCodeWithScope
+
+	size, err := vr.readLength()
+	if err != nil {
+		return 0, err
+	}
+	vr.stack[vr.frame].end = int64(size) + vr.offset - 4
+
+	return int64(size), nil
+}
+
+func (vr *valueReader) pop() {
+	switch vr.stack[vr.frame].mode {
+	case mElement, mValue:
+		vr.frame--
+	case mDocument, mArray, mCodeWithScope:
+		vr.frame -= 2 // we pop twice to jump over the vrElement: vrDocument -> vrElement -> vrDocument/TopLevel/etc...
+	}
+}
+
+func (vr *valueReader) invalidTransitionErr(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     vr.stack[vr.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "read",
+	}
+	if vr.frame != 0 {
+		te.parent = vr.stack[vr.frame-1].mode
+	}
+	return te
+}
+
+func (vr *valueReader) typeError(t bsontype.Type) error {
+	return fmt.Errorf("positioned on %s, but attempted to read %s", vr.stack[vr.frame].vType, t)
+}
+
+func (vr *valueReader) invalidDocumentLengthError() error {
+	return fmt.Errorf("document is invalid, end byte is at %d, but null byte found at %d", vr.stack[vr.frame].end, vr.offset)
+}
+
+func (vr *valueReader) ensureElementValue(t bsontype.Type, destination mode, callerName string) error {
+	switch vr.stack[vr.frame].mode {
+	case mElement, mValue:
+		if vr.stack[vr.frame].vType != t {
+			return vr.typeError(t)
+		}
+	default:
+		return vr.invalidTransitionErr(destination, callerName, []mode{mElement, mValue})
+	}
+
+	return nil
+}
+
+func (vr *valueReader) Type() bsontype.Type {
+	return vr.stack[vr.frame].vType
+}
+
+func (vr *valueReader) nextElementLength() (int32, error) {
+	var length int32
+	var err error
+	switch vr.stack[vr.frame].vType {
+	case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope:
+		length, err = vr.peekLength()
+	case bsontype.Binary:
+		length, err = vr.peekLength()
+		length += 4 + 1 // binary length + subtype byte
+	case bsontype.Boolean:
+		length = 1
+	case bsontype.DBPointer:
+		length, err = vr.peekLength()
+		length += 4 + 12 // string length + ObjectID length
+	case bsontype.DateTime, bsontype.Double, bsontype.Int64, bsontype.Timestamp:
+		length = 8
+	case bsontype.Decimal128:
+		length = 16
+	case bsontype.Int32:
+		length = 4
+	case bsontype.JavaScript, bsontype.String, bsontype.Symbol:
+		length, err = vr.peekLength()
+		length += 4
+	case bsontype.MaxKey, bsontype.MinKey, bsontype.Null, bsontype.Undefined:
+		length = 0
+	case bsontype.ObjectID:
+		length = 12
+	case bsontype.Regex:
+		regex := bytes.IndexByte(vr.d[vr.offset:], 0x00)
+		if regex < 0 {
+			err = io.EOF
+			break
+		}
+		pattern := bytes.IndexByte(vr.d[regex+1:], 0x00)
+		if pattern < 0 {
+			err = io.EOF
+			break
+		}
+		length = int32(int64(regex) + 1 + int64(pattern) + 1 - vr.offset)
+	default:
+		return 0, fmt.Errorf("attempted to read bytes of unknown BSON type %v", vr.stack[vr.frame].vType)
+	}
+
+	return length, err
+}
+
+func (vr *valueReader) ReadValueBytes(dst []byte) (bsontype.Type, []byte, error) {
+	switch vr.stack[vr.frame].mode {
+	case mTopLevel:
+		length, err := vr.peekLength()
+		if err != nil {
+			return bsontype.Type(0), nil, err
+		}
+		dst, err = vr.appendBytes(dst, length)
+		if err != nil {
+			return bsontype.Type(0), nil, err
+		}
+		return bsontype.Type(0), dst, nil
+	case mElement, mValue:
+		length, err := vr.nextElementLength()
+		if err != nil {
+			return bsontype.Type(0), dst, err
+		}
+
+		dst, err = vr.appendBytes(dst, length)
+		t := vr.stack[vr.frame].vType
+		vr.pop()
+		return t, dst, err
+	default:
+		return bsontype.Type(0), nil, vr.invalidTransitionErr(0, "ReadValueBytes", []mode{mElement, mValue})
+	}
+}
+
+func (vr *valueReader) Skip() error {
+	switch vr.stack[vr.frame].mode {
+	case mElement, mValue:
+	default:
+		return vr.invalidTransitionErr(0, "Skip", []mode{mElement, mValue})
+	}
+
+	length, err := vr.nextElementLength()
+	if err != nil {
+		return err
+	}
+
+	err = vr.skipBytes(length)
+	vr.pop()
+	return err
+}
+
+func (vr *valueReader) ReadArray() (ArrayReader, error) {
+	if err := vr.ensureElementValue(bsontype.Array, mArray, "ReadArray"); err != nil {
+		return nil, err
+	}
+
+	err := vr.pushArray()
+	if err != nil {
+		return nil, err
+	}
+
+	return vr, nil
+}
+
+func (vr *valueReader) ReadBinary() (b []byte, btype byte, err error) {
+	if err := vr.ensureElementValue(bsontype.Binary, 0, "ReadBinary"); err != nil {
+		return nil, 0, err
+	}
+
+	length, err := vr.readLength()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	btype, err = vr.readByte()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	if btype == 0x02 {
+		length, err = vr.readLength()
+		if err != nil {
+			return nil, 0, err
+		}
+	}
+
+	b, err = vr.readBytes(length)
+	if err != nil {
+		return nil, 0, err
+	}
+
+	vr.pop()
+	return b, btype, nil
+}
+
+func (vr *valueReader) ReadBoolean() (bool, error) {
+	if err := vr.ensureElementValue(bsontype.Boolean, 0, "ReadBoolean"); err != nil {
+		return false, err
+	}
+
+	b, err := vr.readByte()
+	if err != nil {
+		return false, err
+	}
+
+	if b > 1 {
+		return false, fmt.Errorf("invalid byte for boolean, %b", b)
+	}
+
+	vr.pop()
+	return b == 1, nil
+}
+
+func (vr *valueReader) ReadDocument() (DocumentReader, error) {
+	switch vr.stack[vr.frame].mode {
+	case mTopLevel:
+		// read size
+		size, err := vr.readLength()
+		if err != nil {
+			return nil, err
+		}
+		if int(size) != len(vr.d) {
+			return nil, fmt.Errorf("invalid document length")
+		}
+		vr.stack[vr.frame].end = int64(size) + vr.offset - 4
+		return vr, nil
+	case mElement, mValue:
+		if vr.stack[vr.frame].vType != bsontype.EmbeddedDocument {
+			return nil, vr.typeError(bsontype.EmbeddedDocument)
+		}
+	default:
+		return nil, vr.invalidTransitionErr(mDocument, "ReadDocument", []mode{mTopLevel, mElement, mValue})
+	}
+
+	err := vr.pushDocument()
+	if err != nil {
+		return nil, err
+	}
+
+	return vr, nil
+}
+
+func (vr *valueReader) ReadCodeWithScope() (code string, dr DocumentReader, err error) {
+	if err := vr.ensureElementValue(bsontype.CodeWithScope, 0, "ReadCodeWithScope"); err != nil {
+		return "", nil, err
+	}
+
+	totalLength, err := vr.readLength()
+	if err != nil {
+		return "", nil, err
+	}
+	strLength, err := vr.readLength()
+	if err != nil {
+		return "", nil, err
+	}
+	strBytes, err := vr.readBytes(strLength)
+	if err != nil {
+		return "", nil, err
+	}
+	code = string(strBytes[:len(strBytes)-1])
+
+	size, err := vr.pushCodeWithScope()
+	if err != nil {
+		return "", nil, err
+	}
+
+	// The total length should equal:
+	// 4 (total length) + strLength + 4 (the length of str itself) + (document length)
+	componentsLength := int64(4+strLength+4) + size
+	if int64(totalLength) != componentsLength {
+		return "", nil, fmt.Errorf(
+			"length of CodeWithScope does not match lengths of components; total: %d; components: %d",
+			totalLength, componentsLength,
+		)
+	}
+	return code, vr, nil
+}
+
+func (vr *valueReader) ReadDBPointer() (ns string, oid primitive.ObjectID, err error) {
+	if err := vr.ensureElementValue(bsontype.DBPointer, 0, "ReadDBPointer"); err != nil {
+		return "", oid, err
+	}
+
+	ns, err = vr.readString()
+	if err != nil {
+		return "", oid, err
+	}
+
+	oidbytes, err := vr.readBytes(12)
+	if err != nil {
+		return "", oid, err
+	}
+
+	copy(oid[:], oidbytes)
+
+	vr.pop()
+	return ns, oid, nil
+}
+
+func (vr *valueReader) ReadDateTime() (int64, error) {
+	if err := vr.ensureElementValue(bsontype.DateTime, 0, "ReadDateTime"); err != nil {
+		return 0, err
+	}
+
+	i, err := vr.readi64()
+	if err != nil {
+		return 0, err
+	}
+
+	vr.pop()
+	return i, nil
+}
+
+func (vr *valueReader) ReadDecimal128() (primitive.Decimal128, error) {
+	if err := vr.ensureElementValue(bsontype.Decimal128, 0, "ReadDecimal128"); err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	b, err := vr.readBytes(16)
+	if err != nil {
+		return primitive.Decimal128{}, err
+	}
+
+	l := binary.LittleEndian.Uint64(b[0:8])
+	h := binary.LittleEndian.Uint64(b[8:16])
+
+	vr.pop()
+	return primitive.NewDecimal128(h, l), nil
+}
+
+func (vr *valueReader) ReadDouble() (float64, error) {
+	if err := vr.ensureElementValue(bsontype.Double, 0, "ReadDouble"); err != nil {
+		return 0, err
+	}
+
+	u, err := vr.readu64()
+	if err != nil {
+		return 0, err
+	}
+
+	vr.pop()
+	return math.Float64frombits(u), nil
+}
+
+func (vr *valueReader) ReadInt32() (int32, error) {
+	if err := vr.ensureElementValue(bsontype.Int32, 0, "ReadInt32"); err != nil {
+		return 0, err
+	}
+
+	vr.pop()
+	return vr.readi32()
+}
+
+func (vr *valueReader) ReadInt64() (int64, error) {
+	if err := vr.ensureElementValue(bsontype.Int64, 0, "ReadInt64"); err != nil {
+		return 0, err
+	}
+
+	vr.pop()
+	return vr.readi64()
+}
+
+func (vr *valueReader) ReadJavascript() (code string, err error) {
+	if err := vr.ensureElementValue(bsontype.JavaScript, 0, "ReadJavascript"); err != nil {
+		return "", err
+	}
+
+	vr.pop()
+	return vr.readString()
+}
+
+func (vr *valueReader) ReadMaxKey() error {
+	if err := vr.ensureElementValue(bsontype.MaxKey, 0, "ReadMaxKey"); err != nil {
+		return err
+	}
+
+	vr.pop()
+	return nil
+}
+
+func (vr *valueReader) ReadMinKey() error {
+	if err := vr.ensureElementValue(bsontype.MinKey, 0, "ReadMinKey"); err != nil {
+		return err
+	}
+
+	vr.pop()
+	return nil
+}
+
+func (vr *valueReader) ReadNull() error {
+	if err := vr.ensureElementValue(bsontype.Null, 0, "ReadNull"); err != nil {
+		return err
+	}
+
+	vr.pop()
+	return nil
+}
+
+func (vr *valueReader) ReadObjectID() (primitive.ObjectID, error) {
+	if err := vr.ensureElementValue(bsontype.ObjectID, 0, "ReadObjectID"); err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	oidbytes, err := vr.readBytes(12)
+	if err != nil {
+		return primitive.ObjectID{}, err
+	}
+
+	var oid primitive.ObjectID
+	copy(oid[:], oidbytes)
+
+	vr.pop()
+	return oid, nil
+}
+
+func (vr *valueReader) ReadRegex() (string, string, error) {
+	if err := vr.ensureElementValue(bsontype.Regex, 0, "ReadRegex"); err != nil {
+		return "", "", err
+	}
+
+	pattern, err := vr.readCString()
+	if err != nil {
+		return "", "", err
+	}
+
+	options, err := vr.readCString()
+	if err != nil {
+		return "", "", err
+	}
+
+	vr.pop()
+	return pattern, options, nil
+}
+
+func (vr *valueReader) ReadString() (string, error) {
+	if err := vr.ensureElementValue(bsontype.String, 0, "ReadString"); err != nil {
+		return "", err
+	}
+
+	vr.pop()
+	return vr.readString()
+}
+
+func (vr *valueReader) ReadSymbol() (symbol string, err error) {
+	if err := vr.ensureElementValue(bsontype.Symbol, 0, "ReadSymbol"); err != nil {
+		return "", err
+	}
+
+	vr.pop()
+	return vr.readString()
+}
+
+func (vr *valueReader) ReadTimestamp() (t uint32, i uint32, err error) {
+	if err := vr.ensureElementValue(bsontype.Timestamp, 0, "ReadTimestamp"); err != nil {
+		return 0, 0, err
+	}
+
+	i, err = vr.readu32()
+	if err != nil {
+		return 0, 0, err
+	}
+
+	t, err = vr.readu32()
+	if err != nil {
+		return 0, 0, err
+	}
+
+	vr.pop()
+	return t, i, nil
+}
+
+func (vr *valueReader) ReadUndefined() error {
+	if err := vr.ensureElementValue(bsontype.Undefined, 0, "ReadUndefined"); err != nil {
+		return err
+	}
+
+	vr.pop()
+	return nil
+}
+
+func (vr *valueReader) ReadElement() (string, ValueReader, error) {
+	switch vr.stack[vr.frame].mode {
+	case mTopLevel, mDocument, mCodeWithScope:
+	default:
+		return "", nil, vr.invalidTransitionErr(mElement, "ReadElement", []mode{mTopLevel, mDocument, mCodeWithScope})
+	}
+
+	t, err := vr.readByte()
+	if err != nil {
+		return "", nil, err
+	}
+
+	if t == 0 {
+		if vr.offset != vr.stack[vr.frame].end {
+			return "", nil, vr.invalidDocumentLengthError()
+		}
+
+		vr.pop()
+		return "", nil, ErrEOD
+	}
+
+	name, err := vr.readCString()
+	if err != nil {
+		return "", nil, err
+	}
+
+	vr.pushElement(bsontype.Type(t))
+	return name, vr, nil
+}
+
+func (vr *valueReader) ReadValue() (ValueReader, error) {
+	switch vr.stack[vr.frame].mode {
+	case mArray:
+	default:
+		return nil, vr.invalidTransitionErr(mValue, "ReadValue", []mode{mArray})
+	}
+
+	t, err := vr.readByte()
+	if err != nil {
+		return nil, err
+	}
+
+	if t == 0 {
+		if vr.offset != vr.stack[vr.frame].end {
+			return nil, vr.invalidDocumentLengthError()
+		}
+
+		vr.pop()
+		return nil, ErrEOA
+	}
+
+	_, err = vr.readCString()
+	if err != nil {
+		return nil, err
+	}
+
+	vr.pushValue(bsontype.Type(t))
+	return vr, nil
+}
+
+func (vr *valueReader) readBytes(length int32) ([]byte, error) {
+	if length < 0 {
+		return nil, fmt.Errorf("invalid length: %d", length)
+	}
+
+	if vr.offset+int64(length) > int64(len(vr.d)) {
+		return nil, io.EOF
+	}
+
+	start := vr.offset
+	vr.offset += int64(length)
+	return vr.d[start : start+int64(length)], nil
+}
+
+func (vr *valueReader) appendBytes(dst []byte, length int32) ([]byte, error) {
+	if vr.offset+int64(length) > int64(len(vr.d)) {
+		return nil, io.EOF
+	}
+
+	start := vr.offset
+	vr.offset += int64(length)
+	return append(dst, vr.d[start:start+int64(length)]...), nil
+}
+
+func (vr *valueReader) skipBytes(length int32) error {
+	if vr.offset+int64(length) > int64(len(vr.d)) {
+		return io.EOF
+	}
+
+	vr.offset += int64(length)
+	return nil
+}
+
+func (vr *valueReader) readByte() (byte, error) {
+	if vr.offset+1 > int64(len(vr.d)) {
+		return 0x0, io.EOF
+	}
+
+	vr.offset++
+	return vr.d[vr.offset-1], nil
+}
+
+func (vr *valueReader) readCString() (string, error) {
+	idx := bytes.IndexByte(vr.d[vr.offset:], 0x00)
+	if idx < 0 {
+		return "", io.EOF
+	}
+	start := vr.offset
+	// idx does not include the null byte
+	vr.offset += int64(idx) + 1
+	return string(vr.d[start : start+int64(idx)]), nil
+}
+
+func (vr *valueReader) skipCString() error {
+	idx := bytes.IndexByte(vr.d[vr.offset:], 0x00)
+	if idx < 0 {
+		return io.EOF
+	}
+	// idx does not include the null byte
+	vr.offset += int64(idx) + 1
+	return nil
+}
+
+func (vr *valueReader) readString() (string, error) {
+	length, err := vr.readLength()
+	if err != nil {
+		return "", err
+	}
+
+	if int64(length)+vr.offset > int64(len(vr.d)) {
+		return "", io.EOF
+	}
+
+	if length <= 0 {
+		return "", fmt.Errorf("invalid string length: %d", length)
+	}
+
+	if vr.d[vr.offset+int64(length)-1] != 0x00 {
+		return "", fmt.Errorf("string does not end with null byte, but with %v", vr.d[vr.offset+int64(length)-1])
+	}
+
+	start := vr.offset
+	vr.offset += int64(length)
+
+	if length == 2 {
+		asciiByte := vr.d[start]
+		if asciiByte > unicode.MaxASCII {
+			return "", fmt.Errorf("invalid ascii byte")
+		}
+	}
+
+	return string(vr.d[start : start+int64(length)-1]), nil
+}
+
+func (vr *valueReader) peekLength() (int32, error) {
+	if vr.offset+4 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil
+}
+
+func (vr *valueReader) readLength() (int32, error) { return vr.readi32() }
+
+func (vr *valueReader) readi32() (int32, error) {
+	if vr.offset+4 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	vr.offset += 4
+	return (int32(vr.d[idx]) | int32(vr.d[idx+1])<<8 | int32(vr.d[idx+2])<<16 | int32(vr.d[idx+3])<<24), nil
+}
+
+func (vr *valueReader) readu32() (uint32, error) {
+	if vr.offset+4 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	vr.offset += 4
+	return (uint32(vr.d[idx]) | uint32(vr.d[idx+1])<<8 | uint32(vr.d[idx+2])<<16 | uint32(vr.d[idx+3])<<24), nil
+}
+
+func (vr *valueReader) readi64() (int64, error) {
+	if vr.offset+8 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	vr.offset += 8
+	return int64(vr.d[idx]) | int64(vr.d[idx+1])<<8 | int64(vr.d[idx+2])<<16 | int64(vr.d[idx+3])<<24 |
+		int64(vr.d[idx+4])<<32 | int64(vr.d[idx+5])<<40 | int64(vr.d[idx+6])<<48 | int64(vr.d[idx+7])<<56, nil
+}
+
+func (vr *valueReader) readu64() (uint64, error) {
+	if vr.offset+8 > int64(len(vr.d)) {
+		return 0, io.EOF
+	}
+
+	idx := vr.offset
+	vr.offset += 8
+	return uint64(vr.d[idx]) | uint64(vr.d[idx+1])<<8 | uint64(vr.d[idx+2])<<16 | uint64(vr.d[idx+3])<<24 |
+		uint64(vr.d[idx+4])<<32 | uint64(vr.d[idx+5])<<40 | uint64(vr.d[idx+6])<<48 | uint64(vr.d[idx+7])<<56, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/value_writer.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/value_writer.go
new file mode 100644
index 0000000..2400bac
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/value_writer.go
@@ -0,0 +1,589 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+var _ ValueWriter = (*valueWriter)(nil)
+
+var vwPool = sync.Pool{
+	New: func() interface{} {
+		return new(valueWriter)
+	},
+}
+
+// BSONValueWriterPool is a pool for BSON ValueWriters.
+type BSONValueWriterPool struct {
+	pool sync.Pool
+}
+
+// NewBSONValueWriterPool creates a new pool for ValueWriter instances that write to BSON.
+func NewBSONValueWriterPool() *BSONValueWriterPool {
+	return &BSONValueWriterPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return new(valueWriter)
+			},
+		},
+	}
+}
+
+// Get retrieves a BSON ValueWriter from the pool and resets it to use w as the destination.
+func (bvwp *BSONValueWriterPool) Get(w io.Writer) ValueWriter {
+	vw := bvwp.pool.Get().(*valueWriter)
+	if writer, ok := w.(*SliceWriter); ok {
+		vw.reset(*writer)
+		vw.w = writer
+		return vw
+	}
+	vw.buf = vw.buf[:0]
+	vw.w = w
+	return vw
+}
+
+// Put inserts a ValueWriter into the pool. If the ValueWriter is not a BSON ValueWriter, nothing
+// happens and ok will be false.
+func (bvwp *BSONValueWriterPool) Put(vw ValueWriter) (ok bool) {
+	bvw, ok := vw.(*valueWriter)
+	if !ok {
+		return false
+	}
+
+	if _, ok := bvw.w.(*SliceWriter); ok {
+		bvw.buf = nil
+	}
+	bvw.w = nil
+
+	bvwp.pool.Put(bvw)
+	return true
+}
+
+// This is here so that during testing we can change it and not require
+// allocating a 4GB slice.
+var maxSize = math.MaxInt32
+
+var errNilWriter = errors.New("cannot create a ValueWriter from a nil io.Writer")
+
+type errMaxDocumentSizeExceeded struct {
+	size int64
+}
+
+func (mdse errMaxDocumentSizeExceeded) Error() string {
+	return fmt.Sprintf("document size (%d) is larger than the max int32", mdse.size)
+}
+
+type vwMode int
+
+const (
+	_ vwMode = iota
+	vwTopLevel
+	vwDocument
+	vwArray
+	vwValue
+	vwElement
+	vwCodeWithScope
+)
+
+func (vm vwMode) String() string {
+	var str string
+
+	switch vm {
+	case vwTopLevel:
+		str = "TopLevel"
+	case vwDocument:
+		str = "DocumentMode"
+	case vwArray:
+		str = "ArrayMode"
+	case vwValue:
+		str = "ValueMode"
+	case vwElement:
+		str = "ElementMode"
+	case vwCodeWithScope:
+		str = "CodeWithScopeMode"
+	default:
+		str = "UnknownMode"
+	}
+
+	return str
+}
+
+type vwState struct {
+	mode   mode
+	key    string
+	arrkey int
+	start  int32
+}
+
+type valueWriter struct {
+	w   io.Writer
+	buf []byte
+
+	stack []vwState
+	frame int64
+}
+
+func (vw *valueWriter) advanceFrame() {
+	if vw.frame+1 >= int64(len(vw.stack)) { // We need to grow the stack
+		length := len(vw.stack)
+		if length+1 >= cap(vw.stack) {
+			// double it
+			buf := make([]vwState, 2*cap(vw.stack)+1)
+			copy(buf, vw.stack)
+			vw.stack = buf
+		}
+		vw.stack = vw.stack[:length+1]
+	}
+	vw.frame++
+}
+
+func (vw *valueWriter) push(m mode) {
+	vw.advanceFrame()
+
+	// Clean the stack
+	vw.stack[vw.frame].mode = m
+	vw.stack[vw.frame].key = ""
+	vw.stack[vw.frame].arrkey = 0
+	vw.stack[vw.frame].start = 0
+
+	vw.stack[vw.frame].mode = m
+	switch m {
+	case mDocument, mArray, mCodeWithScope:
+		vw.reserveLength()
+	}
+}
+
+func (vw *valueWriter) reserveLength() {
+	vw.stack[vw.frame].start = int32(len(vw.buf))
+	vw.buf = append(vw.buf, 0x00, 0x00, 0x00, 0x00)
+}
+
+func (vw *valueWriter) pop() {
+	switch vw.stack[vw.frame].mode {
+	case mElement, mValue:
+		vw.frame--
+	case mDocument, mArray, mCodeWithScope:
+		vw.frame -= 2 // we pop twice to jump over the mElement: mDocument -> mElement -> mDocument/mTopLevel/etc...
+	}
+}
+
+// NewBSONValueWriter creates a ValueWriter that writes BSON to w.
+//
+// This ValueWriter will only write entire documents to the io.Writer and it
+// will buffer the document as it is built.
+func NewBSONValueWriter(w io.Writer) (ValueWriter, error) {
+	if w == nil {
+		return nil, errNilWriter
+	}
+	return newValueWriter(w), nil
+}
+
+func newValueWriter(w io.Writer) *valueWriter {
+	vw := new(valueWriter)
+	stack := make([]vwState, 1, 5)
+	stack[0] = vwState{mode: mTopLevel}
+	vw.w = w
+	vw.stack = stack
+
+	return vw
+}
+
+func newValueWriterFromSlice(buf []byte) *valueWriter {
+	vw := new(valueWriter)
+	stack := make([]vwState, 1, 5)
+	stack[0] = vwState{mode: mTopLevel}
+	vw.stack = stack
+	vw.buf = buf
+
+	return vw
+}
+
+func (vw *valueWriter) reset(buf []byte) {
+	if vw.stack == nil {
+		vw.stack = make([]vwState, 1, 5)
+	}
+	vw.stack = vw.stack[:1]
+	vw.stack[0] = vwState{mode: mTopLevel}
+	vw.buf = buf
+	vw.frame = 0
+	vw.w = nil
+}
+
+func (vw *valueWriter) invalidTransitionError(destination mode, name string, modes []mode) error {
+	te := TransitionError{
+		name:        name,
+		current:     vw.stack[vw.frame].mode,
+		destination: destination,
+		modes:       modes,
+		action:      "write",
+	}
+	if vw.frame != 0 {
+		te.parent = vw.stack[vw.frame-1].mode
+	}
+	return te
+}
+
+func (vw *valueWriter) writeElementHeader(t bsontype.Type, destination mode, callerName string, addmodes ...mode) error {
+	switch vw.stack[vw.frame].mode {
+	case mElement:
+		vw.buf = bsoncore.AppendHeader(vw.buf, t, vw.stack[vw.frame].key)
+	case mValue:
+		// TODO: Do this with a cache of the first 1000 or so array keys.
+		vw.buf = bsoncore.AppendHeader(vw.buf, t, strconv.Itoa(vw.stack[vw.frame].arrkey))
+	default:
+		modes := []mode{mElement, mValue}
+		if addmodes != nil {
+			modes = append(modes, addmodes...)
+		}
+		return vw.invalidTransitionError(destination, callerName, modes)
+	}
+
+	return nil
+}
+
+func (vw *valueWriter) WriteValueBytes(t bsontype.Type, b []byte) error {
+	if err := vw.writeElementHeader(t, mode(0), "WriteValueBytes"); err != nil {
+		return err
+	}
+	vw.buf = append(vw.buf, b...)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteArray() (ArrayWriter, error) {
+	if err := vw.writeElementHeader(bsontype.Array, mArray, "WriteArray"); err != nil {
+		return nil, err
+	}
+
+	vw.push(mArray)
+
+	return vw, nil
+}
+
+func (vw *valueWriter) WriteBinary(b []byte) error {
+	return vw.WriteBinaryWithSubtype(b, 0x00)
+}
+
+func (vw *valueWriter) WriteBinaryWithSubtype(b []byte, btype byte) error {
+	if err := vw.writeElementHeader(bsontype.Binary, mode(0), "WriteBinaryWithSubtype"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendBinary(vw.buf, btype, b)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteBoolean(b bool) error {
+	if err := vw.writeElementHeader(bsontype.Boolean, mode(0), "WriteBoolean"); err != nil {
+		return err
+	}
+
+	vw.buf = bsoncore.AppendBoolean(vw.buf, b)
+	vw.pop()
+	return nil
+}
+
+func (vw *valueWriter) WriteCodeWithScope(code string) (DocumentWriter, error) {
+	if err := vw.writeElementHeader(bsontype.CodeWithScope, mCodeWithScope, "WriteCodeWithScope"); err != nil {
+		return nil, err
+	}
+
+	// CodeWithScope is different from other types because we need an extra
+	// frame on the stack. In the EndDocument code, we write the document
+	// length, pop, write the code with scope length, and pop. To simplify the
+	// pop code, we push a spacer frame that we'll always jump over.
+	vw.push(mCodeWithScope)
+	vw.buf = bsoncore.AppendString(vw.buf, code)
+	vw.push(mSpacer)
+	vw.push(mDocument)
+
+	return vw, nil
+}
+
// WriteDBPointer writes a (deprecated) BSON DBPointer value composed of the
// namespace ns and the ObjectID oid.
func (vw *valueWriter) WriteDBPointer(ns string, oid primitive.ObjectID) error {
	if err := vw.writeElementHeader(bsontype.DBPointer, mode(0), "WriteDBPointer"); err != nil {
		return err
	}

	vw.buf = bsoncore.AppendDBPointer(vw.buf, ns, oid)
	vw.pop()
	return nil
}

// WriteDateTime writes dt (milliseconds since the Unix epoch) as a BSON
// UTC datetime value.
func (vw *valueWriter) WriteDateTime(dt int64) error {
	if err := vw.writeElementHeader(bsontype.DateTime, mode(0), "WriteDateTime"); err != nil {
		return err
	}

	vw.buf = bsoncore.AppendDateTime(vw.buf, dt)
	vw.pop()
	return nil
}

// WriteDecimal128 writes d128 as a BSON decimal128 value.
func (vw *valueWriter) WriteDecimal128(d128 primitive.Decimal128) error {
	if err := vw.writeElementHeader(bsontype.Decimal128, mode(0), "WriteDecimal128"); err != nil {
		return err
	}

	vw.buf = bsoncore.AppendDecimal128(vw.buf, d128)
	vw.pop()
	return nil
}

// WriteDouble writes f as a BSON double value.
func (vw *valueWriter) WriteDouble(f float64) error {
	if err := vw.writeElementHeader(bsontype.Double, mode(0), "WriteDouble"); err != nil {
		return err
	}

	vw.buf = bsoncore.AppendDouble(vw.buf, f)
	vw.pop()
	return nil
}

// WriteInt32 writes i32 as a BSON 32-bit integer value.
func (vw *valueWriter) WriteInt32(i32 int32) error {
	if err := vw.writeElementHeader(bsontype.Int32, mode(0), "WriteInt32"); err != nil {
		return err
	}

	vw.buf = bsoncore.AppendInt32(vw.buf, i32)
	vw.pop()
	return nil
}

// WriteInt64 writes i64 as a BSON 64-bit integer value.
func (vw *valueWriter) WriteInt64(i64 int64) error {
	if err := vw.writeElementHeader(bsontype.Int64, mode(0), "WriteInt64"); err != nil {
		return err
	}

	vw.buf = bsoncore.AppendInt64(vw.buf, i64)
	vw.pop()
	return nil
}

// WriteJavascript writes code as a BSON JavaScript (without scope) value.
func (vw *valueWriter) WriteJavascript(code string) error {
	if err := vw.writeElementHeader(bsontype.JavaScript, mode(0), "WriteJavascript"); err != nil {
		return err
	}

	vw.buf = bsoncore.AppendJavaScript(vw.buf, code)
	vw.pop()
	return nil
}
+
// WriteMaxKey writes a BSON max key value. Max key has no payload, so only
// the element header is emitted.
func (vw *valueWriter) WriteMaxKey() error {
	if err := vw.writeElementHeader(bsontype.MaxKey, mode(0), "WriteMaxKey"); err != nil {
		return err
	}

	vw.pop()
	return nil
}

// WriteMinKey writes a BSON min key value (header only, no payload).
func (vw *valueWriter) WriteMinKey() error {
	if err := vw.writeElementHeader(bsontype.MinKey, mode(0), "WriteMinKey"); err != nil {
		return err
	}

	vw.pop()
	return nil
}

// WriteNull writes a BSON null value (header only, no payload).
func (vw *valueWriter) WriteNull() error {
	if err := vw.writeElementHeader(bsontype.Null, mode(0), "WriteNull"); err != nil {
		return err
	}

	vw.pop()
	return nil
}
+
// WriteObjectID writes oid as a BSON ObjectID value.
func (vw *valueWriter) WriteObjectID(oid primitive.ObjectID) error {
	if err := vw.writeElementHeader(bsontype.ObjectID, mode(0), "WriteObjectID"); err != nil {
		return err
	}

	vw.buf = bsoncore.AppendObjectID(vw.buf, oid)
	vw.pop()
	return nil
}

// WriteRegex writes a BSON regular expression value. The option characters
// are sorted alphabetically, as required by the BSON specification.
func (vw *valueWriter) WriteRegex(pattern string, options string) error {
	if err := vw.writeElementHeader(bsontype.Regex, mode(0), "WriteRegex"); err != nil {
		return err
	}

	vw.buf = bsoncore.AppendRegex(vw.buf, pattern, sortStringAlphebeticAscending(options))
	vw.pop()
	return nil
}

// WriteString writes s as a BSON string value.
func (vw *valueWriter) WriteString(s string) error {
	if err := vw.writeElementHeader(bsontype.String, mode(0), "WriteString"); err != nil {
		return err
	}

	vw.buf = bsoncore.AppendString(vw.buf, s)
	vw.pop()
	return nil
}
+
// WriteDocument begins a document. At the top level it only reserves the
// four length bytes; nested, it writes an embedded-document element header
// and pushes a document frame.
func (vw *valueWriter) WriteDocument() (DocumentWriter, error) {
	if vw.stack[vw.frame].mode == mTopLevel {
		vw.reserveLength()
		return vw, nil
	}
	if err := vw.writeElementHeader(bsontype.EmbeddedDocument, mDocument, "WriteDocument", mTopLevel); err != nil {
		return nil, err
	}

	vw.push(mDocument)
	return vw, nil
}
+
// WriteSymbol writes symbol as a (deprecated) BSON symbol value.
func (vw *valueWriter) WriteSymbol(symbol string) error {
	if err := vw.writeElementHeader(bsontype.Symbol, mode(0), "WriteSymbol"); err != nil {
		return err
	}

	vw.buf = bsoncore.AppendSymbol(vw.buf, symbol)
	vw.pop()
	return nil
}

// WriteTimestamp writes a BSON timestamp value with seconds t and
// increment/ordinal i.
func (vw *valueWriter) WriteTimestamp(t uint32, i uint32) error {
	if err := vw.writeElementHeader(bsontype.Timestamp, mode(0), "WriteTimestamp"); err != nil {
		return err
	}

	vw.buf = bsoncore.AppendTimestamp(vw.buf, t, i)
	vw.pop()
	return nil
}

// WriteUndefined writes a (deprecated) BSON undefined value (header only).
func (vw *valueWriter) WriteUndefined() error {
	if err := vw.writeElementHeader(bsontype.Undefined, mode(0), "WriteUndefined"); err != nil {
		return err
	}

	vw.pop()
	return nil
}
+
+func (vw *valueWriter) WriteDocumentElement(key string) (ValueWriter, error) {
+	switch vw.stack[vw.frame].mode {
+	case mTopLevel, mDocument:
+	default:
+		return nil, vw.invalidTransitionError(mElement, "WriteDocumentElement", []mode{mTopLevel, mDocument})
+	}
+
+	vw.push(mElement)
+	vw.stack[vw.frame].key = key
+
+	return vw, nil
+}
+
// WriteDocumentEnd terminates the current document: it appends the trailing
// NUL byte, backfills the reserved length prefix, flushes a completed
// top-level document to the underlying writer (if any), and unwinds the
// code-with-scope frames when this document was a scope.
func (vw *valueWriter) WriteDocumentEnd() error {
	switch vw.stack[vw.frame].mode {
	case mTopLevel, mDocument:
	default:
		return fmt.Errorf("incorrect mode to end document: %s", vw.stack[vw.frame].mode)
	}

	vw.buf = append(vw.buf, 0x00)

	err := vw.writeLength()
	if err != nil {
		return err
	}

	if vw.stack[vw.frame].mode == mTopLevel {
		if vw.w != nil {
			// A *SliceWriter gets the buffer handed over directly; any other
			// io.Writer gets a copy and the buffer is reused.
			if sw, ok := vw.w.(*SliceWriter); ok {
				*sw = vw.buf
			} else {
				_, err = vw.w.Write(vw.buf)
				if err != nil {
					return err
				}
				// reset buffer
				vw.buf = vw.buf[:0]
			}
		}
	}

	vw.pop()

	if vw.stack[vw.frame].mode == mCodeWithScope {
		// We ignore the error here because of the guarantee of writeLength.
		// See the docs for writeLength for more info.
		_ = vw.writeLength()
		vw.pop()
	}
	return nil
}
+
// WriteArrayElement begins the next element of the current array. The array
// frame tracks the numeric key; it is incremented here and carried onto the
// pushed value frame so the element header uses the correct index.
func (vw *valueWriter) WriteArrayElement() (ValueWriter, error) {
	if vw.stack[vw.frame].mode != mArray {
		return nil, vw.invalidTransitionError(mValue, "WriteArrayElement", []mode{mArray})
	}

	arrkey := vw.stack[vw.frame].arrkey
	vw.stack[vw.frame].arrkey++

	vw.push(mValue)
	vw.stack[vw.frame].arrkey = arrkey

	return vw, nil
}
+
// WriteArrayEnd terminates the current array: it appends the trailing NUL
// byte, backfills the reserved length prefix, and pops the array frame.
func (vw *valueWriter) WriteArrayEnd() error {
	if vw.stack[vw.frame].mode != mArray {
		return fmt.Errorf("incorrect mode to end array: %s", vw.stack[vw.frame].mode)
	}

	vw.buf = append(vw.buf, 0x00)

	err := vw.writeLength()
	if err != nil {
		return err
	}

	vw.pop()
	return nil
}
+
// writeLength backfills the four little-endian length bytes at the current
// frame's start offset with the total number of bytes from that offset to the
// end of vw.buf. It fails only when the buffer exceeds the maximum document
// size.
//
// NOTE: We assume that if writeLength is called more than once within the
// same function without altering vw.buf, the later calls will not return an
// error. If this changes ensure that the following methods are updated:
//
// - WriteDocumentEnd
func (vw *valueWriter) writeLength() error {
	length := len(vw.buf)
	if length > maxSize {
		return errMaxDocumentSizeExceeded{size: int64(len(vw.buf))}
	}
	length = length - int(vw.stack[vw.frame].start)
	start := vw.stack[vw.frame].start

	vw.buf[start+0] = byte(length)
	vw.buf[start+1] = byte(length >> 8)
	vw.buf[start+2] = byte(length >> 16)
	vw.buf[start+3] = byte(length >> 24)
	return nil
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/writer.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/writer.go
new file mode 100644
index 0000000..6ae4322
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsonrw/writer.go
@@ -0,0 +1,96 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonrw
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
// ArrayWriter is the interface used to create a BSON or BSON adjacent array.
// Callers must ensure they call WriteArrayEnd when they have finished creating
// the array.
type ArrayWriter interface {
	WriteArrayElement() (ValueWriter, error)
	WriteArrayEnd() error
}

// DocumentWriter is the interface used to create a BSON or BSON adjacent
// document. Callers must ensure they call WriteDocumentEnd when they have
// finished creating the document.
type DocumentWriter interface {
	WriteDocumentElement(string) (ValueWriter, error)
	WriteDocumentEnd() error
}

// ValueWriter is the interface used to write BSON values. Implementations of
// this interface handle creating BSON or BSON adjacent representations of the
// values. There is one Write method per BSON type; the Array, CodeWithScope,
// and Document variants return sub-writers that must be properly terminated.
type ValueWriter interface {
	WriteArray() (ArrayWriter, error)
	WriteBinary(b []byte) error
	WriteBinaryWithSubtype(b []byte, btype byte) error
	WriteBoolean(bool) error
	WriteCodeWithScope(code string) (DocumentWriter, error)
	WriteDBPointer(ns string, oid primitive.ObjectID) error
	WriteDateTime(dt int64) error
	WriteDecimal128(primitive.Decimal128) error
	WriteDouble(float64) error
	WriteInt32(int32) error
	WriteInt64(int64) error
	WriteJavascript(code string) error
	WriteMaxKey() error
	WriteMinKey() error
	WriteNull() error
	WriteObjectID(primitive.ObjectID) error
	WriteRegex(pattern, options string) error
	WriteString(string) error
	WriteDocument() (DocumentWriter, error)
	WriteSymbol(symbol string) error
	WriteTimestamp(t, i uint32) error
	WriteUndefined() error
}

// BytesWriter is the interface used to write BSON bytes to a ValueWriter.
// This interface is meant to be a superset of ValueWriter, so that types that
// implement ValueWriter may also implement this interface.
type BytesWriter interface {
	WriteValueBytes(t bsontype.Type, b []byte) error
}
+
// SliceWriter allows a pointer to a slice of bytes to be used as an io.Writer.
type SliceWriter []byte

// Write appends p to the underlying slice and reports len(p) bytes written.
// It never returns an error.
func (sw *SliceWriter) Write(p []byte) (int, error) {
	*sw = append(*sw, p...)
	return len(p), nil
}
+
// writer is a growable byte buffer that implements io.Writer and io.WriterAt.
type writer []byte

// Write appends p to the end of w.
func (w *writer) Write(p []byte) (int, error) {
	index := len(*w)
	return w.WriteAt(p, int64(index))
}

// WriteAt writes p at offset off, growing w as necessary. Writing past the
// current end leaves any intervening bytes zeroed.
func (w *writer) WriteAt(p []byte, off int64) (int, error) {
	newend := off + int64(len(p))
	if newend < int64(len(*w)) {
		newend = int64(len(*w))
	}

	if newend > int64(cap(*w)) {
		// Grow geometrically. Allocate length newend directly (with spare
		// capacity) instead of an oversized length that would be zero-filled
		// and immediately re-sliced away.
		buf := make([]byte, newend, int64(2*cap(*w))+newend)
		copy(buf, *w)
		*w = buf
	}

	*w = (*w)[:newend]
	copy((*w)[off:], p)
	return len(p), nil
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/bsontype/bsontype.go b/vendor/github.com/mongodb/mongo-go-driver/bson/bsontype/bsontype.go
new file mode 100644
index 0000000..a17e5f5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/bsontype/bsontype.go
@@ -0,0 +1,87 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsontype is a utility package that contains types for each BSON type and the
+// a stringifier for the Type to enable easier debugging when working with BSON.
+package bsontype
+
// These constants uniquely refer to each BSON type.
const (
	Double           Type = 0x01
	String           Type = 0x02
	EmbeddedDocument Type = 0x03
	Array            Type = 0x04
	Binary           Type = 0x05
	Undefined        Type = 0x06
	ObjectID         Type = 0x07
	Boolean          Type = 0x08
	DateTime         Type = 0x09
	Null             Type = 0x0A
	Regex            Type = 0x0B
	DBPointer        Type = 0x0C
	JavaScript       Type = 0x0D
	Symbol           Type = 0x0E
	CodeWithScope    Type = 0x0F
	Int32            Type = 0x10
	Timestamp        Type = 0x11
	Int64            Type = 0x12
	Decimal128       Type = 0x13
	MinKey           Type = 0xFF
	MaxKey           Type = 0x7F
)

// Type represents a BSON type.
type Type byte

// String returns the string representation of the BSON type's name.
// Switching on the named constants (rather than raw character literals)
// keeps this in sync with the declarations above.
func (bt Type) String() string {
	switch bt {
	case Double:
		return "double"
	case String:
		return "string"
	case EmbeddedDocument:
		return "embedded document"
	case Array:
		return "array"
	case Binary:
		return "binary"
	case Undefined:
		return "undefined"
	case ObjectID:
		return "objectID"
	case Boolean:
		return "boolean"
	case DateTime:
		return "UTC datetime"
	case Null:
		return "null"
	case Regex:
		return "regex"
	case DBPointer:
		return "dbPointer"
	case JavaScript:
		return "javascript"
	case Symbol:
		return "symbol"
	case CodeWithScope:
		return "code with scope"
	case Int32:
		return "32-bit integer"
	case Timestamp:
		return "timestamp"
	case Int64:
		return "64-bit integer"
	case Decimal128:
		return "128-bit decimal"
	case MinKey:
		return "min key"
	case MaxKey:
		return "max key"
	default:
		return "invalid"
	}
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/decoder.go b/vendor/github.com/mongodb/mongo-go-driver/bson/decoder.go
new file mode 100644
index 0000000..03c86a1
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/decoder.go
@@ -0,0 +1,106 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+)
+
// This pool is used to keep the allocations of Decoders down. This is only used for the Unmarshal*
// helpers and is not consumable from outside of this package. The Decoders retrieved from this pool
// must have both Reset and SetRegistry called on them.
var decPool = sync.Pool{
	New: func() interface{} {
		return new(Decoder)
	},
}

// A Decoder reads and decodes BSON documents from a stream. It reads from a bsonrw.ValueReader as
// the source of BSON data.
type Decoder struct {
	dc bsoncodec.DecodeContext // registry and decode-time settings
	vr bsonrw.ValueReader      // source of BSON data
}
+
// NewDecoder returns a new decoder that uses the DefaultRegistry to read from vr.
// It returns an error when vr is nil.
func NewDecoder(vr bsonrw.ValueReader) (*Decoder, error) {
	if vr == nil {
		return nil, errors.New("cannot create a new Decoder with a nil ValueReader")
	}

	return &Decoder{
		dc: bsoncodec.DecodeContext{Registry: DefaultRegistry},
		vr: vr,
	}, nil
}

// NewDecoderWithContext returns a new decoder that uses DecodeContext dc to read from vr.
// A nil dc.Registry is replaced with DefaultRegistry; other context settings are preserved.
// It returns an error when vr is nil.
func NewDecoderWithContext(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader) (*Decoder, error) {
	if dc.Registry == nil {
		dc.Registry = DefaultRegistry
	}
	if vr == nil {
		return nil, errors.New("cannot create a new Decoder with a nil ValueReader")
	}

	return &Decoder{
		dc: dc,
		vr: vr,
	}, nil
}
+
// Decode reads the next BSON document from the stream and decodes it into the
// value pointed to by val.
//
// The documentation for Unmarshal contains details about the conversion of
// BSON into a Go value.
func (d *Decoder) Decode(val interface{}) error {
	// Types that implement Unmarshaler take over decoding entirely: hand them
	// the raw document bytes.
	if unmarshaler, ok := val.(Unmarshaler); ok {
		// TODO(skriptble): Reuse a []byte here and use the AppendDocumentBytes method.
		buf, err := bsonrw.Copier{}.CopyDocumentToBytes(d.vr)
		if err != nil {
			return err
		}
		return unmarshaler.UnmarshalBSON(buf)
	}

	rval := reflect.ValueOf(val)
	if rval.Kind() != reflect.Ptr {
		return fmt.Errorf("argument to Decode must be a pointer to a type, but got %v", rval)
	}
	rval = rval.Elem()
	decoder, err := d.dc.LookupDecoder(rval.Type())
	if err != nil {
		return err
	}
	return decoder.DecodeValue(d.dc, d.vr, rval)
}
+
// Reset will reset the state of the decoder, using the same DecodeContext used in
// the original construction but using vr for reading.
func (d *Decoder) Reset(vr bsonrw.ValueReader) error {
	d.vr = vr
	return nil
}

// SetRegistry replaces the current registry of the decoder with r.
func (d *Decoder) SetRegistry(r *bsoncodec.Registry) error {
	d.dc.Registry = r
	return nil
}

// SetContext replaces the current DecodeContext of the decoder with dc.
func (d *Decoder) SetContext(dc bsoncodec.DecodeContext) error {
	d.dc = dc
	return nil
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/doc.go b/vendor/github.com/mongodb/mongo-go-driver/bson/doc.go
new file mode 100644
index 0000000..b3f6c52
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/doc.go
@@ -0,0 +1,42 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bson is a library for reading, writing, and manipulating BSON. The
+// library has two families of types for representing BSON.
+//
+// The Raw family of types is used to validate and retrieve elements from a slice of bytes. This
// type is most useful when you want to do lookups on BSON bytes without unmarshaling it into another
+// type.
+//
+// Example:
+// 		var raw bson.Raw = ... // bytes from somewhere
+// 		err := raw.Validate()
+// 		if err != nil { return err }
+// 		val := raw.Lookup("foo")
+// 		i32, ok := val.Int32OK()
+// 		// do something with i32...
+//
+// The D family of types is used to build concise representations of BSON using native Go types.
+// These types do not support automatic lookup.
+//
+// Example:
+// 		bson.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
+//
+//
+// Marshaling and Unmarshaling are handled with the Marshal and Unmarshal family of functions. If
+// you need to write or read BSON from a non-slice source, an Encoder or Decoder can be used with a
+// bsonrw.ValueWriter or bsonrw.ValueReader.
+//
+// Example:
+// 		b, err := bson.Marshal(bson.D{{"foo", "bar"}})
+// 		if err != nil { return err }
+// 		var fooer struct {
+// 			Foo string
+// 		}
+// 		err = bson.Unmarshal(b, &fooer)
+// 		if err != nil { return err }
+// 		// do something with fooer...
+package bson
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/encoder.go b/vendor/github.com/mongodb/mongo-go-driver/bson/encoder.go
new file mode 100644
index 0000000..3c40425
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/encoder.go
@@ -0,0 +1,99 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"errors"
+	"reflect"
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+)
+
// This pool is used to keep the allocations of Encoders down. This is only used for the Marshal*
// methods and is not consumable from outside of this package. The Encoders retrieved from this pool
// must have both Reset and SetRegistry called on them.
var encPool = sync.Pool{
	New: func() interface{} {
		return new(Encoder)
	},
}

// An Encoder writes a serialization format to an output stream. It writes to a bsonrw.ValueWriter
// as the destination of BSON data.
type Encoder struct {
	ec bsoncodec.EncodeContext // registry and encode-time settings
	vw bsonrw.ValueWriter      // destination of BSON data
}
+
// NewEncoder returns a new encoder that uses the DefaultRegistry to write to vw.
// It returns an error when vw is nil.
func NewEncoder(vw bsonrw.ValueWriter) (*Encoder, error) {
	if vw == nil {
		return nil, errors.New("cannot create a new Encoder with a nil ValueWriter")
	}

	return &Encoder{
		ec: bsoncodec.EncodeContext{Registry: DefaultRegistry},
		vw: vw,
	}, nil
}
+
+// NewEncoderWithContext returns a new encoder that uses EncodeContext ec to write to vw.
+func NewEncoderWithContext(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter) (*Encoder, error) {
+	if ec.Registry == nil {
+		ec = bsoncodec.EncodeContext{Registry: DefaultRegistry}
+	}
+	if vw == nil {
+		return nil, errors.New("cannot create a new Encoder with a nil ValueWriter")
+	}
+
+	return &Encoder{
+		ec: ec,
+		vw: vw,
+	}, nil
+}
+
// Encode writes the BSON encoding of val to the stream.
//
// The documentation for Marshal contains details about the conversion of Go
// values to BSON.
func (e *Encoder) Encode(val interface{}) error {
	// Types that implement Marshaler take over encoding entirely: copy the
	// bytes they produce into the ValueWriter.
	if marshaler, ok := val.(Marshaler); ok {
		// TODO(skriptble): Should we have a MarshalAppender interface so that we can have []byte reuse?
		buf, err := marshaler.MarshalBSON()
		if err != nil {
			return err
		}
		return bsonrw.Copier{}.CopyDocumentFromBytes(e.vw, buf)
	}

	encoder, err := e.ec.LookupEncoder(reflect.TypeOf(val))
	if err != nil {
		return err
	}
	return encoder.EncodeValue(e.ec, e.vw, reflect.ValueOf(val))
}
+
// Reset will reset the state of the encoder, using the same EncodeContext used in
// the original construction but using vw for writing.
func (e *Encoder) Reset(vw bsonrw.ValueWriter) error {
	e.vw = vw
	return nil
}

// SetRegistry replaces the current registry of the encoder with r.
func (e *Encoder) SetRegistry(r *bsoncodec.Registry) error {
	e.ec.Registry = r
	return nil
}

// SetContext replaces the current EncodeContext of the encoder with ec.
func (e *Encoder) SetContext(ec bsoncodec.EncodeContext) error {
	e.ec = ec
	return nil
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/marshal.go b/vendor/github.com/mongodb/mongo-go-driver/bson/marshal.go
new file mode 100644
index 0000000..398fb63
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/marshal.go
@@ -0,0 +1,156 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+const defaultDstCap = 256
+
+var bvwPool = bsonrw.NewBSONValueWriterPool()
+var extjPool = bsonrw.NewExtJSONValueWriterPool()
+
+// Marshaler is an interface implemented by types that can marshal themselves
+// into a BSON document represented as bytes. The bytes returned must be a valid
+// BSON document if the error is nil.
+type Marshaler interface {
+	MarshalBSON() ([]byte, error)
+}
+
+// ValueMarshaler is an interface implemented by types that can marshal
+// themselves into a BSON value as bytes. The type must be the valid type for
+// the bytes returned. The bytes and byte type together must be valid if the
+// error is nil.
+type ValueMarshaler interface {
+	MarshalBSONValue() (bsontype.Type, []byte, error)
+}
+
+// Marshal returns the BSON encoding of val.
+//
+// Marshal will use the default registry created by NewRegistry to recursively
+// marshal val into a []byte. Marshal will inspect struct tags and alter the
+// marshaling process accordingly.
+func Marshal(val interface{}) ([]byte, error) {
+	return MarshalWithRegistry(DefaultRegistry, val)
+}
+
+// MarshalAppend will append the BSON encoding of val to dst. If dst is not
+// large enough to hold the BSON encoding of val, dst will be grown.
+func MarshalAppend(dst []byte, val interface{}) ([]byte, error) {
+	return MarshalAppendWithRegistry(DefaultRegistry, dst, val)
+}
+
+// MarshalWithRegistry returns the BSON encoding of val using Registry r.
+func MarshalWithRegistry(r *bsoncodec.Registry, val interface{}) ([]byte, error) {
+	dst := make([]byte, 0, 256) // TODO: make the default cap a constant
+	return MarshalAppendWithRegistry(r, dst, val)
+}
+
+// MarshalWithContext returns the BSON encoding of val using EncodeContext ec.
+func MarshalWithContext(ec bsoncodec.EncodeContext, val interface{}) ([]byte, error) {
+	dst := make([]byte, 0, 256) // TODO: make the default cap a constant
+	return MarshalAppendWithContext(ec, dst, val)
+}
+
// MarshalAppendWithRegistry will append the BSON encoding of val to dst using
// Registry r. If dst is not large enough to hold the BSON encoding of val, dst
// will be grown.
func MarshalAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}) ([]byte, error) {
	return MarshalAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val)
}
+
// MarshalAppendWithContext will append the BSON encoding of val to dst using
// EncodeContext ec. If dst is not large enough to hold the BSON encoding of val, dst
// will be grown. Pooled writer and encoder instances are used to keep
// allocations down.
func MarshalAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}) ([]byte, error) {
	sw := new(bsonrw.SliceWriter)
	*sw = dst
	vw := bvwPool.Get(sw)
	defer bvwPool.Put(vw)

	enc := encPool.Get().(*Encoder)
	defer encPool.Put(enc)

	// Reset and SetContext must both be called on a pooled Encoder before use.
	err := enc.Reset(vw)
	if err != nil {
		return nil, err
	}
	err = enc.SetContext(ec)
	if err != nil {
		return nil, err
	}

	err = enc.Encode(val)
	if err != nil {
		return nil, err
	}

	return *sw, nil
}
+
// MarshalExtJSON returns the extended JSON encoding of val.
func MarshalExtJSON(val interface{}, canonical, escapeHTML bool) ([]byte, error) {
	return MarshalExtJSONWithRegistry(DefaultRegistry, val, canonical, escapeHTML)
}

// MarshalExtJSONAppend will append the extended JSON encoding of val to dst.
// If dst is not large enough to hold the extended JSON encoding of val, dst
// will be grown.
func MarshalExtJSONAppend(dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
	return MarshalExtJSONAppendWithRegistry(DefaultRegistry, dst, val, canonical, escapeHTML)
}

// MarshalExtJSONWithRegistry returns the extended JSON encoding of val using Registry r.
func MarshalExtJSONWithRegistry(r *bsoncodec.Registry, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
	dst := make([]byte, 0, defaultDstCap)
	return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML)
}

// MarshalExtJSONWithContext returns the extended JSON encoding of val using EncodeContext ec.
func MarshalExtJSONWithContext(ec bsoncodec.EncodeContext, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
	dst := make([]byte, 0, defaultDstCap)
	return MarshalExtJSONAppendWithContext(ec, dst, val, canonical, escapeHTML)
}

// MarshalExtJSONAppendWithRegistry will append the extended JSON encoding of
// val to dst using Registry r. If dst is not large enough to hold the
// encoding of val, dst will be grown.
func MarshalExtJSONAppendWithRegistry(r *bsoncodec.Registry, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
	return MarshalExtJSONAppendWithContext(bsoncodec.EncodeContext{Registry: r}, dst, val, canonical, escapeHTML)
}
+
// MarshalExtJSONAppendWithContext will append the extended JSON encoding of
// val to dst using EncodeContext ec. If dst is not large enough to hold the
// encoding of val, dst will be grown. Pooled writer and encoder instances are
// used to keep allocations down.
func MarshalExtJSONAppendWithContext(ec bsoncodec.EncodeContext, dst []byte, val interface{}, canonical, escapeHTML bool) ([]byte, error) {
	sw := new(bsonrw.SliceWriter)
	*sw = dst
	ejvw := extjPool.Get(sw, canonical, escapeHTML)
	defer extjPool.Put(ejvw)

	enc := encPool.Get().(*Encoder)
	defer encPool.Put(enc)

	// Reset and SetContext must both be called on a pooled Encoder before use.
	err := enc.Reset(ejvw)
	if err != nil {
		return nil, err
	}
	err = enc.SetContext(ec)
	if err != nil {
		return nil, err
	}

	err = enc.Encode(val)
	if err != nil {
		return nil, err
	}

	return *sw, nil
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/decimal.go b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/decimal.go
new file mode 100644
index 0000000..d7fdb22
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/decimal.go
@@ -0,0 +1,307 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package primitive
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
// Decimal128 holds decimal128 BSON values.
type Decimal128 struct {
	h, l uint64 // high and low 64 bits of the 128-bit value
}

// NewDecimal128 creates a Decimal128 using the provided high and low uint64s.
func NewDecimal128(h, l uint64) Decimal128 {
	return Decimal128{h: h, l: l}
}

// GetBytes returns the underlying bytes of the BSON decimal value as two
// uint64 values. The first contains the first 8 bytes (high) of the value and
// the second contains the latter (low) 8 bytes.
func (d Decimal128) GetBytes() (uint64, uint64) {
	return d.h, d.l
}
+
// String returns a string representation of the decimal value. It decodes the
// IEEE 754-2008 decimal128 fields (sign, combination/exponent, significand),
// then converts the 128-bit significand to decimal digits 9 at a time via
// divmod and inserts the decimal point or an E+-exponent as needed.
func (d Decimal128) String() string {
	var pos int     // positive sign
	var e int       // exponent
	var h, l uint64 // significand high/low

	if d.h>>63&1 == 0 {
		pos = 1
	}

	// The top combination bits distinguish NaN/Infinity from finite values.
	switch d.h >> 58 & (1<<5 - 1) {
	case 0x1F:
		return "NaN"
	case 0x1E:
		return "-Infinity"[pos:]
	}

	l = d.l
	if d.h>>61&3 == 3 {
		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
		// Implicit 0b100 prefix in significand.
		e = int(d.h>>47&(1<<14-1)) - 6176
		//h = 4<<47 | d.h&(1<<47-1)
		// Spec says all of these values are out of range.
		h, l = 0, 0
	} else {
		// Bits: 1*sign 14*exponent 113*significand
		e = int(d.h>>49&(1<<14-1)) - 6176
		h = d.h & (1<<49 - 1)
	}

	// Would be handled by the logic below, but that's trivial and common.
	if h == 0 && l == 0 && e == 0 {
		return "-0"[pos:]
	}

	var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
	var last = len(repr)
	var i = len(repr)
	var dot = len(repr) + e
	var rem uint32
Loop:
	for d9 := 0; d9 < 5; d9++ {
		h, l, rem = divmod(h, l, 1e9)
		for d1 := 0; d1 < 9; d1++ {
			// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
			if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
				e += len(repr) - i
				i--
				repr[i] = '.'
				last = i - 1
				dot = len(repr) // Unmark.
			}
			c := '0' + byte(rem%10)
			rem /= 10
			i--
			repr[i] = c
			// Handle "0E+3", "1E+3", etc.
			if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
				last = i
				break Loop
			}
			if c != '0' {
				last = i
			}
			// Break early. Works without it, but why.
			if dot > i && l == 0 && h == 0 && rem == 0 {
				break Loop
			}
		}
	}
	repr[last-1] = '-'
	last--

	if e > 0 {
		return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
	}
	if e < 0 {
		return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
	}
	return string(repr[last+pos:])
}
+
// divmod divides the 128-bit unsigned integer formed by h (high 64 bits) and
// l (low 64 bits) by div, returning the 128-bit quotient as qh/ql plus the
// remainder. It performs schoolbook long division over four 32-bit limbs.
func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
	divisor := uint64(div)

	limb := h >> 32
	q3 := limb / divisor
	r := limb % divisor

	limb = r<<32 + h&(1<<32-1)
	q2 := limb / divisor
	r = limb % divisor

	limb = r<<32 + l>>32
	q1 := limb / divisor
	r = limb % divisor

	limb = r<<32 + l&(1<<32-1)
	q0 := limb / divisor
	r = limb % divisor

	return q3<<32 | q2, q1<<32 | q0, uint32(r)
}
+
// Special decimal128 values: combination-field encodings for NaN and the two
// infinities (the top bit of dNegInf's high word is the sign bit).
var dNaN = Decimal128{0x1F << 58, 0}
var dPosInf = Decimal128{0x1E << 58, 0}
var dNegInf = Decimal128{0x3E << 58, 0}

// dErr returns NaN together with a parse error naming the offending input.
func dErr(s string) (Decimal128, error) {
	return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
}
+
// ParseDecimal128 takes the given string and attempts to parse it into a valid
// Decimal128 value. It accepts an optional sign, NaN/Inf/Infinity in any case,
// a digit string with at most one decimal point and up to 34 significant
// digits, and an optional E/e exponent; out-of-range exponents are adjusted
// via subnormal rounding or clamping where the spec allows.
func ParseDecimal128(s string) (Decimal128, error) {
	orig := s
	if s == "" {
		return dErr(orig)
	}
	neg := s[0] == '-'
	if neg || s[0] == '+' {
		s = s[1:]
	}

	// Special values: NaN and +/-Infinity.
	if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
		if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
			return dNaN, nil
		}
		if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
			if neg {
				return dNegInf, nil
			}
			return dPosInf, nil
		}
		return dErr(orig)
	}

	var h, l uint64
	var e int

	// Digits are accumulated into (h,l) nine at a time via muladd; mul/add
	// buffer up to 9 pending decimal digits between flushes.
	var add, ovr uint32
	var mul uint32 = 1
	var dot = -1
	var digits = 0
	var i = 0
	for i < len(s) {
		c := s[i]
		if mul == 1e9 {
			h, l, ovr = muladd(h, l, mul, add)
			mul, add = 1, 0
			// Overflow or significand spilling into the exponent field is
			// out of range.
			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
				return dErr(orig)
			}
		}
		if c >= '0' && c <= '9' {
			i++
			if c > '0' || digits > 0 {
				digits++
			}
			if digits > 34 {
				if c == '0' {
					// Exact rounding.
					e++
					continue
				}
				return dErr(orig)
			}
			mul *= 10
			add *= 10
			add += uint32(c - '0')
			continue
		}
		if c == '.' {
			i++
			if dot >= 0 || i == 1 && len(s) == 1 {
				return dErr(orig)
			}
			if i == len(s) {
				break
			}
			if s[i] < '0' || s[i] > '9' || e > 0 {
				return dErr(orig)
			}
			dot = i
			continue
		}
		break
	}
	if i == 0 {
		return dErr(orig)
	}
	if mul > 1 {
		// Flush any remaining buffered digits.
		h, l, ovr = muladd(h, l, mul, add)
		if ovr > 0 || h&((1<<15-1)<<49) > 0 {
			return dErr(orig)
		}
	}
	if dot >= 0 {
		e += dot - i
	}
	// Optional exponent suffix.
	if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
		i++
		eneg := s[i] == '-'
		if eneg || s[i] == '+' {
			i++
			if i == len(s) {
				return dErr(orig)
			}
		}
		n := 0
		for i < len(s) && n < 1e4 {
			c := s[i]
			i++
			if c < '0' || c > '9' {
				return dErr(orig)
			}
			n *= 10
			n += int(c - '0')
		}
		if eneg {
			n = -n
		}
		e += n
		for e < -6176 {
			// Subnormal.
			var div uint32 = 1
			for div < 1e9 && e < -6176 {
				div *= 10
				e++
			}
			var rem uint32
			h, l, rem = divmod(h, l, div)
			if rem > 0 {
				return dErr(orig)
			}
		}
		for e > 6111 {
			// Clamped.
			var mul uint32 = 1
			for mul < 1e9 && e > 6111 {
				mul *= 10
				e--
			}
			h, l, ovr = muladd(h, l, mul, 0)
			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
				return dErr(orig)
			}
		}
		if e < -6176 || e > 6111 {
			return dErr(orig)
		}
	}

	if i < len(s) {
		return dErr(orig)
	}

	// Pack biased exponent and sign into the high word.
	h |= uint64(e+6176) & uint64(1<<14-1) << 49
	if neg {
		h |= 1 << 63
	}
	return Decimal128{h, l}, nil
}
+
// muladd computes (h,l)*mul + add over the 128-bit unsigned integer formed by
// h (high 64 bits) and l (low 64 bits), returning the 128-bit result plus any
// carry beyond 128 bits. The arithmetic is carried out in 32-bit limbs.
func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
	m := uint64(mul)

	// Partial products, one per 32-bit limb, each carrying the previous
	// limb's high half.
	p0 := m * (l & (1<<32 - 1))
	p1 := p0>>32 + m*(l>>32)
	p2 := p1>>32 + m*(h&(1<<32-1))
	p3 := p2>>32 + m*(h>>32)

	// Fold in the addend and propagate carries limb by limb.
	p0 = p0&(1<<32-1) + uint64(add)
	p1 = p1&(1<<32-1) + p0>>32
	p2 = p2&(1<<32-1) + p1>>32
	p3 = p3&(1<<32-1) + p2>>32

	return p3<<32 | p2&(1<<32-1), p1<<32 | p0&(1<<32-1), uint32(p3 >> 32)
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/objectid.go b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/objectid.go
new file mode 100644
index 0000000..9eaaa06
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/objectid.go
@@ -0,0 +1,154 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on gopkg.in/mgo.v2/bson by Gustavo Niemeyer
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package primitive
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"sync/atomic"
+	"time"
+)
+
// ErrInvalidHex indicates that a hex string cannot be converted to an ObjectID.
var ErrInvalidHex = errors.New("the provided hex string is not a valid ObjectID")

// ObjectID is the BSON ObjectID type: 12 bytes laid out as a 4-byte big-endian
// timestamp, a 5-byte process-unique value, and a 3-byte counter.
type ObjectID [12]byte

// NilObjectID is the zero value for ObjectID.
var NilObjectID ObjectID

// objectIDCounter seeds the 3-byte counter with a random value so that
// independent processes are unlikely to start from the same point.
var objectIDCounter = readRandomUint32()

// processUnique is generated once per process and embedded in every ObjectID
// created by this process.
var processUnique = processUniqueBytes()
+
+// NewObjectID generates a new ObjectID.
+func NewObjectID() ObjectID {
+	var b [12]byte
+
+	binary.BigEndian.PutUint32(b[0:4], uint32(time.Now().Unix()))
+	copy(b[4:9], processUnique[:])
+	putUint24(b[9:12], atomic.AddUint32(&objectIDCounter, 1))
+
+	return b
+}
+
+// Hex returns the hex encoding of the ObjectID as a string.
+func (id ObjectID) Hex() string {
+	return hex.EncodeToString(id[:])
+}
+
// String implements fmt.Stringer, rendering the id as ObjectID("<24 hex chars>").
func (id ObjectID) String() string {
	return fmt.Sprintf("ObjectID(%q)", id.Hex())
}
+
+// IsZero returns true if id is the empty ObjectID.
+func (id ObjectID) IsZero() bool {
+	return bytes.Equal(id[:], NilObjectID[:])
+}
+
+// ObjectIDFromHex creates a new ObjectID from a hex string. It returns an error if the hex string is not a
+// valid ObjectID.
+func ObjectIDFromHex(s string) (ObjectID, error) {
+	b, err := hex.DecodeString(s)
+	if err != nil {
+		return NilObjectID, err
+	}
+
+	if len(b) != 12 {
+		return NilObjectID, ErrInvalidHex
+	}
+
+	var oid [12]byte
+	copy(oid[:], b[:])
+
+	return oid, nil
+}
+
+// MarshalJSON returns the ObjectID as a string
+func (id ObjectID) MarshalJSON() ([]byte, error) {
+	return json.Marshal(id.Hex())
+}
+
+// UnmarshalJSON populates the byte slice with the ObjectID. If the byte slice is 64 bytes long, it
+// will be populated with the hex representation of the ObjectID. If the byte slice is twelve bytes
+// long, it will be populated with the BSON representation of the ObjectID. Otherwise, it will
+// return an error.
+func (id *ObjectID) UnmarshalJSON(b []byte) error {
+	var err error
+	switch len(b) {
+	case 12:
+		copy(id[:], b)
+	default:
+		// Extended JSON
+		var res interface{}
+		err := json.Unmarshal(b, &res)
+		if err != nil {
+			return err
+		}
+		str, ok := res.(string)
+		if !ok {
+			m, ok := res.(map[string]interface{})
+			if !ok {
+				return errors.New("not an extended JSON ObjectID")
+			}
+			oid, ok := m["$oid"]
+			if !ok {
+				return errors.New("not an extended JSON ObjectID")
+			}
+			str, ok = oid.(string)
+			if !ok {
+				return errors.New("not an extended JSON ObjectID")
+			}
+		}
+
+		if len(str) != 24 {
+			return fmt.Errorf("cannot unmarshal into an ObjectID, the length must be 12 but it is %d", len(str))
+		}
+
+		_, err = hex.Decode(id[:], []byte(str))
+		if err != nil {
+			return err
+		}
+	}
+
+	return err
+}
+
// processUniqueBytes returns 5 cryptographically random bytes generated once
// per process; they form the middle section of every ObjectID this process
// creates.
func processUniqueBytes() [5]byte {
	var b [5]byte
	_, err := io.ReadFull(rand.Reader, b[:])
	if err != nil {
		// Without a working source of randomness, ObjectIDs from different
		// processes could collide; failing hard at package initialization is
		// the only safe option.
		panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err))
	}

	return b
}
+
// readRandomUint32 returns a cryptographically random uint32, used to seed the
// ObjectID counter.
func readRandomUint32() uint32 {
	var b [4]byte
	if _, err := io.ReadFull(rand.Reader, b[:]); err != nil {
		panic(fmt.Errorf("cannot initialize objectid package with crypto.rand.Reader: %v", err))
	}

	// Equivalent to the manual b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 assembly.
	return binary.LittleEndian.Uint32(b[:])
}
+
// putUint24 writes the low 24 bits of v into b in big-endian byte order.
// b must be at least 3 bytes long.
func putUint24(b []byte, v uint32) {
	b[2] = byte(v)
	b[1] = byte(v >> 8)
	b[0] = byte(v >> 16)
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/primitive.go b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/primitive.go
new file mode 100644
index 0000000..b9b43f4
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive/primitive.go
@@ -0,0 +1,149 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package primitive contains types similar to Go primitives for BSON types that do not have
+// direct Go primitive representations.
+package primitive
+
+import (
+	"bytes"
+	"fmt"
+)
+
// Binary represents a BSON binary value.
type Binary struct {
	Subtype byte
	Data    []byte
}

// Equal compares bp to bp2 and returns true if they are equal.
func (bp Binary) Equal(bp2 Binary) bool {
	if bp.Subtype != bp2.Subtype {
		return false
	}
	return bytes.Equal(bp.Data, bp2.Data)
}
+
// Undefined represents the BSON undefined value type.
type Undefined struct{}

// DateTime represents the BSON datetime value.
type DateTime int64

// Null represents the BSON null value.
type Null struct{}
+
// Regex represents a BSON regex value.
type Regex struct {
	Pattern string
	Options string
}

// String implements fmt.Stringer, rendering the regex as a JSON-style document
// with "pattern" and "options" fields.
func (rp Regex) String() string {
	return fmt.Sprintf(`{"pattern": "%s", "options": "%s"}`, rp.Pattern, rp.Options)
}

// Equal compares rp to rp2 and returns true if they are equal.
func (rp Regex) Equal(rp2 Regex) bool {
	// BUG FIX: the original compared rp.Options to itself, which made the
	// options comparison always true; compare against rp2.Options instead.
	return rp.Pattern == rp2.Pattern && rp.Options == rp2.Options
}
+
// DBPointer represents a BSON dbpointer value.
type DBPointer struct {
	DB      string
	Pointer ObjectID
}

// String implements fmt.Stringer, rendering the value as a JSON-style document
// with "db" and "pointer" fields.
func (d DBPointer) String() string {
	return fmt.Sprintf(`{"db": "%s", "pointer": "%s"}`, d.DB, d.Pointer)
}

// Equal compares d to d2 and returns true if they are equal.
func (d DBPointer) Equal(d2 DBPointer) bool {
	return d.DB == d2.DB && bytes.Equal(d.Pointer[:], d2.Pointer[:])
}
+
// JavaScript represents a BSON JavaScript code value.
type JavaScript string

// Symbol represents a BSON symbol value.
type Symbol string

// CodeWithScope represents a BSON JavaScript code with scope value.
type CodeWithScope struct {
	Code  JavaScript
	Scope interface{}
}

// String implements fmt.Stringer, rendering the value as a JSON-style document
// with "code" and "scope" fields.
func (cws CodeWithScope) String() string {
	return fmt.Sprintf(`{"code": "%s", "scope": %v}`, cws.Code, cws.Scope)
}
+
// Timestamp represents a BSON timestamp value.
type Timestamp struct {
	T uint32 // time component
	I uint32 // increment component, ordering timestamps within the same T
}

// Equal compares tp to tp2 and returns true if they are equal.
func (tp Timestamp) Equal(tp2 Timestamp) bool {
	return tp.T == tp2.T && tp.I == tp2.I
}
+
// MinKey represents the BSON minkey value.
type MinKey struct{}

// MaxKey represents the BSON maxkey value.
type MaxKey struct{}
+
// D represents a BSON Document. This type can be used to represent BSON in a
// concise and readable manner. It should generally be used when serializing to
// BSON. For deserializing, the Raw or Document types should be used.
//
// Example usage:
//
// 		primitive.D{{"foo", "bar"}, {"hello", "world"}, {"pi", 3.14159}}
//
// This type should be used in situations where order matters, such as MongoDB
// commands. If the order is not important, a map is more comfortable and
// concise.
type D []E

// Map creates a map from the elements of the D. Later duplicate keys overwrite
// earlier ones.
func (d D) Map() M {
	result := make(M, len(d))
	for i := range d {
		result[d[i].Key] = d[i].Value
	}
	return result
}

// E represents a BSON element for a D. It is usually used inside a D.
type E struct {
	Key   string
	Value interface{}
}

// M is an unordered, concise representation of a BSON Document. It should
// generally be used to serialize BSON when the order of the elements of a BSON
// document do not matter. If the element order matters, use a D instead.
//
// Example usage:
//
// 		primitive.M{"foo": "bar", "hello": "world", "pi": 3.14159}
//
// This type is handled in the encoders as a regular map[string]interface{}.
// The elements will be serialized in an undefined, random order, and the order
// will be different each time.
type M map[string]interface{}
+
// An A represents a BSON array. This type can be used to represent a BSON array in a concise and
// readable manner. It should generally be used when serializing to BSON. For deserializing, the
// RawArray or Array types should be used.
//
// Example usage:
//
// 		primitive.A{"bar", "world", 3.14159, primitive.D{{"qux", 12345}}}
//
type A []interface{}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/primitive_codecs.go b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive_codecs.go
new file mode 100644
index 0000000..6536b2e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/primitive_codecs.go
@@ -0,0 +1,111 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"errors"
+	"reflect"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+)
+
// primitiveCodecs is the shared instance used when registering this package's codecs.
var primitiveCodecs PrimitiveCodecs

// PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types
// defined in this package.
type PrimitiveCodecs struct{}
+
// RegisterPrimitiveCodecs will register the encode and decode methods attached to PrimitiveCodecs
// with the provided RegistryBuilder. It panics if rb is nil.
// (The previous comment claimed a nil rb would create a new builder, which the code never did.)
func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) {
	if rb == nil {
		panic(errors.New("argument to RegisterPrimitiveCodecs must not be nil"))
	}

	rb.
		RegisterEncoder(tRawValue, bsoncodec.ValueEncoderFunc(pc.RawValueEncodeValue)).
		RegisterEncoder(tRaw, bsoncodec.ValueEncoderFunc(pc.RawEncodeValue)).
		RegisterDecoder(tRawValue, bsoncodec.ValueDecoderFunc(pc.RawValueDecodeValue)).
		RegisterDecoder(tRaw, bsoncodec.ValueDecoderFunc(pc.RawDecodeValue))
}
+
+// RawValueEncodeValue is the ValueEncoderFunc for RawValue.
+func (PrimitiveCodecs) RawValueEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tRawValue {
+		return bsoncodec.ValueEncoderError{Name: "RawValueEncodeValue", Types: []reflect.Type{tRawValue}, Received: val}
+	}
+
+	rawvalue := val.Interface().(RawValue)
+
+	return bsonrw.Copier{}.CopyValueFromBytes(vw, rawvalue.Type, rawvalue.Value)
+}
+
+// RawValueDecodeValue is the ValueDecoderFunc for RawValue.
+func (PrimitiveCodecs) RawValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tRawValue {
+		return bsoncodec.ValueDecoderError{Name: "RawValueDecodeValue", Types: []reflect.Type{tRawValue}, Received: val}
+	}
+
+	t, value, err := bsonrw.Copier{}.CopyValueToBytes(vr)
+	if err != nil {
+		return err
+	}
+
+	val.Set(reflect.ValueOf(RawValue{Type: t, Value: value}))
+	return nil
+}
+
+// RawEncodeValue is the ValueEncoderFunc for Reader.
+func (PrimitiveCodecs) RawEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tRaw {
+		return bsoncodec.ValueEncoderError{Name: "RawEncodeValue", Types: []reflect.Type{tRaw}, Received: val}
+	}
+
+	rdr := val.Interface().(Raw)
+
+	return bsonrw.Copier{}.CopyDocumentFromBytes(vw, rdr)
+}
+
// RawDecodeValue is the ValueDecoderFunc for Raw. It reads the next document
// from vr into the byte slice held by val.
func (PrimitiveCodecs) RawDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
	if !val.CanSet() || val.Type() != tRaw {
		return bsoncodec.ValueDecoderError{Name: "RawDecodeValue", Types: []reflect.Type{tRaw}, Received: val}
	}

	// Ensure there is a non-nil slice to append into.
	if val.IsNil() {
		val.Set(reflect.MakeSlice(val.Type(), 0, 0))
	}

	// Truncate so the document overwrites any existing contents while reusing
	// the slice's capacity.
	val.SetLen(0)

	// Note: val is updated even when err is non-nil, so a partially appended
	// document is visible to the caller alongside the error.
	rdr, err := bsonrw.Copier{}.AppendDocumentBytes(val.Interface().(Raw), vr)
	val.Set(reflect.ValueOf(rdr))
	return err
}
+
// encodeRaw writes each element of raw to dw and then closes the document.
// The ec parameter is currently unused; it is kept so the signature matches
// the other encode helpers.
func (pc PrimitiveCodecs) encodeRaw(ec bsoncodec.EncodeContext, dw bsonrw.DocumentWriter, raw Raw) error {
	var copier bsonrw.Copier
	elems, err := raw.Elements()
	if err != nil {
		return err
	}
	for _, elem := range elems {
		dvw, err := dw.WriteDocumentElement(elem.Key())
		if err != nil {
			return err
		}

		val := elem.Value()
		err = copier.CopyValueFromBytes(dvw, val.Type, val.Value)
		if err != nil {
			return err
		}
	}

	return dw.WriteDocumentEnd()
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/raw.go b/vendor/github.com/mongodb/mongo-go-driver/bson/raw.go
new file mode 100644
index 0000000..abd6e38
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/raw.go
@@ -0,0 +1,92 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"errors"
+	"io"
+
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
// ErrNilReader indicates that an operation was attempted on a nil bson.Reader.
var ErrNilReader = errors.New("nil reader")

// errValidateDone signals that a validation loop has consumed the whole document.
var errValidateDone = errors.New("validation loop complete")
+
+// Raw is a wrapper around a byte slice. It will interpret the slice as a
+// BSON document. This type is a wrapper around a bsoncore.Document. Errors returned from the
+// methods on this type and associated types come from the bsoncore package.
+type Raw []byte
+
// NewFromIOReader reads in a document from the given io.Reader and constructs a Raw from
// it. Both return values are passed through from bsoncore.NewDocumentFromReader unchanged.
func NewFromIOReader(r io.Reader) (Raw, error) {
	doc, err := bsoncore.NewDocumentFromReader(r)
	return Raw(doc), err
}
+
// Validate validates the document. This method only validates the first document in
// the slice, to validate other documents, the slice must be resliced.
func (r Raw) Validate() (err error) { return bsoncore.Document(r).Validate() }

// Lookup searches the document, potentially recursively, for the given key. If
// there are multiple keys provided, this method will recurse down, as long as
// the top and intermediate nodes are either documents or arrays. If an error
// occurs or if the value doesn't exist, an empty RawValue is returned.
func (r Raw) Lookup(key ...string) RawValue {
	return convertFromCoreValue(bsoncore.Document(r).Lookup(key...))
}

// LookupErr searches the document and potentially subdocuments or arrays for the
// provided key. Each key provided to this method represents a layer of depth.
func (r Raw) LookupErr(key ...string) (RawValue, error) {
	val, err := bsoncore.Document(r).LookupErr(key...)
	return convertFromCoreValue(val), err
}
+
+// Elements returns this document as a slice of elements. The returned slice will contain valid
+// elements. If the document is not valid, the elements up to the invalid point will be returned
+// along with an error.
+func (r Raw) Elements() ([]RawElement, error) {
+	elems, err := bsoncore.Document(r).Elements()
+	relems := make([]RawElement, 0, len(elems))
+	for _, elem := range elems {
+		relems = append(relems, RawElement(elem))
+	}
+	return relems, err
+}
+
+// Values returns this document as a slice of values. The returned slice will contain valid values.
+// If the document is not valid, the values up to the invalid point will be returned along with an
+// error.
+func (r Raw) Values() ([]RawValue, error) {
+	vals, err := bsoncore.Document(r).Values()
+	rvals := make([]RawValue, 0, len(vals))
+	for _, val := range vals {
+		rvals = append(rvals, convertFromCoreValue(val))
+	}
+	return rvals, err
+}
+
// Index searches for and retrieves the element at the given index. This method will panic if
// the document is invalid or if the index is out of bounds.
func (r Raw) Index(index uint) RawElement { return RawElement(bsoncore.Document(r).Index(index)) }

// IndexErr searches for and retrieves the element at the given index, returning
// an error instead of panicking on an invalid document or out-of-bounds index.
func (r Raw) IndexErr(index uint) (RawElement, error) {
	elem, err := bsoncore.Document(r).IndexErr(index)
	return RawElement(elem), err
}

// String implements the fmt.Stringer interface.
func (r Raw) String() string { return bsoncore.Document(r).String() }
+
// readi32 decodes a little-endian int32 from the first four bytes of b.
func readi32(b []byte) int32 {
	_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
	u := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
	return int32(u)
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/raw_element.go b/vendor/github.com/mongodb/mongo-go-driver/bson/raw_element.go
new file mode 100644
index 0000000..2a01390
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/raw_element.go
@@ -0,0 +1,51 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// RawElement represents a BSON element in byte form. This type provides a simple way to
+// transform a slice of bytes into a BSON element and extract information from it.
+//
+// RawElement is a thin wrapper around a bsoncore.Element.
+type RawElement []byte
+
+// Key returns the key for this element. If the element is not valid, this method returns an empty
+// string. If knowing if the element is valid is important, use KeyErr.
+func (re RawElement) Key() string { return bsoncore.Element(re).Key() }
+
+// KeyErr returns the key for this element, returning an error if the element is not valid.
+func (re RawElement) KeyErr() (string, error) { return bsoncore.Element(re).KeyErr() }
+
+// Value returns the value of this element. If the element is not valid, this method returns an
+// empty Value. If knowing if the element is valid is important, use ValueErr.
+func (re RawElement) Value() RawValue { return convertFromCoreValue(bsoncore.Element(re).Value()) }
+
+// ValueErr returns the value for this element, returning an error if the element is not valid.
+func (re RawElement) ValueErr() (RawValue, error) {
+	val, err := bsoncore.Element(re).ValueErr()
+	return convertFromCoreValue(val), err
+}
+
+// Validate ensures re is a valid BSON element.
+func (re RawElement) Validate() error { return bsoncore.Element(re).Validate() }
+
+// String implements the fmt.Stringer interface. The output will be in extended JSON format.
+func (re RawElement) String() string {
+	doc := bsoncore.BuildDocument(nil, re)
+	j, err := MarshalExtJSON(Raw(doc), true, false)
+	if err != nil {
+		return "<malformed>"
+	}
+	return string(j)
+}
+
+// DebugString outputs a human readable version of RawElement. It will attempt to stringify the
+// valid components of the element even if the entire element is not valid.
+func (re RawElement) DebugString() string { return bsoncore.Element(re).DebugString() }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/raw_value.go b/vendor/github.com/mongodb/mongo-go-driver/bson/raw_value.go
new file mode 100644
index 0000000..2803b80
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/raw_value.go
@@ -0,0 +1,287 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
// ErrNilContext is returned when the provided DecodeContext is nil.
var ErrNilContext = errors.New("DecodeContext cannot be nil")

// ErrNilRegistry is returned when the provided registry is nil.
var ErrNilRegistry = errors.New("Registry cannot be nil")

// RawValue represents a BSON value in byte form. It can be used to hold unprocessed BSON or to
// defer processing of BSON. Type is the BSON type of the value and Value are the raw bytes that
// represent the element.
//
// This type wraps bsoncore.Value for most of its functionality.
type RawValue struct {
	Type  bsontype.Type
	Value []byte

	// r is the registry the value was produced with, if any; Unmarshal falls
	// back to DefaultRegistry when r is nil.
	r *bsoncodec.Registry
}
+
+// Unmarshal deserializes BSON into the provided val. If RawValue cannot be unmarshaled into val, an
+// error is returned. This method will use the registry used to create the RawValue, if the RawValue
+// was created from partial BSON processing, or it will use the default registry. Users wishing to
+// specify the registry to use should use UnmarshalWithRegistry.
+func (rv RawValue) Unmarshal(val interface{}) error {
+	reg := rv.r
+	if reg == nil {
+		reg = DefaultRegistry
+	}
+	return rv.UnmarshalWithRegistry(reg, val)
+}
+
+// Equal compares rv and rv2 and returns true if they are equal.
+func (rv RawValue) Equal(rv2 RawValue) bool {
+	if rv.Type != rv2.Type {
+		return false
+	}
+
+	if !bytes.Equal(rv.Value, rv2.Value) {
+		return false
+	}
+
+	return true
+}
+
// UnmarshalWithRegistry performs the same unmarshalling as Unmarshal but uses the provided registry
// instead of the one attached or the default registry.
func (rv RawValue) UnmarshalWithRegistry(r *bsoncodec.Registry, val interface{}) error {
	if r == nil {
		return ErrNilRegistry
	}

	vr := bsonrw.NewBSONValueReader(rv.Type, rv.Value)
	rval := reflect.ValueOf(val)
	// Decoding requires a settable destination, so val must be a pointer.
	if rval.Kind() != reflect.Ptr {
		return fmt.Errorf("argument to Unmarshal* must be a pointer to a type, but got %v", rval)
	}
	rval = rval.Elem()
	dec, err := r.LookupDecoder(rval.Type())
	if err != nil {
		return err
	}
	return dec.DecodeValue(bsoncodec.DecodeContext{Registry: r}, vr, rval)
}
+
// UnmarshalWithContext performs the same unmarshalling as Unmarshal but uses the provided DecodeContext
// instead of the one attached or the default registry.
func (rv RawValue) UnmarshalWithContext(dc *bsoncodec.DecodeContext, val interface{}) error {
	if dc == nil {
		return ErrNilContext
	}

	vr := bsonrw.NewBSONValueReader(rv.Type, rv.Value)
	rval := reflect.ValueOf(val)
	// Decoding requires a settable destination, so val must be a pointer.
	if rval.Kind() != reflect.Ptr {
		return fmt.Errorf("argument to Unmarshal* must be a pointer to a type, but got %v", rval)
	}
	rval = rval.Elem()
	dec, err := dc.LookupDecoder(rval.Type())
	if err != nil {
		return err
	}
	return dec.DecodeValue(*dc, vr, rval)
}
+
// convertFromCoreValue wraps a bsoncore.Value in the equivalent RawValue.
func convertFromCoreValue(v bsoncore.Value) RawValue { return RawValue{Type: v.Type, Value: v.Data} }

// convertToCoreValue unwraps a RawValue into the equivalent bsoncore.Value.
func convertToCoreValue(v RawValue) bsoncore.Value   { return bsoncore.Value{Type: v.Type, Data: v.Value} }
+
// Validate ensures the value is a valid BSON value.
func (rv RawValue) Validate() error { return convertToCoreValue(rv).Validate() }

// IsNumber returns true if the type of rv is a numeric BSON type.
func (rv RawValue) IsNumber() bool { return convertToCoreValue(rv).IsNumber() }

// String implements the fmt.Stringer interface. This method will return values in extended JSON
// format. If the value is not valid, this returns an empty string.
func (rv RawValue) String() string { return convertToCoreValue(rv).String() }

// DebugString outputs a human readable version of Document. It will attempt to stringify the
// valid components of the document even if the entire document is not valid.
func (rv RawValue) DebugString() string { return convertToCoreValue(rv).DebugString() }

// Double returns the float64 value for this element.
// It panics if e's BSON type is not bsontype.Double.
func (rv RawValue) Double() float64 { return convertToCoreValue(rv).Double() }

// DoubleOK is the same as Double, but returns a boolean instead of panicking.
func (rv RawValue) DoubleOK() (float64, bool) { return convertToCoreValue(rv).DoubleOK() }

// StringValue returns the string value for this element.
// It panics if e's BSON type is not bsontype.String.
//
// NOTE: This method is called StringValue to avoid a collision with the String method which
// implements the fmt.Stringer interface.
func (rv RawValue) StringValue() string { return convertToCoreValue(rv).StringValue() }

// StringValueOK is the same as StringValue, but returns a boolean instead of
// panicking.
func (rv RawValue) StringValueOK() (string, bool) { return convertToCoreValue(rv).StringValueOK() }

// Document returns the BSON document the Value represents as a Document. It panics if the
// value is a BSON type other than document.
func (rv RawValue) Document() Raw { return Raw(convertToCoreValue(rv).Document()) }

// DocumentOK is the same as Document, except it returns a boolean
// instead of panicking.
func (rv RawValue) DocumentOK() (Raw, bool) {
	doc, ok := convertToCoreValue(rv).DocumentOK()
	return Raw(doc), ok
}

// Array returns the BSON array the Value represents as an Array. It panics if the
// value is a BSON type other than array.
func (rv RawValue) Array() Raw { return Raw(convertToCoreValue(rv).Array()) }

// ArrayOK is the same as Array, except it returns a boolean instead
// of panicking.
func (rv RawValue) ArrayOK() (Raw, bool) {
	doc, ok := convertToCoreValue(rv).ArrayOK()
	return Raw(doc), ok
}

// Binary returns the BSON binary value the Value represents. It panics if the value is a BSON type
// other than binary.
func (rv RawValue) Binary() (subtype byte, data []byte) { return convertToCoreValue(rv).Binary() }

// BinaryOK is the same as Binary, except it returns a boolean instead of
// panicking.
func (rv RawValue) BinaryOK() (subtype byte, data []byte, ok bool) {
	return convertToCoreValue(rv).BinaryOK()
}

// ObjectID returns the BSON objectid value the Value represents. It panics if the value is a BSON
// type other than objectid.
func (rv RawValue) ObjectID() primitive.ObjectID { return convertToCoreValue(rv).ObjectID() }

// ObjectIDOK is the same as ObjectID, except it returns a boolean instead of
// panicking.
func (rv RawValue) ObjectIDOK() (primitive.ObjectID, bool) { return convertToCoreValue(rv).ObjectIDOK() }

// Boolean returns the boolean value the Value represents. It panics if the
// value is a BSON type other than boolean.
func (rv RawValue) Boolean() bool { return convertToCoreValue(rv).Boolean() }

// BooleanOK is the same as Boolean, except it returns a boolean instead of
// panicking.
func (rv RawValue) BooleanOK() (bool, bool) { return convertToCoreValue(rv).BooleanOK() }

// DateTime returns the BSON datetime value the Value represents as a
// unix timestamp. It panics if the value is a BSON type other than datetime.
func (rv RawValue) DateTime() int64 { return convertToCoreValue(rv).DateTime() }

// DateTimeOK is the same as DateTime, except it returns a boolean instead of
// panicking.
func (rv RawValue) DateTimeOK() (int64, bool) { return convertToCoreValue(rv).DateTimeOK() }

// Time returns the BSON datetime value the Value represents. It panics if the value is a BSON
// type other than datetime.
func (rv RawValue) Time() time.Time { return convertToCoreValue(rv).Time() }

// TimeOK is the same as Time, except it returns a boolean instead of
// panicking.
func (rv RawValue) TimeOK() (time.Time, bool) { return convertToCoreValue(rv).TimeOK() }

// Regex returns the BSON regex value the Value represents. It panics if the value is a BSON
// type other than regex.
func (rv RawValue) Regex() (pattern, options string) { return convertToCoreValue(rv).Regex() }

// RegexOK is the same as Regex, except it returns a boolean instead of
// panicking.
func (rv RawValue) RegexOK() (pattern, options string, ok bool) {
	return convertToCoreValue(rv).RegexOK()
}

// DBPointer returns the BSON dbpointer value the Value represents. It panics if the value is a BSON
// type other than DBPointer.
func (rv RawValue) DBPointer() (string, primitive.ObjectID) { return convertToCoreValue(rv).DBPointer() }

// DBPointerOK is the same as DBPointer, except that it returns a boolean
// instead of panicking.
func (rv RawValue) DBPointerOK() (string, primitive.ObjectID, bool) {
	return convertToCoreValue(rv).DBPointerOK()
}

// JavaScript returns the BSON JavaScript code value the Value represents. It panics if the value is
// a BSON type other than JavaScript code.
func (rv RawValue) JavaScript() string { return convertToCoreValue(rv).JavaScript() }

// JavaScriptOK is the same as JavaScript, except that it returns a boolean
// instead of panicking.
func (rv RawValue) JavaScriptOK() (string, bool) { return convertToCoreValue(rv).JavaScriptOK() }

// Symbol returns the BSON symbol value the Value represents. It panics if the value is a BSON
// type other than symbol.
func (rv RawValue) Symbol() string { return convertToCoreValue(rv).Symbol() }

// SymbolOK is the same as Symbol, except that it returns a boolean
// instead of panicking.
func (rv RawValue) SymbolOK() (string, bool) { return convertToCoreValue(rv).SymbolOK() }

// CodeWithScope returns the BSON JavaScript code with scope the Value represents.
// It panics if the value is a BSON type other than JavaScript code with scope.
func (rv RawValue) CodeWithScope() (string, Raw) {
	code, scope := convertToCoreValue(rv).CodeWithScope()
	return code, Raw(scope)
}

// CodeWithScopeOK is the same as CodeWithScope, except that it returns a boolean instead of
// panicking.
func (rv RawValue) CodeWithScopeOK() (string, Raw, bool) {
	code, scope, ok := convertToCoreValue(rv).CodeWithScopeOK()
	return code, Raw(scope), ok
}

// Int32 returns the int32 the Value represents. It panics if the value is a BSON type other than
// int32.
func (rv RawValue) Int32() int32 { return convertToCoreValue(rv).Int32() }

// Int32OK is the same as Int32, except that it returns a boolean instead of
// panicking.
func (rv RawValue) Int32OK() (int32, bool) { return convertToCoreValue(rv).Int32OK() }

// Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a
// BSON type other than timestamp.
func (rv RawValue) Timestamp() (t, i uint32) { return convertToCoreValue(rv).Timestamp() }

// TimestampOK is the same as Timestamp, except that it returns a boolean
// instead of panicking.
func (rv RawValue) TimestampOK() (t, i uint32, ok bool) { return convertToCoreValue(rv).TimestampOK() }

// Int64 returns the int64 the Value represents. It panics if the value is a BSON type other than
// int64.
func (rv RawValue) Int64() int64 { return convertToCoreValue(rv).Int64() }

// Int64OK is the same as Int64, except that it returns a boolean instead of
// panicking.
func (rv RawValue) Int64OK() (int64, bool) { return convertToCoreValue(rv).Int64OK() }

// Decimal128 returns the decimal the Value represents. It panics if the value is a BSON type other than
// decimal.
func (rv RawValue) Decimal128() primitive.Decimal128 { return convertToCoreValue(rv).Decimal128() }

// Decimal128OK is the same as Decimal128, except that it returns a boolean
// instead of panicking.
func (rv RawValue) Decimal128OK() (primitive.Decimal128, bool) {
	return convertToCoreValue(rv).Decimal128OK()
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/registry.go b/vendor/github.com/mongodb/mongo-go-driver/bson/registry.go
new file mode 100644
index 0000000..c9ef029
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/registry.go
@@ -0,0 +1,24 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import "github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+
+// DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the
+// primitive codecs.
+var DefaultRegistry = NewRegistryBuilder().Build()
+
+// NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and
+// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the
+// PrimitiveCodecs type in this package.
+func NewRegistryBuilder() *bsoncodec.RegistryBuilder {
+	rb := bsoncodec.NewRegistryBuilder()
+	bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
+	bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
+	primitiveCodecs.RegisterPrimitiveCodecs(rb)
+	return rb
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/types.go b/vendor/github.com/mongodb/mongo-go-driver/bson/types.go
new file mode 100644
index 0000000..7109254
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/types.go
@@ -0,0 +1,85 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"reflect"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// These constants uniquely refer to each BSON type; they are aliases for the
+// corresponding constants defined in the bsontype package.
+const (
+	TypeDouble           = bsontype.Double
+	TypeString           = bsontype.String
+	TypeEmbeddedDocument = bsontype.EmbeddedDocument
+	TypeArray            = bsontype.Array
+	TypeBinary           = bsontype.Binary
+	TypeUndefined        = bsontype.Undefined
+	TypeObjectID         = bsontype.ObjectID
+	TypeBoolean          = bsontype.Boolean
+	TypeDateTime         = bsontype.DateTime
+	TypeNull             = bsontype.Null
+	TypeRegex            = bsontype.Regex
+	TypeDBPointer        = bsontype.DBPointer
+	TypeJavaScript       = bsontype.JavaScript
+	TypeSymbol           = bsontype.Symbol
+	TypeCodeWithScope    = bsontype.CodeWithScope
+	TypeInt32            = bsontype.Int32
+	TypeTimestamp        = bsontype.Timestamp
+	TypeInt64            = bsontype.Int64
+	TypeDecimal128       = bsontype.Decimal128
+	TypeMinKey           = bsontype.MinKey
+	TypeMaxKey           = bsontype.MaxKey
+)
+
+// Cached reflect.Type values for the types the codecs handle, computed once at
+// package initialization so type comparisons avoid repeated reflect.TypeOf calls.
+var tBinary = reflect.TypeOf(primitive.Binary{})
+var tBool = reflect.TypeOf(false)
+var tCodeWithScope = reflect.TypeOf(primitive.CodeWithScope{})
+var tDBPointer = reflect.TypeOf(primitive.DBPointer{})
+var tDecimal = reflect.TypeOf(primitive.Decimal128{})
+var tD = reflect.TypeOf(D{})
+var tA = reflect.TypeOf(A{})
+var tDateTime = reflect.TypeOf(primitive.DateTime(0))
+var tUndefined = reflect.TypeOf(primitive.Undefined{})
+var tNull = reflect.TypeOf(primitive.Null{})
+var tRawValue = reflect.TypeOf(RawValue{})
+var tFloat32 = reflect.TypeOf(float32(0))
+var tFloat64 = reflect.TypeOf(float64(0))
+var tInt = reflect.TypeOf(int(0))
+var tInt8 = reflect.TypeOf(int8(0))
+var tInt16 = reflect.TypeOf(int16(0))
+var tInt32 = reflect.TypeOf(int32(0))
+var tInt64 = reflect.TypeOf(int64(0))
+var tJavaScript = reflect.TypeOf(primitive.JavaScript(""))
+var tOID = reflect.TypeOf(primitive.ObjectID{})
+var tRaw = reflect.TypeOf(Raw(nil))
+var tRegex = reflect.TypeOf(primitive.Regex{})
+var tString = reflect.TypeOf("")
+var tSymbol = reflect.TypeOf(primitive.Symbol(""))
+var tTime = reflect.TypeOf(time.Time{})
+var tTimestamp = reflect.TypeOf(primitive.Timestamp{})
+var tUint = reflect.TypeOf(uint(0))
+var tUint8 = reflect.TypeOf(uint8(0))
+var tUint16 = reflect.TypeOf(uint16(0))
+var tUint32 = reflect.TypeOf(uint32(0))
+var tUint64 = reflect.TypeOf(uint64(0))
+var tMinKey = reflect.TypeOf(primitive.MinKey{})
+var tMaxKey = reflect.TypeOf(primitive.MaxKey{})
+
+// tEmpty is the empty-interface type; tEmptySlice is a slice of empty interfaces.
+var tEmpty = reflect.TypeOf((*interface{})(nil)).Elem()
+var tEmptySlice = reflect.TypeOf([]interface{}(nil))
+
+// zeroVal is the zero (invalid) reflect.Value, used as a "no value" sentinel.
+var zeroVal reflect.Value
+
+// zeroEpochMs is the quantity of milliseconds between the zero time
+// (January 1, year 1) and the Unix epoch. It is used to make sure that
+// we convert time.Time objects correctly, matching the legacy bson
+// library's handling of time.Time values.
+const zeroEpochMs = int64(62135596800000)
diff --git a/vendor/github.com/mongodb/mongo-go-driver/bson/unmarshal.go b/vendor/github.com/mongodb/mongo-go-driver/bson/unmarshal.go
new file mode 100644
index 0000000..2b3cca8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/bson/unmarshal.go
@@ -0,0 +1,101 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bson
+
+import (
+	"bytes"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+// Unmarshaler is an interface implemented by types that can unmarshal a BSON
+// document representation of themselves. The BSON bytes can be assumed to be
+// valid. UnmarshalBSON must copy the BSON bytes if it wishes to retain the data
+// after returning.
+type Unmarshaler interface {
+	UnmarshalBSON([]byte) error
+}
+
+// ValueUnmarshaler is an interface implemented by types that can unmarshal a
+// BSON value representation of themselves. The BSON bytes and type can be
+// assumed to be valid. UnmarshalBSONValue must copy the BSON value bytes if it
+// wishes to retain the data after returning.
+type ValueUnmarshaler interface {
+	UnmarshalBSONValue(bsontype.Type, []byte) error
+}
+
+// Unmarshal parses the BSON-encoded data and stores the result in the value
+// pointed to by val. If val is nil or not a pointer, Unmarshal returns
+// InvalidUnmarshalError.
+func Unmarshal(data []byte, val interface{}) error {
+	return UnmarshalWithRegistry(DefaultRegistry, data, val)
+}
+
+// UnmarshalWithRegistry parses the BSON-encoded data using Registry r and
+// stores the result in the value pointed to by val. If val is nil or not
+// a pointer, UnmarshalWithRegistry returns InvalidUnmarshalError.
+func UnmarshalWithRegistry(r *bsoncodec.Registry, data []byte, val interface{}) error {
+	vr := bsonrw.NewBSONDocumentReader(data)
+	return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, vr, val)
+}
+
+// UnmarshalWithContext parses the BSON-encoded data using DecodeContext dc and
+// stores the result in the value pointed to by val. If val is nil or not
+// a pointer, UnmarshalWithContext returns InvalidUnmarshalError.
+func UnmarshalWithContext(dc bsoncodec.DecodeContext, data []byte, val interface{}) error {
+	vr := bsonrw.NewBSONDocumentReader(data)
+	return unmarshalFromReader(dc, vr, val)
+}
+
+// UnmarshalExtJSON parses the extended JSON-encoded data and stores the result
+// in the value pointed to by val. If val is nil or not a pointer,
+// UnmarshalExtJSON returns InvalidUnmarshalError.
+func UnmarshalExtJSON(data []byte, canonical bool, val interface{}) error {
+	return UnmarshalExtJSONWithRegistry(DefaultRegistry, data, canonical, val)
+}
+
+// UnmarshalExtJSONWithRegistry parses the extended JSON-encoded data using
+// Registry r and stores the result in the value pointed to by val. If val is
+// nil or not a pointer, UnmarshalExtJSONWithRegistry returns InvalidUnmarshalError.
+func UnmarshalExtJSONWithRegistry(r *bsoncodec.Registry, data []byte, canonical bool, val interface{}) error {
+	ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical)
+	if err != nil {
+		return err
+	}
+
+	return unmarshalFromReader(bsoncodec.DecodeContext{Registry: r}, ejvr, val)
+}
+
+// UnmarshalExtJSONWithContext parses the extended JSON-encoded data using
+// DecodeContext dc and stores the result in the value pointed to by val. If
+// val is nil or not a pointer, UnmarshalExtJSONWithContext returns
+// InvalidUnmarshalError.
+func UnmarshalExtJSONWithContext(dc bsoncodec.DecodeContext, data []byte, canonical bool, val interface{}) error {
+	ejvr, err := bsonrw.NewExtJSONValueReader(bytes.NewReader(data), canonical)
+	if err != nil {
+		return err
+	}
+
+	return unmarshalFromReader(dc, ejvr, val)
+}
+
+// unmarshalFromReader decodes the contents of vr into val using a Decoder
+// taken from (and returned to) the package-level decoder pool.
+func unmarshalFromReader(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val interface{}) error {
+	dec := decPool.Get().(*Decoder)
+	defer decPool.Put(dec)
+
+	err := dec.Reset(vr)
+	if err != nil {
+		return err
+	}
+	err = dec.SetContext(dc)
+	if err != nil {
+		return err
+	}
+
+	return dec.Decode(val)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/etc/generate-notices.pl b/vendor/github.com/mongodb/mongo-go-driver/etc/generate-notices.pl
new file mode 100755
index 0000000..bebc7db
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/etc/generate-notices.pl
@@ -0,0 +1,99 @@
+#!/usr/bin/env perl
+use v5.10;
+use strict;
+use warnings;
+use utf8;
+use open qw/:std :utf8/;
+use File::Find qw/find/;
+
+my @license_files;
+
+# Collect (directory, full path) pairs for every file named "LICENSE"
+# (any case) under the vendor tree.
+find(
+    sub {
+        return unless lc($_) eq 'license';
+        push @license_files, [ $File::Find::dir, $File::Find::name ];
+    },
+    'vendor'
+);
+
+# Licenses for forked code come first, then one section per vendored
+# package, sorted by directory for stable output.
+print forked_licenses();
+
+for my $entry (sort { $a->[0] cmp $b->[0] } @license_files) {
+    ( my $package_name = $entry->[0] ) =~ s{vendor/}{};
+    my $license_text = do { local ( @ARGV, $/ ) = $entry->[1]; <> };
+    $license_text =~ s/ +$//mg;    # strip trailing whitespace
+    say "";    # blank separator (the original `"" x 70` was a no-op repetition of the empty string)
+    say "-" x 70;
+    say "License notice for $package_name";
+    say "-" x 70;
+    say "";
+    print $license_text;
+}
+
+# These licenses are the originals for forked code (mgo's bson library and the
+# Go project's JSON/CSV code); they must be included along with the licenses
+# collected from the vendor directory.
+sub forked_licenses {
+    return <<'HERE';
+---------------------------------------------------------------------
+License notice for gopkg.in/mgo.v2/bson
+---------------------------------------------------------------------
+
+BSON library for Go
+
+Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+---------------------------------------------------------------------
+License notice for JSON and CSV code from github.com/golang/go
+---------------------------------------------------------------------
+
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+HERE
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/event/monitoring.go b/vendor/github.com/mongodb/mongo-go-driver/event/monitoring.go
new file mode 100644
index 0000000..293fd40
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/event/monitoring.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package event
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+)
+
+// CommandStartedEvent represents an event generated when a command is sent to a server.
+type CommandStartedEvent struct {
+	Command      bson.Raw
+	DatabaseName string
+	CommandName  string
+	RequestID    int64
+	ConnectionID string
+}
+
+// CommandFinishedEvent represents a generic command finishing. It is embedded
+// by CommandSucceededEvent and CommandFailedEvent.
+type CommandFinishedEvent struct {
+	DurationNanos int64
+	CommandName   string
+	RequestID     int64
+	ConnectionID  string
+}
+
+// CommandSucceededEvent represents an event generated when a command's execution succeeds.
+type CommandSucceededEvent struct {
+	CommandFinishedEvent
+	Reply bson.Raw
+}
+
+// CommandFailedEvent represents an event generated when a command's execution fails.
+type CommandFailedEvent struct {
+	CommandFinishedEvent
+	Failure string
+}
+
+// CommandMonitor represents a monitor that is triggered for different events.
+// Each callback, when non-nil, is invoked with the corresponding event.
+type CommandMonitor struct {
+	Started   func(context.Context, *CommandStartedEvent)
+	Succeeded func(context.Context, *CommandSucceededEvent)
+	Failed    func(context.Context, *CommandFailedEvent)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/internal/channel_connection.go b/vendor/github.com/mongodb/mongo-go-driver/internal/channel_connection.go
new file mode 100644
index 0000000..770cb33
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/internal/channel_connection.go
@@ -0,0 +1,74 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package internal
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+	"errors"
+	"fmt"
+)
+
+// ChannelConn implements the connection.Connection interface by reading and
+// writing wire messages to in-memory channels; the fixed ID ("faked") suggests
+// it is primarily a test fake.
+type ChannelConn struct {
+	WriteErr error                        // error recorded by WriteWireMessage when Written is full
+	Written  chan wiremessage.WireMessage // messages "sent" on the connection
+	ReadResp chan wiremessage.WireMessage // queued responses returned by ReadWireMessage
+	ReadErr  chan error                   // queued read errors returned by ReadWireMessage
+}
+
+// WriteWireMessage performs a non-blocking send of wm on the Written channel.
+// If the channel is full, an error is recorded in WriteErr and returned.
+// NOTE(review): WriteErr is sticky — once set, it is returned by every
+// subsequent call even when the send succeeds; confirm this is intended.
+func (c *ChannelConn) WriteWireMessage(ctx context.Context, wm wiremessage.WireMessage) error {
+	select {
+	case c.Written <- wm:
+	default:
+		c.WriteErr = errors.New("could not write wiremessage to written channel")
+	}
+	return c.WriteErr
+}
+
+// ReadWireMessage returns the next queued response or error, blocking until a
+// value is available on ReadResp or ReadErr, or until ctx is done — in which
+// case the context's error is returned.
+func (c *ChannelConn) ReadWireMessage(ctx context.Context) (wiremessage.WireMessage, error) {
+	var wm wiremessage.WireMessage
+	var err error
+	select {
+	case wm = <-c.ReadResp:
+	case err = <-c.ReadErr:
+	case <-ctx.Done():
+		// Previously cancellation was silently swallowed and (nil, nil) was
+		// returned; surface the context error to the caller instead.
+		err = ctx.Err()
+	}
+	return wm, err
+}
+
+// Close is a no-op; a ChannelConn holds no underlying resources to release.
+func (c *ChannelConn) Close() error {
+	return nil
+}
+
+// Expired always reports false: a ChannelConn never expires.
+func (c *ChannelConn) Expired() bool {
+	return false
+}
+
+// Alive always reports true.
+func (c *ChannelConn) Alive() bool {
+	return true
+}
+
+// ID returns a fixed placeholder identifier for the fake connection.
+func (c *ChannelConn) ID() string {
+	return "faked"
+}
+
+// MakeReply creates an OP_REPLY wire message whose single returned document is
+// the marshaled form of doc.
+func MakeReply(doc bsonx.Doc) (wiremessage.WireMessage, error) {
+	rdr, err := doc.MarshalBSON()
+	if err != nil {
+		// fmt.Errorf replaces the errors.New(fmt.Sprintf(...)) anti-pattern
+		// (staticcheck S1028).
+		return nil, fmt.Errorf("could not create document: %v", err)
+	}
+	return wiremessage.Reply{
+		NumberReturned: 1,
+		Documents:      []bson.Raw{rdr},
+	}, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/internal/const.go b/vendor/github.com/mongodb/mongo-go-driver/internal/const.go
new file mode 100644
index 0000000..7100e31
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/internal/const.go
@@ -0,0 +1,10 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package internal
+
+// Version is the current version of the driver. It is declared as a var
+// (not a const) presumably so release tooling can override it at build
+// time — TODO confirm; it defaults to "local build".
+var Version = "local build"
diff --git a/vendor/github.com/mongodb/mongo-go-driver/internal/error.go b/vendor/github.com/mongodb/mongo-go-driver/internal/error.go
new file mode 100644
index 0000000..6a105af
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/internal/error.go
@@ -0,0 +1,119 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package internal
+
+import (
+	"fmt"
+)
+
+// WrappedError represents an error that contains another error.
+type WrappedError interface {
+	// Message gets the basic message of the error.
+	Message() string
+	// Inner gets the inner error if one exists.
+	Inner() error
+}
+
+// RolledUpErrorMessage gets a flattened error message: each wrapper's message
+// followed by ": " and, recursively, the message of its inner error.
+func RolledUpErrorMessage(err error) string {
+	if wrappedErr, ok := err.(WrappedError); ok {
+		inner := wrappedErr.Inner()
+		if inner != nil {
+			return fmt.Sprintf("%s: %s", wrappedErr.Message(), RolledUpErrorMessage(inner))
+		}
+
+		return wrappedErr.Message()
+	}
+
+	return err.Error()
+}
+
+// UnwrapError attempts to unwrap the error down to its root cause.
+func UnwrapError(err error) error {
+
+	switch tErr := err.(type) {
+	case WrappedError:
+		return UnwrapError(tErr.Inner())
+	case *multiError:
+		// Only the first error is followed; the rest are dropped. MultiError
+		// never constructs a multiError with zero errors, so indexing element
+		// 0 is safe for values built through that constructor.
+		return UnwrapError(tErr.errors[0])
+	}
+
+	return err
+}
+
+// WrapError wraps an error with a message.
+func WrapError(inner error, message string) error {
+	return &wrappedError{message, inner}
+}
+
+// WrapErrorf wraps an error with a message built from a format specifier and
+// its arguments.
+func WrapErrorf(inner error, format string, args ...interface{}) error {
+	return &wrappedError{fmt.Sprintf(format, args...), inner}
+}
+
+// MultiError combines multiple errors into a single error. If there are no errors,
+// nil is returned. If there is 1 error, it is returned. Otherwise, they are
+// combined into a *multiError.
+func MultiError(errors ...error) error {
+
+	// remove nils from the error list
+	var nonNils []error
+	for _, e := range errors {
+		if e != nil {
+			nonNils = append(nonNils, e)
+		}
+	}
+
+	switch len(nonNils) {
+	case 0:
+		return nil
+	case 1:
+		return nonNils[0]
+	default:
+		return &multiError{
+			message: "multiple errors encountered",
+			errors:  nonNils,
+		}
+	}
+}
+
+// multiError is the error type returned by MultiError when more than one
+// non-nil error was supplied.
+type multiError struct {
+	message string
+	errors  []error
+}
+
+// Message returns the summary message.
+func (e *multiError) Message() string {
+	return e.message
+}
+
+// Error returns the summary message followed by each contained error on its
+// own indented line.
+func (e *multiError) Error() string {
+	result := e.message
+	// NOTE(review): the loop variable shadows the receiver e.
+	for _, e := range e.errors {
+		result += fmt.Sprintf("\n  %s", e)
+	}
+	return result
+}
+
+// Errors returns the underlying slice of contained errors.
+func (e *multiError) Errors() []error {
+	return e.errors
+}
+
+// wrappedError pairs a contextual message with an inner error; it implements
+// the WrappedError interface.
+type wrappedError struct {
+	message string
+	inner   error
+}
+
+// Message returns the outer contextual message.
+func (e *wrappedError) Message() string {
+	return e.message
+}
+
+// Error returns the fully flattened message (see RolledUpErrorMessage).
+func (e *wrappedError) Error() string {
+	return RolledUpErrorMessage(e)
+}
+
+// Inner returns the wrapped error, which may be nil.
+func (e *wrappedError) Inner() error {
+	return e.inner
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/internal/results.go b/vendor/github.com/mongodb/mongo-go-driver/internal/results.go
new file mode 100644
index 0000000..7879ebe
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/internal/results.go
@@ -0,0 +1,54 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package internal
+
+import (
+	"time"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// IsMasterResult is the result of executing the
+// ismaster command. (Fields realigned: the original column alignment was not
+// gofmt-clean after the primitive.ObjectID field.)
+type IsMasterResult struct {
+	Arbiters            []string           `bson:"arbiters,omitempty"`
+	ArbiterOnly         bool               `bson:"arbiterOnly,omitempty"`
+	ElectionID          primitive.ObjectID `bson:"electionId,omitempty"`
+	Hidden              bool               `bson:"hidden,omitempty"`
+	Hosts               []string           `bson:"hosts,omitempty"`
+	IsMaster            bool               `bson:"ismaster,omitempty"`
+	IsReplicaSet        bool               `bson:"isreplicaset,omitempty"`
+	LastWriteTimestamp  time.Time          `bson:"lastWriteDate,omitempty"`
+	MaxBSONObjectSize   uint32             `bson:"maxBsonObjectSize,omitempty"`
+	MaxMessageSizeBytes uint32             `bson:"maxMessageSizeBytes,omitempty"`
+	MaxWriteBatchSize   uint16             `bson:"maxWriteBatchSize,omitempty"`
+	Me                  string             `bson:"me,omitempty"`
+	MaxWireVersion      int32              `bson:"maxWireVersion,omitempty"`
+	MinWireVersion      int32              `bson:"minWireVersion,omitempty"`
+	Msg                 string             `bson:"msg,omitempty"`
+	OK                  int32              `bson:"ok"`
+	Passives            []string           `bson:"passives,omitempty"`
+	ReadOnly            bool               `bson:"readOnly,omitempty"`
+	Secondary           bool               `bson:"secondary,omitempty"`
+	SetName             string             `bson:"setName,omitempty"`
+	SetVersion          uint32             `bson:"setVersion,omitempty"`
+	Tags                map[string]string  `bson:"tags,omitempty"`
+}
+
+// BuildInfoResult is the result of executing the
+// buildInfo command.
+type BuildInfoResult struct {
+	OK           bool    `bson:"ok"`
+	GitVersion   string  `bson:"gitVersion,omitempty"`
+	Version      string  `bson:"version,omitempty"`
+	// NOTE(review): components are modeled as uint8; confirm the server never
+	// reports a version component outside 0-255.
+	VersionArray []uint8 `bson:"versionArray,omitempty"`
+}
+
+// GetLastErrorResult is the result of executing the
+// getLastError command.
+type GetLastErrorResult struct {
+	ConnectionID uint32 `bson:"connectionId"`
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/internal/semaphore.go b/vendor/github.com/mongodb/mongo-go-driver/internal/semaphore.go
new file mode 100644
index 0000000..792e531
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/internal/semaphore.go
@@ -0,0 +1,57 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package internal
+
+import (
+	"context"
+	"errors"
+)
+
+// NewSemaphore creates a new semaphore with slots permits, all of which are
+// initially available.
+func NewSemaphore(slots uint64) *Semaphore {
+	ch := make(chan struct{}, slots)
+	for i := uint64(0); i < slots; i++ {
+		ch <- struct{}{}
+	}
+
+	return &Semaphore{
+		permits: ch,
+	}
+}
+
+// Semaphore is a synchronization primitive that controls access
+// to a common resource. Each buffered element in permits represents one
+// available permit.
+type Semaphore struct {
+	permits chan struct{}
+}
+
+// Len gets the number of permits currently available.
+func (s *Semaphore) Len() uint64 {
+	return uint64(len(s.permits))
+}
+
+// Wait waits until a permit is available or until the context
+// is done, in which case the context's error is returned.
+func (s *Semaphore) Wait(ctx context.Context) error {
+	select {
+	case <-s.permits:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+// Release releases a permit back into the pool. It returns an error if the
+// pool is already full, i.e. more releases than successful waits.
+func (s *Semaphore) Release() error {
+	select {
+	case s.permits <- struct{}{}:
+	default:
+		return errors.New("internal.Semaphore.Release: attempt to release more resources than are available")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/batch_cursor.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/batch_cursor.go
new file mode 100644
index 0000000..a7ce0b5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/batch_cursor.go
@@ -0,0 +1,30 @@
+package mongo
+
+import (
+	"context"
+)
+
+// batchCursor is the interface implemented by types that can provide batches of document results.
+// The Cursor type is built on top of this type.
+type batchCursor interface {
+	// ID returns the ID of the cursor.
+	ID() int64
+
+	// Next returns true if there is a batch available.
+	Next(context.Context) bool
+
+	// Batch appends the current batch of documents to dst. RequiredBytes can be used to determine
+	// the length of the current batch of documents.
+	//
+	// If there is no batch available, this method should do nothing.
+	Batch(dst []byte) []byte
+
+	// RequiredBytes returns the number of bytes required for the current batch.
+	RequiredBytes() int
+
+	// Err returns the last error encountered.
+	Err() error
+
+	// Close closes the cursor.
+	Close(context.Context) error
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/bulk_write.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/bulk_write.go
new file mode 100644
index 0000000..f086189
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/bulk_write.go
@@ -0,0 +1,341 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+)
+
+// WriteModel is the interface satisfied by all models for bulk writes.
+type WriteModel interface {
+	// convertModel converts the public model into its driver-layer equivalent.
+	convertModel() driver.WriteModel
+}
+
+// InsertOneModel is the write model for insert operations.
+type InsertOneModel struct {
+	Document interface{}
+}
+
+// NewInsertOneModel creates a new InsertOneModel.
+func NewInsertOneModel() *InsertOneModel {
+	return &InsertOneModel{}
+}
+
+// SetDocument sets the BSON document for the InsertOneModel.
+func (iom *InsertOneModel) SetDocument(doc interface{}) *InsertOneModel {
+	iom.Document = doc
+	return iom
+}
+
+// convertModel converts the InsertOneModel into its driver-layer equivalent.
+func (iom *InsertOneModel) convertModel() driver.WriteModel {
+	return driver.InsertOneModel{
+		Document: iom.Document,
+	}
+}
+
+// DeleteOneModel is the write model for delete operations.
+type DeleteOneModel struct {
+	Filter    interface{}
+	Collation *options.Collation
+}
+
+// NewDeleteOneModel creates a new DeleteOneModel.
+func NewDeleteOneModel() *DeleteOneModel {
+	return &DeleteOneModel{}
+}
+
+// SetFilter sets the filter for the DeleteOneModel.
+func (dom *DeleteOneModel) SetFilter(filter interface{}) *DeleteOneModel {
+	dom.Filter = filter
+	return dom
+}
+
+// SetCollation sets the collation for the DeleteOneModel.
+func (dom *DeleteOneModel) SetCollation(collation *options.Collation) *DeleteOneModel {
+	dom.Collation = collation
+	return dom
+}
+
+// convertModel converts the DeleteOneModel into its driver-layer equivalent.
+func (dom *DeleteOneModel) convertModel() driver.WriteModel {
+	return driver.DeleteOneModel{
+		Collation: dom.Collation,
+		Filter:    dom.Filter,
+	}
+}
+
+// DeleteManyModel is the write model for deleteMany operations.
+type DeleteManyModel struct {
+	Filter    interface{}
+	Collation *options.Collation
+}
+
+// NewDeleteManyModel creates a new DeleteManyModel.
+func NewDeleteManyModel() *DeleteManyModel {
+	return &DeleteManyModel{}
+}
+
+// SetFilter sets the filter for the DeleteManyModel.
+func (dmm *DeleteManyModel) SetFilter(filter interface{}) *DeleteManyModel {
+	dmm.Filter = filter
+	return dmm
+}
+
+// SetCollation sets the collation for the DeleteManyModel.
+func (dmm *DeleteManyModel) SetCollation(collation *options.Collation) *DeleteManyModel {
+	dmm.Collation = collation
+	return dmm
+}
+
+// convertModel converts the DeleteManyModel into its driver-layer equivalent.
+func (dmm *DeleteManyModel) convertModel() driver.WriteModel {
+	return driver.DeleteManyModel{
+		Collation: dmm.Collation,
+		Filter:    dmm.Filter,
+	}
+}
+
+// ReplaceOneModel is the write model for replace operations.
+type ReplaceOneModel struct {
+	Collation   *options.Collation
+	Upsert      *bool // nil means "not set", distinct from false
+	Filter      interface{}
+	Replacement interface{}
+}
+
+// NewReplaceOneModel creates a new ReplaceOneModel.
+func NewReplaceOneModel() *ReplaceOneModel {
+	return &ReplaceOneModel{}
+}
+
+// SetFilter sets the filter for the ReplaceOneModel.
+func (rom *ReplaceOneModel) SetFilter(filter interface{}) *ReplaceOneModel {
+	rom.Filter = filter
+	return rom
+}
+
+// SetReplacement sets the replacement document for the ReplaceOneModel.
+func (rom *ReplaceOneModel) SetReplacement(rep interface{}) *ReplaceOneModel {
+	rom.Replacement = rep
+	return rom
+}
+
+// SetCollation sets the collation for the ReplaceOneModel.
+func (rom *ReplaceOneModel) SetCollation(collation *options.Collation) *ReplaceOneModel {
+	rom.Collation = collation
+	return rom
+}
+
+// SetUpsert specifies if a new document should be created if no document matches the query.
+func (rom *ReplaceOneModel) SetUpsert(upsert bool) *ReplaceOneModel {
+	rom.Upsert = &upsert
+	return rom
+}
+
+// convertModel converts the ReplaceOneModel into its driver-layer equivalent.
+// UpsertSet records whether the caller explicitly set Upsert, so an unset
+// option can be distinguished from an explicit false.
+func (rom *ReplaceOneModel) convertModel() driver.WriteModel {
+	um := driver.UpdateModel{
+		Collation: rom.Collation,
+	}
+	if rom.Upsert != nil {
+		um.Upsert = *rom.Upsert
+		um.UpsertSet = true
+	}
+
+	return driver.ReplaceOneModel{
+		UpdateModel: um,
+		Filter:      rom.Filter,
+		Replacement: rom.Replacement,
+	}
+}
+
+// UpdateOneModel is the write model for update operations.
+type UpdateOneModel struct {
+	Collation    *options.Collation
+	Upsert       *bool
+	Filter       interface{}
+	Update       interface{}
+	ArrayFilters *options.ArrayFilters
+}
+
+// NewUpdateOneModel creates a new UpdateOneModel.
+func NewUpdateOneModel() *UpdateOneModel {
+	return &UpdateOneModel{}
+}
+
+// SetFilter sets the filter for the UpdateOneModel.
+func (uom *UpdateOneModel) SetFilter(filter interface{}) *UpdateOneModel {
+	uom.Filter = filter
+	return uom
+}
+
+// SetUpdate sets the update document for the UpdateOneModel.
+func (uom *UpdateOneModel) SetUpdate(update interface{}) *UpdateOneModel {
+	uom.Update = update
+	return uom
+}
+
+// SetArrayFilters specifies a set of filters specifying to which array elements an update should apply.
+func (uom *UpdateOneModel) SetArrayFilters(filters options.ArrayFilters) *UpdateOneModel {
+	uom.ArrayFilters = &filters
+	return uom
+}
+
+// SetCollation sets the collation for the UpdateOneModel.
+func (uom *UpdateOneModel) SetCollation(collation *options.Collation) *UpdateOneModel {
+	uom.Collation = collation
+	return uom
+}
+
+// SetUpsert specifies if a new document should be created if no document matches the query.
+// It returns the model so calls can be chained.
+func (uom *UpdateOneModel) SetUpsert(upsert bool) *UpdateOneModel {
+	// Stored as a pointer so convertModel can distinguish an explicit false
+	// from "not specified".
+	uom.Upsert = &upsert
+	return uom
+}
+
+// convertModel translates this UpdateOneModel into the driver-level write
+// model used when building the bulk write command.
+func (uom *UpdateOneModel) convertModel() driver.WriteModel {
+	um := driver.UpdateModel{
+		Collation: uom.Collation,
+	}
+	if uom.Upsert != nil {
+		// The *Set flag records that the caller provided a value, so an
+		// explicit false/zero is distinguishable from "unset".
+		um.Upsert = *uom.Upsert
+		um.UpsertSet = true
+	}
+
+	converted := driver.UpdateOneModel{
+		UpdateModel: um,
+		Filter:      uom.Filter,
+		Update:      uom.Update,
+	}
+	if uom.ArrayFilters != nil {
+		converted.ArrayFilters = *uom.ArrayFilters
+		converted.ArrayFiltersSet = true
+	}
+
+	return converted
+}
+
+// UpdateManyModel is the write model for updateMany operations.
+// All fields are optional except Filter and Update; pointer fields use nil to
+// mean "not specified" (see the corresponding Set methods).
+type UpdateManyModel struct {
+	Collation    *options.Collation // optional collation; nil means unspecified
+	Upsert       *bool // nil means unspecified; see SetUpsert
+	Filter       interface{} // document selector for the update
+	Update       interface{} // update document to apply
+	ArrayFilters *options.ArrayFilters // optional filters selecting array elements to update
+}
+
+// NewUpdateManyModel creates a new UpdateManyModel, ready to be configured via
+// its chainable Set methods.
+func NewUpdateManyModel() *UpdateManyModel {
+	return &UpdateManyModel{}
+}
+
+// SetFilter sets the filter for the UpdateManyModel. It returns the model so
+// calls can be chained.
+func (umm *UpdateManyModel) SetFilter(filter interface{}) *UpdateManyModel {
+	umm.Filter = filter
+	return umm
+}
+
+// SetUpdate sets the update document for the UpdateManyModel. It returns the
+// model so calls can be chained.
+func (umm *UpdateManyModel) SetUpdate(update interface{}) *UpdateManyModel {
+	umm.Update = update
+	return umm
+}
+
+// SetArrayFilters specifies a set of filters specifying to which array elements an update should apply.
+// It returns the model so calls can be chained.
+func (umm *UpdateManyModel) SetArrayFilters(filters options.ArrayFilters) *UpdateManyModel {
+	// Address-of the value parameter so nil still means "unset".
+	umm.ArrayFilters = &filters
+	return umm
+}
+
+// SetCollation sets the collation for the UpdateManyModel. It returns the
+// model so calls can be chained.
+func (umm *UpdateManyModel) SetCollation(collation *options.Collation) *UpdateManyModel {
+	umm.Collation = collation
+	return umm
+}
+
+// SetUpsert specifies if a new document should be created if no document matches the query.
+// It returns the model so calls can be chained.
+func (umm *UpdateManyModel) SetUpsert(upsert bool) *UpdateManyModel {
+	// Stored as a pointer so convertModel can distinguish an explicit false
+	// from "not specified".
+	umm.Upsert = &upsert
+	return umm
+}
+
+// convertModel translates this UpdateManyModel into the driver-level write
+// model used when building the bulk write command.
+func (umm *UpdateManyModel) convertModel() driver.WriteModel {
+	um := driver.UpdateModel{
+		Collation: umm.Collation,
+	}
+	if umm.Upsert != nil {
+		// The *Set flag records that the caller provided a value, so an
+		// explicit false/zero is distinguishable from "unset".
+		um.Upsert = *umm.Upsert
+		um.UpsertSet = true
+	}
+
+	converted := driver.UpdateManyModel{
+		UpdateModel: um,
+		Filter:      umm.Filter,
+		Update:      umm.Update,
+	}
+	if umm.ArrayFilters != nil {
+		converted.ArrayFilters = *umm.ArrayFilters
+		converted.ArrayFiltersSet = true
+	}
+
+	return converted
+}
+
+// dispatchToMongoModel converts a driver-level write model back into the
+// corresponding mongo-level WriteModel (the inverse of convertModel).
+// Optional fields guarded by *Set flags are only copied when the flag shows
+// the caller originally supplied them. Returns nil for an unrecognized type.
+func dispatchToMongoModel(model driver.WriteModel) WriteModel {
+	switch conv := model.(type) {
+	case driver.InsertOneModel:
+		return &InsertOneModel{
+			Document: conv.Document,
+		}
+	case driver.DeleteOneModel:
+		return &DeleteOneModel{
+			Filter:    conv.Filter,
+			Collation: conv.Collation,
+		}
+	case driver.DeleteManyModel:
+		return &DeleteManyModel{
+			Filter:    conv.Filter,
+			Collation: conv.Collation,
+		}
+	case driver.ReplaceOneModel:
+		rom := &ReplaceOneModel{
+			Filter:      conv.Filter,
+			Replacement: conv.Replacement,
+			Collation:   conv.Collation,
+		}
+		if conv.UpsertSet {
+			rom.Upsert = &conv.Upsert
+		}
+		return rom
+	case driver.UpdateOneModel:
+		uom := &UpdateOneModel{
+			Filter:    conv.Filter,
+			Update:    conv.Update,
+			Collation: conv.Collation,
+		}
+		if conv.UpsertSet {
+			uom.Upsert = &conv.Upsert
+		}
+		if conv.ArrayFiltersSet {
+			uom.ArrayFilters = &conv.ArrayFilters
+		}
+		return uom
+	case driver.UpdateManyModel:
+		umm := &UpdateManyModel{
+			Filter:    conv.Filter,
+			Update:    conv.Update,
+			Collation: conv.Collation,
+		}
+		if conv.UpsertSet {
+			umm.Upsert = &conv.Upsert
+		}
+		if conv.ArrayFiltersSet {
+			umm.ArrayFilters = &conv.ArrayFilters
+		}
+		return umm
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/change_stream.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/change_stream.go
new file mode 100644
index 0000000..5330117
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/change_stream.go
@@ -0,0 +1,508 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Server error codes that are not resume-able: when the underlying cursor
+// fails with one of these, ChangeStream.Next gives up instead of re-running
+// the aggregate (see the switch in Next).
+const errorInterrupted int32 = 11601
+const errorCappedPositionLost int32 = 136
+const errorCursorKilled int32 = 237
+
+// ErrMissingResumeToken indicates that a change stream notification from the server did not
+// contain a resume token.
+var ErrMissingResumeToken = errors.New("cannot provide resume functionality when the resume token is missing")
+
+// ErrNilCursor indicates that the cursor for the change stream is nil.
+var ErrNilCursor = errors.New("cursor is nil")
+
+// ChangeStream instances iterate a stream of change documents. Each document can be decoded via the
+// Decode method. Resume tokens should be retrieved via the ResumeToken method and can be stored to
+// resume the change stream at a specific point in time.
+type ChangeStream struct {
+	// Current is the BSON bytes of the current change document. This property is only valid until
+	// the next call to Next or Close. If continued access is required to the bson.Raw, you must
+	// make a copy of it.
+	Current bson.Raw
+
+	cmd        bsonx.Doc // aggregate command to run to create stream and rebuild cursor
+	pipeline   bsonx.Arr // pipeline stages; element 0 is the $changeStream stage
+	options    *options.ChangeStreamOptions
+	coll       *Collection // set only for collection-level streams
+	db         *Database
+	ns         command.Namespace // namespace reported in the server's cursor response
+	cursor     *Cursor // underlying cursor feeding Next
+	cursorOpts bsonx.Doc
+
+	resumeToken bsonx.Doc // _id of the most recent change document, used to resume
+	err         error // sticky error reported by Err
+	streamType  StreamType // collection-, database-, or client-level stream
+	client      *Client
+	sess        Session
+	readPref    *readpref.ReadPref
+	readConcern *readconcern.ReadConcern
+	registry    *bsoncodec.Registry
+}
+
+// replaceOptions adjusts the change stream options before the aggregate is
+// re-run to resume the stream after a resume-able error.
+func (cs *ChangeStream) replaceOptions(desc description.SelectedServer) {
+	// if cs has not received any changes and resumeAfter not specified and max wire version >= 7, run known agg cmd
+	// with startAtOperationTime set to startAtOperationTime provided by user or saved from initial agg
+	// must not send resumeAfter key
+
+	// else: run known agg cmd with resumeAfter set to last known resumeToken
+	// must not set startAtOperationTime (remove if originally in cmd)
+
+	if cs.options.ResumeAfter == nil && desc.WireVersion.Max >= 7 && cs.resumeToken == nil {
+		cs.options.SetStartAtOperationTime(cs.sess.OperationTime())
+	} else {
+		if cs.resumeToken == nil {
+			return // restart stream without the resume token
+		}
+
+		cs.options.SetResumeAfter(cs.resumeToken)
+		// remove startAtOperationTime
+		cs.options.SetStartAtOperationTime(nil)
+	}
+}
+
+// createCmdDocs builds the three option documents for a change stream
+// aggregate from the merged ChangeStreamOptions:
+//   - pipelineDoc: options embedded inside the $changeStream pipeline stage
+//   - cursorDoc: cursor options (currently only batchSize)
+//   - optsDoc: top-level aggregate command options (currently only collation)
+func createCmdDocs(csType StreamType, opts *options.ChangeStreamOptions, registry *bsoncodec.Registry) (bsonx.Doc,
+	bsonx.Doc, bsonx.Doc, error) {
+
+	pipelineDoc := bsonx.Doc{}
+	cursorDoc := bsonx.Doc{}
+	optsDoc := bsonx.Doc{}
+
+	// Client-level streams watch every collection in the cluster.
+	if csType == ClientStream {
+		pipelineDoc = pipelineDoc.Append("allChangesForCluster", bsonx.Boolean(true))
+	}
+
+	if opts.BatchSize != nil {
+		cursorDoc = cursorDoc.Append("batchSize", bsonx.Int32(*opts.BatchSize))
+	}
+	if opts.Collation != nil {
+		optsDoc = optsDoc.Append("collation", bsonx.Document(opts.Collation.ToDocument()))
+	}
+	if opts.FullDocument != nil {
+		pipelineDoc = pipelineDoc.Append("fullDocument", bsonx.String(string(*opts.FullDocument)))
+	}
+	if opts.MaxAwaitTime != nil {
+		// The wire format expects milliseconds.
+		ms := int64(time.Duration(*opts.MaxAwaitTime) / time.Millisecond)
+		pipelineDoc = pipelineDoc.Append("maxAwaitTimeMS", bsonx.Int64(ms))
+	}
+	if opts.ResumeAfter != nil {
+		// ResumeAfter is caller-supplied, so it must be marshalled through the
+		// registry before embedding it in the command.
+		rt, err := transformDocument(registry, opts.ResumeAfter)
+		if err != nil {
+			return nil, nil, nil, err
+		}
+
+		pipelineDoc = pipelineDoc.Append("resumeAfter", bsonx.Document(rt))
+	}
+	if opts.StartAtOperationTime != nil {
+		pipelineDoc = pipelineDoc.Append("startAtOperationTime",
+			bsonx.Timestamp(opts.StartAtOperationTime.T, opts.StartAtOperationTime.I))
+	}
+
+	return pipelineDoc, cursorDoc, optsDoc, nil
+}
+
+// getSession returns the session attached to ctx, after validating that it
+// belongs to client; if ctx carries no session, a new implicit session is
+// drawn from the client's session pool.
+func getSession(ctx context.Context, client *Client) (Session, error) {
+	sess := sessionFromContext(ctx)
+	if err := client.ValidSession(sess); err != nil {
+		return nil, err
+	}
+
+	var mongoSess Session
+	if sess != nil {
+		mongoSess = &sessionImpl{
+			Client: sess,
+		}
+	} else {
+		// create implicit session because it will be needed
+		newSess, err := session.NewClientSession(client.topology.SessionPool, client.id, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+
+		mongoSess = &sessionImpl{
+			Client: newSess,
+		}
+	}
+
+	return mongoSess, nil
+}
+
+// parseOptions applies defaults to the merged change stream options (currently
+// just fullDocument) and returns the pipeline, cursor, and aggregate option
+// documents produced by createCmdDocs.
+func parseOptions(csType StreamType, opts *options.ChangeStreamOptions, registry *bsoncodec.Registry) (bsonx.Doc,
+	bsonx.Doc, bsonx.Doc, error) {
+
+	if opts.FullDocument == nil {
+		opts = opts.SetFullDocument(options.Default)
+	}
+
+	pipelineDoc, cursorDoc, optsDoc, err := createCmdDocs(csType, opts, registry)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	return pipelineDoc, cursorDoc, optsDoc, nil
+}
+
+// runCommand (re)runs the aggregate command backing this change stream and
+// installs a fresh cursor. When replaceOptions is true the $changeStream
+// stage is rebuilt first (resumeAfter / startAtOperationTime adjusted via
+// replaceOptions) so the stream resumes rather than starting over.
+func (cs *ChangeStream) runCommand(ctx context.Context, replaceOptions bool) error {
+	ss, err := cs.client.topology.SelectServer(ctx, cs.db.writeSelector)
+	if err != nil {
+		return err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return err
+	}
+	defer conn.Close()
+
+	if replaceOptions {
+		cs.replaceOptions(desc)
+		optionsDoc, _, _, err := createCmdDocs(cs.streamType, cs.options, cs.registry)
+		if err != nil {
+			return err
+		}
+
+		// Swap the $changeStream stage (element 0 of the pipeline) for one
+		// built from the adjusted options, then refresh the cached command.
+		changeStreamDoc := bsonx.Doc{
+			{"$changeStream", bsonx.Document(optionsDoc)},
+		}
+		cs.pipeline[0] = bsonx.Document(changeStreamDoc)
+		cs.cmd.Set("pipeline", bsonx.Array(cs.pipeline))
+	}
+
+	readCmd := command.Read{
+		DB:          cs.db.name,
+		Command:     cs.cmd,
+		Session:     cs.sess.(*sessionImpl).Client,
+		Clock:       cs.client.clock,
+		ReadPref:    cs.readPref,
+		ReadConcern: cs.readConcern,
+	}
+
+	rdr, err := readCmd.RoundTrip(ctx, desc, conn)
+	if err != nil {
+		// The session is no longer needed if the stream could not be created.
+		cs.sess.EndSession(ctx)
+		return err
+	}
+
+	batchCursor, err := driver.NewBatchCursor(bsoncore.Document(rdr), readCmd.Session, readCmd.Clock, ss.Server)
+	if err != nil {
+		cs.sess.EndSession(ctx)
+		return err
+	}
+	cursor, err := newCursor(batchCursor, cs.registry)
+	if err != nil {
+		cs.sess.EndSession(ctx)
+		return err
+	}
+	cs.cursor = cursor
+
+	// Record the namespace the server reported for the cursor; Next uses it
+	// to kill the dead cursor before resuming.
+	cursorValue, err := rdr.LookupErr("cursor")
+	if err != nil {
+		return err
+	}
+	cursorDoc := cursorValue.Document()
+	cs.ns = command.ParseNamespace(cursorDoc.Lookup("ns").StringValue())
+
+	return nil
+}
+
+// newChangeStream opens a collection-level change stream: it prepends the
+// $changeStream stage to the user-supplied pipeline, builds the aggregate
+// command against the collection, and runs it to create the initial cursor.
+func newChangeStream(ctx context.Context, coll *Collection, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+
+	pipelineArr, err := transformAggregatePipeline(coll.registry, pipeline)
+	if err != nil {
+		return nil, err
+	}
+
+	csOpts := options.MergeChangeStreamOptions(opts...)
+	pipelineDoc, cursorDoc, optsDoc, err := parseOptions(CollectionStream, csOpts, coll.registry)
+	if err != nil {
+		return nil, err
+	}
+	sess, err := getSession(ctx, coll.client)
+	if err != nil {
+		return nil, err
+	}
+
+	// The $changeStream stage must be the first stage of the pipeline.
+	csDoc := bsonx.Document(bsonx.Doc{
+		{"$changeStream", bsonx.Document(pipelineDoc)},
+	})
+	pipelineArr = append(bsonx.Arr{csDoc}, pipelineArr...)
+
+	cmd := bsonx.Doc{
+		{"aggregate", bsonx.String(coll.name)},
+		{"pipeline", bsonx.Array(pipelineArr)},
+		{"cursor", bsonx.Document(cursorDoc)},
+	}
+	cmd = append(cmd, optsDoc...)
+
+	cs := &ChangeStream{
+		client:      coll.client,
+		sess:        sess,
+		cmd:         cmd,
+		pipeline:    pipelineArr,
+		coll:        coll,
+		db:          coll.db,
+		streamType:  CollectionStream,
+		readPref:    coll.readPreference,
+		readConcern: coll.readConcern,
+		options:     csOpts,
+		registry:    coll.registry,
+		cursorOpts:  cursorDoc,
+	}
+
+	err = cs.runCommand(ctx, false)
+	if err != nil {
+		return nil, err
+	}
+
+	return cs, nil
+}
+
+// newDbChangeStream opens a database-level change stream, watching all
+// collections in db. It mirrors newChangeStream but aggregates against the
+// database ("aggregate": 1) instead of a named collection.
+func newDbChangeStream(ctx context.Context, db *Database, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+
+	pipelineArr, err := transformAggregatePipeline(db.registry, pipeline)
+	if err != nil {
+		return nil, err
+	}
+
+	csOpts := options.MergeChangeStreamOptions(opts...)
+	pipelineDoc, cursorDoc, optsDoc, err := parseOptions(DatabaseStream, csOpts, db.registry)
+	if err != nil {
+		return nil, err
+	}
+	sess, err := getSession(ctx, db.client)
+	if err != nil {
+		return nil, err
+	}
+
+	// The $changeStream stage must be the first stage of the pipeline.
+	csDoc := bsonx.Document(bsonx.Doc{
+		{"$changeStream", bsonx.Document(pipelineDoc)},
+	})
+	pipelineArr = append(bsonx.Arr{csDoc}, pipelineArr...)
+
+	cmd := bsonx.Doc{
+		{"aggregate", bsonx.Int32(1)},
+		{"pipeline", bsonx.Array(pipelineArr)},
+		{"cursor", bsonx.Document(cursorDoc)},
+	}
+	cmd = append(cmd, optsDoc...)
+
+	cs := &ChangeStream{
+		client:      db.client,
+		db:          db,
+		sess:        sess,
+		cmd:         cmd,
+		pipeline:    pipelineArr,
+		streamType:  DatabaseStream,
+		readPref:    db.readPreference,
+		readConcern: db.readConcern,
+		options:     csOpts,
+		registry:    db.registry,
+		cursorOpts:  cursorDoc,
+	}
+
+	err = cs.runCommand(ctx, false)
+	if err != nil {
+		return nil, err
+	}
+
+	return cs, nil
+}
+
+// newClientChangeStream opens a cluster-wide change stream. The aggregate is
+// issued against the "admin" database, and createCmdDocs adds the
+// allChangesForCluster flag for the ClientStream type.
+func newClientChangeStream(ctx context.Context, client *Client, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+
+	pipelineArr, err := transformAggregatePipeline(client.registry, pipeline)
+	if err != nil {
+		return nil, err
+	}
+
+	csOpts := options.MergeChangeStreamOptions(opts...)
+	pipelineDoc, cursorDoc, optsDoc, err := parseOptions(ClientStream, csOpts, client.registry)
+	if err != nil {
+		return nil, err
+	}
+	sess, err := getSession(ctx, client)
+	if err != nil {
+		return nil, err
+	}
+
+	// The $changeStream stage must be the first stage of the pipeline.
+	csDoc := bsonx.Document(bsonx.Doc{
+		{"$changeStream", bsonx.Document(pipelineDoc)},
+	})
+	pipelineArr = append(bsonx.Arr{csDoc}, pipelineArr...)
+
+	cmd := bsonx.Doc{
+		{"aggregate", bsonx.Int32(1)},
+		{"pipeline", bsonx.Array(pipelineArr)},
+		{"cursor", bsonx.Document(cursorDoc)},
+	}
+	cmd = append(cmd, optsDoc...)
+
+	cs := &ChangeStream{
+		client:      client,
+		db:          client.Database("admin"),
+		sess:        sess,
+		cmd:         cmd,
+		pipeline:    pipelineArr,
+		streamType:  ClientStream,
+		readPref:    client.readPreference,
+		readConcern: client.readConcern,
+		options:     csOpts,
+		registry:    client.registry,
+		cursorOpts:  cursorDoc,
+	}
+
+	err = cs.runCommand(ctx, false)
+	if err != nil {
+		return nil, err
+	}
+
+	return cs, nil
+}
+
+// storeResumeToken caches the _id of the current change document as the
+// stream's resume token. If the token is missing or not a document, the
+// stream is closed (it could not be resumed afterwards) and
+// ErrMissingResumeToken is returned.
+func (cs *ChangeStream) storeResumeToken() error {
+	idVal, err := cs.cursor.Current.LookupErr("_id")
+	if err != nil {
+		_ = cs.Close(context.Background())
+		return ErrMissingResumeToken
+	}
+
+	var idDoc bson.Raw
+	idDoc, ok := idVal.DocumentOK()
+	if !ok {
+		_ = cs.Close(context.Background())
+		return ErrMissingResumeToken
+	}
+	tokenDoc, err := bsonx.ReadDoc(idDoc)
+	if err != nil {
+		_ = cs.Close(context.Background())
+		return ErrMissingResumeToken
+	}
+
+	cs.resumeToken = tokenDoc
+	return nil
+}
+
+// ID returns the cursor ID for this change stream, or 0 if the stream has no
+// underlying cursor.
+func (cs *ChangeStream) ID() int64 {
+	if cs.cursor == nil {
+		return 0
+	}
+
+	return cs.cursor.ID()
+}
+
+// Next gets the next result from this change stream. Returns true if there were no errors and the next
+// result is available for decoding.
+func (cs *ChangeStream) Next(ctx context.Context) bool {
+	// execute in a loop to retry resume-able errors and advance the underlying cursor
+	for {
+		if cs.cursor == nil {
+			return false
+		}
+
+		if cs.cursor.Next(ctx) {
+			// Cache the document's _id so the stream can be resumed later.
+			err := cs.storeResumeToken()
+			if err != nil {
+				cs.err = err
+				return false
+			}
+
+			cs.Current = cs.cursor.Current
+			return true
+		}
+
+		err := cs.cursor.Err()
+		if err == nil {
+			// Cursor exhausted without error: nothing more to return.
+			return false
+		}
+
+		// These server error codes are not resume-able; give up. The error
+		// remains observable through Err via the cursor.
+		switch t := err.(type) {
+		case command.Error:
+			if t.Code == errorInterrupted || t.Code == errorCappedPositionLost || t.Code == errorCursorKilled {
+				return false
+			}
+		}
+
+		// Best-effort cleanup of the dead server-side cursor before resuming;
+		// failures here are deliberately ignored.
+		killCursors := command.KillCursors{
+			NS:  cs.ns,
+			IDs: []int64{cs.ID()},
+		}
+
+		_, _ = driver.KillCursors(ctx, killCursors, cs.client.topology, cs.db.writeSelector)
+		// Re-run the aggregate with resume options, then loop to read from
+		// the new cursor.
+		cs.err = cs.runCommand(ctx, true)
+		if cs.err != nil {
+			return false
+		}
+	}
+}
+
+// Decode will decode the current document into out. It returns ErrNilCursor
+// if the stream has no underlying cursor.
+func (cs *ChangeStream) Decode(out interface{}) error {
+	if cs.cursor == nil {
+		return ErrNilCursor
+	}
+
+	return bson.UnmarshalWithRegistry(cs.registry, cs.Current, out)
+}
+
+// Err returns the current error, checking the stream's own sticky error
+// before falling back to the underlying cursor's error.
+func (cs *ChangeStream) Err() error {
+	if cs.err != nil {
+		return cs.err
+	}
+	if cs.cursor == nil {
+		return nil
+	}
+
+	return cs.cursor.Err()
+}
+
+// Close closes this change stream's underlying cursor. It is a no-op when
+// there is no cursor.
+func (cs *ChangeStream) Close(ctx context.Context) error {
+	if cs.cursor == nil {
+		return nil // cursor is already closed
+	}
+
+	return cs.cursor.Close(ctx)
+}
+
+// StreamType represents the type of a change stream.
+type StreamType uint8
+
+// These constants represent valid change stream types. A change stream can be initialized over a collection, all
+// collections in a database, or over a whole client.
+const (
+	CollectionStream StreamType = iota // watch a single collection
+	DatabaseStream // watch all collections in a database
+	ClientStream // watch the whole cluster
+)
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/client.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/client.go
new file mode 100644
index 0000000..7984bc0
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/client.go
@@ -0,0 +1,454 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/tag"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connstring"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// defaultLocalThreshold is the default latency window used when selecting a
+// server (see the LatencySelector in ListDatabases).
+const defaultLocalThreshold = 15 * time.Millisecond
+
+// Client performs operations on a given topology.
+type Client struct {
+	id              uuid.UUID // unique client ID; ties sessions to this client (see ValidSession)
+	topologyOptions []topology.Option
+	topology        *topology.Topology
+	connString      connstring.ConnString
+	localThreshold  time.Duration // latency window for server selection
+	retryWrites     bool // inherited by sessions created via StartSession
+	clock           *session.ClusterClock
+	readPreference  *readpref.ReadPref
+	readConcern     *readconcern.ReadConcern
+	writeConcern    *writeconcern.WriteConcern
+	registry        *bsoncodec.Registry
+	marshaller      BSONAppender
+}
+
+// Connect creates a new Client and then initializes it using the Connect method.
+// It is equivalent to calling NewClientWithOptions followed by Client.Connect.
+func Connect(ctx context.Context, uri string, opts ...*options.ClientOptions) (*Client, error) {
+	c, err := NewClientWithOptions(uri, opts...)
+	if err != nil {
+		return nil, err
+	}
+	err = c.Connect(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return c, nil
+}
+
+// NewClient creates a new client to connect to a cluster specified by the uri.
+// The returned client is not connected; call Connect before use.
+func NewClient(uri string) (*Client, error) {
+	cs, err := connstring.Parse(uri)
+	if err != nil {
+		return nil, err
+	}
+
+	return newClient(cs)
+}
+
+// NewClientWithOptions creates a new client to connect to a cluster specified by the connection
+// string and the options manually passed in. If the same option is configured in both the
+// connection string and the manual options, the manual option will be ignored.
+// The returned client is not connected; call Connect before use.
+func NewClientWithOptions(uri string, opts ...*options.ClientOptions) (*Client, error) {
+	cs, err := connstring.Parse(uri)
+	if err != nil {
+		return nil, err
+	}
+
+	return newClient(cs, opts...)
+}
+
+// Connect initializes the Client by starting background monitoring goroutines.
+// This method must be called before a Client can be used.
+func (c *Client) Connect(ctx context.Context) error {
+	err := c.topology.Connect(ctx)
+	if err != nil {
+		// Normalize topology-layer errors into mongo-package errors.
+		return replaceTopologyErr(err)
+	}
+
+	return nil
+
+}
+
+// Disconnect closes sockets to the topology referenced by this Client. It will
+// shut down any monitoring goroutines, close the idle connection pool, and will
+// wait until all the in use connections have been returned to the connection
+// pool and closed before returning. If the context expires via cancellation,
+// deadline, or timeout before the in use connections have returned, the in use
+// connections will be closed, resulting in the failure of any in flight read
+// or write operations. If this method returns with no errors, all connections
+// associated with this Client have been closed.
+//
+// Pooled server sessions are ended (best-effort) before the topology is torn down.
+func (c *Client) Disconnect(ctx context.Context) error {
+	c.endSessions(ctx)
+	return replaceTopologyErr(c.topology.Disconnect(ctx))
+}
+
+// Ping verifies that the client can connect to the topology.
+// If readPreference is nil then will use the client's default read
+// preference.
+//
+// NOTE(review): this is implemented purely as a server selection matching rp;
+// no command is sent to the selected server.
+func (c *Client) Ping(ctx context.Context, rp *readpref.ReadPref) error {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	if rp == nil {
+		rp = c.readPreference
+	}
+
+	_, err := c.topology.SelectServer(ctx, description.ReadPrefSelector(rp))
+	return replaceTopologyErr(err)
+}
+
+// StartSession starts a new explicit session. Session defaults are seeded
+// from the client's read concern, read preference, and write concern, and
+// then overridden by any values set in opts. The session inherits the
+// client's retryWrites setting. Returns ErrClientDisconnected if the client
+// has no session pool.
+func (c *Client) StartSession(opts ...*options.SessionOptions) (Session, error) {
+	if c.topology.SessionPool == nil {
+		return nil, ErrClientDisconnected
+	}
+
+	sopts := options.MergeSessionOptions(opts...)
+	coreOpts := &session.ClientOptions{
+		DefaultReadConcern:    c.readConcern,
+		DefaultReadPreference: c.readPreference,
+		DefaultWriteConcern:   c.writeConcern,
+	}
+	if sopts.CausalConsistency != nil {
+		coreOpts.CausalConsistency = sopts.CausalConsistency
+	}
+	if sopts.DefaultReadConcern != nil {
+		coreOpts.DefaultReadConcern = sopts.DefaultReadConcern
+	}
+	if sopts.DefaultWriteConcern != nil {
+		coreOpts.DefaultWriteConcern = sopts.DefaultWriteConcern
+	}
+	if sopts.DefaultReadPreference != nil {
+		coreOpts.DefaultReadPreference = sopts.DefaultReadPreference
+	}
+
+	sess, err := session.NewClientSession(c.topology.SessionPool, c.id, session.Explicit, coreOpts)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+
+	sess.RetryWrite = c.retryWrites
+
+	return &sessionImpl{
+		Client: sess,
+		topo:   c.topology,
+	}, nil
+}
+
+// endSessions tells the server to end all pooled server sessions. Errors are
+// deliberately ignored: this is best-effort cleanup run during Disconnect.
+func (c *Client) endSessions(ctx context.Context) {
+	if c.topology.SessionPool == nil {
+		return
+	}
+	cmd := command.EndSessions{
+		Clock:      c.clock,
+		SessionIDs: c.topology.SessionPool.IDSlice(),
+	}
+
+	_, _ = driver.EndSessions(ctx, cmd, c.topology, description.ReadPrefSelector(readpref.PrimaryPreferred()))
+}
+
+// newClient constructs a Client from a parsed connection string plus manual
+// options, filling in defaults (read/write concern, read preference, registry)
+// from the connection string when they were not explicitly configured.
+func newClient(cs connstring.ConnString, opts ...*options.ClientOptions) (*Client, error) {
+	clientOpt := options.MergeClientOptions(cs, opts...)
+
+	client := &Client{
+		topologyOptions: clientOpt.TopologyOptions,
+		connString:      clientOpt.ConnString,
+		localThreshold:  defaultLocalThreshold,
+		readPreference:  clientOpt.ReadPreference,
+		readConcern:     clientOpt.ReadConcern,
+		writeConcern:    clientOpt.WriteConcern,
+		registry:        clientOpt.Registry,
+	}
+
+	// Connection-string retryWrites first, then the manual option overrides it.
+	if client.connString.RetryWritesSet {
+		client.retryWrites = client.connString.RetryWrites
+	}
+	if clientOpt.RetryWrites != nil {
+		client.retryWrites = *clientOpt.RetryWrites
+	}
+
+	clientID, err := uuid.New()
+	if err != nil {
+		return nil, err
+	}
+	client.id = clientID
+
+	// The closures below read client.clock/client.registry when invoked, not
+	// here; client.clock is assigned only after topology.New below.
+	// NOTE(review): this assumes the topology does not invoke them during New.
+	topts := append(
+		client.topologyOptions,
+		topology.WithConnString(func(connstring.ConnString) connstring.ConnString { return client.connString }),
+		topology.WithServerOptions(func(opts ...topology.ServerOption) []topology.ServerOption {
+			return append(opts, topology.WithClock(func(clock *session.ClusterClock) *session.ClusterClock {
+				return client.clock
+			}), topology.WithRegistry(func(registry *bsoncodec.Registry) *bsoncodec.Registry {
+				return client.registry
+			}))
+		}),
+	)
+	topo, err := topology.New(topts...)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+	client.topology = topo
+	client.clock = &session.ClusterClock{}
+
+	if client.readConcern == nil {
+		client.readConcern = readConcernFromConnString(&client.connString)
+
+		if client.readConcern == nil {
+			// no read concern in conn string
+			client.readConcern = readconcern.New()
+		}
+	}
+
+	if client.writeConcern == nil {
+		client.writeConcern = writeConcernFromConnString(&client.connString)
+	}
+	if client.readPreference == nil {
+		rp, err := readPreferenceFromConnString(&client.connString)
+		if err != nil {
+			return nil, err
+		}
+		if rp != nil {
+			client.readPreference = rp
+		} else {
+			client.readPreference = readpref.Primary()
+		}
+	}
+
+	if client.registry == nil {
+		client.registry = bson.DefaultRegistry
+	}
+	return client, nil
+}
+
+// readConcernFromConnString builds a ReadConcern from the URI's
+// readConcernLevel option; it returns nil when the URI set no level.
+func readConcernFromConnString(cs *connstring.ConnString) *readconcern.ReadConcern {
+	if len(cs.ReadConcernLevel) == 0 {
+		return nil
+	}
+
+	rc := &readconcern.ReadConcern{}
+	readconcern.Level(cs.ReadConcernLevel)(rc)
+
+	return rc
+}
+
+// writeConcernFromConnString translates the URI's w / journal / wtimeoutMS
+// options into a WriteConcern. It returns nil when none of those options were
+// present, so callers can distinguish "unspecified" from a configured concern.
+func writeConcernFromConnString(cs *connstring.ConnString) *writeconcern.WriteConcern {
+	var wc *writeconcern.WriteConcern
+
+	// A string w (tag set) takes precedence over a numeric w.
+	if len(cs.WString) > 0 {
+		// NOTE(review): wc is always nil in this first branch; the check is
+		// kept for symmetry with the later blocks.
+		if wc == nil {
+			wc = writeconcern.New()
+		}
+
+		writeconcern.WTagSet(cs.WString)(wc)
+	} else if cs.WNumberSet {
+		if wc == nil {
+			wc = writeconcern.New()
+		}
+
+		writeconcern.W(cs.WNumber)(wc)
+	}
+
+	if cs.JSet {
+		if wc == nil {
+			wc = writeconcern.New()
+		}
+
+		writeconcern.J(cs.J)(wc)
+	}
+
+	if cs.WTimeoutSet {
+		if wc == nil {
+			wc = writeconcern.New()
+		}
+
+		writeconcern.WTimeout(cs.WTimeout)(wc)
+	}
+
+	return wc
+}
+
+// readPreferenceFromConnString builds a ReadPref from the URI's
+// readPreference, tag-set, and maxStalenessSeconds options. It returns nil
+// when no readPreference mode was specified — in that case any tag/staleness
+// options collected here are discarded.
+func readPreferenceFromConnString(cs *connstring.ConnString) (*readpref.ReadPref, error) {
+	var rp *readpref.ReadPref
+	var err error
+	options := make([]readpref.Option, 0, 1)
+
+	tagSets := tag.NewTagSetsFromMaps(cs.ReadPreferenceTagSets)
+	if len(tagSets) > 0 {
+		options = append(options, readpref.WithTagSets(tagSets...))
+	}
+
+	if cs.MaxStaleness != 0 {
+		options = append(options, readpref.WithMaxStaleness(cs.MaxStaleness))
+	}
+
+	if len(cs.ReadPreference) > 0 {
+		if rp == nil {
+			// NOTE(review): an invalid mode string is silently ignored here
+			// (error discarded), leaving mode at its zero value.
+			mode, _ := readpref.ModeFromString(cs.ReadPreference)
+			rp, err = readpref.New(mode, options...)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+
+	return rp, nil
+}
+
+// ValidSession returns an error if the session doesn't belong to the client.
+// A nil session is considered valid (no session in use).
+func (c *Client) ValidSession(sess *session.Client) error {
+	if sess != nil && !uuid.Equal(sess.ClientID, c.id) {
+		return ErrWrongClient
+	}
+	return nil
+}
+
+// Database returns a handle for a given database, configured with the merged
+// database options.
+func (c *Client) Database(name string, opts ...*options.DatabaseOptions) *Database {
+	return newDatabase(c, name, opts...)
+}
+
+// ConnectionString returns the connection string of the cluster the client is
+// connected to, exactly as originally supplied.
+func (c *Client) ConnectionString() string {
+	return c.connString.Original
+}
+
+// ListDatabases returns a ListDatabasesResult for databases matching filter.
+// The command is always run against the primary, within the client's latency
+// window.
+func (c *Client) ListDatabases(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) (ListDatabasesResult, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := c.ValidSession(sess)
+	if err != nil {
+		return ListDatabasesResult{}, err
+	}
+
+	f, err := transformDocument(c.registry, filter)
+	if err != nil {
+		return ListDatabasesResult{}, err
+	}
+
+	cmd := command.ListDatabases{
+		Filter:  f,
+		Session: sess,
+		Clock:   c.clock,
+	}
+
+	readSelector := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(readpref.Primary()),
+		description.LatencySelector(c.localThreshold),
+	})
+	res, err := driver.ListDatabases(
+		ctx, cmd,
+		c.topology,
+		readSelector,
+		c.id,
+		c.topology.SessionPool,
+		opts...,
+	)
+	if err != nil {
+		return ListDatabasesResult{}, replaceTopologyErr(err)
+	}
+
+	return (ListDatabasesResult{}).fromResult(res), nil
+}
+
+// ListDatabaseNames returns a slice containing the names of all of the databases on the server.
+// It is ListDatabases with the nameOnly option forced on.
+func (c *Client) ListDatabaseNames(ctx context.Context, filter interface{}, opts ...*options.ListDatabasesOptions) ([]string, error) {
+	opts = append(opts, options.ListDatabases().SetNameOnly(true))
+
+	res, err := c.ListDatabases(ctx, filter, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	names := make([]string, 0)
+	for _, spec := range res.Databases {
+		names = append(names, spec.Name)
+	}
+
+	return names, nil
+}
+
+// WithSession allows a user to start a session themselves and manage
+// its lifetime. The only way to provide a session to a CRUD method is
+// to invoke that CRUD method with the mongo.SessionContext within the
+// closure. The mongo.SessionContext can be used as a regular context,
+// so methods like context.WithDeadline and context.WithTimeout are
+// supported.
+//
+// If the context.Context already has a mongo.Session attached, that
+// mongo.Session will be replaced with the one provided.
+//
+// Errors returned from the closure are transparently returned from
+// this function. The caller remains responsible for ending sess.
+func WithSession(ctx context.Context, sess Session, fn func(SessionContext) error) error {
+	return fn(contextWithSession(ctx, sess))
+}
+
+// UseSession creates a default session, that is only valid for the
+// lifetime of the closure. No cleanup outside of closing the session
+// is done upon exiting the closure. This means that an outstanding
+// transaction will be aborted, even if the closure returns an error.
+//
+// If ctx already contains a mongo.Session, that mongo.Session will be
+// replaced with the newly created mongo.Session.
+//
+// Errors returned from the closure are transparently returned from
+// this method. It delegates to UseSessionWithOptions with default
+// session options.
+func (c *Client) UseSession(ctx context.Context, fn func(SessionContext) error) error {
+	return c.UseSessionWithOptions(ctx, options.Session(), fn)
+}
+
+// UseSessionWithOptions works like UseSession but allows the caller
+// to specify the options used to create the session.
+func (c *Client) UseSessionWithOptions(ctx context.Context, opts *options.SessionOptions, fn func(SessionContext) error) error {
+	defaultSess, err := c.StartSession(opts)
+	if err != nil {
+		return err
+	}
+
+	// The session lives exactly as long as the closure.
+	defer defaultSess.EndSession(ctx)
+
+	// Attach the session to the context so CRUD methods invoked inside fn
+	// pick it up automatically.
+	sessCtx := sessionContext{
+		Context: context.WithValue(ctx, sessionKey{}, defaultSess),
+		Session: defaultSess,
+	}
+
+	return fn(sessCtx)
+}
+
+// Watch returns a change stream cursor used to receive information of changes to the client. This method is preferred
+// to running a raw aggregation with a $changeStream stage because it supports resumability in the case of some errors.
+// The client must have read concern majority or no read concern for a change stream to be created successfully.
+func (c *Client) Watch(ctx context.Context, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+
+	return newClientChangeStream(ctx, c, pipeline, opts...)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/collection.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/collection.go
new file mode 100644
index 0000000..fb16775
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/collection.go
@@ -0,0 +1,1298 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Collection performs operations on a given collection.
+type Collection struct {
+	client         *Client
+	db             *Database
+	name           string
+	readConcern    *readconcern.ReadConcern
+	writeConcern   *writeconcern.WriteConcern
+	readPreference *readpref.ReadPref
+	readSelector   description.ServerSelector // server selector used for read operations
+	writeSelector  description.ServerSelector // server selector used for write operations
+	registry       *bsoncodec.Registry        // BSON codec registry used to (de)serialize documents
+}
+
+// newCollection builds a Collection handle for the named collection,
+// inheriting read/write concern, read preference, and registry from the
+// database unless overridden by the supplied collection options.
+func newCollection(db *Database, name string, opts ...*options.CollectionOptions) *Collection {
+	collOpt := options.MergeCollectionOptions(opts...)
+
+	rc := db.readConcern
+	if collOpt.ReadConcern != nil {
+		rc = collOpt.ReadConcern
+	}
+
+	wc := db.writeConcern
+	if collOpt.WriteConcern != nil {
+		wc = collOpt.WriteConcern
+	}
+
+	rp := db.readPreference
+	if collOpt.ReadPreference != nil {
+		rp = collOpt.ReadPreference
+	}
+
+	reg := db.registry
+	if collOpt.Registry != nil {
+		reg = collOpt.Registry
+	}
+
+	// Reads select by read preference, writes by writability; both are
+	// additionally filtered by the client's latency window.
+	readSelector := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(rp),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	writeSelector := description.CompositeSelector([]description.ServerSelector{
+		description.WriteSelector(),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	coll := &Collection{
+		client:         db.client,
+		db:             db,
+		name:           name,
+		readPreference: rp,
+		readConcern:    rc,
+		writeConcern:   wc,
+		readSelector:   readSelector,
+		writeSelector:  writeSelector,
+		registry:       reg,
+	}
+
+	return coll
+}
+
+// copy returns a shallow copy of the collection. Selector, concern, and
+// registry values are shared with the original, not cloned.
+func (coll *Collection) copy() *Collection {
+	return &Collection{
+		client:         coll.client,
+		db:             coll.db,
+		name:           coll.name,
+		readConcern:    coll.readConcern,
+		writeConcern:   coll.writeConcern,
+		readPreference: coll.readPreference,
+		readSelector:   coll.readSelector,
+		writeSelector:  coll.writeSelector,
+		registry:       coll.registry,
+	}
+}
+
+// Clone creates a copy of this collection with updated options, if any are given.
+// The error return is currently always nil; it is kept for API compatibility.
+func (coll *Collection) Clone(opts ...*options.CollectionOptions) (*Collection, error) {
+	copyColl := coll.copy()
+	optsColl := options.MergeCollectionOptions(opts...)
+
+	if optsColl.ReadConcern != nil {
+		copyColl.readConcern = optsColl.ReadConcern
+	}
+
+	if optsColl.WriteConcern != nil {
+		copyColl.writeConcern = optsColl.WriteConcern
+	}
+
+	if optsColl.ReadPreference != nil {
+		copyColl.readPreference = optsColl.ReadPreference
+	}
+
+	if optsColl.Registry != nil {
+		copyColl.registry = optsColl.Registry
+	}
+
+	// Rebuild the read selector so it reflects a possibly-new read preference.
+	// NOTE(review): writeSelector is not rebuilt here; the clone shares the
+	// original's write selector — confirm this is intended.
+	copyColl.readSelector = description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(copyColl.readPreference),
+		description.LatencySelector(copyColl.client.localThreshold),
+	})
+
+	return copyColl, nil
+}
+
+// Name provides access to the name of the collection.
+func (coll *Collection) Name() string {
+	return coll.name
+}
+
+// namespace returns the namespace of the collection, i.e. "<db>.<collection>".
+func (coll *Collection) namespace() command.Namespace {
+	return command.NewNamespace(coll.db.name, coll.name)
+}
+
+// Database provides access to the database that contains the collection.
+func (coll *Collection) Database() *Database {
+	return coll.db
+}
+
+// BulkWrite performs a bulk write operation.
+//
+// See https://docs.mongodb.com/manual/core/bulk-write-operations/.
+//
+// It returns ErrEmptySlice when models is empty and ErrNilDocument when any
+// model is nil. Write errors are surfaced as a BulkWriteException.
+func (coll *Collection) BulkWrite(ctx context.Context, models []WriteModel,
+	opts ...*options.BulkWriteOptions) (*BulkWriteResult, error) {
+
+	if len(models) == 0 {
+		return nil, ErrEmptySlice
+	}
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	// Convert the public WriteModel values into the driver's internal model type.
+	dispatchModels := make([]driver.WriteModel, len(models))
+	for i, model := range models {
+		if model == nil {
+			return nil, ErrNilDocument
+		}
+		dispatchModels[i] = model.convertModel()
+	}
+
+	res, err := driver.BulkWrite(
+		ctx,
+		coll.namespace(),
+		dispatchModels,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		sess,
+		coll.writeConcern,
+		coll.client.clock,
+		coll.registry,
+		opts...,
+	)
+
+	if err != nil {
+		// Translate the driver's exception type into the public one.
+		if conv, ok := err.(driver.BulkWriteException); ok {
+			return &BulkWriteResult{}, BulkWriteException{
+				WriteConcernError: convertWriteConcernError(conv.WriteConcernError),
+				WriteErrors:       convertBulkWriteErrors(conv.WriteErrors),
+			}
+		}
+
+		return &BulkWriteResult{}, replaceTopologyErr(err)
+	}
+
+	return &BulkWriteResult{
+		InsertedCount: res.InsertedCount,
+		MatchedCount:  res.MatchedCount,
+		ModifiedCount: res.ModifiedCount,
+		DeletedCount:  res.DeletedCount,
+		UpsertedCount: res.UpsertedCount,
+		UpsertedIDs:   res.UpsertedIDs,
+	}, nil
+}
+
+// InsertOne inserts a single document into the collection. The returned
+// result carries the document's _id (as produced by transformAndEnsureID).
+func (coll *Collection) InsertOne(ctx context.Context, document interface{},
+	opts ...*options.InsertOneOptions) (*InsertOneResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	doc, insertedID, err := transformAndEnsureID(coll.registry, document)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	// Per-operation write concerns are omitted while a transaction is
+	// running; the transaction carries its own write concern.
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+	oldns := coll.namespace()
+	cmd := command.Insert{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Docs:         []bsonx.Doc{doc},
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	// convert to InsertManyOptions so these can be argued to dispatch.Insert
+	insertOpts := make([]*options.InsertManyOptions, len(opts))
+	for i, opt := range opts {
+		insertOpts[i] = options.InsertMany()
+		insertOpts[i].BypassDocumentValidation = opt.BypassDocumentValidation
+	}
+
+	res, err := driver.Insert(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		insertOpts...,
+	)
+
+	// Only return a usable result when the single-write bit is set.
+	rr, err := processWriteError(res.WriteConcernError, res.WriteErrors, err)
+	if rr&rrOne == 0 {
+		return nil, err
+	}
+
+	return &InsertOneResult{InsertedID: insertedID}, err
+}
+
+// InsertMany inserts the provided documents. It returns ErrEmptySlice for an
+// empty input and ErrNilDocument when any element is nil. On write errors the
+// inserted IDs are still returned alongside a BulkWriteException.
+func (coll *Collection) InsertMany(ctx context.Context, documents []interface{},
+	opts ...*options.InsertManyOptions) (*InsertManyResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	if len(documents) == 0 {
+		return nil, ErrEmptySlice
+	}
+
+	// result collects the _id of each document, in input order.
+	result := make([]interface{}, len(documents))
+	docs := make([]bsonx.Doc, len(documents))
+
+	for i, doc := range documents {
+		if doc == nil {
+			return nil, ErrNilDocument
+		}
+		bdoc, insertedID, err := transformAndEnsureID(coll.registry, doc)
+		if err != nil {
+			return nil, err
+		}
+
+		docs[i] = bdoc
+		result[i] = insertedID
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	// Per-operation write concerns are omitted while a transaction is running.
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Insert{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Docs:         docs,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	res, err := driver.Insert(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		opts...,
+	)
+
+	switch err {
+	case nil:
+	case command.ErrUnacknowledgedWrite:
+		return &InsertManyResult{InsertedIDs: result}, ErrUnacknowledgedWrite
+	default:
+		return nil, replaceTopologyErr(err)
+	}
+	if len(res.WriteErrors) > 0 || res.WriteConcernError != nil {
+		// Convert per-document write errors into the public exception type.
+		bwErrors := make([]BulkWriteError, 0, len(res.WriteErrors))
+		for _, we := range res.WriteErrors {
+			bwErrors = append(bwErrors, BulkWriteError{
+				WriteError{
+					Index:   we.Index,
+					Code:    we.Code,
+					Message: we.ErrMsg,
+				},
+				nil,
+			})
+		}
+
+		err = BulkWriteException{
+			WriteErrors:       bwErrors,
+			WriteConcernError: convertWriteConcernError(res.WriteConcernError),
+		}
+	}
+
+	return &InsertManyResult{InsertedIDs: result}, err
+}
+
+// DeleteOne deletes a single document from the collection.
+func (coll *Collection) DeleteOne(ctx context.Context, filter interface{},
+	opts ...*options.DeleteOptions) (*DeleteResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+	// limit: 1 restricts the delete to a single matching document.
+	deleteDocs := []bsonx.Doc{
+		{
+			{"q", bsonx.Document(f)},
+			{"limit", bsonx.Int32(1)},
+		},
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	// Per-operation write concerns are omitted while a transaction is running.
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Delete{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Deletes:      deleteDocs,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	res, err := driver.Delete(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		opts...,
+	)
+
+	rr, err := processWriteError(res.WriteConcernError, res.WriteErrors, err)
+	if rr&rrOne == 0 {
+		return nil, err
+	}
+	return &DeleteResult{DeletedCount: int64(res.N)}, err
+}
+
+// DeleteMany deletes multiple documents from the collection.
+func (coll *Collection) DeleteMany(ctx context.Context, filter interface{},
+	opts ...*options.DeleteOptions) (*DeleteResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+	// limit: 0 means delete every matching document.
+	deleteDocs := []bsonx.Doc{{{"q", bsonx.Document(f)}, {"limit", bsonx.Int32(0)}}}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	// Per-operation write concerns are omitted while a transaction is running.
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Delete{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Deletes:      deleteDocs,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	res, err := driver.Delete(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		false, // multi-document deletes are not retryable, so retryability is disabled
+		opts...,
+	)
+
+	rr, err := processWriteError(res.WriteConcernError, res.WriteErrors, err)
+	if rr&rrMany == 0 {
+		return nil, err
+	}
+	return &DeleteResult{DeletedCount: int64(res.N)}, err
+}
+
+// updateOrReplaceOne is the shared implementation behind UpdateOne and
+// ReplaceOne: it issues a single-document (multi:false) update command.
+// filter and update must already be transformed/validated by the caller.
+func (coll *Collection) updateOrReplaceOne(ctx context.Context, filter,
+	update bsonx.Doc, sess *session.Client, opts ...*options.UpdateOptions) (*UpdateResult, error) {
+
+	// TODO: should session be taken from ctx or left as argument?
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	updateDocs := []bsonx.Doc{
+		{
+			{"q", bsonx.Document(filter)},
+			{"u", bsonx.Document(update)},
+			{"multi", bsonx.Boolean(false)},
+		},
+	}
+
+	// Per-operation write concerns are omitted while a transaction is running.
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Update{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Docs:         updateDocs,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	r, err := driver.Update(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		opts...,
+	)
+	if err != nil && err != command.ErrUnacknowledgedWrite {
+		return nil, replaceTopologyErr(err)
+	}
+
+	res := &UpdateResult{
+		MatchedCount:  r.MatchedCount,
+		ModifiedCount: r.ModifiedCount,
+		UpsertedCount: int64(len(r.Upserted)),
+	}
+	// An upsert is reported as a match by the server; undo that so
+	// MatchedCount only reflects pre-existing documents.
+	if len(r.Upserted) > 0 {
+		res.UpsertedID = r.Upserted[0].ID
+		res.MatchedCount--
+	}
+
+	rr, err := processWriteError(r.WriteConcernError, r.WriteErrors, err)
+	if rr&rrOne == 0 {
+		return nil, err
+	}
+	return res, err
+}
+
+// UpdateOne updates a single document in the collection. The update document
+// must contain only update operators (keys beginning with '$'), which is
+// enforced by ensureDollarKey.
+func (coll *Collection) UpdateOne(ctx context.Context, filter interface{}, update interface{},
+	opts ...*options.UpdateOptions) (*UpdateResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	u, err := transformDocument(coll.registry, update)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := ensureDollarKey(u); err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	return coll.updateOrReplaceOne(ctx, f, u, sess, opts...)
+}
+
+// UpdateMany updates multiple documents in the collection. The update
+// document must contain only update operators (enforced by ensureDollarKey).
+func (coll *Collection) UpdateMany(ctx context.Context, filter interface{}, update interface{},
+	opts ...*options.UpdateOptions) (*UpdateResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	u, err := transformDocument(coll.registry, update)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = ensureDollarKey(u); err != nil {
+		return nil, err
+	}
+
+	// multi: true applies the update to every matching document.
+	updateDocs := []bsonx.Doc{
+		{
+			{"q", bsonx.Document(f)},
+			{"u", bsonx.Document(u)},
+			{"multi", bsonx.Boolean(true)},
+		},
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	// Per-operation write concerns are omitted while a transaction is running.
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Update{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Docs:         updateDocs,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	r, err := driver.Update(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		false, // multi-document updates are not retryable, so retryability is disabled
+		opts...,
+	)
+	if err != nil && err != command.ErrUnacknowledgedWrite {
+		return nil, replaceTopologyErr(err)
+	}
+	res := &UpdateResult{
+		MatchedCount:  r.MatchedCount,
+		ModifiedCount: r.ModifiedCount,
+		UpsertedCount: int64(len(r.Upserted)),
+	}
+	// TODO(skriptble): Is this correct? Do we only return the first upserted ID for an UpdateMany?
+	if len(r.Upserted) > 0 {
+		res.UpsertedID = r.Upserted[0].ID
+		res.MatchedCount--
+	}
+
+	rr, err := processWriteError(r.WriteConcernError, r.WriteErrors, err)
+	if rr&rrMany == 0 {
+		return nil, err
+	}
+	return res, err
+}
+
+// ReplaceOne replaces a single document in the collection. The replacement
+// must be a plain document: it may not contain update operators (keys
+// beginning with '$').
+func (coll *Collection) ReplaceOne(ctx context.Context, filter interface{},
+	replacement interface{}, opts ...*options.ReplaceOptions) (*UpdateResult, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	r, err := transformDocument(coll.registry, replacement)
+	if err != nil {
+		return nil, err
+	}
+
+	// Reject update documents passed as replacements. The error message
+	// previously read "cannot contains ... '$" (bad grammar, unterminated
+	// quote); fixed here.
+	if len(r) > 0 && strings.HasPrefix(r[0].Key, "$") {
+		return nil, errors.New("replacement document cannot contain keys beginning with '$'")
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	// Map the replace-specific options onto the generic update options used
+	// by the shared single-document update path.
+	updateOptions := make([]*options.UpdateOptions, 0, len(opts))
+	for _, opt := range opts {
+		uOpts := options.Update()
+		uOpts.BypassDocumentValidation = opt.BypassDocumentValidation
+		uOpts.Collation = opt.Collation
+		uOpts.Upsert = opt.Upsert
+		updateOptions = append(updateOptions, uOpts)
+	}
+
+	return coll.updateOrReplaceOne(ctx, f, r, sess, updateOptions...)
+}
+
+// Aggregate runs an aggregation framework pipeline.
+//
+// See https://docs.mongodb.com/manual/aggregation/.
+func (coll *Collection) Aggregate(ctx context.Context, pipeline interface{},
+	opts ...*options.AggregateOptions) (*Cursor, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	pipelineArr, err := transformAggregatePipeline(coll.registry, pipeline)
+	if err != nil {
+		return nil, err
+	}
+
+	aggOpts := options.MergeAggregateOptions(opts...)
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	// Per-operation write and read concerns are omitted inside a transaction;
+	// the transaction carries its own.
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Aggregate{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Pipeline:     pipelineArr,
+		ReadPref:     coll.readPreference,
+		WriteConcern: wc,
+		ReadConcern:  rc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	// Both selectors are passed: an aggregation with a write stage (e.g. $out)
+	// needs the write selector, a read-only pipeline the read selector.
+	batchCursor, err := driver.Aggregate(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.registry,
+		aggOpts,
+	)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+
+	cursor, err := newCursor(batchCursor, coll.registry)
+	return cursor, replaceTopologyErr(err)
+}
+
+// Count gets the number of documents matching the filter using the count
+// command.
+func (coll *Collection) Count(ctx context.Context, filter interface{},
+	opts ...*options.CountOptions) (int64, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return 0, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return 0, err
+	}
+
+	// Per-operation read concerns are omitted while a transaction is in progress.
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Count{
+		NS:          command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Query:       f,
+		ReadPref:    coll.readPreference,
+		ReadConcern: rc,
+		Session:     sess,
+		Clock:       coll.client.clock,
+	}
+
+	count, err := driver.Count(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.registry,
+		opts...,
+	)
+
+	return count, replaceTopologyErr(err)
+}
+
+// CountDocuments gets the number of documents matching the filter. Unlike
+// Count, it runs an aggregation pipeline (built by
+// countDocumentsAggregatePipeline) rather than the count command.
+func (coll *Collection) CountDocuments(ctx context.Context, filter interface{},
+	opts ...*options.CountOptions) (int64, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	countOpts := options.MergeCountOptions(opts...)
+
+	pipelineArr, err := countDocumentsAggregatePipeline(coll.registry, filter, countOpts)
+	if err != nil {
+		return 0, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return 0, err
+	}
+
+	// Per-operation read concerns are omitted while a transaction is in progress.
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.CountDocuments{
+		NS:          command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Pipeline:    pipelineArr,
+		ReadPref:    coll.readPreference,
+		ReadConcern: rc,
+		Session:     sess,
+		Clock:       coll.client.clock,
+	}
+
+	count, err := driver.CountDocuments(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.registry,
+		countOpts,
+	)
+
+	return count, replaceTopologyErr(err)
+}
+
+// EstimatedDocumentCount gets an estimate of the count of documents in a collection using collection metadata.
+// Only the MaxTime field of the last supplied option is honored.
+func (coll *Collection) EstimatedDocumentCount(ctx context.Context,
+	opts ...*options.EstimatedDocumentCountOptions) (int64, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := coll.client.ValidSession(sess)
+	if err != nil {
+		return 0, err
+	}
+
+	// Per-operation read concerns are omitted while a transaction is in progress.
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Count{
+		NS:          command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Query:       bsonx.Doc{},
+		ReadPref:    coll.readPreference,
+		ReadConcern: rc,
+		Session:     sess,
+		Clock:       coll.client.clock,
+	}
+
+	countOpts := options.Count()
+	if len(opts) >= 1 {
+		// Guard against a nil MaxTime: the previous code dereferenced the
+		// pointer unconditionally and panicked when options were supplied
+		// without MaxTime set.
+		if maxTime := opts[len(opts)-1].MaxTime; maxTime != nil {
+			countOpts = countOpts.SetMaxTime(*maxTime)
+		}
+	}
+
+	count, err := driver.Count(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.registry,
+		countOpts,
+	)
+
+	return count, replaceTopologyErr(err)
+}
+
+// Distinct finds the distinct values for a specified field across a single
+// collection.
+func (coll *Collection) Distinct(ctx context.Context, fieldName string, filter interface{},
+	opts ...*options.DistinctOptions) ([]interface{}, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	// Per-operation read concerns are omitted while a transaction is in progress.
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Distinct{
+		NS:          command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Field:       fieldName,
+		Query:       f,
+		ReadPref:    coll.readPreference,
+		ReadConcern: rc,
+		Session:     sess,
+		Clock:       coll.client.clock,
+	}
+
+	res, err := driver.Distinct(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		opts...,
+	)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+
+	return res.Values, nil
+}
+
+// Find finds the documents matching a model.
+func (coll *Collection) Find(ctx context.Context, filter interface{},
+	opts ...*options.FindOptions) (*Cursor, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	// Per-operation read concerns are omitted while a transaction is in progress.
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Find{
+		NS:          command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Filter:      f,
+		ReadPref:    coll.readPreference,
+		ReadConcern: rc,
+		Session:     sess,
+		Clock:       coll.client.clock,
+	}
+
+	batchCursor, err := driver.Find(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.registry,
+		opts...,
+	)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+
+	cursor, err := newCursor(batchCursor, coll.registry)
+	return cursor, replaceTopologyErr(err)
+}
+
+// FindOne returns up to one document that matches the model. Errors are
+// reported through the returned SingleResult rather than a second return
+// value.
+func (coll *Collection) FindOne(ctx context.Context, filter interface{},
+	opts ...*options.FindOneOptions) *SingleResult {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	// Per-operation read concerns are omitted while a transaction is in progress.
+	rc := coll.readConcern
+	if sess != nil && (sess.TransactionInProgress()) {
+		rc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.Find{
+		NS:          command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Filter:      f,
+		ReadPref:    coll.readPreference,
+		ReadConcern: rc,
+		Session:     sess,
+		Clock:       coll.client.clock,
+	}
+
+	// Convert the FindOne-specific options field-by-field so the shared
+	// driver.Find path can be reused.
+	findOpts := make([]*options.FindOptions, len(opts))
+	for i, opt := range opts {
+		findOpts[i] = &options.FindOptions{
+			AllowPartialResults: opt.AllowPartialResults,
+			BatchSize:           opt.BatchSize,
+			Collation:           opt.Collation,
+			Comment:             opt.Comment,
+			CursorType:          opt.CursorType,
+			Hint:                opt.Hint,
+			Max:                 opt.Max,
+			MaxAwaitTime:        opt.MaxAwaitTime,
+			Min:                 opt.Min,
+			NoCursorTimeout:     opt.NoCursorTimeout,
+			OplogReplay:         opt.OplogReplay,
+			Projection:          opt.Projection,
+			ReturnKey:           opt.ReturnKey,
+			ShowRecordID:        opt.ShowRecordID,
+			Skip:                opt.Skip,
+			Snapshot:            opt.Snapshot,
+			Sort:                opt.Sort,
+		}
+	}
+
+	batchCursor, err := driver.Find(
+		ctx, cmd,
+		coll.client.topology,
+		coll.readSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.registry,
+		findOpts...,
+	)
+	if err != nil {
+		return &SingleResult{err: replaceTopologyErr(err)}
+	}
+
+	cursor, err := newCursor(batchCursor, coll.registry)
+	return &SingleResult{cur: cursor, reg: coll.registry, err: replaceTopologyErr(err)}
+}
+
+// FindOneAndDelete find a single document and deletes it, returning the
+// original in result. Errors are reported through the returned SingleResult.
+func (coll *Collection) FindOneAndDelete(ctx context.Context, filter interface{},
+	opts ...*options.FindOneAndDeleteOptions) *SingleResult {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	oldns := coll.namespace()
+	// Per-operation write concerns are omitted while a transaction is running.
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	cmd := command.FindOneAndDelete{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Query:        f,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	res, err := driver.FindOneAndDelete(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		coll.registry,
+		opts...,
+	)
+	if err != nil {
+		return &SingleResult{err: replaceTopologyErr(err)}
+	}
+
+	return &SingleResult{rdr: res.Value, reg: coll.registry}
+}
+
+// FindOneAndReplace finds a single document and replaces it, returning either
+// the original or the replaced document. The replacement must be a plain
+// document: it may not contain update operators (keys beginning with '$').
+// Errors are reported through the returned SingleResult.
+func (coll *Collection) FindOneAndReplace(ctx context.Context, filter interface{},
+	replacement interface{}, opts ...*options.FindOneAndReplaceOptions) *SingleResult {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	r, err := transformDocument(coll.registry, replacement)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	// Reject update documents passed as replacements. The error message
+	// previously read "cannot contains ... '$" (bad grammar, unterminated
+	// quote); fixed here, matching ReplaceOne.
+	if len(r) > 0 && strings.HasPrefix(r[0].Key, "$") {
+		return &SingleResult{err: errors.New("replacement document cannot contain keys beginning with '$'")}
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	// Per-operation write concerns are omitted while a transaction is running.
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.FindOneAndReplace{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Query:        f,
+		Replacement:  r,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	res, err := driver.FindOneAndReplace(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		coll.registry,
+		opts...,
+	)
+	if err != nil {
+		return &SingleResult{err: replaceTopologyErr(err)}
+	}
+
+	return &SingleResult{rdr: res.Value, reg: coll.registry}
+}
+
+// FindOneAndUpdate finds a single document and updates it, returning either
+// the original or the updated. The update document must contain only update
+// operators (enforced by ensureDollarKey). Errors are reported through the
+// returned SingleResult.
+func (coll *Collection) FindOneAndUpdate(ctx context.Context, filter interface{},
+	update interface{}, opts ...*options.FindOneAndUpdateOptions) *SingleResult {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	f, err := transformDocument(coll.registry, filter)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	u, err := transformDocument(coll.registry, update)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	err = ensureDollarKey(u)
+	if err != nil {
+		return &SingleResult{
+			err: err,
+		}
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err = coll.client.ValidSession(sess)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	// Per-operation write concerns are omitted while a transaction is running.
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	oldns := coll.namespace()
+	cmd := command.FindOneAndUpdate{
+		NS:           command.Namespace{DB: oldns.DB, Collection: oldns.Collection},
+		Query:        f,
+		Update:       u,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+
+	res, err := driver.FindOneAndUpdate(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+		coll.client.retryWrites,
+		coll.registry,
+		opts...,
+	)
+	if err != nil {
+		return &SingleResult{err: replaceTopologyErr(err)}
+	}
+
+	return &SingleResult{rdr: res.Value, reg: coll.registry}
+}
+
+// Watch returns a change stream cursor used to receive notifications of changes to the collection.
+//
+// This method is preferred to running a raw aggregation with a $changeStream stage because it
+// supports resumability in the case of some errors. The collection must have read concern majority or no read concern
+// for a change stream to be created successfully.
+func (coll *Collection) Watch(ctx context.Context, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+	return newChangeStream(ctx, coll, pipeline, opts...)
+}
+
+// Indexes returns the index view for this collection.
+func (coll *Collection) Indexes() IndexView {
+	return IndexView{coll: coll}
+}
+
+// Drop drops this collection from database. Dropping a collection that does
+// not exist is not an error: "namespace not found" errors are swallowed.
+func (coll *Collection) Drop(ctx context.Context) error {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := coll.client.ValidSession(sess)
+	if err != nil {
+		return err
+	}
+
+	// Per-operation write concerns are omitted while a transaction is running.
+	wc := coll.writeConcern
+	if sess != nil && sess.TransactionRunning() {
+		wc = nil
+	}
+
+	cmd := command.DropCollection{
+		DB:           coll.db.name,
+		Collection:   coll.name,
+		WriteConcern: wc,
+		Session:      sess,
+		Clock:        coll.client.clock,
+	}
+	_, err = driver.DropCollection(
+		ctx, cmd,
+		coll.client.topology,
+		coll.writeSelector,
+		coll.client.id,
+		coll.client.topology.SessionPool,
+	)
+	// Ignore "not found" so dropping an absent collection succeeds.
+	if err != nil && !command.IsNotFound(err) {
+		return replaceTopologyErr(err)
+	}
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/cursor.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/cursor.go
new file mode 100644
index 0000000..a9dc13d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/cursor.go
@@ -0,0 +1,137 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+)
+
+// Cursor is used to iterate a stream of documents. Each document is decoded into the result
+// according to the rules of the bson package.
+//
+// A typical usage of the Cursor type would be:
+//
+//		var cur *Cursor
+//		ctx := context.Background()
+//		defer cur.Close(ctx)
+//
+// 		for cur.Next(ctx) {
+//			elem := &bson.D{}
+//			if err := cur.Decode(elem); err != nil {
+// 				log.Fatal(err)
+// 			}
+//
+// 			// do something with elem....
+//		}
+//
+// 		if err := cur.Err(); err != nil {
+//			log.Fatal(err)
+//		}
+//
+type Cursor struct {
+	// Current is the BSON bytes of the current document. This property is only valid until the next
+	// call to Next or Close. If continued access is required to the bson.Raw, you must make a copy
+	// of it.
+	Current bson.Raw
+
+	bc       batchCursor
+	pos      int
+	batch    []byte
+	registry *bsoncodec.Registry
+
+	err error
+}
+
+func newCursor(bc batchCursor, registry *bsoncodec.Registry) (*Cursor, error) {
+	if registry == nil {
+		registry = bson.DefaultRegistry
+	}
+	if bc == nil {
+		return nil, errors.New("batch cursor must not be nil")
+	}
+	return &Cursor{bc: bc, pos: 0, batch: make([]byte, 0, 256), registry: registry}, nil
+}
+
+func newEmptyCursor() *Cursor {
+	return &Cursor{bc: driver.NewEmptyBatchCursor()}
+}
+
+// ID returns the ID of this cursor.
+func (c *Cursor) ID() int64 { return c.bc.ID() }
+
+func (c *Cursor) advanceCurrentDocument() bool {
+	if len(c.batch[c.pos:]) < 4 {
+		c.err = errors.New("could not read next document: insufficient bytes")
+		return false
+	}
+	length := (int(c.batch[c.pos]) | int(c.batch[c.pos+1])<<8 | int(c.batch[c.pos+2])<<16 | int(c.batch[c.pos+3])<<24)
+	if len(c.batch[c.pos:]) < length {
+		c.err = errors.New("could not read next document: insufficient bytes")
+		return false
+	}
+	if len(c.Current) > 4 {
+		c.Current[0], c.Current[1], c.Current[2], c.Current[3] = 0x00, 0x00, 0x00, 0x00 // Invalidate the current document
+	}
+	c.Current = c.batch[c.pos : c.pos+length]
+	c.pos += length
+	return true
+}
+
+// Next gets the next result from this cursor. Returns true if there were no errors and the next
+// result is available for decoding.
+func (c *Cursor) Next(ctx context.Context) bool {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+	if c.pos < len(c.batch) {
+		return c.advanceCurrentDocument()
+	}
+
+	// clear the batch
+	c.batch = c.batch[:0]
+	c.pos = 0
+	c.Current = c.Current[:0]
+
+	// call the Next method in a loop until at least one document is returned in the next batch or
+	// the context times out.
+	for len(c.batch) == 0 {
+		// If we don't have a next batch
+		if !c.bc.Next(ctx) {
+			// Do we have an error? If so we return false.
+			c.err = c.bc.Err()
+			if c.err != nil {
+				return false
+			}
+			// Is the cursor ID zero?
+			if c.bc.ID() == 0 {
+				return false
+			}
+			// empty batch, but cursor is still valid, so continue.
+			continue
+		}
+
+		c.batch = c.bc.Batch(c.batch[:0])
+	}
+
+	return c.advanceCurrentDocument()
+}
+
+// Decode will decode the current document into val.
+func (c *Cursor) Decode(val interface{}) error {
+	return bson.UnmarshalWithRegistry(c.registry, c.Current, val)
+}
+
+// Err returns the current error.
+func (c *Cursor) Err() error { return c.err }
+
+// Close closes this cursor.
+func (c *Cursor) Close(ctx context.Context) error { return c.bc.Close(ctx) }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/database.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/database.go
new file mode 100644
index 0000000..9575e06
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/database.go
@@ -0,0 +1,282 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Database performs operations on a given database.
+type Database struct {
+	client         *Client
+	name           string
+	readConcern    *readconcern.ReadConcern
+	writeConcern   *writeconcern.WriteConcern
+	readPreference *readpref.ReadPref
+	readSelector   description.ServerSelector
+	writeSelector  description.ServerSelector
+	registry       *bsoncodec.Registry
+}
+
+func newDatabase(client *Client, name string, opts ...*options.DatabaseOptions) *Database {
+	dbOpt := options.MergeDatabaseOptions(opts...)
+
+	rc := client.readConcern
+	if dbOpt.ReadConcern != nil {
+		rc = dbOpt.ReadConcern
+	}
+
+	rp := client.readPreference
+	if dbOpt.ReadPreference != nil {
+		rp = dbOpt.ReadPreference
+	}
+
+	wc := client.writeConcern
+	if dbOpt.WriteConcern != nil {
+		wc = dbOpt.WriteConcern
+	}
+
+	db := &Database{
+		client:         client,
+		name:           name,
+		readPreference: rp,
+		readConcern:    rc,
+		writeConcern:   wc,
+		registry:       client.registry,
+	}
+
+	db.readSelector = description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(db.readPreference),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	db.writeSelector = description.CompositeSelector([]description.ServerSelector{
+		description.WriteSelector(),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	return db
+}
+
+// Client returns the Client the database was created from.
+func (db *Database) Client() *Client {
+	return db.client
+}
+
+// Name returns the name of the database.
+func (db *Database) Name() string {
+	return db.name
+}
+
+// Collection gets a handle for a given collection in the database.
+func (db *Database) Collection(name string, opts ...*options.CollectionOptions) *Collection {
+	return newCollection(db, name, opts...)
+}
+
+func (db *Database) processRunCommand(ctx context.Context, cmd interface{}, opts ...*options.RunCmdOptions) (command.Read,
+	description.ServerSelector, error) {
+
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+	runCmd := options.MergeRunCmdOptions(opts...)
+
+	if err := db.client.ValidSession(sess); err != nil {
+		return command.Read{}, nil, err
+	}
+
+	rp := runCmd.ReadPreference
+	if rp == nil {
+		if sess != nil && sess.TransactionRunning() {
+			rp = sess.CurrentRp // override with transaction read pref if specified
+		}
+		if rp == nil {
+			rp = readpref.Primary() // set to primary if nothing specified in options
+		}
+	}
+
+	runCmdDoc, err := transformDocument(db.registry, cmd)
+	if err != nil {
+		return command.Read{}, nil, err
+	}
+
+	readSelect := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(rp),
+		description.LatencySelector(db.client.localThreshold),
+	})
+
+	return command.Read{
+		DB:       db.Name(),
+		Command:  runCmdDoc,
+		ReadPref: rp,
+		Session:  sess,
+		Clock:    db.client.clock,
+	}, readSelect, nil
+}
+
+// RunCommand runs a command on the database. A user can supply a custom
+// context to this method, or nil to default to context.Background().
+func (db *Database) RunCommand(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) *SingleResult {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	readCmd, readSelect, err := db.processRunCommand(ctx, runCommand, opts...)
+	if err != nil {
+		return &SingleResult{err: err}
+	}
+
+	doc, err := driver.Read(ctx,
+		readCmd,
+		db.client.topology,
+		readSelect,
+		db.client.id,
+		db.client.topology.SessionPool,
+	)
+
+	return &SingleResult{err: replaceTopologyErr(err), rdr: doc, reg: db.registry}
+}
+
+// RunCommandCursor runs a command on the database and returns a cursor over the resulting reader. A user can supply
+// a custom context to this method, or nil to default to context.Background().
+func (db *Database) RunCommandCursor(ctx context.Context, runCommand interface{}, opts ...*options.RunCmdOptions) (*Cursor, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	readCmd, readSelect, err := db.processRunCommand(ctx, runCommand, opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	batchCursor, err := driver.ReadCursor(
+		ctx,
+		readCmd,
+		db.client.topology,
+		readSelect,
+		db.client.id,
+		db.client.topology.SessionPool,
+	)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+
+	cursor, err := newCursor(batchCursor, db.registry)
+	return cursor, replaceTopologyErr(err)
+}
+
+// Drop drops this database from mongodb.
+func (db *Database) Drop(ctx context.Context) error {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := db.client.ValidSession(sess)
+	if err != nil {
+		return err
+	}
+
+	cmd := command.DropDatabase{
+		DB:      db.name,
+		Session: sess,
+		Clock:   db.client.clock,
+	}
+	_, err = driver.DropDatabase(
+		ctx, cmd,
+		db.client.topology,
+		db.writeSelector,
+		db.client.id,
+		db.client.topology.SessionPool,
+	)
+	if err != nil && !command.IsNotFound(err) {
+		return replaceTopologyErr(err)
+	}
+	return nil
+}
+
+// ListCollections list collections from mongodb database.
+func (db *Database) ListCollections(ctx context.Context, filter interface{}, opts ...*options.ListCollectionsOptions) (*Cursor, error) {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := db.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	filterDoc, err := transformDocument(db.registry, filter)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd := command.ListCollections{
+		DB:       db.name,
+		Filter:   filterDoc,
+		ReadPref: readpref.Primary(), // list collections must be run on a primary by default
+		Session:  sess,
+		Clock:    db.client.clock,
+	}
+
+	readSelector := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(readpref.Primary()),
+		description.LatencySelector(db.client.localThreshold),
+	})
+	batchCursor, err := driver.ListCollections(
+		ctx, cmd,
+		db.client.topology,
+		readSelector,
+		db.client.id,
+		db.client.topology.SessionPool,
+		opts...,
+	)
+	if err != nil {
+		return nil, replaceTopologyErr(err)
+	}
+
+	cursor, err := newCursor(batchCursor, db.registry)
+	return cursor, replaceTopologyErr(err)
+}
+
+// ReadConcern returns the read concern of this database.
+func (db *Database) ReadConcern() *readconcern.ReadConcern {
+	return db.readConcern
+}
+
+// ReadPreference returns the read preference of this database.
+func (db *Database) ReadPreference() *readpref.ReadPref {
+	return db.readPreference
+}
+
+// WriteConcern returns the write concern of this database.
+func (db *Database) WriteConcern() *writeconcern.WriteConcern {
+	return db.writeConcern
+}
+
+// Watch returns a change stream cursor used to receive information of changes to the database. This method is preferred
+// to running a raw aggregation with a $changeStream stage because it supports resumability in the case of some errors.
+// The database must have read concern majority or no read concern for a change stream to be created successfully.
+func (db *Database) Watch(ctx context.Context, pipeline interface{},
+	opts ...*options.ChangeStreamOptions) (*ChangeStream, error) {
+
+	return newDbChangeStream(ctx, db, pipeline, opts...)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/doc.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/doc.go
new file mode 100644
index 0000000..4f7f819
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/doc.go
@@ -0,0 +1,60 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// NOTE: This documentation should be kept in line with the Example* test functions.
+
+// Package mongo provides a MongoDB Driver API for Go.
+//
+// Basic usage of the driver starts with creating a Client from a connection
+// string. To do so, call the NewClient and Connect functions:
+//
+// 		client, err := NewClient("mongodb://foo:bar@localhost:27017")
+// 		if err != nil { return err }
+// 		ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+// 		defer cancel()
+// 		err = client.Connect(ctx)
+// 		if err != nil { return err }
+//
+// This will create a new client and start monitoring the MongoDB server on localhost.
+// The Database and Collection types can be used to access the database:
+//
+//    collection := client.Database("baz").Collection("qux")
+//
+// A Collection can be used to query the database or insert documents:
+//
+//    res, err := collection.InsertOne(context.Background(), bson.M{"hello": "world"})
+//    if err != nil { return err }
+//    id := res.InsertedID
+//
+// Several methods return a cursor, which can be used like this:
+//
+//    cur, err := collection.Find(context.Background(), bson.D{})
+//    if err != nil { log.Fatal(err) }
+//    defer cur.Close(context.Background())
+//    for cur.Next(context.Background()) {
+//       raw, err := cur.DecodeBytes()
+//       if err != nil { log.Fatal(err) }
+//       // do something with elem....
+//    }
+//    if err := cur.Err(); err != nil {
+//    		return err
+//    }
+//
+// Methods that only return a single document will return a *SingleResult, which works
+// like a *sql.Row:
+//
+// 	  result := struct{
+// 	  	Foo string
+// 	  	Bar int32
+// 	  }{}
+//    filter := bson.D{{"hello", "world"}}
+//    err := collection.FindOne(context.Background(), filter).Decode(&result)
+//    if err != nil { return err }
+//    // do something with result...
+//
+// Additional examples can be found under the examples directory in the driver's repository and
+// on the MongoDB website.
+package mongo
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/errors.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/errors.go
new file mode 100644
index 0000000..3505bcc
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/errors.go
@@ -0,0 +1,186 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// ErrUnacknowledgedWrite is returned from functions that have an unacknowledged
+// write concern.
+var ErrUnacknowledgedWrite = errors.New("unacknowledged write")
+
+// ErrClientDisconnected is returned when a user attempts to call a method on a
+// disconnected client
+var ErrClientDisconnected = errors.New("client is disconnected")
+
+// ErrNilDocument is returned when a user attempts to pass a nil document or filter
+// to a function where the field is required.
+var ErrNilDocument = errors.New("document is nil")
+
+// ErrEmptySlice is returned when a user attempts to pass an empty slice as input
+// to a function wehere the field is required.
+var ErrEmptySlice = errors.New("must provide at least one element in input slice")
+
+func replaceTopologyErr(err error) error {
+	if err == topology.ErrTopologyClosed {
+		return ErrClientDisconnected
+	}
+	return err
+}
+
+// WriteError is a non-write concern failure that occurred as a result of a write
+// operation.
+type WriteError struct {
+	Index   int
+	Code    int
+	Message string
+}
+
+func (we WriteError) Error() string { return we.Message }
+
+// WriteErrors is a group of non-write concern failures that occurred as a result
+// of a write operation.
+type WriteErrors []WriteError
+
+func (we WriteErrors) Error() string {
+	var buf bytes.Buffer
+	fmt.Fprint(&buf, "write errors: [")
+	for idx, err := range we {
+		if idx != 0 {
+			fmt.Fprintf(&buf, ", ")
+		}
+		fmt.Fprintf(&buf, "{%s}", err)
+	}
+	fmt.Fprint(&buf, "]")
+	return buf.String()
+}
+
+func writeErrorsFromResult(rwes []result.WriteError) WriteErrors {
+	wes := make(WriteErrors, 0, len(rwes))
+	for _, err := range rwes {
+		wes = append(wes, WriteError{Index: err.Index, Code: err.Code, Message: err.ErrMsg})
+	}
+	return wes
+}
+
+// WriteConcernError is a write concern failure that occurred as a result of a
+// write operation.
+type WriteConcernError struct {
+	Code    int
+	Message string
+	Details bson.Raw
+}
+
+func (wce WriteConcernError) Error() string { return wce.Message }
+
+// WriteException is an error for a non-bulk write operation.
+type WriteException struct {
+	WriteConcernError *WriteConcernError
+	WriteErrors       WriteErrors
+}
+
+func (mwe WriteException) Error() string {
+	var buf bytes.Buffer
+	fmt.Fprint(&buf, "multiple write errors: [")
+	fmt.Fprintf(&buf, "{%s}, ", mwe.WriteErrors)
+	fmt.Fprintf(&buf, "{%s}]", mwe.WriteConcernError)
+	return buf.String()
+}
+
+func convertBulkWriteErrors(errors []driver.BulkWriteError) []BulkWriteError {
+	bwErrors := make([]BulkWriteError, 0, len(errors))
+	for _, err := range errors {
+		bwErrors = append(bwErrors, BulkWriteError{
+			WriteError{
+				Index:   err.Index,
+				Code:    err.Code,
+				Message: err.ErrMsg,
+			},
+			dispatchToMongoModel(err.Model),
+		})
+	}
+
+	return bwErrors
+}
+
+func convertWriteConcernError(wce *result.WriteConcernError) *WriteConcernError {
+	if wce == nil {
+		return nil
+	}
+
+	return &WriteConcernError{Code: wce.Code, Message: wce.ErrMsg, Details: wce.ErrInfo}
+}
+
+// BulkWriteError is an error for one operation in a bulk write.
+type BulkWriteError struct {
+	WriteError
+	Request WriteModel
+}
+
+func (bwe BulkWriteError) Error() string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "{%s}", bwe.WriteError)
+	return buf.String()
+}
+
+// BulkWriteException is an error for a bulk write operation.
+type BulkWriteException struct {
+	WriteConcernError *WriteConcernError
+	WriteErrors       []BulkWriteError
+}
+
+func (bwe BulkWriteException) Error() string {
+	var buf bytes.Buffer
+	fmt.Fprint(&buf, "bulk write error: [")
+	fmt.Fprintf(&buf, "{%s}, ", bwe.WriteErrors)
+	fmt.Fprintf(&buf, "{%s}]", bwe.WriteConcernError)
+	return buf.String()
+}
+
+// returnResult is used to determine if a function calling processWriteError should return
+// the result or return nil. Since the processWriteError function is used by many different
+// methods, both *One and *Many, we need a way to differentiate if the method should return
+// the result and the error.
+type returnResult int
+
+const (
+	rrNone returnResult = 1 << iota // None means do not return the result ever.
+	rrOne                           // One means return the result if this was called by a *One method.
+	rrMany                          // Many means return the result if this was called by a *Many method.
+
+	rrAll returnResult = rrOne | rrMany // All means always return the result.
+)
+
+// processWriteError handles processing the result of a write operation. If the returnResult matches
+// the calling method's type, it should return the result object in addition to the error.
+// This function will wrap the errors from other packages and return them as errors from this package.
+//
+// WriteConcernError will be returned over WriteErrors if both are present.
+func processWriteError(wce *result.WriteConcernError, wes []result.WriteError, err error) (returnResult, error) {
+	switch {
+	case err == command.ErrUnacknowledgedWrite:
+		return rrAll, ErrUnacknowledgedWrite
+	case err != nil:
+		return rrNone, replaceTopologyErr(err)
+	case wce != nil || len(wes) > 0:
+		return rrMany, WriteException{
+			WriteConcernError: convertWriteConcernError(wce),
+			WriteErrors:       writeErrorsFromResult(wes),
+		}
+	default:
+		return rrAll, nil
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/index_options_builder.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/index_options_builder.go
new file mode 100644
index 0000000..abc1514
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/index_options_builder.go
@@ -0,0 +1,134 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson"
+)
+
+// IndexOptionsBuilder constructs a BSON document for index options
+type IndexOptionsBuilder struct {
+	document bson.D
+}
+
+// NewIndexOptionsBuilder creates a new instance of IndexOptionsBuilder
+func NewIndexOptionsBuilder() *IndexOptionsBuilder {
+	return &IndexOptionsBuilder{}
+}
+
+// Background sets the background option
+func (iob *IndexOptionsBuilder) Background(background bool) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"background", background})
+	return iob
+}
+
+// ExpireAfterSeconds sets the expireAfterSeconds option
+func (iob *IndexOptionsBuilder) ExpireAfterSeconds(expireAfterSeconds int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"expireAfterSeconds", expireAfterSeconds})
+	return iob
+}
+
+// Name sets the name option
+func (iob *IndexOptionsBuilder) Name(name string) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"name", name})
+	return iob
+}
+
+// Sparse sets the sparse option
+func (iob *IndexOptionsBuilder) Sparse(sparse bool) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"sparse", sparse})
+	return iob
+}
+
+// StorageEngine sets the storageEngine option
+func (iob *IndexOptionsBuilder) StorageEngine(storageEngine interface{}) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"storageEngine", storageEngine})
+	return iob
+}
+
+// Unique sets the unique option
+func (iob *IndexOptionsBuilder) Unique(unique bool) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"unique", unique})
+	return iob
+}
+
+// Version sets the version option
+func (iob *IndexOptionsBuilder) Version(version int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"v", version})
+	return iob
+}
+
+// DefaultLanguage sets the defaultLanguage option
+func (iob *IndexOptionsBuilder) DefaultLanguage(defaultLanguage string) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"default_language", defaultLanguage})
+	return iob
+}
+
+// LanguageOverride sets the languageOverride option
+func (iob *IndexOptionsBuilder) LanguageOverride(languageOverride string) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"language_override", languageOverride})
+	return iob
+}
+
+// TextVersion sets the textVersion option
+func (iob *IndexOptionsBuilder) TextVersion(textVersion int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"textIndexVersion", textVersion})
+	return iob
+}
+
+// Weights sets the weights option
+func (iob *IndexOptionsBuilder) Weights(weights interface{}) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"weights", weights})
+	return iob
+}
+
+// SphereVersion sets the sphereVersion option
+func (iob *IndexOptionsBuilder) SphereVersion(sphereVersion int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"2dsphereIndexVersion", sphereVersion})
+	return iob
+}
+
+// Bits sets the bits option
+func (iob *IndexOptionsBuilder) Bits(bits int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"bits", bits})
+	return iob
+}
+
+// Max sets the max option
+func (iob *IndexOptionsBuilder) Max(max float64) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"max", max})
+	return iob
+}
+
+// Min sets the min option
+func (iob *IndexOptionsBuilder) Min(min float64) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"min", min})
+	return iob
+}
+
+// BucketSize sets the bucketSize option
+func (iob *IndexOptionsBuilder) BucketSize(bucketSize int32) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"bucketSize", bucketSize})
+	return iob
+}
+
+// PartialFilterExpression sets the partialFilterExpression option
+func (iob *IndexOptionsBuilder) PartialFilterExpression(partialFilterExpression interface{}) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"partialFilterExpression", partialFilterExpression})
+	return iob
+}
+
+// Collation sets the collation option
+func (iob *IndexOptionsBuilder) Collation(collation interface{}) *IndexOptionsBuilder {
+	iob.document = append(iob.document, bson.E{"collation", collation})
+	return iob
+}
+
+// Build returns the BSON document from the builder
+func (iob *IndexOptionsBuilder) Build() bson.D {
+	return iob.document
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/index_view.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/index_view.go
new file mode 100644
index 0000000..1ff8d49
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/index_view.go
@@ -0,0 +1,343 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// ErrInvalidIndexValue indicates that the index Keys document has a value that isn't either a number or a string.
+var ErrInvalidIndexValue = errors.New("invalid index value")
+
+// ErrNonStringIndexName indicates that the index name specified in the options is not a string.
+var ErrNonStringIndexName = errors.New("index name must be a string")
+
+// ErrMultipleIndexDrop indicates that multiple indexes would be dropped from a call to IndexView.DropOne.
+var ErrMultipleIndexDrop = errors.New("multiple indexes would be dropped")
+
+// IndexView is used to create, drop, and list indexes on a given collection.
+type IndexView struct {
+	coll *Collection
+}
+
+// IndexModel contains information about an index.
+type IndexModel struct {
+	Keys    interface{}
+	Options *options.IndexOptions
+}
+
+// List returns a cursor iterating over all the indexes in the collection.
+func (iv IndexView) List(ctx context.Context, opts ...*options.ListIndexesOptions) (*Cursor, error) {
+	sess := sessionFromContext(ctx)
+
+	err := iv.coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	listCmd := command.ListIndexes{
+		NS:      iv.coll.namespace(),
+		Session: sess,
+		Clock:   iv.coll.client.clock,
+	}
+
+	readSelector := description.CompositeSelector([]description.ServerSelector{
+		description.ReadPrefSelector(readpref.Primary()),
+		description.LatencySelector(iv.coll.client.localThreshold),
+	})
+	batchCursor, err := driver.ListIndexes(
+		ctx, listCmd,
+		iv.coll.client.topology,
+		readSelector,
+		iv.coll.client.id,
+		iv.coll.client.topology.SessionPool,
+		opts...,
+	)
+	if err != nil {
+		if err == command.ErrEmptyCursor {
+			return newEmptyCursor(), nil
+		}
+		return nil, replaceTopologyErr(err)
+	}
+
+	cursor, err := newCursor(batchCursor, iv.coll.registry)
+	return cursor, replaceTopologyErr(err)
+}
+
+// CreateOne creates a single index in the collection specified by the model.
+func (iv IndexView) CreateOne(ctx context.Context, model IndexModel, opts ...*options.CreateIndexesOptions) (string, error) {
+	names, err := iv.CreateMany(ctx, []IndexModel{model}, opts...)
+	if err != nil {
+		return "", err
+	}
+
+	return names[0], nil
+}
+
+// CreateMany creates multiple indexes in the collection specified by the models. The names of the
+// created indexes are returned.
+func (iv IndexView) CreateMany(ctx context.Context, models []IndexModel, opts ...*options.CreateIndexesOptions) ([]string, error) {
+	names := make([]string, 0, len(models))
+	indexes := bsonx.Arr{}
+
+	for _, model := range models {
+		if model.Keys == nil {
+			return nil, fmt.Errorf("index model keys cannot be nil")
+		}
+
+		name, err := getOrGenerateIndexName(iv.coll.registry, model)
+		if err != nil {
+			return nil, err
+		}
+
+		names = append(names, name)
+
+		keys, err := transformDocument(iv.coll.registry, model.Keys)
+		if err != nil {
+			return nil, err
+		}
+		index := bsonx.Doc{{"key", bsonx.Document(keys)}}
+		if model.Options != nil {
+			optsDoc, err := iv.createOptionsDoc(model.Options)
+			if err != nil {
+				return nil, err
+			}
+
+			index = append(index, optsDoc...)
+		}
+		index = index.Set("name", bsonx.String(name))
+
+		indexes = append(indexes, bsonx.Document(index))
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := iv.coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd := command.CreateIndexes{
+		NS:      iv.coll.namespace(),
+		Indexes: indexes,
+		Session: sess,
+		Clock:   iv.coll.client.clock,
+	}
+
+	_, err = driver.CreateIndexes(
+		ctx, cmd,
+		iv.coll.client.topology,
+		iv.coll.writeSelector,
+		iv.coll.client.id,
+		iv.coll.client.topology.SessionPool,
+		opts...,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return names, nil
+}
+
+func (iv IndexView) createOptionsDoc(opts *options.IndexOptions) (bsonx.Doc, error) {
+	optsDoc := bsonx.Doc{}
+	if opts.Background != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"background", bsonx.Boolean(*opts.Background)})
+	}
+	if opts.ExpireAfterSeconds != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"expireAfterSeconds", bsonx.Int32(*opts.ExpireAfterSeconds)})
+	}
+	if opts.Name != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"name", bsonx.String(*opts.Name)})
+	}
+	if opts.Sparse != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"sparse", bsonx.Boolean(*opts.Sparse)})
+	}
+	if opts.StorageEngine != nil {
+		doc, err := transformDocument(iv.coll.registry, opts.StorageEngine)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, bsonx.Elem{"storageEngine", bsonx.Document(doc)})
+	}
+	if opts.Unique != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"unique", bsonx.Boolean(*opts.Unique)})
+	}
+	if opts.Version != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"v", bsonx.Int32(*opts.Version)})
+	}
+	if opts.DefaultLanguage != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"default_language", bsonx.String(*opts.DefaultLanguage)})
+	}
+	if opts.LanguageOverride != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"language_override", bsonx.String(*opts.LanguageOverride)})
+	}
+	if opts.TextVersion != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"textIndexVersion", bsonx.Int32(*opts.TextVersion)})
+	}
+	if opts.Weights != nil {
+		weightsDoc, err := transformDocument(iv.coll.registry, opts.Weights)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, bsonx.Elem{"weights", bsonx.Document(weightsDoc)})
+	}
+	if opts.SphereVersion != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"2dsphereIndexVersion", bsonx.Int32(*opts.SphereVersion)})
+	}
+	if opts.Bits != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"bits", bsonx.Int32(*opts.Bits)})
+	}
+	if opts.Max != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"max", bsonx.Double(*opts.Max)})
+	}
+	if opts.Min != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"min", bsonx.Double(*opts.Min)})
+	}
+	if opts.BucketSize != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"bucketSize", bsonx.Int32(*opts.BucketSize)})
+	}
+	if opts.PartialFilterExpression != nil {
+		doc, err := transformDocument(iv.coll.registry, opts.PartialFilterExpression)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, bsonx.Elem{"partialFilterExpression", bsonx.Document(doc)})
+	}
+	if opts.Collation != nil {
+		doc := opts.Collation.ToDocument()
+		optsDoc = append(optsDoc, bsonx.Elem{"collation", bsonx.Document(doc)})
+	}
+
+	return optsDoc, nil
+}
+
+// DropOne drops the index with the given name from the collection.
+func (iv IndexView) DropOne(ctx context.Context, name string, opts ...*options.DropIndexesOptions) (bson.Raw, error) {
+	if name == "*" {
+		return nil, ErrMultipleIndexDrop
+	}
+
+	sess := sessionFromContext(ctx)
+
+	err := iv.coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd := command.DropIndexes{
+		NS:      iv.coll.namespace(),
+		Index:   name,
+		Session: sess,
+		Clock:   iv.coll.client.clock,
+	}
+
+	return driver.DropIndexes(
+		ctx, cmd,
+		iv.coll.client.topology,
+		iv.coll.writeSelector,
+		iv.coll.client.id,
+		iv.coll.client.topology.SessionPool,
+		opts...,
+	)
+}
+
+// DropAll drops all indexes in the collection.
+func (iv IndexView) DropAll(ctx context.Context, opts ...*options.DropIndexesOptions) (bson.Raw, error) {
+	sess := sessionFromContext(ctx)
+
+	err := iv.coll.client.ValidSession(sess)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd := command.DropIndexes{
+		NS:      iv.coll.namespace(),
+		Index:   "*",
+		Session: sess,
+		Clock:   iv.coll.client.clock,
+	}
+
+	return driver.DropIndexes(
+		ctx, cmd,
+		iv.coll.client.topology,
+		iv.coll.writeSelector,
+		iv.coll.client.id,
+		iv.coll.client.topology.SessionPool,
+		opts...,
+	)
+}
+
+func getOrGenerateIndexName(registry *bsoncodec.Registry, model IndexModel) (string, error) {
+	if model.Options != nil && model.Options.Name != nil {
+		return *model.Options.Name, nil
+	}
+
+	name := bytes.NewBufferString("")
+	first := true
+
+	keys, err := transformDocument(registry, model.Keys)
+	if err != nil {
+		return "", err
+	}
+	for _, elem := range keys {
+		if !first {
+			_, err := name.WriteRune('_')
+			if err != nil {
+				return "", err
+			}
+		}
+
+		_, err := name.WriteString(elem.Key)
+		if err != nil {
+			return "", err
+		}
+
+		_, err = name.WriteRune('_')
+		if err != nil {
+			return "", err
+		}
+
+		var value string
+
+		switch elem.Value.Type() {
+		case bsontype.Int32:
+			value = fmt.Sprintf("%d", elem.Value.Int32())
+		case bsontype.Int64:
+			value = fmt.Sprintf("%d", elem.Value.Int64())
+		case bsontype.String:
+			value = elem.Value.StringValue()
+		default:
+			return "", ErrInvalidIndexValue
+		}
+
+		_, err = name.WriteString(value)
+		if err != nil {
+			return "", err
+		}
+
+		first = false
+	}
+
+	return name.String(), nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/mongo.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/mongo.go
new file mode 100644
index 0000000..9757d9b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/mongo.go
@@ -0,0 +1,242 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net"
+	"reflect"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
// Dialer is used to make network connections.
type Dialer interface {
	// DialContext has the same signature as net.Dialer.DialContext, so a
	// *net.Dialer can be used directly as a Dialer.
	DialContext(ctx context.Context, network, address string) (net.Conn, error)
}

// BSONAppender is an interface implemented by types that can marshal a
// provided type into BSON bytes and append those bytes to the provided []byte.
// The AppendBSON method can return a non-nil error and non-nil []byte. The
// AppendBSON method may also write incomplete BSON to the []byte.
type BSONAppender interface {
	AppendBSON([]byte, interface{}) ([]byte, error)
}

// BSONAppenderFunc is an adapter function that allows any function that
// satisfies the AppendBSON method signature to be used where a BSONAppender is
// used.
type BSONAppenderFunc func([]byte, interface{}) ([]byte, error)

// AppendBSON implements the BSONAppender interface by delegating to the
// underlying function.
func (baf BSONAppenderFunc) AppendBSON(dst []byte, val interface{}) ([]byte, error) {
	return baf(dst, val)
}
+
// MarshalError is returned when attempting to transform a value into a document
// results in an error.
type MarshalError struct {
	Value interface{} // the value that could not be transformed
	Err   error       // the underlying marshaling error, if any
}

// Error implements the error interface. The underlying error, when present,
// is appended to the message so the root cause is not silently dropped
// (previously Err was stored but never surfaced).
func (me MarshalError) Error() string {
	if me.Err != nil {
		return fmt.Sprintf("cannot transform type %s to a *bsonx.Document: %v", reflect.TypeOf(me.Value), me.Err)
	}
	return fmt.Sprintf("cannot transform type %s to a *bsonx.Document", reflect.TypeOf(me.Value))
}
+
// Pipeline is a type that makes creating aggregation pipelines easier. It is a
// helper and is intended for serializing to BSON. Each bson.D element is one
// pipeline stage document.
//
// Example usage:
//
//		mongo.Pipeline{
//			{{"$group", bson.D{{"_id", "$state"}, {"totalPop", bson.D{{"$sum", "$pop"}}}}}},
//			{{"$match", bson.D{{"totalPop", bson.D{{"$gte", 10*1000*1000}}}}}},
//		}
//
type Pipeline []bson.D
+
// transformAndEnsureID is a hack that makes it easy to get a RawValue as the _id value. This will
// be removed when we switch from using bsonx to bsoncore for the driver package.
//
// The value is marshaled with the given registry (or a default registry when
// nil), re-parsed into a bsonx.Doc, and rearranged in place so that an _id
// element — generated if absent — occupies the first position. The decoded
// _id value is returned alongside the document.
func transformAndEnsureID(registry *bsoncodec.Registry, val interface{}) (bsonx.Doc, interface{}, error) {
	// TODO: performance is going to be pretty bad for bsonx.Doc here since we turn it into a []byte
	// only to turn it back into a bsonx.Doc. We can fix this post beta1 when we refactor the driver
	// package to use bsoncore.Document instead of bsonx.Doc.
	if registry == nil {
		registry = bson.NewRegistryBuilder().Build()
	}
	switch tt := val.(type) {
	case nil:
		return nil, nil, ErrNilDocument
	case bsonx.Doc:
		// Work on a copy so the caller's document is left untouched.
		val = tt.Copy()
	case []byte:
		// Slight optimization so we'll just use MarshalBSON and not go through the codec machinery.
		val = bson.Raw(tt)
	}

	// TODO(skriptble): Use a pool of these instead.
	buf := make([]byte, 0, 256)
	b, err := bson.MarshalAppendWithRegistry(registry, buf, val)
	if err != nil {
		return nil, nil, MarshalError{Value: val, Err: err}
	}

	d, err := bsonx.ReadDoc(b)
	if err != nil {
		return nil, nil, err
	}

	var id interface{}

	idx := d.IndexOf("_id")
	var idElem bsonx.Elem
	switch idx {
	case -1:
		// No _id present: generate one, grow the slice by one, shift every
		// element right, and place the new _id at the front.
		idElem = bsonx.Elem{"_id", bsonx.ObjectID(primitive.NewObjectID())}
		d = append(d, bsonx.Elem{})
		copy(d[1:], d)
		d[0] = idElem
	default:
		// _id exists at position idx: shift elements 0..idx-1 right by one
		// and move the existing _id element to the front.
		idElem = d[idx]
		copy(d[1:idx+1], d[0:idx])
		d[0] = idElem
	}

	// Decode the _id element into a plain Go value via a BSON round trip.
	// NOTE(review): buf is reused here and b may share buf's backing array;
	// this assumes bsonx.ReadDoc copied everything it needs out of b — confirm.
	t, data, err := idElem.Value.MarshalAppendBSONValue(buf[:0])
	if err != nil {
		return nil, nil, err
	}

	err = bson.RawValue{Type: t, Value: data}.UnmarshalWithRegistry(registry, &id)
	if err != nil {
		return nil, nil, err
	}

	return d, id, nil
}
+
+func transformDocument(registry *bsoncodec.Registry, val interface{}) (bsonx.Doc, error) {
+	if registry == nil {
+		registry = bson.NewRegistryBuilder().Build()
+	}
+	if val == nil {
+		return nil, ErrNilDocument
+	}
+	if doc, ok := val.(bsonx.Doc); ok {
+		return doc.Copy(), nil
+	}
+	if bs, ok := val.([]byte); ok {
+		// Slight optimization so we'll just use MarshalBSON and not go through the codec machinery.
+		val = bson.Raw(bs)
+	}
+
+	// TODO(skriptble): Use a pool of these instead.
+	buf := make([]byte, 0, 256)
+	b, err := bson.MarshalAppendWithRegistry(registry, buf[:0], val)
+	if err != nil {
+		return nil, MarshalError{Value: val, Err: err}
+	}
+	return bsonx.ReadDoc(b)
+}
+
+func ensureID(d bsonx.Doc) (bsonx.Doc, interface{}) {
+	var id interface{}
+
+	elem, err := d.LookupElementErr("_id")
+	switch err.(type) {
+	case nil:
+		id = elem
+	default:
+		oid := primitive.NewObjectID()
+		d = append(d, bsonx.Elem{"_id", bsonx.ObjectID(oid)})
+		id = oid
+	}
+	return d, id
+}
+
+func ensureDollarKey(doc bsonx.Doc) error {
+	if len(doc) == 0 {
+		return errors.New("update document must have at least one element")
+	}
+	if !strings.HasPrefix(doc[0].Key, "$") {
+		return errors.New("update document must contain key beginning with '$'")
+	}
+	return nil
+}
+
+func transformAggregatePipeline(registry *bsoncodec.Registry, pipeline interface{}) (bsonx.Arr, error) {
+	pipelineArr := bsonx.Arr{}
+	switch t := pipeline.(type) {
+	case bsoncodec.ValueMarshaler:
+		btype, val, err := t.MarshalBSONValue()
+		if err != nil {
+			return nil, err
+		}
+		if btype != bsontype.Array {
+			return nil, fmt.Errorf("ValueMarshaler returned a %v, but was expecting %v", btype, bsontype.Array)
+		}
+		err = pipelineArr.UnmarshalBSONValue(btype, val)
+		if err != nil {
+			return nil, err
+		}
+	default:
+		val := reflect.ValueOf(t)
+		if !val.IsValid() || (val.Kind() != reflect.Slice && val.Kind() != reflect.Array) {
+			return nil, fmt.Errorf("can only transform slices and arrays into aggregation pipelines, but got %v", val.Kind())
+		}
+		for idx := 0; idx < val.Len(); idx++ {
+			elem, err := transformDocument(registry, val.Index(idx).Interface())
+			if err != nil {
+				return nil, err
+			}
+			pipelineArr = append(pipelineArr, bsonx.Document(elem))
+		}
+	}
+
+	return pipelineArr, nil
+}
+
+// Build the aggregation pipeline for the CountDocument command.
+func countDocumentsAggregatePipeline(registry *bsoncodec.Registry, filter interface{}, opts *options.CountOptions) (bsonx.Arr, error) {
+	pipeline := bsonx.Arr{}
+	filterDoc, err := transformDocument(registry, filter)
+
+	if err != nil {
+		return nil, err
+	}
+	pipeline = append(pipeline, bsonx.Document(bsonx.Doc{{"$match", bsonx.Document(filterDoc)}}))
+
+	if opts != nil {
+		if opts.Skip != nil {
+			pipeline = append(pipeline, bsonx.Document(bsonx.Doc{{"$skip", bsonx.Int64(*opts.Skip)}}))
+		}
+		if opts.Limit != nil {
+			pipeline = append(pipeline, bsonx.Document(bsonx.Doc{{"$limit", bsonx.Int64(*opts.Limit)}}))
+		}
+	}
+
+	pipeline = append(pipeline, bsonx.Document(bsonx.Doc{
+		{"$group", bsonx.Document(bsonx.Doc{
+			{"_id", bsonx.Null()},
+			{"n", bsonx.Document(bsonx.Doc{{"$sum", bsonx.Int32(1)}})},
+		})},
+	},
+	))
+
+	return pipeline, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/aggregateoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/aggregateoptions.go
new file mode 100644
index 0000000..3700d84
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/aggregateoptions.go
@@ -0,0 +1,119 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "time"
+
// AggregateOptions represents all possible options to the aggregate() function.
// Pointer fields (and the Hint interface) distinguish "unset" from a zero
// value; only non-nil options are applied.
type AggregateOptions struct {
	AllowDiskUse             *bool          // Enables writing to temporary files. When set to true, aggregation stages can write data to the _tmp subdirectory in the dbPath directory
	BatchSize                *int32         // The number of documents to return per batch
	BypassDocumentValidation *bool          // If true, allows the write to opt-out of document level validation. This only applies when the $out stage is specified
	Collation                *Collation     // Specifies a collation
	MaxTime                  *time.Duration // The maximum amount of time to allow the query to run
	MaxAwaitTime             *time.Duration // The maximum amount of time for the server to wait on new documents to satisfy a tailable cursor query
	Comment                  *string        // Enables users to specify an arbitrary string to help trace the operation through the database profiler, currentOp and logs.
	Hint                     interface{}    // The index to use for the aggregation. The hint does not apply to $lookup and $graphLookup stages
}

// Aggregate returns a pointer to a new AggregateOptions with every option unset.
func Aggregate() *AggregateOptions {
	return &AggregateOptions{}
}
+
// SetAllowDiskUse enables writing to temporary files. When set to true,
// aggregation stages can write data to the _tmp subdirectory in the
// dbPath directory. A copy of b is stored, so later changes to the caller's
// variable have no effect.
func (ao *AggregateOptions) SetAllowDiskUse(b bool) *AggregateOptions {
	ao.AllowDiskUse = &b
	return ao
}

// SetBatchSize specifies the number of documents to return per batch.
func (ao *AggregateOptions) SetBatchSize(i int32) *AggregateOptions {
	ao.BatchSize = &i
	return ao
}

// SetBypassDocumentValidation allows the write to opt-out of document level
// validation. This only applies when the $out stage is specified.
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
func (ao *AggregateOptions) SetBypassDocumentValidation(b bool) *AggregateOptions {
	ao.BypassDocumentValidation = &b
	return ao
}

// SetCollation specifies a collation. The pointer is stored as given; no
// copy is made.
// Valid for server versions >= 3.4.
func (ao *AggregateOptions) SetCollation(c *Collation) *AggregateOptions {
	ao.Collation = c
	return ao
}

// SetMaxTime specifies the maximum amount of time to allow the query to run.
func (ao *AggregateOptions) SetMaxTime(d time.Duration) *AggregateOptions {
	ao.MaxTime = &d
	return ao
}

// SetMaxAwaitTime specifies the maximum amount of time for the server to
// wait on new documents to satisfy a tailable cursor query.
// For servers < 3.2, this option is ignored.
func (ao *AggregateOptions) SetMaxAwaitTime(d time.Duration) *AggregateOptions {
	ao.MaxAwaitTime = &d
	return ao
}

// SetComment enables users to specify an arbitrary string to help trace the
// operation through the database profiler, currentOp and logs.
func (ao *AggregateOptions) SetComment(s string) *AggregateOptions {
	ao.Comment = &s
	return ao
}

// SetHint specifies the index to use for the aggregation. The hint does not
// apply to $lookup and $graphLookup stages.
func (ao *AggregateOptions) SetHint(h interface{}) *AggregateOptions {
	ao.Hint = h
	return ao
}
+
+// MergeAggregateOptions combines the argued AggregateOptions into a single AggregateOptions in a last-one-wins fashion
+func MergeAggregateOptions(opts ...*AggregateOptions) *AggregateOptions {
+	aggOpts := Aggregate()
+	for _, ao := range opts {
+		if ao == nil {
+			continue
+		}
+		if ao.AllowDiskUse != nil {
+			aggOpts.AllowDiskUse = ao.AllowDiskUse
+		}
+		if ao.BatchSize != nil {
+			aggOpts.BatchSize = ao.BatchSize
+		}
+		if ao.BypassDocumentValidation != nil {
+			aggOpts.BypassDocumentValidation = ao.BypassDocumentValidation
+		}
+		if ao.Collation != nil {
+			aggOpts.Collation = ao.Collation
+		}
+		if ao.MaxTime != nil {
+			aggOpts.MaxTime = ao.MaxTime
+		}
+		if ao.MaxAwaitTime != nil {
+			aggOpts.MaxAwaitTime = ao.MaxAwaitTime
+		}
+		if ao.Comment != nil {
+			aggOpts.Comment = ao.Comment
+		}
+		if ao.Hint != nil {
+			aggOpts.Hint = ao.Hint
+		}
+	}
+
+	return aggOpts
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/bulkwriteoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/bulkwriteoptions.go
new file mode 100644
index 0000000..86282fa
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/bulkwriteoptions.go
@@ -0,0 +1,55 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
// DefaultOrdered is the default order for a BulkWriteOptions struct created from BulkWrite.
var DefaultOrdered = true

// BulkWriteOptions represent all possible options for a bulkWrite operation.
type BulkWriteOptions struct {
	BypassDocumentValidation *bool // If true, allows the write to opt out of document-level validation.
	Ordered                  *bool // If true, when a write fails, return without performing remaining writes. Defaults to true.
}

// BulkWrite creates a new *BulkWriteOptions with Ordered defaulted to true.
// NOTE: until SetOrdered is called, the returned options point at the
// package-level DefaultOrdered variable rather than a private copy.
func BulkWrite() *BulkWriteOptions {
	return &BulkWriteOptions{
		Ordered: &DefaultOrdered,
	}
}
+
// SetOrdered configures the ordered option. If true, when a write fails, the function will return without attempting
// remaining writes. Defaults to true. A copy of ordered is stored.
func (b *BulkWriteOptions) SetOrdered(ordered bool) *BulkWriteOptions {
	b.Ordered = &ordered
	return b
}

// SetBypassDocumentValidation specifies if the write should opt out of document-level validation.
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
func (b *BulkWriteOptions) SetBypassDocumentValidation(bypass bool) *BulkWriteOptions {
	b.BypassDocumentValidation = &bypass
	return b
}
+
+// MergeBulkWriteOptions combines the given *BulkWriteOptions into a single *BulkWriteOptions in a last one wins fashion.
+func MergeBulkWriteOptions(opts ...*BulkWriteOptions) *BulkWriteOptions {
+	b := BulkWrite()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Ordered != nil {
+			b.Ordered = opt.Ordered
+		}
+		if opt.BypassDocumentValidation != nil {
+			b.BypassDocumentValidation = opt.BypassDocumentValidation
+		}
+	}
+
+	return b
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/changestreamoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/changestreamoptions.go
new file mode 100644
index 0000000..c8776ac
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/changestreamoptions.go
@@ -0,0 +1,97 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"time"
+)
+
// ChangeStreamOptions represents all possible options to a change stream.
// Pointer fields (and the ResumeAfter interface) distinguish "unset" from a
// zero value; only non-nil options are applied.
type ChangeStreamOptions struct {
	BatchSize            *int32               // The number of documents to return per batch
	Collation            *Collation           // Specifies a collation
	FullDocument         *FullDocument        // When set to ‘updateLookup’, the change notification for partial updates will include both a delta describing the changes to the document, as well as a copy of the entire document that was changed from some time after the change occurred.
	MaxAwaitTime         *time.Duration       // The maximum amount of time for the server to wait on new documents to satisfy a change stream query
	ResumeAfter          interface{}          // Specifies the logical starting point for the new change stream
	StartAtOperationTime *primitive.Timestamp // Ensures that a change stream will only provide changes that occurred after a timestamp.
}

// ChangeStream returns a pointer to a new ChangeStreamOptions with every
// option unset.
func ChangeStream() *ChangeStreamOptions {
	return &ChangeStreamOptions{}
}
+
// SetBatchSize specifies the number of documents to return per batch.
func (cso *ChangeStreamOptions) SetBatchSize(i int32) *ChangeStreamOptions {
	cso.BatchSize = &i
	return cso
}

// SetCollation specifies a collation.
// NOTE(review): the collation is received by value here (AggregateOptions
// takes a pointer), so a copy is stored; confirm the asymmetry is intended.
func (cso *ChangeStreamOptions) SetCollation(c Collation) *ChangeStreamOptions {
	cso.Collation = &c
	return cso
}

// SetFullDocument specifies the fullDocument option.
// When set to ‘updateLookup’, the change notification for partial updates will
// include both a delta describing the changes to the document, as well as a
// copy of the entire document that was changed from some time after the change
// occurred.
func (cso *ChangeStreamOptions) SetFullDocument(fd FullDocument) *ChangeStreamOptions {
	cso.FullDocument = &fd
	return cso
}

// SetMaxAwaitTime specifies the maximum amount of time for the server to wait on new documents to satisfy a change stream query.
func (cso *ChangeStreamOptions) SetMaxAwaitTime(d time.Duration) *ChangeStreamOptions {
	cso.MaxAwaitTime = &d
	return cso
}

// SetResumeAfter specifies the logical starting point for the new change stream.
func (cso *ChangeStreamOptions) SetResumeAfter(rt interface{}) *ChangeStreamOptions {
	cso.ResumeAfter = rt
	return cso
}

// SetStartAtOperationTime ensures that a change stream will only provide changes that occurred after a specified timestamp.
func (cso *ChangeStreamOptions) SetStartAtOperationTime(t *primitive.Timestamp) *ChangeStreamOptions {
	cso.StartAtOperationTime = t
	return cso
}
+
+// MergeChangeStreamOptions combines the argued ChangeStreamOptions into a single ChangeStreamOptions in a last-one-wins fashion
+func MergeChangeStreamOptions(opts ...*ChangeStreamOptions) *ChangeStreamOptions {
+	csOpts := ChangeStream()
+	for _, cso := range opts {
+		if cso == nil {
+			continue
+		}
+		if cso.BatchSize != nil {
+			csOpts.BatchSize = cso.BatchSize
+		}
+		if cso.Collation != nil {
+			csOpts.Collation = cso.Collation
+		}
+		if cso.FullDocument != nil {
+			csOpts.FullDocument = cso.FullDocument
+		}
+		if cso.MaxAwaitTime != nil {
+			csOpts.MaxAwaitTime = cso.MaxAwaitTime
+		}
+		if cso.ResumeAfter != nil {
+			csOpts.ResumeAfter = cso.ResumeAfter
+		}
+		if cso.StartAtOperationTime != nil {
+			csOpts.StartAtOperationTime = cso.StartAtOperationTime
+		}
+	}
+
+	return csOpts
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/clientoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/clientoptions.go
new file mode 100644
index 0000000..e09a9c1
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/clientoptions.go
@@ -0,0 +1,424 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"context"
+	"net"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/event"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/connstring"
+)
+
// ContextDialer makes new network connections. The method signature matches
// net.Dialer.DialContext, so a *net.Dialer satisfies this interface.
type ContextDialer interface {
	DialContext(ctx context.Context, network, address string) (net.Conn, error)
}
+
// SSLOpt holds client SSL options.
//
// Enabled indicates whether SSL should be enabled.
//
// ClientCertificateKeyFile specifies the file containing the client certificate and private key
// used for authentication.
//
// ClientCertificateKeyPassword provides a callback that returns a password used for decrypting the
// private key of a PEM file (if one is provided).
//
// Insecure indicates whether to skip the verification of the server certificate and hostname.
// Enabling it weakens transport security and should be avoided outside testing.
//
// CaFile specifies the file containing the certificate authority used for SSL connections.
type SSLOpt struct {
	Enabled                      bool
	ClientCertificateKeyFile     string
	ClientCertificateKeyPassword func() string
	Insecure                     bool
	CaFile                       string
}
+
// Credential holds auth options. It is applied to a client via
// ClientOptions.SetAuth, which copies every field onto the connection string.
//
// AuthMechanism indicates the mechanism to use for authentication.
// Supported values include "SCRAM-SHA-256", "SCRAM-SHA-1", "MONGODB-CR", "PLAIN", "GSSAPI", and "MONGODB-X509".
//
// AuthMechanismProperties specifies additional configuration options which may be used by certain
// authentication mechanisms.
//
// AuthSource specifies the database to authenticate against.
//
// Username specifies the username that will be authenticated.
//
// Password specifies the password used for authentication.
type Credential struct {
	AuthMechanism           string
	AuthMechanismProperties map[string]string
	AuthSource              string
	Username                string
	Password                string
}
+
// ClientOptions represents all possible options to configure a client.
type ClientOptions struct {
	TopologyOptions []topology.Option
	ConnString      connstring.ConnString
	RetryWrites     *bool
	ReadPreference  *readpref.ReadPref
	ReadConcern     *readconcern.ReadConcern
	WriteConcern    *writeconcern.WriteConcern
	Registry        *bsoncodec.Registry
}

// Client creates a new ClientOptions instance with an empty (non-nil) slice
// of topology options.
func Client() *ClientOptions {
	return &ClientOptions{
		TopologyOptions: make([]topology.Option, 0),
	}
}
+
// SetAppName specifies the client application name. This value is used by MongoDB when it logs
// connection information and profile information, such as slow queries.
func (c *ClientOptions) SetAppName(s string) *ClientOptions {
	c.ConnString.AppName = s

	return c
}

// SetAuth sets the authentication options. Every field of the Credential is
// copied onto the client's connection string settings.
func (c *ClientOptions) SetAuth(auth Credential) *ClientOptions {
	c.ConnString.AuthMechanism = auth.AuthMechanism
	c.ConnString.AuthMechanismProperties = auth.AuthMechanismProperties
	c.ConnString.AuthSource = auth.AuthSource
	c.ConnString.Username = auth.Username
	c.ConnString.Password = auth.Password

	return c
}

// SetConnectTimeout specifies the timeout for an initial connection to a server.
// If a custom Dialer is used, this method won't be set and the user is
// responsible for setting the ConnectTimeout for connections on the dialer
// themselves.
func (c *ClientOptions) SetConnectTimeout(d time.Duration) *ClientOptions {
	c.ConnString.ConnectTimeout = d
	c.ConnString.ConnectTimeoutSet = true

	return c
}
+
// SetDialer specifies a custom dialer used to dial new connections to a server.
// If a custom dialer is not set, a net.Dialer with a 300 second keepalive time will be used by default.
func (c *ClientOptions) SetDialer(d ContextDialer) *ClientOptions {
	// The dialer must be threaded through three layers of functional options:
	// topology -> server -> connection.
	c.TopologyOptions = append(
		c.TopologyOptions,
		topology.WithServerOptions(func(opts ...topology.ServerOption) []topology.ServerOption {
			return append(
				opts,
				topology.WithConnectionOptions(func(opts ...connection.Option) []connection.Option {
					return append(
						opts,
						// The closure ignores any previously configured
						// dialer and always returns d.
						connection.WithDialer(func(connection.Dialer) connection.Dialer {
							return d
						}),
					)
				}),
			)
		}),
	)

	return c
}

// SetMonitor specifies a command monitor used to see commands for a client.
// The monitor is injected at the connection level using the same three-layer
// functional-option nesting as SetDialer.
func (c *ClientOptions) SetMonitor(m *event.CommandMonitor) *ClientOptions {
	c.TopologyOptions = append(
		c.TopologyOptions,
		topology.WithServerOptions(func(opts ...topology.ServerOption) []topology.ServerOption {
			return append(
				opts,
				topology.WithConnectionOptions(func(opts ...connection.Option) []connection.Option {
					return append(
						opts,
						connection.WithMonitor(func(*event.CommandMonitor) *event.CommandMonitor {
							return m
						}),
					)
				}),
			)
		}),
	)

	return c
}
+
// SetHeartbeatInterval specifies the interval to wait between server monitoring checks.
func (c *ClientOptions) SetHeartbeatInterval(d time.Duration) *ClientOptions {
	c.ConnString.HeartbeatInterval = d
	c.ConnString.HeartbeatIntervalSet = true

	return c
}

// SetHosts specifies the initial list of addresses from which to discover the rest of the cluster.
// The slice is stored without copying, so later mutation by the caller is
// visible to these options.
func (c *ClientOptions) SetHosts(s []string) *ClientOptions {
	c.ConnString.Hosts = s

	return c
}

// SetLocalThreshold specifies how far to distribute queries, beyond the server with the fastest
// round-trip time. If a server's round-trip time is more than LocalThreshold slower than
// the fastest, the driver will not send queries to that server.
func (c *ClientOptions) SetLocalThreshold(d time.Duration) *ClientOptions {
	c.ConnString.LocalThreshold = d
	c.ConnString.LocalThresholdSet = true

	return c
}

// SetMaxConnIdleTime specifies the maximum amount of time that a connection can remain idle
// in a connection pool before being removed and closed.
func (c *ClientOptions) SetMaxConnIdleTime(d time.Duration) *ClientOptions {
	c.ConnString.MaxConnIdleTime = d
	c.ConnString.MaxConnIdleTimeSet = true

	return c
}

// SetMaxPoolSize specifies the max size of a server's connection pool.
func (c *ClientOptions) SetMaxPoolSize(u uint16) *ClientOptions {
	c.ConnString.MaxPoolSize = u
	c.ConnString.MaxPoolSizeSet = true

	return c
}
+
// SetReadConcern specifies the read concern.
func (c *ClientOptions) SetReadConcern(rc *readconcern.ReadConcern) *ClientOptions {
	c.ReadConcern = rc

	return c
}

// SetReadPreference specifies the read preference.
func (c *ClientOptions) SetReadPreference(rp *readpref.ReadPref) *ClientOptions {
	c.ReadPreference = rp

	return c
}

// SetRegistry specifies the bsoncodec.Registry.
func (c *ClientOptions) SetRegistry(registry *bsoncodec.Registry) *ClientOptions {
	c.Registry = registry

	// add registry to the server options so that it will be used for the cursors built by this client
	c.TopologyOptions = append(
		c.TopologyOptions,
		topology.WithServerOptions(func(opts ...topology.ServerOption) []topology.ServerOption {
			return append(
				opts,
				// The closure discards any previously configured registry and
				// always returns this one.
				topology.WithRegistry(func(*bsoncodec.Registry) *bsoncodec.Registry {
					return registry
				}),
			)
		}),
	)

	return c
}
+
// SetReplicaSet specifies the name of the replica set of the cluster.
func (c *ClientOptions) SetReplicaSet(s string) *ClientOptions {
	c.ConnString.ReplicaSet = s

	return c
}

// SetRetryWrites specifies whether the client has retryable writes enabled.
func (c *ClientOptions) SetRetryWrites(b bool) *ClientOptions {
	c.RetryWrites = &b

	return c
}

// SetServerSelectionTimeout specifies the amount of time to block for server selection.
func (c *ClientOptions) SetServerSelectionTimeout(d time.Duration) *ClientOptions {
	c.ConnString.ServerSelectionTimeout = d
	c.ConnString.ServerSelectionTimeoutSet = true

	return c
}

// SetSingle specifies whether the driver should connect directly to the server instead of
// auto-discovering other servers in the cluster.
func (c *ClientOptions) SetSingle(b bool) *ClientOptions {
	if b {
		c.ConnString.Connect = connstring.SingleConnect
	} else {
		c.ConnString.Connect = connstring.AutoConnect
	}
	c.ConnString.ConnectSet = true

	return c
}

// SetSocketTimeout specifies the amount of time to attempt to send or receive on a socket
// before the attempt times out.
func (c *ClientOptions) SetSocketTimeout(d time.Duration) *ClientOptions {
	c.ConnString.SocketTimeout = d
	c.ConnString.SocketTimeoutSet = true

	return c
}
+
+// SetSSL sets SSL options.
+func (c *ClientOptions) SetSSL(ssl *SSLOpt) *ClientOptions {
+	c.ConnString.SSL = ssl.Enabled
+	c.ConnString.SSLSet = true
+
+	if ssl.ClientCertificateKeyFile != "" {
+		c.ConnString.SSLClientCertificateKeyFile = ssl.ClientCertificateKeyFile
+		c.ConnString.SSLClientCertificateKeyFileSet = true
+	}
+
+	if ssl.ClientCertificateKeyPassword != nil {
+		c.ConnString.SSLClientCertificateKeyPassword = ssl.ClientCertificateKeyPassword
+		c.ConnString.SSLClientCertificateKeyPasswordSet = true
+	}
+
+	c.ConnString.SSLInsecure = ssl.Insecure
+	c.ConnString.SSLInsecureSet = true
+
+	if ssl.CaFile != "" {
+		c.ConnString.SSLCaFile = ssl.CaFile
+		c.ConnString.SSLCaFileSet = true
+	}
+
+	return c
+}
+
+// SetWriteConcern sets the write concern.
+func (c *ClientOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *ClientOptions {
+	c.WriteConcern = wc
+
+	return c
+}
+
+// MergeClientOptions combines the given connstring and *ClientOptions into a single *ClientOptions in a last one wins
+// fashion. The given connstring will be used for the default options, which can be overwritten using the given
+// *ClientOptions.
+func MergeClientOptions(cs connstring.ConnString, opts ...*ClientOptions) *ClientOptions {
+	// The connection string supplies the defaults; options later in the opts
+	// slice take precedence over earlier ones and over the connstring.
+	c := Client()
+	c.ConnString = cs
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		// Topology options accumulate across all given options rather than overwrite.
+		c.TopologyOptions = append(c.TopologyOptions, opt.TopologyOptions...)
+
+		// A field is considered "provided" either by a non-zero value or, for
+		// fields with an explicit *Set flag, by that flag being true.
+		if an := opt.ConnString.AppName; an != "" {
+			c.ConnString.AppName = an
+		}
+		if am := opt.ConnString.AuthMechanism; len(am) != 0 {
+			c.ConnString.AuthMechanism = am
+		}
+		if amp := opt.ConnString.AuthMechanismProperties; amp != nil {
+			c.ConnString.AuthMechanismProperties = amp
+		}
+		if as := opt.ConnString.AuthSource; len(as) != 0 {
+			c.ConnString.AuthSource = as
+		}
+		if u := opt.ConnString.Username; len(u) != 0 {
+			c.ConnString.Username = u
+		}
+		if p := opt.ConnString.Password; len(p) != 0 {
+			c.ConnString.Password = p
+		}
+		if opt.ConnString.ConnectTimeoutSet {
+			c.ConnString.ConnectTimeoutSet = true
+			c.ConnString.ConnectTimeout = opt.ConnString.ConnectTimeout
+		}
+		if opt.ConnString.HeartbeatIntervalSet {
+			c.ConnString.HeartbeatIntervalSet = true
+			c.ConnString.HeartbeatInterval = opt.ConnString.HeartbeatInterval
+		}
+		if h := opt.ConnString.Hosts; h != nil {
+			c.ConnString.Hosts = h
+		}
+		if opt.ConnString.LocalThresholdSet {
+			c.ConnString.LocalThresholdSet = true
+			c.ConnString.LocalThreshold = opt.ConnString.LocalThreshold
+		}
+		if opt.ConnString.MaxConnIdleTimeSet {
+			c.ConnString.MaxConnIdleTimeSet = true
+			c.ConnString.MaxConnIdleTime = opt.ConnString.MaxConnIdleTime
+		}
+		if opt.ConnString.MaxPoolSizeSet {
+			c.ConnString.MaxPoolSizeSet = true
+			c.ConnString.MaxPoolSize = opt.ConnString.MaxPoolSize
+		}
+		if opt.ReadConcern != nil {
+			c.ReadConcern = opt.ReadConcern
+		}
+		if opt.ReadPreference != nil {
+			c.ReadPreference = opt.ReadPreference
+		}
+		if opt.Registry != nil {
+			c.Registry = opt.Registry
+		}
+		if rs := opt.ConnString.ReplicaSet; rs != "" {
+			c.ConnString.ReplicaSet = rs
+		}
+		if opt.RetryWrites != nil {
+			c.RetryWrites = opt.RetryWrites
+		}
+		if opt.ConnString.ServerSelectionTimeoutSet {
+			c.ConnString.ServerSelectionTimeoutSet = true
+			c.ConnString.ServerSelectionTimeout = opt.ConnString.ServerSelectionTimeout
+		}
+		if opt.ConnString.ConnectSet {
+			c.ConnString.ConnectSet = true
+			c.ConnString.Connect = opt.ConnString.Connect
+		}
+		if opt.ConnString.SocketTimeoutSet {
+			c.ConnString.SocketTimeoutSet = true
+			c.ConnString.SocketTimeout = opt.ConnString.SocketTimeout
+		}
+		if opt.ConnString.SSLSet {
+			c.ConnString.SSLSet = true
+			c.ConnString.SSL = opt.ConnString.SSL
+		}
+		if opt.ConnString.SSLClientCertificateKeyFileSet {
+			c.ConnString.SSLClientCertificateKeyFileSet = true
+			c.ConnString.SSLClientCertificateKeyFile = opt.ConnString.SSLClientCertificateKeyFile
+		}
+		if opt.ConnString.SSLClientCertificateKeyPasswordSet {
+			c.ConnString.SSLClientCertificateKeyPasswordSet = true
+			c.ConnString.SSLClientCertificateKeyPassword = opt.ConnString.SSLClientCertificateKeyPassword
+		}
+		if opt.ConnString.SSLInsecureSet {
+			c.ConnString.SSLInsecureSet = true
+			c.ConnString.SSLInsecure = opt.ConnString.SSLInsecure
+		}
+		if opt.ConnString.SSLCaFileSet {
+			c.ConnString.SSLCaFileSet = true
+			c.ConnString.SSLCaFile = opt.ConnString.SSLCaFile
+		}
+		if opt.WriteConcern != nil {
+			c.WriteConcern = opt.WriteConcern
+		}
+	}
+
+	return c
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/collectionoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/collectionoptions.go
new file mode 100644
index 0000000..3415505
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/collectionoptions.go
@@ -0,0 +1,77 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+)
+
+// CollectionOptions represent all possible options to configure a Collection.
+type CollectionOptions struct {
+	ReadConcern    *readconcern.ReadConcern   // The read concern for operations in the collection.
+	WriteConcern   *writeconcern.WriteConcern // The write concern for operations in the collection.
+	ReadPreference *readpref.ReadPref         // The read preference for operations in the collection.
+	Registry       *bsoncodec.Registry        // The registry to be used to construct BSON encoders and decoders for the collection.
+}
+
+// Collection creates a new, empty CollectionOptions instance.
+func Collection() *CollectionOptions {
+	return &CollectionOptions{}
+}
+
+// SetReadConcern sets the read concern for the collection.
+func (co *CollectionOptions) SetReadConcern(rc *readconcern.ReadConcern) *CollectionOptions {
+	co.ReadConcern = rc
+	return co
+}
+
+// SetWriteConcern sets the write concern for the collection.
+func (co *CollectionOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *CollectionOptions {
+	co.WriteConcern = wc
+	return co
+}
+
+// SetReadPreference sets the read preference for the collection.
+func (co *CollectionOptions) SetReadPreference(rp *readpref.ReadPref) *CollectionOptions {
+	co.ReadPreference = rp
+	return co
+}
+
+// SetRegistry sets the bsoncodec Registry for the collection.
+func (co *CollectionOptions) SetRegistry(r *bsoncodec.Registry) *CollectionOptions {
+	co.Registry = r
+	return co
+}
+
+// MergeCollectionOptions combines the *CollectionOptions arguments into a single *CollectionOptions in a last one wins
+// fashion.
+func MergeCollectionOptions(opts ...*CollectionOptions) *CollectionOptions {
+	merged := Collection()
+
+	// nil entries are skipped; for each field, the last non-nil value wins.
+	for _, src := range opts {
+		if src == nil {
+			continue
+		}
+		if src.ReadConcern != nil {
+			merged.ReadConcern = src.ReadConcern
+		}
+		if src.WriteConcern != nil {
+			merged.WriteConcern = src.WriteConcern
+		}
+		if src.ReadPreference != nil {
+			merged.ReadPreference = src.ReadPreference
+		}
+		if src.Registry != nil {
+			merged.Registry = src.Registry
+		}
+	}
+
+	return merged
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/countoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/countoptions.go
new file mode 100644
index 0000000..be3baab
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/countoptions.go
@@ -0,0 +1,81 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "time"
+
+// CountOptions represents all possible options to the count() function
+type CountOptions struct {
+	Collation *Collation     // Specifies a collation
+	Hint      interface{}    // The index to use
+	Limit     *int64         // The maximum number of documents to count
+	MaxTime   *time.Duration // The maximum amount of time to allow the operation to run
+	Skip      *int64         // The number of documents to skip before counting
+}
+
+// Count returns a pointer to a new, empty CountOptions
+func Count() *CountOptions {
+	return &CountOptions{}
+}
+
+// SetCollation specifies a collation
+// Valid for server versions >= 3.4
+func (opts *CountOptions) SetCollation(c *Collation) *CountOptions {
+	opts.Collation = c
+	return opts
+}
+
+// SetHint specifies the index to use
+func (opts *CountOptions) SetHint(h interface{}) *CountOptions {
+	opts.Hint = h
+	return opts
+}
+
+// SetLimit specifies the maximum number of documents to count
+func (opts *CountOptions) SetLimit(i int64) *CountOptions {
+	opts.Limit = &i
+	return opts
+}
+
+// SetMaxTime specifies the maximum amount of time to allow the operation to run
+func (opts *CountOptions) SetMaxTime(d time.Duration) *CountOptions {
+	opts.MaxTime = &d
+	return opts
+}
+
+// SetSkip specifies the number of documents to skip before counting
+func (opts *CountOptions) SetSkip(i int64) *CountOptions {
+	opts.Skip = &i
+	return opts
+}
+
+// MergeCountOptions combines the argued CountOptions into a single CountOptions in a last-one-wins fashion
+func MergeCountOptions(opts ...*CountOptions) *CountOptions {
+	merged := Count()
+	// nil entries are skipped; for each field, the last non-nil value wins.
+	for _, src := range opts {
+		if src == nil {
+			continue
+		}
+		if src.Collation != nil {
+			merged.Collation = src.Collation
+		}
+		if src.Hint != nil {
+			merged.Hint = src.Hint
+		}
+		if src.Limit != nil {
+			merged.Limit = src.Limit
+		}
+		if src.MaxTime != nil {
+			merged.MaxTime = src.MaxTime
+		}
+		if src.Skip != nil {
+			merged.Skip = src.Skip
+		}
+	}
+
+	return merged
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/dboptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/dboptions.go
new file mode 100644
index 0000000..989cb13
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/dboptions.go
@@ -0,0 +1,77 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+)
+
+// DatabaseOptions represent all possible options to configure a Database.
+type DatabaseOptions struct {
+	ReadConcern    *readconcern.ReadConcern   // The read concern for operations in the database.
+	WriteConcern   *writeconcern.WriteConcern // The write concern for operations in the database.
+	ReadPreference *readpref.ReadPref         // The read preference for operations in the database.
+	Registry       *bsoncodec.Registry        // The registry to be used to construct BSON encoders and decoders for the database.
+}
+
+// Database creates a new, empty DatabaseOptions instance.
+func Database() *DatabaseOptions {
+	return &DatabaseOptions{}
+}
+
+// SetReadConcern sets the read concern for the database.
+func (db *DatabaseOptions) SetReadConcern(rc *readconcern.ReadConcern) *DatabaseOptions {
+	db.ReadConcern = rc
+	return db
+}
+
+// SetWriteConcern sets the write concern for the database.
+func (db *DatabaseOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *DatabaseOptions {
+	db.WriteConcern = wc
+	return db
+}
+
+// SetReadPreference sets the read preference for the database.
+func (db *DatabaseOptions) SetReadPreference(rp *readpref.ReadPref) *DatabaseOptions {
+	db.ReadPreference = rp
+	return db
+}
+
+// SetRegistry sets the bsoncodec Registry for the database.
+func (db *DatabaseOptions) SetRegistry(r *bsoncodec.Registry) *DatabaseOptions {
+	db.Registry = r
+	return db
+}
+
+// MergeDatabaseOptions combines the *DatabaseOptions arguments into a single *DatabaseOptions in a last one wins
+// fashion.
+func MergeDatabaseOptions(opts ...*DatabaseOptions) *DatabaseOptions {
+	merged := Database()
+
+	// nil entries are skipped; for each field, the last non-nil value wins.
+	for _, src := range opts {
+		if src == nil {
+			continue
+		}
+		if src.ReadConcern != nil {
+			merged.ReadConcern = src.ReadConcern
+		}
+		if src.WriteConcern != nil {
+			merged.WriteConcern = src.WriteConcern
+		}
+		if src.ReadPreference != nil {
+			merged.ReadPreference = src.ReadPreference
+		}
+		if src.Registry != nil {
+			merged.Registry = src.Registry
+		}
+	}
+
+	return merged
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/deleteoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/deleteoptions.go
new file mode 100644
index 0000000..919d6b8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/deleteoptions.go
@@ -0,0 +1,39 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// DeleteOptions represents all possible options to the deleteOne() and deleteMany() functions
+type DeleteOptions struct {
+	Collation *Collation // Specifies a collation
+}
+
+// Delete returns a pointer to a new, empty DeleteOptions
+func Delete() *DeleteOptions {
+	return &DeleteOptions{}
+}
+
+// SetCollation specifies a collation
+// Valid for servers >= 3.4.
+func (d *DeleteOptions) SetCollation(c *Collation) *DeleteOptions {
+	d.Collation = c
+	return d
+}
+
+// MergeDeleteOptions combines the argued DeleteOptions into a single DeleteOptions in a last-one-wins fashion
+func MergeDeleteOptions(opts ...*DeleteOptions) *DeleteOptions {
+	merged := Delete()
+	// nil entries are skipped; the last non-nil collation wins.
+	for _, src := range opts {
+		if src == nil {
+			continue
+		}
+		if src.Collation != nil {
+			merged.Collation = src.Collation
+		}
+	}
+
+	return merged
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/distinctoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/distinctoptions.go
new file mode 100644
index 0000000..3b3f588
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/distinctoptions.go
@@ -0,0 +1,51 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "time"
+
+// DistinctOptions represents all possible options to the distinct() function
+type DistinctOptions struct {
+	Collation *Collation     // Specifies a collation
+	MaxTime   *time.Duration // The maximum amount of time to allow the operation to run
+}
+
+// Distinct returns a pointer to a new, empty DistinctOptions
+func Distinct() *DistinctOptions {
+	return &DistinctOptions{}
+}
+
+// SetCollation specifies a collation
+// Valid for server versions >= 3.4
+func (opts *DistinctOptions) SetCollation(c *Collation) *DistinctOptions {
+	opts.Collation = c
+	return opts
+}
+
+// SetMaxTime specifies the maximum amount of time to allow the operation to run
+func (opts *DistinctOptions) SetMaxTime(d time.Duration) *DistinctOptions {
+	opts.MaxTime = &d
+	return opts
+}
+
+// MergeDistinctOptions combines the argued DistinctOptions into a single DistinctOptions in a last-one-wins fashion
+func MergeDistinctOptions(opts ...*DistinctOptions) *DistinctOptions {
+	merged := Distinct()
+	// nil entries are skipped; for each field, the last non-nil value wins.
+	for _, src := range opts {
+		if src == nil {
+			continue
+		}
+		if src.Collation != nil {
+			merged.Collation = src.Collation
+		}
+		if src.MaxTime != nil {
+			merged.MaxTime = src.MaxTime
+		}
+	}
+
+	return merged
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/estimatedcountoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/estimatedcountoptions.go
new file mode 100644
index 0000000..f43bb9f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/estimatedcountoptions.go
@@ -0,0 +1,42 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "time"
+
+// EstimatedDocumentCountOptions represents all possible options to the estimatedDocumentCount() function
+type EstimatedDocumentCountOptions struct {
+	MaxTime *time.Duration // The maximum amount of time to allow the operation to run
+}
+
+// EstimatedDocumentCount returns a pointer to a new, empty EstimatedDocumentCountOptions
+func EstimatedDocumentCount() *EstimatedDocumentCountOptions {
+	return &EstimatedDocumentCountOptions{}
+}
+
+// SetMaxTime specifies the maximum amount of time to allow the operation to run
+func (opts *EstimatedDocumentCountOptions) SetMaxTime(d time.Duration) *EstimatedDocumentCountOptions {
+	opts.MaxTime = &d
+	return opts
+}
+
+// MergeEstimatedDocumentCountOptions combines the given *EstimatedDocumentCountOptions into a single
+// *EstimatedDocumentCountOptions in a last one wins fashion.
+func MergeEstimatedDocumentCountOptions(opts ...*EstimatedDocumentCountOptions) *EstimatedDocumentCountOptions {
+	merged := EstimatedDocumentCount()
+	// nil entries are skipped; the last non-nil MaxTime wins.
+	for _, src := range opts {
+		if src == nil {
+			continue
+		}
+		if src.MaxTime != nil {
+			merged.MaxTime = src.MaxTime
+		}
+	}
+
+	return merged
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/findoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/findoptions.go
new file mode 100644
index 0000000..4ba6133
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/findoptions.go
@@ -0,0 +1,693 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"time"
+)
+
+// FindOptions represent all possible options to the find() function.
+type FindOptions struct {
+	AllowPartialResults *bool          // If true, allows partial results to be returned if some shards are down.
+	BatchSize           *int32         // Specifies the number of documents to return in every batch.
+	Collation           *Collation     // Specifies a collation to be used
+	Comment             *string        // Specifies a string to help trace the operation through the database.
+	CursorType          *CursorType    // Specifies the type of cursor to use
+	Hint                interface{}    // Specifies the index to use.
+	Limit               *int64         // Sets a limit on the number of results to return.
+	Max                 interface{}    // Sets an exclusive upper bound for a specific index
+	MaxAwaitTime        *time.Duration // Specifies the maximum amount of time for the server to wait on new documents.
+	MaxTime             *time.Duration // Specifies the maximum amount of time to allow the query to run.
+	Min                 interface{}    // Specifies the inclusive lower bound for a specific index.
+	NoCursorTimeout     *bool          // If true, prevents cursors from timing out after an inactivity period.
+	OplogReplay         *bool          // Adds an option for internal use only and should not be set.
+	Projection          interface{}    // Limits the fields returned for all documents.
+	ReturnKey           *bool          // If true, only returns index keys for all result documents.
+	ShowRecordID        *bool          // If true, a $recordId field with the record identifier will be added to the returned documents.
+	Skip                *int64         // Specifies the number of documents to skip before returning
+	Snapshot            *bool          // If true, prevents the cursor from returning a document more than once because of an intervening write operation.
+	Sort                interface{}    // Specifies the order in which to return results.
+}
+
+// Find creates a new, empty FindOptions instance.
+func Find() *FindOptions {
+	return &FindOptions{}
+}
+
+// SetAllowPartialResults sets whether partial results can be returned if some shards are down.
+// For server versions < 3.2, this defaults to false.
+func (f *FindOptions) SetAllowPartialResults(b bool) *FindOptions {
+	f.AllowPartialResults = &b
+	return f
+}
+
+// SetBatchSize sets the number of documents to return in each batch.
+func (f *FindOptions) SetBatchSize(i int32) *FindOptions {
+	f.BatchSize = &i
+	return f
+}
+
+// SetCollation specifies a Collation to use for the Find operation.
+// Valid for server versions >= 3.4
+func (f *FindOptions) SetCollation(collation *Collation) *FindOptions {
+	f.Collation = collation
+	return f
+}
+
+// SetComment specifies a string to help trace the operation through the database.
+func (f *FindOptions) SetComment(comment string) *FindOptions {
+	f.Comment = &comment
+	return f
+}
+
+// SetCursorType specifies the type of cursor to use.
+func (f *FindOptions) SetCursorType(ct CursorType) *FindOptions {
+	f.CursorType = &ct
+	return f
+}
+
+// SetHint specifies the index to use.
+func (f *FindOptions) SetHint(hint interface{}) *FindOptions {
+	f.Hint = hint
+	return f
+}
+
+// SetLimit specifies a limit on the number of results.
+// A negative limit implies that only 1 batch should be returned.
+func (f *FindOptions) SetLimit(i int64) *FindOptions {
+	f.Limit = &i
+	return f
+}
+
+// SetMax specifies an exclusive upper bound for a specific index.
+func (f *FindOptions) SetMax(max interface{}) *FindOptions {
+	f.Max = max
+	return f
+}
+
+// SetMaxAwaitTime specifies the max amount of time for the server to wait on new documents.
+// If the cursor type is not TailableAwait, this option is ignored.
+// For server versions < 3.2, this option is ignored.
+func (f *FindOptions) SetMaxAwaitTime(d time.Duration) *FindOptions {
+	f.MaxAwaitTime = &d
+	return f
+}
+
+// SetMaxTime specifies the max time to allow the query to run.
+func (f *FindOptions) SetMaxTime(d time.Duration) *FindOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetMin specifies the inclusive lower bound for a specific index.
+func (f *FindOptions) SetMin(min interface{}) *FindOptions {
+	f.Min = min
+	return f
+}
+
+// SetNoCursorTimeout specifies whether or not cursors should time out after a period of inactivity.
+// For server versions < 3.2, this defaults to false.
+func (f *FindOptions) SetNoCursorTimeout(b bool) *FindOptions {
+	f.NoCursorTimeout = &b
+	return f
+}
+
+// SetOplogReplay adds an option for internal use only and should not be set.
+// For server versions < 3.2, this defaults to false.
+func (f *FindOptions) SetOplogReplay(b bool) *FindOptions {
+	f.OplogReplay = &b
+	return f
+}
+
+// SetProjection adds an option to limit the fields returned for all documents.
+func (f *FindOptions) SetProjection(projection interface{}) *FindOptions {
+	f.Projection = projection
+	return f
+}
+
+// SetReturnKey adds an option to only return index keys for all result documents.
+func (f *FindOptions) SetReturnKey(b bool) *FindOptions {
+	f.ReturnKey = &b
+	return f
+}
+
+// SetShowRecordID adds an option to determine whether to return the record identifier for each document.
+// If true, a $recordId field will be added to each returned document.
+func (f *FindOptions) SetShowRecordID(b bool) *FindOptions {
+	f.ShowRecordID = &b
+	return f
+}
+
+// SetSkip specifies the number of documents to skip before returning.
+// For server versions < 3.2, this defaults to 0.
+func (f *FindOptions) SetSkip(i int64) *FindOptions {
+	f.Skip = &i
+	return f
+}
+
+// SetSnapshot prevents the cursor from returning a document more than once because of an intervening write operation.
+func (f *FindOptions) SetSnapshot(b bool) *FindOptions {
+	f.Snapshot = &b
+	return f
+}
+
+// SetSort specifies the order in which to return documents.
+func (f *FindOptions) SetSort(sort interface{}) *FindOptions {
+	f.Sort = sort
+	return f
+}
+
+// MergeFindOptions combines the argued FindOptions into a single FindOptions in a last-one-wins fashion
+func MergeFindOptions(opts ...*FindOptions) *FindOptions {
+	fo := Find()
+	// nil entries are skipped; for each set field, the value from the last
+	// FindOptions that sets it wins.
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.AllowPartialResults != nil {
+			fo.AllowPartialResults = opt.AllowPartialResults
+		}
+		if opt.BatchSize != nil {
+			fo.BatchSize = opt.BatchSize
+		}
+		if opt.Collation != nil {
+			fo.Collation = opt.Collation
+		}
+		if opt.Comment != nil {
+			fo.Comment = opt.Comment
+		}
+		if opt.CursorType != nil {
+			fo.CursorType = opt.CursorType
+		}
+		if opt.Hint != nil {
+			fo.Hint = opt.Hint
+		}
+		if opt.Limit != nil {
+			fo.Limit = opt.Limit
+		}
+		if opt.Max != nil {
+			fo.Max = opt.Max
+		}
+		if opt.MaxAwaitTime != nil {
+			fo.MaxAwaitTime = opt.MaxAwaitTime
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.Min != nil {
+			fo.Min = opt.Min
+		}
+		if opt.NoCursorTimeout != nil {
+			fo.NoCursorTimeout = opt.NoCursorTimeout
+		}
+		if opt.OplogReplay != nil {
+			fo.OplogReplay = opt.OplogReplay
+		}
+		if opt.Projection != nil {
+			fo.Projection = opt.Projection
+		}
+		if opt.ReturnKey != nil {
+			fo.ReturnKey = opt.ReturnKey
+		}
+		if opt.ShowRecordID != nil {
+			fo.ShowRecordID = opt.ShowRecordID
+		}
+		if opt.Skip != nil {
+			fo.Skip = opt.Skip
+		}
+		if opt.Snapshot != nil {
+			fo.Snapshot = opt.Snapshot
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+	}
+
+	return fo
+}
+
+// FindOneOptions represent all possible options to the findOne() function.
+type FindOneOptions struct {
+	AllowPartialResults *bool          // If true, allows partial results to be returned if some shards are down.
+	BatchSize           *int32         // Specifies the number of documents to return in every batch.
+	Collation           *Collation     // Specifies a collation to be used
+	Comment             *string        // Specifies a string to help trace the operation through the database.
+	CursorType          *CursorType    // Specifies the type of cursor to use
+	Hint                interface{}    // Specifies the index to use.
+	Max                 interface{}    // Sets an exclusive upper bound for a specific index
+	MaxAwaitTime        *time.Duration // Specifies the maximum amount of time for the server to wait on new documents.
+	MaxTime             *time.Duration // Specifies the maximum amount of time to allow the query to run.
+	Min                 interface{}    // Specifies the inclusive lower bound for a specific index.
+	NoCursorTimeout     *bool          // If true, prevents cursors from timing out after an inactivity period.
+	OplogReplay         *bool          // Adds an option for internal use only and should not be set.
+	Projection          interface{}    // Limits the fields returned for all documents.
+	ReturnKey           *bool          // If true, only returns index keys for all result documents.
+	ShowRecordID        *bool          // If true, a $recordId field with the record identifier will be added to the returned documents.
+	Skip                *int64         // Specifies the number of documents to skip before returning
+	Snapshot            *bool          // If true, prevents the cursor from returning a document more than once because of an intervening write operation.
+	Sort                interface{}    // Specifies the order in which to return results.
+}
+
+// FindOne creates a new, empty FindOneOptions instance.
+func FindOne() *FindOneOptions {
+	return &FindOneOptions{}
+}
+
+// SetAllowPartialResults sets whether partial results can be returned if some shards are down.
+func (f *FindOneOptions) SetAllowPartialResults(b bool) *FindOneOptions {
+	f.AllowPartialResults = &b
+	return f
+}
+
+// SetBatchSize sets the number of documents to return in each batch.
+func (f *FindOneOptions) SetBatchSize(i int32) *FindOneOptions {
+	f.BatchSize = &i
+	return f
+}
+
+// SetCollation specifies a Collation to use for the Find operation.
+func (f *FindOneOptions) SetCollation(collation *Collation) *FindOneOptions {
+	f.Collation = collation
+	return f
+}
+
+// SetComment specifies a string to help trace the operation through the database.
+func (f *FindOneOptions) SetComment(comment string) *FindOneOptions {
+	f.Comment = &comment
+	return f
+}
+
+// SetCursorType specifies the type of cursor to use.
+func (f *FindOneOptions) SetCursorType(ct CursorType) *FindOneOptions {
+	f.CursorType = &ct
+	return f
+}
+
+// SetHint specifies the index to use.
+func (f *FindOneOptions) SetHint(hint interface{}) *FindOneOptions {
+	f.Hint = hint
+	return f
+}
+
+// SetMax specifies an exclusive upper bound for a specific index.
+func (f *FindOneOptions) SetMax(max interface{}) *FindOneOptions {
+	f.Max = max
+	return f
+}
+
+// SetMaxAwaitTime specifies the max amount of time for the server to wait on new documents.
+// For server versions < 3.2, this option is ignored.
+func (f *FindOneOptions) SetMaxAwaitTime(d time.Duration) *FindOneOptions {
+	f.MaxAwaitTime = &d
+	return f
+}
+
+// SetMaxTime specifies the max time to allow the query to run.
+func (f *FindOneOptions) SetMaxTime(d time.Duration) *FindOneOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetMin specifies the inclusive lower bound for a specific index.
+func (f *FindOneOptions) SetMin(min interface{}) *FindOneOptions {
+	f.Min = min
+	return f
+}
+
+// SetNoCursorTimeout specifies whether or not cursors should time out after a period of inactivity.
+func (f *FindOneOptions) SetNoCursorTimeout(b bool) *FindOneOptions {
+	f.NoCursorTimeout = &b
+	return f
+}
+
+// SetOplogReplay adds an option for internal use only and should not be set.
+func (f *FindOneOptions) SetOplogReplay(b bool) *FindOneOptions {
+	f.OplogReplay = &b
+	return f
+}
+
+// SetProjection adds an option to limit the fields returned for all documents.
+func (f *FindOneOptions) SetProjection(projection interface{}) *FindOneOptions {
+	f.Projection = projection
+	return f
+}
+
+// SetReturnKey adds an option to only return index keys for all result documents.
+func (f *FindOneOptions) SetReturnKey(b bool) *FindOneOptions {
+	f.ReturnKey = &b
+	return f
+}
+
+// SetShowRecordID adds an option to determine whether to return the record identifier for each document.
+// If true, a $recordId field will be added to each returned document.
+func (f *FindOneOptions) SetShowRecordID(b bool) *FindOneOptions {
+	f.ShowRecordID = &b
+	return f
+}
+
+// SetSkip specifies the number of documents to skip before returning.
+func (f *FindOneOptions) SetSkip(i int64) *FindOneOptions {
+	f.Skip = &i
+	return f
+}
+
+// SetSnapshot prevents the cursor from returning a document more than once because of an intervening write operation.
+func (f *FindOneOptions) SetSnapshot(b bool) *FindOneOptions {
+	f.Snapshot = &b
+	return f
+}
+
+// SetSort specifies the order in which to return documents.
+func (f *FindOneOptions) SetSort(sort interface{}) *FindOneOptions {
+	f.Sort = sort
+	return f
+}
+
+// MergeFindOneOptions combines the argued FindOneOptions into a single FindOneOptions in a last-one-wins fashion
+func MergeFindOneOptions(opts ...*FindOneOptions) *FindOneOptions {
+	fo := FindOne()
+	// nil entries are skipped; for each set field, the value from the last
+	// FindOneOptions that sets it wins.
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.AllowPartialResults != nil {
+			fo.AllowPartialResults = opt.AllowPartialResults
+		}
+		if opt.BatchSize != nil {
+			fo.BatchSize = opt.BatchSize
+		}
+		if opt.Collation != nil {
+			fo.Collation = opt.Collation
+		}
+		if opt.Comment != nil {
+			fo.Comment = opt.Comment
+		}
+		if opt.CursorType != nil {
+			fo.CursorType = opt.CursorType
+		}
+		if opt.Hint != nil {
+			fo.Hint = opt.Hint
+		}
+		if opt.Max != nil {
+			fo.Max = opt.Max
+		}
+		if opt.MaxAwaitTime != nil {
+			fo.MaxAwaitTime = opt.MaxAwaitTime
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.Min != nil {
+			fo.Min = opt.Min
+		}
+		if opt.NoCursorTimeout != nil {
+			fo.NoCursorTimeout = opt.NoCursorTimeout
+		}
+		if opt.OplogReplay != nil {
+			fo.OplogReplay = opt.OplogReplay
+		}
+		if opt.Projection != nil {
+			fo.Projection = opt.Projection
+		}
+		if opt.ReturnKey != nil {
+			fo.ReturnKey = opt.ReturnKey
+		}
+		if opt.ShowRecordID != nil {
+			fo.ShowRecordID = opt.ShowRecordID
+		}
+		if opt.Skip != nil {
+			fo.Skip = opt.Skip
+		}
+		if opt.Snapshot != nil {
+			fo.Snapshot = opt.Snapshot
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+	}
+
+	return fo
+}
+
// FindOneAndReplaceOptions represent all possible options to the findOneAndReplace() function.
type FindOneAndReplaceOptions struct {
	BypassDocumentValidation *bool           // If true, allows the write to opt out of document-level validation.
	Collation                *Collation      // Specifies a collation to be used.
	MaxTime                  *time.Duration  // Specifies the maximum amount of time to allow the query to run.
	Projection               interface{}     // Limits the fields returned for all documents.
	ReturnDocument           *ReturnDocument // Specifies whether the original or replaced document should be returned.
	Sort                     interface{}     // Specifies the order in which to return results.
	Upsert                   *bool           // If true, creates a new document if no document matches the query.
}

// FindOneAndReplace creates a new FindOneAndReplaceOptions instance.
func FindOneAndReplace() *FindOneAndReplaceOptions {
	return &FindOneAndReplaceOptions{}
}

// SetBypassDocumentValidation specifies whether or not the write should opt out of document-level validation.
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
func (f *FindOneAndReplaceOptions) SetBypassDocumentValidation(b bool) *FindOneAndReplaceOptions {
	f.BypassDocumentValidation = &b
	return f
}

// SetCollation specifies a Collation to use for the findOneAndReplace operation.
func (f *FindOneAndReplaceOptions) SetCollation(collation *Collation) *FindOneAndReplaceOptions {
	f.Collation = collation
	return f
}

// SetMaxTime specifies the maximum amount of time to allow the query to run.
func (f *FindOneAndReplaceOptions) SetMaxTime(d time.Duration) *FindOneAndReplaceOptions {
	f.MaxTime = &d
	return f
}

// SetProjection adds an option to limit the fields returned for all documents.
func (f *FindOneAndReplaceOptions) SetProjection(projection interface{}) *FindOneAndReplaceOptions {
	f.Projection = projection
	return f
}

// SetReturnDocument specifies whether the original or replaced document should be returned.
// If set to Before, the original document will be returned. If set to After, the replacement
// document will be returned.
func (f *FindOneAndReplaceOptions) SetReturnDocument(rd ReturnDocument) *FindOneAndReplaceOptions {
	f.ReturnDocument = &rd
	return f
}

// SetSort specifies the order in which to return documents.
func (f *FindOneAndReplaceOptions) SetSort(sort interface{}) *FindOneAndReplaceOptions {
	f.Sort = sort
	return f
}

// SetUpsert specifies if a new document should be created if no document matches the query.
func (f *FindOneAndReplaceOptions) SetUpsert(b bool) *FindOneAndReplaceOptions {
	f.Upsert = &b
	return f
}

// MergeFindOneAndReplaceOptions combines the argued FindOneAndReplaceOptions into a single
// FindOneAndReplaceOptions in a last-one-wins fashion; nil entries are skipped.
func MergeFindOneAndReplaceOptions(opts ...*FindOneAndReplaceOptions) *FindOneAndReplaceOptions {
	fo := FindOneAndReplace()
	for _, opt := range opts {
		if opt == nil {
			continue
		}
		if opt.BypassDocumentValidation != nil {
			fo.BypassDocumentValidation = opt.BypassDocumentValidation
		}
		if opt.Collation != nil {
			fo.Collation = opt.Collation
		}
		if opt.MaxTime != nil {
			fo.MaxTime = opt.MaxTime
		}
		if opt.Projection != nil {
			fo.Projection = opt.Projection
		}
		if opt.ReturnDocument != nil {
			fo.ReturnDocument = opt.ReturnDocument
		}
		if opt.Sort != nil {
			fo.Sort = opt.Sort
		}
		if opt.Upsert != nil {
			fo.Upsert = opt.Upsert
		}
	}

	return fo
}
+
// FindOneAndUpdateOptions represent all possible options to the findOneAndUpdate() function.
type FindOneAndUpdateOptions struct {
	ArrayFilters             *ArrayFilters   // A set of filters specifying to which array elements an update should apply.
	BypassDocumentValidation *bool           // If true, allows the write to opt out of document-level validation.
	Collation                *Collation      // Specifies a collation to be used.
	MaxTime                  *time.Duration  // Specifies the maximum amount of time to allow the query to run.
	Projection               interface{}     // Limits the fields returned for all documents.
	ReturnDocument           *ReturnDocument // Specifies whether the original or updated document should be returned.
	Sort                     interface{}     // Specifies the order in which to return results.
	Upsert                   *bool           // If true, creates a new document if no document matches the query.
}

// FindOneAndUpdate creates a new FindOneAndUpdateOptions instance.
func FindOneAndUpdate() *FindOneAndUpdateOptions {
	return &FindOneAndUpdateOptions{}
}

// SetBypassDocumentValidation specifies whether or not the write should opt out of document-level validation.
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
func (f *FindOneAndUpdateOptions) SetBypassDocumentValidation(b bool) *FindOneAndUpdateOptions {
	f.BypassDocumentValidation = &b
	return f
}

// SetArrayFilters specifies a set of filters that determine to which array elements an update should apply.
func (f *FindOneAndUpdateOptions) SetArrayFilters(filters ArrayFilters) *FindOneAndUpdateOptions {
	f.ArrayFilters = &filters
	return f
}

// SetCollation specifies a Collation to use for the findOneAndUpdate operation.
func (f *FindOneAndUpdateOptions) SetCollation(collation *Collation) *FindOneAndUpdateOptions {
	f.Collation = collation
	return f
}

// SetMaxTime specifies the maximum amount of time to allow the query to run.
func (f *FindOneAndUpdateOptions) SetMaxTime(d time.Duration) *FindOneAndUpdateOptions {
	f.MaxTime = &d
	return f
}

// SetProjection adds an option to limit the fields returned for all documents.
func (f *FindOneAndUpdateOptions) SetProjection(projection interface{}) *FindOneAndUpdateOptions {
	f.Projection = projection
	return f
}

// SetReturnDocument specifies whether the original or updated document should be returned.
// If set to Before, the original document will be returned. If set to After, the updated document
// will be returned.
func (f *FindOneAndUpdateOptions) SetReturnDocument(rd ReturnDocument) *FindOneAndUpdateOptions {
	f.ReturnDocument = &rd
	return f
}

// SetSort specifies the order in which to return documents.
func (f *FindOneAndUpdateOptions) SetSort(sort interface{}) *FindOneAndUpdateOptions {
	f.Sort = sort
	return f
}

// SetUpsert specifies if a new document should be created if no document matches the query.
func (f *FindOneAndUpdateOptions) SetUpsert(b bool) *FindOneAndUpdateOptions {
	f.Upsert = &b
	return f
}

// MergeFindOneAndUpdateOptions combines the argued FindOneAndUpdateOptions into a single
// FindOneAndUpdateOptions in a last-one-wins fashion; nil entries are skipped.
func MergeFindOneAndUpdateOptions(opts ...*FindOneAndUpdateOptions) *FindOneAndUpdateOptions {
	fo := FindOneAndUpdate()
	for _, opt := range opts {
		if opt == nil {
			continue
		}
		if opt.ArrayFilters != nil {
			fo.ArrayFilters = opt.ArrayFilters
		}
		if opt.BypassDocumentValidation != nil {
			fo.BypassDocumentValidation = opt.BypassDocumentValidation
		}
		if opt.Collation != nil {
			fo.Collation = opt.Collation
		}
		if opt.MaxTime != nil {
			fo.MaxTime = opt.MaxTime
		}
		if opt.Projection != nil {
			fo.Projection = opt.Projection
		}
		if opt.ReturnDocument != nil {
			fo.ReturnDocument = opt.ReturnDocument
		}
		if opt.Sort != nil {
			fo.Sort = opt.Sort
		}
		if opt.Upsert != nil {
			fo.Upsert = opt.Upsert
		}
	}

	return fo
}
+
+// FindOneAndDeleteOptions represent all possible options to the findOne() function.
+type FindOneAndDeleteOptions struct {
+	Collation  *Collation     // Specifies a collation to be used
+	MaxTime    *time.Duration // Specifies the maximum amount of time to allow the query to run.
+	Projection interface{}    // Limits the fields returned for all documents.
+	Sort       interface{}    // Specifies the order in which to return results.
+}
+
+// FindOneAndDelete creates a new FindOneAndDeleteOptions instance.
+func FindOneAndDelete() *FindOneAndDeleteOptions {
+	return &FindOneAndDeleteOptions{}
+}
+
+// SetCollation specifies a Collation to use for the Find operation.
+// Valid for server versions >= 3.4
+func (f *FindOneAndDeleteOptions) SetCollation(collation *Collation) *FindOneAndDeleteOptions {
+	f.Collation = collation
+	return f
+}
+
+// SetMaxTime specifies the max time to allow the query to run.
+func (f *FindOneAndDeleteOptions) SetMaxTime(d time.Duration) *FindOneAndDeleteOptions {
+	f.MaxTime = &d
+	return f
+}
+
+// SetProjection adds an option to limit the fields returned for all documents.
+func (f *FindOneAndDeleteOptions) SetProjection(projection interface{}) *FindOneAndDeleteOptions {
+	f.Projection = projection
+	return f
+}
+
+// SetSort specifies the order in which to return documents.
+func (f *FindOneAndDeleteOptions) SetSort(sort interface{}) *FindOneAndDeleteOptions {
+	f.Sort = sort
+	return f
+}
+
+// MergeFindOneAndDeleteOptions combines the argued FindOneAndDeleteOptions into a single FindOneAndDeleteOptions in a last-one-wins fashion
+func MergeFindOneAndDeleteOptions(opts ...*FindOneAndDeleteOptions) *FindOneAndDeleteOptions {
+	fo := FindOneAndDelete()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Collation != nil {
+			fo.Collation = opt.Collation
+		}
+		if opt.MaxTime != nil {
+			fo.MaxTime = opt.MaxTime
+		}
+		if opt.Projection != nil {
+			fo.Projection = opt.Projection
+		}
+		if opt.Sort != nil {
+			fo.Sort = opt.Sort
+		}
+	}
+
+	return fo
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/gridfsoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/gridfsoptions.go
new file mode 100644
index 0000000..232a1c8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/gridfsoptions.go
@@ -0,0 +1,268 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
// DefaultName is the default name for a GridFS bucket.
var DefaultName = "fs"

// DefaultChunkSize is the default size of each file chunk in bytes.
// NOTE(review): this is 255*1000 (255 kB decimal); the GridFS spec default is
// usually quoted as 255 KiB (255*1024 = 261120) — confirm which is intended
// before changing, since existing stored files depend on it.
var DefaultChunkSize int32 = 255 * 1000

// DefaultRevision is the default revision number for a download by name operation
// (-1 selects the most recent revision).
var DefaultRevision int32 = -1
+
+// BucketOptions represents all possible options to configure a GridFS bucket.
+type BucketOptions struct {
+	Name           *string                    // The bucket name. Defaults to "fs".
+	ChunkSizeBytes *int32                     // The chunk size in bytes. Defaults to 255KB.
+	WriteConcern   *writeconcern.WriteConcern // The write concern for the bucket. Defaults to the write concern of the database.
+	ReadConcern    *readconcern.ReadConcern   // The read concern for the bucket. Defaults to the read concern of the database.
+	ReadPreference *readpref.ReadPref         // The read preference for the bucket. Defaults to the read preference of the database.
+}
+
+// GridFSBucket creates a new *BucketOptions
+func GridFSBucket() *BucketOptions {
+	return &BucketOptions{
+		Name:           &DefaultName,
+		ChunkSizeBytes: &DefaultChunkSize,
+	}
+}
+
+// SetName sets the name for the bucket. Defaults to "fs" if not set.
+func (b *BucketOptions) SetName(name string) *BucketOptions {
+	b.Name = &name
+	return b
+}
+
+// SetChunkSizeBytes sets the chunk size in bytes for the bucket. Defaults to 255KB if not set.
+func (b *BucketOptions) SetChunkSizeBytes(i int32) *BucketOptions {
+	b.ChunkSizeBytes = &i
+	return b
+}
+
+// SetWriteConcern sets the write concern for the bucket.
+func (b *BucketOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *BucketOptions {
+	b.WriteConcern = wc
+	return b
+}
+
+// SetReadConcern sets the read concern for the bucket.
+func (b *BucketOptions) SetReadConcern(rc *readconcern.ReadConcern) *BucketOptions {
+	b.ReadConcern = rc
+	return b
+}
+
+// SetReadPreference sets the read preference for the bucket.
+func (b *BucketOptions) SetReadPreference(rp *readpref.ReadPref) *BucketOptions {
+	b.ReadPreference = rp
+	return b
+}
+
+// MergeBucketOptions combines the given *BucketOptions into a single *BucketOptions.
+// If the name or chunk size is not set in any of the given *BucketOptions, the resulting *BucketOptions will have
+// name "fs" and chunk size 255KB.
+func MergeBucketOptions(opts ...*BucketOptions) *BucketOptions {
+	b := GridFSBucket()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Name != nil {
+			b.Name = opt.Name
+		}
+		if opt.ChunkSizeBytes != nil {
+			b.ChunkSizeBytes = opt.ChunkSizeBytes
+		}
+		if opt.WriteConcern != nil {
+			b.WriteConcern = opt.WriteConcern
+		}
+		if opt.ReadConcern != nil {
+			b.ReadConcern = opt.ReadConcern
+		}
+		if opt.ReadPreference != nil {
+			b.ReadPreference = opt.ReadPreference
+		}
+	}
+
+	return b
+}
+
+// UploadOptions represents all possible options for a GridFS upload operation.
+type UploadOptions struct {
+	ChunkSizeBytes *int32    // Chunk size in bytes. Defaults to the chunk size of the bucket.
+	Metadata       bsonx.Doc // User data for the 'metadata' field of the files collection document.
+}
+
+// GridFSUpload creates a new *UploadOptions
+func GridFSUpload() *UploadOptions {
+	return &UploadOptions{}
+}
+
+// SetChunkSizeBytes sets the chunk size in bytes for the upload. Defaults to 255KB if not set.
+func (u *UploadOptions) SetChunkSizeBytes(i int32) *UploadOptions {
+	u.ChunkSizeBytes = &i
+	return u
+}
+
+// SetMetadata specfies the metadata for the upload.
+func (u *UploadOptions) SetMetadata(doc bsonx.Doc) *UploadOptions {
+	u.Metadata = doc
+	return u
+}
+
+// MergeUploadOptions combines the given *UploadOptions into a single *UploadOptions.
+// If the chunk size is not set in any of the given *UploadOptions, the resulting *UploadOptions will have chunk size
+// 255KB.
+func MergeUploadOptions(opts ...*UploadOptions) *UploadOptions {
+	u := GridFSUpload()
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ChunkSizeBytes != nil {
+			u.ChunkSizeBytes = opt.ChunkSizeBytes
+		}
+		if opt.Metadata != nil {
+			u.Metadata = opt.Metadata
+		}
+	}
+
+	return u
+}
+
+// NameOptions represents all options that can be used for a GridFS download by name operation.
+type NameOptions struct {
+	Revision *int32 // Which revision (documents with the same filename and different uploadDate). Defaults to -1 (the most recent revision).
+}
+
+// GridFSName creates a new *NameOptions
+func GridFSName() *NameOptions {
+	return &NameOptions{}
+}
+
+// SetRevision specifies which revision of the file to retrieve. Defaults to -1.
+// * Revision numbers are defined as follows:
+// * 0 = the original stored file
+// * 1 = the first revision
+// * 2 = the second revision
+// * etc…
+// * -2 = the second most recent revision
+// * -1 = the most recent revision
+func (n *NameOptions) SetRevision(r int32) *NameOptions {
+	n.Revision = &r
+	return n
+}
+
+// MergeNameOptions combines the given *NameOptions into a single *NameOptions in a last one wins fashion.
+func MergeNameOptions(opts ...*NameOptions) *NameOptions {
+	n := GridFSName()
+	n.Revision = &DefaultRevision
+
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.Revision != nil {
+			n.Revision = opt.Revision
+		}
+	}
+
+	return n
+}
+
// GridFSFindOptions represents all options for a GridFS find operation.
type GridFSFindOptions struct {
	BatchSize       *int32         // Number of documents to return per batch.
	Limit           *int32         // Maximum number of results; a negative limit implies a single batch.
	MaxTime         *time.Duration // Maximum amount of time to allow the query to run.
	NoCursorTimeout *bool          // If true, the cursor does not time out after a period of inactivity.
	Skip            *int32         // Number of documents to skip before returning results.
	Sort            interface{}    // Order in which to return documents.
}

// GridFSFind creates a new GridFSFindOptions instance.
func GridFSFind() *GridFSFindOptions {
	return new(GridFSFindOptions)
}

// SetBatchSize sets the number of documents to return in each batch.
func (f *GridFSFindOptions) SetBatchSize(size int32) *GridFSFindOptions {
	f.BatchSize = &size
	return f
}

// SetLimit specifies a limit on the number of results.
// A negative limit implies that only 1 batch should be returned.
func (f *GridFSFindOptions) SetLimit(limit int32) *GridFSFindOptions {
	f.Limit = &limit
	return f
}

// SetMaxTime specifies the maximum amount of time to allow the query to run.
func (f *GridFSFindOptions) SetMaxTime(d time.Duration) *GridFSFindOptions {
	f.MaxTime = &d
	return f
}

// SetNoCursorTimeout specifies whether or not cursors should time out after a period of inactivity.
func (f *GridFSFindOptions) SetNoCursorTimeout(enabled bool) *GridFSFindOptions {
	f.NoCursorTimeout = &enabled
	return f
}

// SetSkip specifies the number of documents to skip before returning.
func (f *GridFSFindOptions) SetSkip(count int32) *GridFSFindOptions {
	f.Skip = &count
	return f
}

// SetSort specifies the order in which to return documents.
func (f *GridFSFindOptions) SetSort(s interface{}) *GridFSFindOptions {
	f.Sort = s
	return f
}

// MergeGridFSFindOptions combines the given GridFSFindOptions instances into a single
// GridFSFindOptions in a last-one-wins fashion; nil entries are skipped.
func MergeGridFSFindOptions(opts ...*GridFSFindOptions) *GridFSFindOptions {
	merged := GridFSFind()
	for _, o := range opts {
		if o == nil {
			continue
		}
		if o.BatchSize != nil {
			merged.BatchSize = o.BatchSize
		}
		if o.Limit != nil {
			merged.Limit = o.Limit
		}
		if o.MaxTime != nil {
			merged.MaxTime = o.MaxTime
		}
		if o.NoCursorTimeout != nil {
			merged.NoCursorTimeout = o.NoCursorTimeout
		}
		if o.Skip != nil {
			merged.Skip = o.Skip
		}
		if o.Sort != nil {
			merged.Sort = o.Sort
		}
	}

	return merged
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/indexoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/indexoptions.go
new file mode 100644
index 0000000..2fda698
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/indexoptions.go
@@ -0,0 +1,326 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"time"
+)
+
// CreateIndexesOptions represents all possible options for the createIndexes command.
type CreateIndexesOptions struct {
	MaxTime *time.Duration // The maximum amount of time to allow the operation to run.
}

// CreateIndexes creates a new CreateIndexesOptions instance.
func CreateIndexes() *CreateIndexesOptions {
	return new(CreateIndexesOptions)
}

// SetMaxTime specifies the maximum amount of time to allow the operation to run.
func (c *CreateIndexesOptions) SetMaxTime(d time.Duration) *CreateIndexesOptions {
	c.MaxTime = &d
	return c
}

// MergeCreateIndexesOptions combines the given *CreateIndexesOptions into a single
// *CreateIndexesOptions in a last one wins fashion; nil entries are skipped.
func MergeCreateIndexesOptions(opts ...*CreateIndexesOptions) *CreateIndexesOptions {
	merged := CreateIndexes()
	for _, o := range opts {
		if o == nil {
			continue
		}
		if o.MaxTime != nil {
			merged.MaxTime = o.MaxTime
		}
	}

	return merged
}
+
// DropIndexesOptions represents all possible options for the dropIndexes command.
type DropIndexesOptions struct {
	MaxTime *time.Duration // The maximum amount of time to allow the operation to run.
}

// DropIndexes creates a new DropIndexesOptions instance.
func DropIndexes() *DropIndexesOptions {
	return new(DropIndexesOptions)
}

// SetMaxTime specifies the maximum amount of time to allow the operation to run.
func (d *DropIndexesOptions) SetMaxTime(duration time.Duration) *DropIndexesOptions {
	d.MaxTime = &duration
	return d
}

// MergeDropIndexesOptions combines the given *DropIndexesOptions into a single
// *DropIndexesOptions in a last one wins fashion; nil entries are skipped.
func MergeDropIndexesOptions(opts ...*DropIndexesOptions) *DropIndexesOptions {
	merged := DropIndexes()
	for _, o := range opts {
		if o == nil {
			continue
		}
		if o.MaxTime != nil {
			merged.MaxTime = o.MaxTime
		}
	}

	return merged
}
+
// ListIndexesOptions represents all possible options for the listIndexes command.
type ListIndexesOptions struct {
	BatchSize *int32         // The number of documents to return in every batch.
	MaxTime   *time.Duration // The maximum amount of time to allow the operation to run.
}

// ListIndexes creates a new ListIndexesOptions instance.
func ListIndexes() *ListIndexesOptions {
	return &ListIndexesOptions{}
}

// SetBatchSize specifies the number of documents to return in every batch.
func (l *ListIndexesOptions) SetBatchSize(i int32) *ListIndexesOptions {
	l.BatchSize = &i
	return l
}

// SetMaxTime specifies the maximum amount of time to allow the query to run.
func (l *ListIndexesOptions) SetMaxTime(d time.Duration) *ListIndexesOptions {
	l.MaxTime = &d
	return l
}

// MergeListIndexesOptions combines the given *ListIndexesOptions into a single *ListIndexesOptions in a last one
// wins fashion; nil entries are skipped.
func MergeListIndexesOptions(opts ...*ListIndexesOptions) *ListIndexesOptions {
	c := ListIndexes()
	for _, opt := range opts {
		if opt == nil {
			continue
		}
		// Fix: BatchSize was previously dropped during merging, so SetBatchSize
		// had no effect once options passed through this helper.
		if opt.BatchSize != nil {
			c.BatchSize = opt.BatchSize
		}
		if opt.MaxTime != nil {
			c.MaxTime = opt.MaxTime
		}
	}

	return c
}
+
+// IndexOptions represents all possible options to configure a new index.
+type IndexOptions struct {
+	Background              *bool
+	ExpireAfterSeconds      *int32
+	Name                    *string
+	Sparse                  *bool
+	StorageEngine           interface{}
+	Unique                  *bool
+	Version                 *int32
+	DefaultLanguage         *string
+	LanguageOverride        *string
+	TextVersion             *int32
+	Weights                 interface{}
+	SphereVersion           *int32
+	Bits                    *int32
+	Max                     *float64
+	Min                     *float64
+	BucketSize              *int32
+	PartialFilterExpression interface{}
+	Collation               *Collation
+}
+
+// Index creates a new *IndexOptions
+func Index() *IndexOptions {
+	return &IndexOptions{}
+}
+
+// SetBackground sets the background option. If true, the server will create the index in the background and not block
+// other tasks
+func (i *IndexOptions) SetBackground(background bool) *IndexOptions {
+	i.Background = &background
+	return i
+}
+
+// SetExpireAfterSeconds specifies the number of seconds for a document to remain in a collection.
+func (i *IndexOptions) SetExpireAfterSeconds(seconds int32) *IndexOptions {
+	i.ExpireAfterSeconds = &seconds
+	return i
+}
+
+// SetName specifies a name for the index.
+// If not set, a name will be generated in the format "[field]_[direction]".
+// If multiple indexes are created for the same key pattern with different collations, a name must be provided to avoid
+// ambiguity.
+func (i *IndexOptions) SetName(name string) *IndexOptions {
+	i.Name = &name
+	return i
+}
+
+// SetSparse sets the sparse option.
+// If true, the index will only reference documents with the specified field in the index.
+func (i *IndexOptions) SetSparse(sparse bool) *IndexOptions {
+	i.Sparse = &sparse
+	return i
+}
+
+// SetStorageEngine specifies the storage engine to use.
+// Valid for server versions >= 3.0
+func (i *IndexOptions) SetStorageEngine(engine interface{}) *IndexOptions {
+	i.StorageEngine = engine
+	return i
+}
+
+// SetUnique forces the index to be unique.
+func (i *IndexOptions) SetUnique(unique bool) *IndexOptions {
+	i.Unique = &unique
+	return i
+}
+
+// SetVersion specifies the index version number, either 0 or 1.
+func (i *IndexOptions) SetVersion(version int32) *IndexOptions {
+	i.Version = &version
+	return i
+}
+
+// SetDefaultLanguage specifies the default language for text indexes.
+// If not set, this will default to english.
+func (i *IndexOptions) SetDefaultLanguage(language string) *IndexOptions {
+	i.DefaultLanguage = &language
+	return i
+}
+
+// SetLanguageOverride specifies the field in the document to override the language.
+func (i *IndexOptions) SetLanguageOverride(override string) *IndexOptions {
+	i.LanguageOverride = &override
+	return i
+}
+
+// SetTextVersion specifies the text index version number.
+// MongoDB version 2.4 can only support version 1.
+// MongoDB versions 2.6 and higher can support versions 1 or 2.
+func (i *IndexOptions) SetTextVersion(version int32) *IndexOptions {
+	i.TextVersion = &version
+	return i
+}
+
+// SetWeights specifies fields in the index and their corresponding weight values.
+func (i *IndexOptions) SetWeights(weights interface{}) *IndexOptions {
+	i.Weights = weights
+	return i
+}
+
+// SetSphereVersion specifies the 2dsphere index version number.
+// MongoDB version 2.4 can only support version 1.
+// MongoDB versions 2.6 and higher can support versions 1 or 2.
+func (i *IndexOptions) SetSphereVersion(version int32) *IndexOptions {
+	i.SphereVersion = &version
+	return i
+}
+
+// SetBits specifies the precision of the stored geo hash in the 2d index, from 1 to 32.
+func (i *IndexOptions) SetBits(bits int32) *IndexOptions {
+	i.Bits = &bits
+	return i
+}
+
+// SetMax specifies the maximum boundary for latitude and longitude in the 2d index.
+func (i *IndexOptions) SetMax(max float64) *IndexOptions {
+	i.Max = &max
+	return i
+}
+
+// SetMin specifies the minimum boundary for latitude and longitude in the 2d index.
+func (i *IndexOptions) SetMin(min float64) *IndexOptions {
+	i.Min = &min
+	return i
+}
+
+// SetBucketSize specifies number of units within which to group the location values in a geo haystack index.
+func (i *IndexOptions) SetBucketSize(bucketSize int32) *IndexOptions {
+	i.BucketSize = &bucketSize
+	return i
+}
+
+// SetPartialFilterExpression specifies a filter for use in a partial index. Only documents that match the filter
+// expression are included in the index.
+func (i *IndexOptions) SetPartialFilterExpression(expression interface{}) *IndexOptions {
+	i.PartialFilterExpression = expression
+	return i
+}
+
+// SetCollation specifies a Collation to use for the operation.
+// Valid for server versions >= 3.4
+func (i *IndexOptions) SetCollation(collation *Collation) *IndexOptions {
+	i.Collation = collation
+	return i
+}
+
+// MergeIndexOptions combines the given *IndexOptions into a single *IndexOptions in a last one wins fashion.
+func MergeIndexOptions(opts ...*IndexOptions) *IndexOptions {
+	i := Index()
+
+	for _, opt := range opts {
+		if opt.Background != nil {
+			i.Background = opt.Background
+		}
+		if opt.ExpireAfterSeconds != nil {
+			i.ExpireAfterSeconds = opt.ExpireAfterSeconds
+		}
+		if opt.Name != nil {
+			i.Name = opt.Name
+		}
+		if opt.Sparse != nil {
+			i.Sparse = opt.Sparse
+		}
+		if opt.StorageEngine != nil {
+			i.StorageEngine = opt.StorageEngine
+		}
+		if opt.Unique != nil {
+			i.Unique = opt.Unique
+		}
+		if opt.Version != nil {
+			i.Version = opt.Version
+		}
+		if opt.DefaultLanguage != nil {
+			i.DefaultLanguage = opt.DefaultLanguage
+		}
+		if opt.LanguageOverride != nil {
+			i.LanguageOverride = opt.LanguageOverride
+		}
+		if opt.TextVersion != nil {
+			i.TextVersion = opt.TextVersion
+		}
+		if opt.Weights != nil {
+			i.Weights = opt.Weights
+		}
+		if opt.SphereVersion != nil {
+			i.SphereVersion = opt.SphereVersion
+		}
+		if opt.Bits != nil {
+			i.Bits = opt.Bits
+		}
+		if opt.Max != nil {
+			i.Max = opt.Max
+		}
+		if opt.Min != nil {
+			i.Min = opt.Min
+		}
+		if opt.BucketSize != nil {
+			i.BucketSize = opt.BucketSize
+		}
+		if opt.PartialFilterExpression != nil {
+			i.PartialFilterExpression = opt.PartialFilterExpression
+		}
+		if opt.Collation != nil {
+			i.Collation = opt.Collation
+		}
+	}
+
+	return i
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/insertoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/insertoptions.go
new file mode 100644
index 0000000..064ede3
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/insertoptions.go
@@ -0,0 +1,84 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
// InsertOneOptions represents all possible options to the insertOne() function.
type InsertOneOptions struct {
	BypassDocumentValidation *bool // If true, allows the write to opt-out of document level validation
}

// InsertOne returns a pointer to a new InsertOneOptions
func InsertOne() *InsertOneOptions {
	return new(InsertOneOptions)
}

// SetBypassDocumentValidation allows the write to opt-out of document level validation.
// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
func (ioo *InsertOneOptions) SetBypassDocumentValidation(enabled bool) *InsertOneOptions {
	v := enabled
	ioo.BypassDocumentValidation = &v
	return ioo
}

// MergeInsertOneOptions combines the given InsertOneOptions instances into a single
// InsertOneOptions in a last-one-wins fashion; nil entries are skipped.
func MergeInsertOneOptions(opts ...*InsertOneOptions) *InsertOneOptions {
	merged := InsertOne()
	for _, o := range opts {
		if o == nil {
			continue
		}
		if o.BypassDocumentValidation != nil {
			merged.BypassDocumentValidation = o.BypassDocumentValidation
		}
	}

	return merged
}
+
+// InsertManyOptions represents all possible options to the insertMany()
+type InsertManyOptions struct {
+	BypassDocumentValidation *bool // If true, allows the write to opt-out of document level validation
+	Ordered                  *bool // If true, when an insert fails, return without performing the remaining inserts. Defaults to true.
+}
+
+// InsertMany returns a pointer to a new InsertManyOptions
+func InsertMany() *InsertManyOptions {
+	return &InsertManyOptions{
+		Ordered: &DefaultOrdered,
+	}
+}
+
+// SetBypassDocumentValidation allows the write to opt-out of document level validation.
+// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
+func (imo *InsertManyOptions) SetBypassDocumentValidation(b bool) *InsertManyOptions {
+	imo.BypassDocumentValidation = &b
+	return imo
+}
+
+// SetOrdered configures the ordered option. If true, when a write fails, the function will return without attempting
+// remaining writes. Defaults to true.
+func (imo *InsertManyOptions) SetOrdered(b bool) *InsertManyOptions {
+	imo.Ordered = &b
+	return imo
+}
+
+// MergeInsertManyOptions combines the argued InsertManyOptions into a single InsertManyOptions in a last-one-wins fashion
+func MergeInsertManyOptions(opts ...*InsertManyOptions) *InsertManyOptions {
+	imOpts := InsertMany()
+	for _, imo := range opts {
+		if imo == nil {
+			continue
+		}
+		if imo.BypassDocumentValidation != nil {
+			imOpts.BypassDocumentValidation = imo.BypassDocumentValidation
+		}
+		if imo.Ordered != nil {
+			imOpts.Ordered = imo.Ordered
+		}
+	}
+
+	return imOpts
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/listcollectionsoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/listcollectionsoptions.go
new file mode 100644
index 0000000..e44ad4a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/listcollectionsoptions.go
@@ -0,0 +1,39 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// ListCollectionsOptions represents all possible options for a listCollections command.
+type ListCollectionsOptions struct {
+	NameOnly *bool // If true, only the collection names will be returned.
+}
+
+// ListCollections creates a new *ListCollectionsOptions
+func ListCollections() *ListCollectionsOptions {
+	return &ListCollectionsOptions{}
+}
+
+// SetNameOnly specifies whether to return only the collection names.
+func (lc *ListCollectionsOptions) SetNameOnly(b bool) *ListCollectionsOptions {
+	lc.NameOnly = &b
+	return lc
+}
+
+// MergeListCollectionsOptions combines the given *ListCollectionsOptions into a single *ListCollectionsOptions in a
+// last one wins fashion.
+func MergeListCollectionsOptions(opts ...*ListCollectionsOptions) *ListCollectionsOptions {
+	lc := ListCollections()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.NameOnly != nil {
+			lc.NameOnly = opt.NameOnly
+		}
+	}
+
+	return lc
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/listdatabasesoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/listdatabasesoptions.go
new file mode 100644
index 0000000..5efb6e8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/listdatabasesoptions.go
@@ -0,0 +1,39 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// ListDatabasesOptions represents all possible options for a listDatabases command.
+type ListDatabasesOptions struct {
+	NameOnly *bool // If true, only the database names will be returned.
+}
+
+// ListDatabases creates a new *ListDatabasesOptions
+func ListDatabases() *ListDatabasesOptions {
+	return &ListDatabasesOptions{}
+}
+
+// SetNameOnly specifies whether to return only the database names.
+func (ld *ListDatabasesOptions) SetNameOnly(b bool) *ListDatabasesOptions {
+	ld.NameOnly = &b
+	return ld
+}
+
+// MergeListDatabasesOptions combines the given *ListDatabasesOptions into a single *ListDatabasesOptions in a last one
+// wins fashion.
+func MergeListDatabasesOptions(opts ...*ListDatabasesOptions) *ListDatabasesOptions {
+	ld := ListDatabases()
+	for _, opt := range opts {
+		// Skip nil elements. The previous code compared the slice itself
+		// (opts == nil) rather than the element, so a nil
+		// *ListDatabasesOptions in the list caused a nil-pointer
+		// dereference at opt.NameOnly below.
+		if opt == nil {
+			continue
+		}
+		if opt.NameOnly != nil {
+			ld.NameOnly = opt.NameOnly
+		}
+	}
+
+	return ld
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/mongooptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/mongooptions.go
new file mode 100644
index 0000000..22f383b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/mongooptions.go
@@ -0,0 +1,163 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
+// Collation allows users to specify language-specific rules for string comparison, such as
+// rules for lettercase and accent marks.
+type Collation struct {
+	Locale          string `bson:",omitempty"` // The locale
+	CaseLevel       bool   `bson:",omitempty"` // The case level
+	CaseFirst       string `bson:",omitempty"` // The case ordering
+	Strength        int    `bson:",omitempty"` // The number of comparison levels to use
+	NumericOrdering bool   `bson:",omitempty"` // Whether to order numbers based on numerical order and not collation order
+	Alternate       string `bson:",omitempty"` // Whether spaces and punctuation are considered base characters
+	MaxVariable     string `bson:",omitempty"` // Which characters are affected by alternate: "shifted"
+	Normalization   bool   `bson:",omitempty"` // Causes text to be normalized into Unicode NFD
+	Backwards       bool   `bson:",omitempty"` // Causes secondary differences to be considered in reverse order, as it is done in the French language
+}
+
+// ToDocument converts the Collation to a *bsonx.Document
+//
+// Only fields set to a non-zero value are included, so the resulting
+// document contains exactly the options the user configured.
+func (co *Collation) ToDocument() bsonx.Doc {
+	doc := bsonx.Doc{}
+	if co.Locale != "" {
+		doc = append(doc, bsonx.Elem{"locale", bsonx.String(co.Locale)})
+	}
+	if co.CaseLevel {
+		doc = append(doc, bsonx.Elem{"caseLevel", bsonx.Boolean(true)})
+	}
+	if co.CaseFirst != "" {
+		doc = append(doc, bsonx.Elem{"caseFirst", bsonx.String(co.CaseFirst)})
+	}
+	if co.Strength != 0 {
+		doc = append(doc, bsonx.Elem{"strength", bsonx.Int32(int32(co.Strength))})
+	}
+	if co.NumericOrdering {
+		doc = append(doc, bsonx.Elem{"numericOrdering", bsonx.Boolean(true)})
+	}
+	if co.Alternate != "" {
+		doc = append(doc, bsonx.Elem{"alternate", bsonx.String(co.Alternate)})
+	}
+	if co.MaxVariable != "" {
+		doc = append(doc, bsonx.Elem{"maxVariable", bsonx.String(co.MaxVariable)})
+	}
+	if co.Normalization {
+		// The guard guarantees the value is true; written as Boolean(true)
+		// for consistency with the other boolean fields above.
+		doc = append(doc, bsonx.Elem{"normalization", bsonx.Boolean(true)})
+	}
+	if co.Backwards {
+		doc = append(doc, bsonx.Elem{"backwards", bsonx.Boolean(true)})
+	}
+	return doc
+}
+
+// CursorType specifies whether a cursor should close when the last data is retrieved. See
+// NonTailable, Tailable, and TailableAwait.
+type CursorType int8
+
+const (
+	// NonTailable specifies that a cursor should close after retrieving the last data.
+	NonTailable CursorType = iota
+	// Tailable specifies that a cursor should not close when the last data is retrieved and can be resumed later.
+	Tailable
+	// TailableAwait specifies that a cursor should not close when the last data is retrieved and
+	// that it should block for a certain amount of time for new data before returning no data.
+	TailableAwait
+)
+
+// ReturnDocument specifies whether a findAndUpdate operation should return the document as it was
+// before the update or as it is after the update.
+type ReturnDocument int8
+
+const (
+	// Before specifies that findAndUpdate should return the document as it was before the update.
+	Before ReturnDocument = iota
+	// After specifies that findAndUpdate should return the document as it is after the update.
+	After
+)
+
+// FullDocument specifies whether a change stream should include a copy of the entire document that was changed from
+// some time after the change occurred.
+type FullDocument string
+
+const (
+	// Default does not include a document copy
+	Default FullDocument = "default"
+	// UpdateLookup includes a delta describing the changes to the document and a copy of the entire document that
+	// was changed
+	UpdateLookup FullDocument = "updateLookup"
+)
+
+// ArrayFilters is used to hold filters for the array filters CRUD option. If a registry is nil, bson.DefaultRegistry
+// will be used when converting the filter interfaces to BSON.
+type ArrayFilters struct {
+	Registry *bsoncodec.Registry // The registry to use for converting filters. Defaults to bson.DefaultRegistry.
+	Filters  []interface{}       // The filters to apply
+}
+
+// ToArray builds a bsonx.Arr from the provided ArrayFilters.
+//
+// Each filter is converted with transformDocument using the ArrayFilters'
+// registry (bson.DefaultRegistry when nil); the first conversion error
+// aborts and is returned.
+func (af *ArrayFilters) ToArray() (bsonx.Arr, error) {
+	// Build the array in a single pre-sized pass instead of materializing
+	// an intermediate []bsonx.Doc and copying it in a second loop.
+	arr := make(bsonx.Arr, 0, len(af.Filters))
+	for _, f := range af.Filters {
+		d, err := transformDocument(af.Registry, f)
+		if err != nil {
+			return nil, err
+		}
+		arr = append(arr, bsonx.Document(d))
+	}
+
+	return arr, nil
+}
+
+// MarshalError is returned when attempting to transform a value into a document
+// results in an error.
+type MarshalError struct {
+	Value interface{} // The value that could not be transformed.
+	Err   error       // The underlying error from the BSON marshaler.
+}
+
+// Error implements the error interface.
+func (me MarshalError) Error() string {
+	// Include the underlying error: the previous message dropped me.Err,
+	// hiding the actual cause of the marshaling failure from callers.
+	return fmt.Sprintf("cannot transform type %s to a *bsonx.Document: %v", reflect.TypeOf(me.Value), me.Err)
+}
+
+var defaultRegistry = bson.DefaultRegistry
+
+// transformDocument converts val into a bsonx.Doc using the provided
+// registry, falling back to bson.DefaultRegistry when registry is nil.
+// A nil val yields an empty document. Marshaling failures are wrapped in
+// a MarshalError carrying the offending value.
+func transformDocument(registry *bsoncodec.Registry, val interface{}) (bsonx.Doc, error) {
+	if val == nil {
+		return bsonx.Doc{}, nil
+	}
+	reg := defaultRegistry
+	if registry != nil {
+		reg = registry
+	}
+
+	if bs, ok := val.([]byte); ok {
+		// Slight optimization so we'll just use MarshalBSON and not go through the codec machinery.
+		val = bson.Raw(bs)
+	}
+
+	// TODO(skriptble): Use a pool of these instead.
+	buf := make([]byte, 0, 256)
+	b, err := bson.MarshalAppendWithRegistry(reg, buf, val)
+	if err != nil {
+		return nil, MarshalError{Value: val, Err: err}
+	}
+	return bsonx.ReadDoc(b)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/replaceoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/replaceoptions.go
new file mode 100644
index 0000000..7a8c2ba
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/replaceoptions.go
@@ -0,0 +1,60 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// ReplaceOptions represents all possible options to the replaceOne() function
+type ReplaceOptions struct {
+	BypassDocumentValidation *bool      // If true, allows the write to opt-out of document level validation
+	Collation                *Collation // Specifies a collation
+	Upsert                   *bool      // When true, creates a new document if no document matches the query
+}
+
+// Replace returns a pointer to a new ReplaceOptions
+func Replace() *ReplaceOptions {
+	return &ReplaceOptions{}
+}
+
+// SetBypassDocumentValidation allows the write to opt-out of document level validation.
+// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
+func (ro *ReplaceOptions) SetBypassDocumentValidation(b bool) *ReplaceOptions {
+	ro.BypassDocumentValidation = &b
+	return ro
+}
+
+// SetCollation specifies a collation.
+// Valid for servers >= 3.4
+func (ro *ReplaceOptions) SetCollation(c *Collation) *ReplaceOptions {
+	ro.Collation = c
+	return ro
+}
+
+// SetUpsert allows the creation of a new document if no document matches the query
+func (ro *ReplaceOptions) SetUpsert(b bool) *ReplaceOptions {
+	ro.Upsert = &b
+	return ro
+}
+
+// MergeReplaceOptions combines the argued ReplaceOptions into a single ReplaceOptions in a last-one-wins fashion
+func MergeReplaceOptions(opts ...*ReplaceOptions) *ReplaceOptions {
+	merged := Replace()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.BypassDocumentValidation != nil {
+			merged.BypassDocumentValidation = opt.BypassDocumentValidation
+		}
+		if opt.Collation != nil {
+			merged.Collation = opt.Collation
+		}
+		if opt.Upsert != nil {
+			merged.Upsert = opt.Upsert
+		}
+	}
+
+	return merged
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/runcmdoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/runcmdoptions.go
new file mode 100644
index 0000000..c7c696d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/runcmdoptions.go
@@ -0,0 +1,40 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import "github.com/mongodb/mongo-go-driver/mongo/readpref"
+
+// RunCmdOptions represents all possible options for a runCommand operation.
+type RunCmdOptions struct {
+	ReadPreference *readpref.ReadPref // The read preference for the operation.
+}
+
+// RunCmd creates a new *RunCmdOptions
+func RunCmd() *RunCmdOptions {
+	return &RunCmdOptions{}
+}
+
+// SetReadPreference sets the read preference for the operation.
+func (rc *RunCmdOptions) SetReadPreference(rp *readpref.ReadPref) *RunCmdOptions {
+	rc.ReadPreference = rp
+	return rc
+}
+
+// MergeRunCmdOptions combines the given *RunCmdOptions into one *RunCmdOptions in a last one wins fashion.
+func MergeRunCmdOptions(opts ...*RunCmdOptions) *RunCmdOptions {
+	rc := RunCmd()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ReadPreference != nil {
+			rc.ReadPreference = opt.ReadPreference
+		}
+	}
+
+	return rc
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/sessionoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/sessionoptions.go
new file mode 100644
index 0000000..ffe45e6
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/sessionoptions.go
@@ -0,0 +1,79 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+)
+
+// DefaultCausalConsistency is the default value for the CausalConsistency option.
+var DefaultCausalConsistency = true
+
+// SessionOptions represents all possible options for creating a new session.
+type SessionOptions struct {
+	CausalConsistency     *bool                      // Specifies if reads should be causally consistent. Defaults to true.
+	DefaultReadConcern    *readconcern.ReadConcern   // The default read concern for transactions started in the session.
+	DefaultReadPreference *readpref.ReadPref         // The default read preference for transactions started in the session.
+	DefaultWriteConcern   *writeconcern.WriteConcern // The default write concern for transactions started in the session.
+}
+
+// Session creates a new *SessionOptions
+func Session() *SessionOptions {
+	return &SessionOptions{
+		CausalConsistency: &DefaultCausalConsistency,
+	}
+}
+
+// SetCausalConsistency specifies if a session should be causally consistent. Defaults to true.
+func (s *SessionOptions) SetCausalConsistency(b bool) *SessionOptions {
+	s.CausalConsistency = &b
+	return s
+}
+
+// SetDefaultReadConcern sets the default read concern for transactions started in a session.
+func (s *SessionOptions) SetDefaultReadConcern(rc *readconcern.ReadConcern) *SessionOptions {
+	s.DefaultReadConcern = rc
+	return s
+}
+
+// SetDefaultReadPreference sets the default read preference for transactions started in a session.
+func (s *SessionOptions) SetDefaultReadPreference(rp *readpref.ReadPref) *SessionOptions {
+	s.DefaultReadPreference = rp
+	return s
+}
+
+// SetDefaultWriteConcern sets the default write concern for transactions started in a session.
+func (s *SessionOptions) SetDefaultWriteConcern(wc *writeconcern.WriteConcern) *SessionOptions {
+	s.DefaultWriteConcern = wc
+	return s
+}
+
+// MergeSessionOptions combines the given *SessionOptions into a single *SessionOptions in a last one wins fashion.
+func MergeSessionOptions(opts ...*SessionOptions) *SessionOptions {
+	merged := Session()
+	for _, so := range opts {
+		if so == nil {
+			continue
+		}
+		if so.CausalConsistency != nil {
+			merged.CausalConsistency = so.CausalConsistency
+		}
+		if so.DefaultReadConcern != nil {
+			merged.DefaultReadConcern = so.DefaultReadConcern
+		}
+		if so.DefaultReadPreference != nil {
+			merged.DefaultReadPreference = so.DefaultReadPreference
+		}
+		if so.DefaultWriteConcern != nil {
+			merged.DefaultWriteConcern = so.DefaultWriteConcern
+		}
+	}
+
+	return merged
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/transactionoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/transactionoptions.go
new file mode 100644
index 0000000..5aec1b9
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/transactionoptions.go
@@ -0,0 +1,65 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+import (
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+)
+
+// TransactionOptions represents all possible options for starting a transaction.
+type TransactionOptions struct {
+	ReadConcern    *readconcern.ReadConcern   // The read concern for the transaction. Defaults to the session's read concern.
+	ReadPreference *readpref.ReadPref         // The read preference for the transaction. Defaults to the session's read preference.
+	WriteConcern   *writeconcern.WriteConcern // The write concern for the transaction. Defaults to the session's write concern.
+}
+
+// Transaction creates a new *TransactionOptions
+func Transaction() *TransactionOptions {
+	return &TransactionOptions{}
+}
+
+// SetReadConcern sets the read concern for the transaction.
+func (t *TransactionOptions) SetReadConcern(rc *readconcern.ReadConcern) *TransactionOptions {
+	t.ReadConcern = rc
+	return t
+}
+
+// SetReadPreference sets the read preference for the transaction.
+func (t *TransactionOptions) SetReadPreference(rp *readpref.ReadPref) *TransactionOptions {
+	t.ReadPreference = rp
+	return t
+}
+
+// SetWriteConcern sets the write concern for the transaction.
+func (t *TransactionOptions) SetWriteConcern(wc *writeconcern.WriteConcern) *TransactionOptions {
+	t.WriteConcern = wc
+	return t
+}
+
+// MergeTransactionOptions combines the given *TransactionOptions into a single *TransactionOptions in a last one wins
+// fashion.
+func MergeTransactionOptions(opts ...*TransactionOptions) *TransactionOptions {
+	merged := Transaction()
+	for _, to := range opts {
+		if to == nil {
+			continue
+		}
+		if to.ReadConcern != nil {
+			merged.ReadConcern = to.ReadConcern
+		}
+		if to.ReadPreference != nil {
+			merged.ReadPreference = to.ReadPreference
+		}
+		if to.WriteConcern != nil {
+			merged.WriteConcern = to.WriteConcern
+		}
+	}
+
+	return merged
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/options/updateoptions.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/updateoptions.go
new file mode 100644
index 0000000..468ccda
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/options/updateoptions.go
@@ -0,0 +1,71 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package options
+
+// UpdateOptions represents all possible options to the updateOne() and updateMany() functions
+type UpdateOptions struct {
+	ArrayFilters             *ArrayFilters // A set of filters specifying to which array elements an update should apply
+	BypassDocumentValidation *bool         // If true, allows the write to opt-out of document level validation
+	Collation                *Collation    // Specifies a collation
+	Upsert                   *bool         // When true, creates a new document if no document matches the query
+}
+
+// Update returns a pointer to a new UpdateOptions
+func Update() *UpdateOptions {
+	return &UpdateOptions{}
+}
+
+// SetArrayFilters specifies a set of filters specifying to which array elements an update should apply
+// Valid for server versions >= 3.6.
+func (uo *UpdateOptions) SetArrayFilters(af ArrayFilters) *UpdateOptions {
+	uo.ArrayFilters = &af
+	return uo
+}
+
+// SetBypassDocumentValidation allows the write to opt-out of document level validation.
+// Valid for server versions >= 3.2. For servers < 3.2, this option is ignored.
+func (uo *UpdateOptions) SetBypassDocumentValidation(b bool) *UpdateOptions {
+	uo.BypassDocumentValidation = &b
+	return uo
+}
+
+// SetCollation specifies a collation.
+// Valid for server versions >= 3.4.
+func (uo *UpdateOptions) SetCollation(c *Collation) *UpdateOptions {
+	uo.Collation = c
+	return uo
+}
+
+// SetUpsert allows the creation of a new document if no document matches the query
+func (uo *UpdateOptions) SetUpsert(b bool) *UpdateOptions {
+	uo.Upsert = &b
+	return uo
+}
+
+// MergeUpdateOptions combines the argued UpdateOptions into a single UpdateOptions in a last-one-wins fashion
+func MergeUpdateOptions(opts ...*UpdateOptions) *UpdateOptions {
+	merged := Update()
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.ArrayFilters != nil {
+			merged.ArrayFilters = opt.ArrayFilters
+		}
+		if opt.BypassDocumentValidation != nil {
+			merged.BypassDocumentValidation = opt.BypassDocumentValidation
+		}
+		if opt.Collation != nil {
+			merged.Collation = opt.Collation
+		}
+		if opt.Upsert != nil {
+			merged.Upsert = opt.Upsert
+		}
+	}
+
+	return merged
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/readconcern/readconcern.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/readconcern/readconcern.go
new file mode 100644
index 0000000..c185fff
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/readconcern/readconcern.go
@@ -0,0 +1,77 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package readconcern
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// ReadConcern for replica sets and replica set shards determines which data to return from a query.
+type ReadConcern struct {
+	level string
+}
+
+// Option is an option to provide when creating a ReadConcern.
+type Option func(concern *ReadConcern)
+
+// Level creates an option that sets the level of a ReadConcern.
+func Level(level string) Option {
+	return func(concern *ReadConcern) {
+		concern.level = level
+	}
+}
+
+// Local specifies that the query should return the instance’s most recent data.
+func Local() *ReadConcern {
+	return New(Level("local"))
+}
+
+// Majority specifies that the query should return the instance’s most recent data acknowledged as
+// having been written to a majority of members in the replica set.
+func Majority() *ReadConcern {
+	return New(Level("majority"))
+}
+
+// Linearizable specifies that the query should return data that reflects all successful writes
+// issued with a write concern of "majority" and acknowledged prior to the start of the read operation.
+func Linearizable() *ReadConcern {
+	return New(Level("linearizable"))
+}
+
+// Available specifies that the query should return data from the instance with no guarantee
+// that the data has been written to a majority of the replica set members (i.e. may be rolled back).
+func Available() *ReadConcern {
+	return New(Level("available"))
+}
+
+// Snapshot is only available for operations within multi-document transactions.
+func Snapshot() *ReadConcern {
+	return New(Level("snapshot"))
+}
+
+// New constructs a new read concern from the given options.
+func New(options ...Option) *ReadConcern {
+	concern := &ReadConcern{}
+
+	for _, option := range options {
+		option(concern)
+	}
+
+	return concern
+}
+
+// MarshalBSONValue implements the bson.ValueMarshaler interface.
+func (rc *ReadConcern) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	var elems []byte
+
+	if len(rc.level) > 0 {
+		elems = bsoncore.AppendStringElement(elems, "level", rc.level)
+	}
+
+	return bsontype.EmbeddedDocument, bsoncore.BuildDocument(nil, elems), nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/mode.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/mode.go
new file mode 100644
index 0000000..e7030c6
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/mode.go
@@ -0,0 +1,56 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package readpref
+
+import (
+	"fmt"
+	"strings"
+)
+
+// Mode indicates the user's preference on reads.
+type Mode uint8
+
+// Mode constants
+const (
+	_ Mode = iota
+	// PrimaryMode indicates that only a primary is
+	// considered for reading. This is the default
+	// mode.
+	PrimaryMode
+	// PrimaryPreferredMode indicates that if a primary
+	// is available, use it; otherwise, eligible
+	// secondaries will be considered.
+	PrimaryPreferredMode
+	// SecondaryMode indicates that only secondaries
+	// should be considered.
+	SecondaryMode
+	// SecondaryPreferredMode indicates that only secondaries
+	// should be considered when one is available. If none
+	// are available, then a primary will be considered.
+	SecondaryPreferredMode
+	// NearestMode indicates that all primaries and secondaries
+	// will be considered.
+	NearestMode
+)
+
+// ModeFromString returns the Mode corresponding to the given string.
+// Matching is case-insensitive; an unrecognized value returns an error.
+func ModeFromString(mode string) (Mode, error) {
+	switch strings.ToLower(mode) {
+	case "primary":
+		return PrimaryMode, nil
+	case "primarypreferred":
+		return PrimaryPreferredMode, nil
+	case "secondary":
+		return SecondaryMode, nil
+	case "secondarypreferred":
+		return SecondaryPreferredMode, nil
+	case "nearest":
+		return NearestMode, nil
+	}
+	return Mode(0), fmt.Errorf("unknown read preference %v", mode)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/options.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/options.go
new file mode 100644
index 0000000..a81cf3e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/options.go
@@ -0,0 +1,60 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package readpref
+
+import (
+	"errors"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/tag"
+)
+
+// ErrInvalidTagSet indicates that an invalid set of tags was specified.
+var ErrInvalidTagSet = errors.New("an even number of tags must be specified")
+
+// Option configures a read preference
+type Option func(*ReadPref) error
+
+// WithMaxStaleness sets the maximum staleness a
+// server is allowed.
+func WithMaxStaleness(ms time.Duration) Option {
+	return func(rp *ReadPref) error {
+		rp.maxStaleness = ms
+		rp.maxStalenessSet = true
+		return nil
+	}
+}
+
+// WithTags sets a single tag set used to match
+// a server. The last call to WithTags or WithTagSets
+// overrides all previous calls to either method.
+func WithTags(tags ...string) Option {
+	return func(rp *ReadPref) error {
+		n := len(tags)
+		if n < 2 || n%2 != 0 {
+			return ErrInvalidTagSet
+		}
+
+		// tags is a flat name/value list; pair up adjacent entries.
+		set := make(tag.Set, 0, n/2)
+		for i := 0; i+1 < n; i += 2 {
+			set = append(set, tag.Tag{Name: tags[i], Value: tags[i+1]})
+		}
+
+		return WithTagSets(set)(rp)
+	}
+}
+
+// WithTagSets sets the tag sets used to match
+// a server. The last call to WithTags or WithTagSets
+// overrides all previous calls to either method.
+func WithTagSets(tagSets ...tag.Set) Option {
+	return func(rp *ReadPref) error {
+		rp.tagSets = tagSets
+		return nil
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/readpref.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/readpref.go
new file mode 100644
index 0000000..0d624ff
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/readpref/readpref.go
@@ -0,0 +1,99 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package readpref
+
+import (
+	"errors"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/tag"
+)
+
+var (
+	errInvalidReadPreference = errors.New("can not specify tags or max staleness on primary")
+)
+
+var primary = ReadPref{mode: PrimaryMode}
+
+// Primary constructs a read preference with a PrimaryMode.
+func Primary() *ReadPref {
+	return &primary
+}
+
+// PrimaryPreferred constructs a read preference with a PrimaryPreferredMode.
+func PrimaryPreferred(opts ...Option) *ReadPref {
+	// New only returns an error with a mode of Primary
+	rp, _ := New(PrimaryPreferredMode, opts...)
+	return rp
+}
+
+// SecondaryPreferred constructs a read preference with a SecondaryPreferredMode.
+func SecondaryPreferred(opts ...Option) *ReadPref {
+	// New only returns an error with a mode of Primary
+	rp, _ := New(SecondaryPreferredMode, opts...)
+	return rp
+}
+
+// Secondary constructs a read preference with a SecondaryMode.
+func Secondary(opts ...Option) *ReadPref {
+	// New only returns an error with a mode of Primary
+	rp, _ := New(SecondaryMode, opts...)
+	return rp
+}
+
+// Nearest constructs a read preference with a NearestMode.
+func Nearest(opts ...Option) *ReadPref {
+	// New only returns an error with a mode of Primary
+	rp, _ := New(NearestMode, opts...)
+	return rp
+}
+
+// New creates a new ReadPref.
+//
+// PrimaryMode may not be combined with any options (tags or max
+// staleness); supplying options with PrimaryMode returns an error.
+func New(mode Mode, opts ...Option) (*ReadPref, error) {
+	rp := &ReadPref{
+		mode: mode,
+	}
+
+	if mode == PrimaryMode && len(opts) != 0 {
+		return nil, errInvalidReadPreference
+	}
+
+	for _, opt := range opts {
+		err := opt(rp)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return rp, nil
+}
+
+// ReadPref determines which servers are considered suitable for read operations.
+type ReadPref struct {
+	maxStaleness    time.Duration
+	maxStalenessSet bool
+	mode            Mode
+	tagSets         []tag.Set
+}
+
+// MaxStaleness is the maximum amount of time to allow
+// a server to be considered eligible for selection. The
+// second return value indicates if this value has been set.
+func (r *ReadPref) MaxStaleness() (time.Duration, bool) {
+	return r.maxStaleness, r.maxStalenessSet
+}
+
+// Mode indicates the mode of the read preference.
+func (r *ReadPref) Mode() Mode {
+	return r.mode
+}
+
+// TagSets are multiple tag sets indicating
+// which servers should be considered.
+func (r *ReadPref) TagSets() []tag.Set {
+	return r.tagSets
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/results.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/results.go
new file mode 100644
index 0000000..b4bcd02
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/results.go
@@ -0,0 +1,139 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// BulkWriteResult holds the result of a bulk write operation.
+type BulkWriteResult struct {
+	InsertedCount int64
+	MatchedCount  int64
+	ModifiedCount int64
+	DeletedCount  int64
+	UpsertedCount int64
+	UpsertedIDs   map[int64]interface{}
+}
+
+// InsertOneResult is a result of an InsertOne operation.
+//
+// InsertedID will be a Go type that corresponds to a BSON type.
+type InsertOneResult struct {
+	// The identifier that was inserted.
+	InsertedID interface{}
+}
+
+// InsertManyResult is a result of an InsertMany operation.
+type InsertManyResult struct {
+	// Maps the indexes of inserted documents to their _id fields.
+	InsertedIDs []interface{}
+}
+
+// DeleteResult is a result of a DeleteOne operation.
+type DeleteResult struct {
+	// The number of documents that were deleted.
+	DeletedCount int64 `bson:"n"`
+}
+
+// ListDatabasesResult is a result of a ListDatabases operation. Each specification
+// is a description of the databases on the server.
+type ListDatabasesResult struct {
+	Databases []DatabaseSpecification
+	TotalSize int64
+}
+
+func (ldr ListDatabasesResult) fromResult(res result.ListDatabases) ListDatabasesResult { // fromResult copies a driver-level ListDatabases result into the public type.
+	ldr.Databases = make([]DatabaseSpecification, 0, len(res.Databases))
+	for _, spec := range res.Databases {
+		ldr.Databases = append(
+			ldr.Databases,
+			DatabaseSpecification{Name: spec.Name, SizeOnDisk: spec.SizeOnDisk, Empty: spec.Empty},
+		)
+	}
+	ldr.TotalSize = res.TotalSize
+	return ldr
+}
+
+// DatabaseSpecification is the information for a single database returned
+// from a ListDatabases operation.
+type DatabaseSpecification struct {
+	Name       string
+	SizeOnDisk int64
+	Empty      bool
+}
+
+// UpdateResult is a result of an update operation.
+//
+// UpsertedID will be a Go type that corresponds to a BSON type.
+type UpdateResult struct {
+	// The number of documents that matched the filter.
+	MatchedCount int64
+	// The number of documents that were modified.
+	ModifiedCount int64
+	// The number of documents that were upserted.
+	UpsertedCount int64
+	// The identifier of the inserted document if an upsert took place.
+	UpsertedID interface{}
+}
+
+// UnmarshalBSON implements the bson.Unmarshaler interface for UpdateResult.
+func (result *UpdateResult) UnmarshalBSON(b []byte) error {
+	elems, err := bson.Raw(b).Elements()
+	if err != nil {
+		return err
+	}
+
+	for _, elem := range elems {
+		switch elem.Key() {
+		case "n": // matched count
+			switch elem.Value().Type {
+			case bson.TypeInt32:
+				result.MatchedCount = int64(elem.Value().Int32())
+			case bson.TypeInt64:
+				result.MatchedCount = elem.Value().Int64()
+			default:
+				return fmt.Errorf("Received invalid type for n, should be Int32 or Int64, received %s", elem.Value().Type)
+			}
+		case "nModified": // modified count
+			switch elem.Value().Type {
+			case bson.TypeInt32:
+				result.ModifiedCount = int64(elem.Value().Int32())
+			case bson.TypeInt64:
+				result.ModifiedCount = elem.Value().Int64()
+			default:
+				return fmt.Errorf("Received invalid type for nModified, should be Int32 or Int64, received %s", elem.Value().Type)
+			}
+		case "upserted": // only the first array element's _id is captured
+			switch elem.Value().Type {
+			case bson.TypeArray:
+				e, err := elem.Value().Array().IndexErr(0)
+				if err != nil {
+					break
+				}
+				if e.Value().Type != bson.TypeEmbeddedDocument {
+					break
+				}
+				var d struct {
+					ID interface{} `bson:"_id"`
+				}
+				err = bson.Unmarshal(e.Value().Document(), &d)
+				if err != nil {
+					return err
+				}
+				result.UpsertedID = d.ID
+			default:
+				return fmt.Errorf("Received invalid type for upserted, should be Array, received %s", elem.Value().Type)
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/session.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/session.go
new file mode 100644
index 0000000..381714d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/session.go
@@ -0,0 +1,181 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// ErrWrongClient is returned when a user attempts to pass in a session created by a different client than
+// the method call is using.
+var ErrWrongClient = errors.New("session was not created by this client")
+
+// SessionContext is a hybrid interface. It combines a context.Context with
+// a mongo.Session. This type can be used as a regular context.Context or
+// Session type. It is not goroutine safe and should not be used in multiple goroutines concurrently.
+type SessionContext interface {
+	context.Context
+	Session
+}
+
+type sessionContext struct { // unexported implementation of SessionContext
+	context.Context
+	Session
+}
+
+type sessionKey struct { // context key under which the Session is stored by contextWithSession
+}
+
+// Session is the interface that represents a sequential set of operations executed.
+// Instances of this interface can be used to use transactions against the server
+// and to enable causally consistent behavior for applications.
+type Session interface {
+	EndSession(context.Context)
+	StartTransaction(...*options.TransactionOptions) error
+	AbortTransaction(context.Context) error
+	CommitTransaction(context.Context) error
+	ClusterTime() bson.Raw
+	AdvanceClusterTime(bson.Raw) error
+	OperationTime() *primitive.Timestamp
+	AdvanceOperationTime(*primitive.Timestamp) error
+	session()
+}
+
+// sessionImpl represents a set of sequential operations executed by an application that are related in some way.
+type sessionImpl struct {
+	*session.Client
+	topo                *topology.Topology
+	didCommitAfterStart bool // true if commit was called after start with no other operations
+}
+
+// EndSession ends the session, first aborting any transaction still in progress.
+func (s *sessionImpl) EndSession(ctx context.Context) {
+	if s.TransactionInProgress() {
+		// ignore all errors aborting during an end session
+		_ = s.AbortTransaction(ctx)
+	}
+	s.Client.EndSession()
+}
+
+// StartTransaction starts a transaction for this session with the merged transaction options.
+func (s *sessionImpl) StartTransaction(opts ...*options.TransactionOptions) error {
+	err := s.CheckStartTransaction()
+	if err != nil {
+		return err
+	}
+
+	s.didCommitAfterStart = false
+
+	topts := options.MergeTransactionOptions(opts...)
+	coreOpts := &session.TransactionOptions{
+		ReadConcern:    topts.ReadConcern,
+		ReadPreference: topts.ReadPreference,
+		WriteConcern:   topts.WriteConcern,
+	}
+
+	return s.Client.StartTransaction(coreOpts)
+}
+
+// AbortTransaction aborts the session's transaction, returning any errors and error codes
+func (s *sessionImpl) AbortTransaction(ctx context.Context) error {
+	err := s.CheckAbortTransaction()
+	if err != nil {
+		return err
+	}
+
+	cmd := command.AbortTransaction{
+		Session: s.Client,
+	}
+
+	s.Aborting = true
+	_, err = driver.AbortTransaction(ctx, cmd, s.topo, description.WriteSelector())
+
+	_ = s.Client.AbortTransaction()
+	return err
+}
+
+// CommitTransaction commits the session's transaction.
+func (s *sessionImpl) CommitTransaction(ctx context.Context) error {
+	err := s.CheckCommitTransaction()
+	if err != nil {
+		return err
+	}
+
+	// Do not run the commit command if the transaction is in started state
+	if s.TransactionStarting() || s.didCommitAfterStart {
+		s.didCommitAfterStart = true
+		return s.Client.CommitTransaction()
+	}
+
+	if s.Client.TransactionCommitted() {
+		s.RetryingCommit = true
+	}
+
+	cmd := command.CommitTransaction{
+		Session: s.Client,
+	}
+
+	// Hack to ensure that session stays in committed state
+	if s.TransactionCommitted() {
+		s.Committing = true
+		defer func() {
+			s.Committing = false
+		}()
+	}
+	_, err = driver.CommitTransaction(ctx, cmd, s.topo, description.WriteSelector())
+	if err == nil {
+		return s.Client.CommitTransaction()
+	}
+	return err
+}
+}
+
+func (s *sessionImpl) ClusterTime() bson.Raw { // ClusterTime returns the client session's cluster time.
+	return s.Client.ClusterTime
+}
+
+func (s *sessionImpl) AdvanceClusterTime(d bson.Raw) error { // AdvanceClusterTime delegates to the core session client.
+	return s.Client.AdvanceClusterTime(d)
+}
+
+func (s *sessionImpl) OperationTime() *primitive.Timestamp { // OperationTime returns the client session's operation time.
+	return s.Client.OperationTime
+}
+
+func (s *sessionImpl) AdvanceOperationTime(ts *primitive.Timestamp) error { // AdvanceOperationTime delegates to the core session client.
+	return s.Client.AdvanceOperationTime(ts)
+}
+
+func (*sessionImpl) session() { // marker method that satisfies the Session interface.
+}
+
+// sessionFromContext checks for a sessionImpl in the given context and returns the core session client if it
+// exists
+func sessionFromContext(ctx context.Context) *session.Client {
+	s := ctx.Value(sessionKey{})
+	if ses, ok := s.(*sessionImpl); ses != nil && ok {
+		return ses.Client
+	}
+
+	return nil
+}
+
+func contextWithSession(ctx context.Context, sess Session) SessionContext { // contextWithSession stores sess in ctx under sessionKey.
+	return &sessionContext{
+		Context: context.WithValue(ctx, sessionKey{}, sess),
+		Session: sess,
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/single_result.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/single_result.go
new file mode 100644
index 0000000..9a929db
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/single_result.go
@@ -0,0 +1,93 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
+
+import (
+	"context"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+)
+
+// ErrNoDocuments is returned by Decode when an operation that returns a
+// SingleResult doesn't return any documents.
+var ErrNoDocuments = errors.New("mongo: no documents in result")
+
+// SingleResult represents a single document returned from an operation. If
+// the operation returned an error, the Err method of SingleResult will
+// return that error.
+type SingleResult struct {
+	err error               // error from the originating operation, if any
+	cur *Cursor             // cursor to pull the document from when rdr is unset
+	rdr bson.Raw            // raw document, when already materialized
+	reg *bsoncodec.Registry // registry used to decode documents
+}
+
+// Decode will attempt to decode the first document into v. If there was an
+// error from the operation that created this SingleResult then the error
+// will be returned. If there were no returned documents, ErrNoDocuments is
+// returned.
+func (sr *SingleResult) Decode(v interface{}) error {
+	if sr.err != nil {
+		return sr.err
+	}
+	if sr.reg == nil {
+		return bson.ErrNilRegistry
+	}
+	switch {
+	case sr.rdr != nil:
+		if v == nil {
+			return nil
+		}
+		return bson.UnmarshalWithRegistry(sr.reg, sr.rdr, v)
+	case sr.cur != nil:
+		defer sr.cur.Close(context.TODO()) // only the first document is needed; release the cursor
+		if !sr.cur.Next(context.TODO()) {
+			if err := sr.cur.Err(); err != nil {
+				return err
+			}
+			return ErrNoDocuments
+		}
+		if v == nil {
+			return nil
+		}
+		return sr.cur.Decode(v)
+	}
+
+	return ErrNoDocuments
+}
+
+// DecodeBytes will return a copy of the document as a bson.Raw. If there was an
+// error from the operation that created this SingleResult then the error
+// will be returned. If there were no returned documents, ErrNoDocuments is
+// returned.
+func (sr *SingleResult) DecodeBytes() (bson.Raw, error) {
+	switch {
+	case sr.err != nil:
+		return nil, sr.err
+	case sr.rdr != nil:
+		return sr.rdr, nil
+	case sr.cur != nil:
+		defer sr.cur.Close(context.TODO()) // only the first document is needed; release the cursor
+		if !sr.cur.Next(context.TODO()) {
+			if err := sr.cur.Err(); err != nil {
+				return nil, err
+			}
+			return nil, ErrNoDocuments
+		}
+		return sr.cur.Current, nil
+	}
+
+	return nil, ErrNoDocuments
+}
+
+// Err will return the error from the operation that created this SingleResult.
+// If there was no error, nil is returned.
+func (sr *SingleResult) Err() error {
+	return sr.err
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/util.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/util.go
new file mode 100644
index 0000000..270fa24
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/util.go
@@ -0,0 +1,7 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package mongo
diff --git a/vendor/github.com/mongodb/mongo-go-driver/mongo/writeconcern/writeconcern.go b/vendor/github.com/mongodb/mongo-go-driver/mongo/writeconcern/writeconcern.go
new file mode 100644
index 0000000..234ba19
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/mongo/writeconcern/writeconcern.go
@@ -0,0 +1,186 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package writeconcern
+
+import (
+	"errors"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// ErrInconsistent indicates that an inconsistent write concern was specified.
+var ErrInconsistent = errors.New("a write concern cannot have both w=0 and j=true")
+
+// ErrEmptyWriteConcern indicates that a write concern has no fields set.
+var ErrEmptyWriteConcern = errors.New("a write concern must have at least one field set")
+
+// ErrNegativeW indicates that a negative integer `w` field was specified.
+var ErrNegativeW = errors.New("write concern `w` field cannot be a negative number")
+
+// ErrNegativeWTimeout indicates that a negative WTimeout was specified.
+var ErrNegativeWTimeout = errors.New("write concern `wtimeout` field cannot be negative")
+
+// WriteConcern describes the level of acknowledgement requested from MongoDB for write operations
+// to a standalone mongod or to replica sets or to sharded clusters.
+type WriteConcern struct {
+	w        interface{}
+	j        bool
+	wTimeout time.Duration
+}
+
+// Option is an option to provide when creating a WriteConcern.
+type Option func(concern *WriteConcern)
+
+// New constructs a new WriteConcern from the given options.
+func New(options ...Option) *WriteConcern {
+	concern := &WriteConcern{}
+
+	for _, option := range options {
+		option(concern)
+	}
+
+	return concern
+}
+
+// W requests acknowledgement that write operations propagate to the specified number of mongod
+// instances.
+func W(w int) Option {
+	return func(concern *WriteConcern) {
+		concern.w = w
+	}
+}
+
+// WMajority requests acknowledgement that write operations propagate to the majority of mongod
+// instances.
+func WMajority() Option {
+	return func(concern *WriteConcern) {
+		concern.w = "majority"
+	}
+}
+
+// WTagSet requests acknowledgement that write operations propagate to the specified mongod
+// instance.
+func WTagSet(tag string) Option {
+	return func(concern *WriteConcern) {
+		concern.w = tag
+	}
+}
+
+// J requests acknowledgement from MongoDB that write operations are written to
+// the journal.
+func J(j bool) Option {
+	return func(concern *WriteConcern) {
+		concern.j = j
+	}
+}
+
+// WTimeout specifies a time limit for the write concern.
+func WTimeout(d time.Duration) Option {
+	return func(concern *WriteConcern) {
+		concern.wTimeout = d
+	}
+}
+
+// MarshalBSONValue implements the bson.ValueMarshaler interface.
+func (wc *WriteConcern) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	if !wc.IsValid() {
+		return bsontype.Type(0), nil, ErrInconsistent
+	}
+
+	var elems []byte
+
+	if wc.w != nil {
+		switch t := wc.w.(type) {
+		case int:
+			if t < 0 {
+				return bsontype.Type(0), nil, ErrNegativeW
+			}
+
+			elems = bsoncore.AppendInt32Element(elems, "w", int32(t))
+		case string:
+			elems = bsoncore.AppendStringElement(elems, "w", string(t))
+		}
+	}
+
+	if wc.j {
+		elems = bsoncore.AppendBooleanElement(elems, "j", wc.j)
+	}
+
+	if wc.wTimeout < 0 {
+		return bsontype.Type(0), nil, ErrNegativeWTimeout
+	}
+
+	if wc.wTimeout != 0 {
+		elems = bsoncore.AppendInt64Element(elems, "wtimeout", int64(wc.wTimeout/time.Millisecond)) // wtimeout is encoded in milliseconds
+	}
+
+	if len(elems) == 0 {
+		return bsontype.Type(0), nil, ErrEmptyWriteConcern
+	}
+	return bsontype.EmbeddedDocument, bsoncore.BuildDocument(nil, elems), nil
+}
+
+// AcknowledgedValue returns true if a BSON RawValue for a write concern represents an acknowledged write concern.
+// The element's value must be a document representing a write concern.
+func AcknowledgedValue(rawv bson.RawValue) bool {
+	doc, ok := bsoncore.Value{Type: rawv.Type, Data: rawv.Value}.DocumentOK()
+	if !ok {
+		return false
+	}
+
+	val, err := doc.LookupErr("w")
+	if err != nil {
+		// key w not found --> acknowledged
+		return true
+	}
+
+	i32, ok := val.Int32OK()
+	if !ok {
+		return false
+	}
+	return i32 != 0
+}
+
+// Acknowledged indicates whether or not a write with the given write concern will be acknowledged.
+func (wc *WriteConcern) Acknowledged() bool {
+	if wc == nil || wc.j {
+		return true
+	}
+
+	switch v := wc.w.(type) {
+	case int:
+		if v == 0 {
+			return false
+		}
+	}
+
+	return true
+}
+
+// IsValid reports whether the write concern is valid (w=0 together with j=true is invalid).
+func (wc *WriteConcern) IsValid() bool {
+	if !wc.j {
+		return true
+	}
+
+	switch v := wc.w.(type) {
+	case int:
+		if v == 0 {
+			return false
+		}
+	}
+
+	return true
+}
+
+// AckWrite returns true if a write concern represents an acknowledged write
+func AckWrite(wc *WriteConcern) bool {
+	return wc == nil || wc.Acknowledged()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/tag/tag.go b/vendor/github.com/mongodb/mongo-go-driver/tag/tag.go
new file mode 100644
index 0000000..c10bd3e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/tag/tag.go
@@ -0,0 +1,57 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package tag
+
+// Tag is a name/value pair.
+type Tag struct {
+	Name  string
+	Value string
+}
+
+// NewTagSetFromMap creates a new tag set from a map.
+func NewTagSetFromMap(m map[string]string) Set {
+	var set Set
+	for k, v := range m {
+		set = append(set, Tag{Name: k, Value: v})
+	}
+
+	return set
+}
+
+// NewTagSetsFromMaps creates new tag sets from maps, one Set per map.
+func NewTagSetsFromMaps(maps []map[string]string) []Set {
+	sets := make([]Set, 0, len(maps))
+	for _, m := range maps {
+		sets = append(sets, NewTagSetFromMap(m))
+	}
+	return sets
+}
+
+// Set is an ordered list of Tags.
+type Set []Tag
+
+// Contains indicates whether the name/value pair exists in the tagset.
+func (ts Set) Contains(name, value string) bool {
+	for _, t := range ts {
+		if t.Name == name && t.Value == value {
+			return true
+		}
+	}
+
+	return false
+}
+
+// ContainsAll indicates whether all the name/value pairs exist in the tagset.
+func (ts Set) ContainsAll(other []Tag) bool {
+	for _, ot := range other {
+		if !ts.Contains(ot.Name, ot.Value) {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/version/version.go b/vendor/github.com/mongodb/mongo-go-driver/version/version.go
new file mode 100644
index 0000000..1aee619
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/version/version.go
@@ -0,0 +1,10 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package version
+
+// Driver is the current version string of the driver.
+var Driver = "v0.3.0"
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/array.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/array.go
new file mode 100644
index 0000000..8001d70
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/array.go
@@ -0,0 +1,97 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"strconv"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// ErrNilArray indicates that an operation was attempted on a nil *Array.
+var ErrNilArray = errors.New("array is nil")
+
+// Arr represents an array in BSON.
+type Arr []Val
+
+// String implements the fmt.Stringer interface, rendering the array as "bson.Array[...]".
+func (a Arr) String() string {
+	var buf bytes.Buffer
+	buf.Write([]byte("bson.Array["))
+	for idx, val := range a {
+		if idx > 0 {
+			buf.Write([]byte(", "))
+		}
+		fmt.Fprintf(&buf, "%s", val)
+	}
+	buf.WriteByte(']')
+
+	return buf.String()
+}
+
+// MarshalBSONValue implements the bsoncodec.ValueMarshaler interface.
+func (a Arr) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	if a == nil {
+		// TODO: Should we do this?
+		return bsontype.Null, nil, nil
+	}
+
+	idx, dst := bsoncore.ReserveLength(nil)
+	for idx, value := range a { // NOTE: this loop idx shadows the length-offset idx above
+		t, data, _ := value.MarshalBSONValue() // marshalBSONValue never returns an error.
+		dst = append(dst, byte(t))
+		dst = append(dst, strconv.Itoa(idx)...)
+		dst = append(dst, 0x00)
+		dst = append(dst, data...)
+	}
+	dst = append(dst, 0x00)
+	dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:]))) // outer idx: offset of the reserved length bytes
+	return bsontype.Array, dst, nil
+}
+
+// UnmarshalBSONValue implements the bsoncodec.ValueUnmarshaler interface.
+func (a *Arr) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
+	if a == nil {
+		return ErrNilArray
+	}
+	*a = (*a)[:0] // reset length but keep the existing backing storage
+
+	elements, err := bsoncore.Document(data).Elements()
+	if err != nil {
+		return err
+	}
+
+	for _, elem := range elements {
+		var val Val
+		rawval := elem.Value()
+		err = val.UnmarshalBSONValue(rawval.Type, rawval.Data)
+		if err != nil {
+			return err
+		}
+		*a = append(*a, val)
+	}
+	return nil
+}
+
+// Equal compares this array to another, returning true if they are equal.
+func (a Arr) Equal(a2 Arr) bool {
+	if len(a) != len(a2) {
+		return false
+	}
+	for idx := range a {
+		if !a[idx].Equal(a2[idx]) {
+			return false
+		}
+	}
+	return true
+}
+
+func (Arr) idoc() {} // marker method (presumably satisfies an internal document interface — not visible here)
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/bsoncore.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/bsoncore.go
new file mode 100644
index 0000000..8d2946a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/bsoncore.go
@@ -0,0 +1,791 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package bsoncore contains functions that can be used to encode and decode BSON
+// elements and values to or from a slice of bytes. These functions are aimed at
+// allowing low level manipulation of BSON and can be used to build a higher
+// level BSON library.
+//
+// The Read* functions within this package return the values of the element and
+// a boolean indicating if the values are valid. A boolean was used instead of
+// an error because any error that would be returned would be the same: not
+// enough bytes. This library attempts to do no validation, it will only return
+// false if there are not enough bytes for an item to be read. For example, the
+// ReadDocument function checks the length, if that length is larger than the
+// number of bytes available, it will return false; if there are enough bytes, it
+// will return those bytes and true. It is the consumer's responsibility to
+// validate those bytes.
+//
+// The Append* functions within this package will append the type value to the
+// given dst slice. If the slice has enough capacity, it will not grow the
+// slice. The Append*Element functions within this package operate in the same
+// way, but additionally append the BSON type and the key before the value.
+package bsoncore
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
// AppendType will append t to dst and return the extended buffer.
// A BSON type is a single byte, so exactly one byte is written.
func AppendType(dst []byte, t bsontype.Type) []byte { return append(dst, byte(t)) }
+
// AppendKey will append key followed by its NUL terminator to dst and return
// the extended buffer. key must not itself contain a 0x00 byte; this is not
// validated here.
func AppendKey(dst []byte, key string) []byte {
	// Appending the key and terminator separately avoids building the
	// intermediate key+"\x00" string and removes the vet-flagged
	// string(0x00) integer-to-string conversion of the original.
	dst = append(dst, key...)
	return append(dst, 0x00)
}
+
+// AppendHeader will append Type t and key to dst and return the extended
+// buffer.
+func AppendHeader(dst []byte, t bsontype.Type, key string) []byte {
+	dst = AppendType(dst, t)
+	dst = append(dst, key...)
+	return append(dst, 0x00)
+	// return append(AppendType(dst, t), key+string(0x00)...)
+}
+
// TODO(skriptble): All of the Read* functions should return src resliced to start just after what
// was read.

// ReadType will return the first byte of the provided []byte as a type. If
// there is no available byte, false is returned.
func ReadType(src []byte) (bsontype.Type, []byte, bool) {
	if len(src) < 1 {
		return 0, src, false
	}
	return bsontype.Type(src[0]), src[1:], true
}

// ReadKey will read a key from src. The 0x00 byte will not be present
// in the returned string. If there are not enough bytes available, false is
// returned.
func ReadKey(src []byte) (string, []byte, bool) { return readcstring(src) }

// ReadKeyBytes will read a key from src as bytes. The 0x00 byte will
// not be present in the returned string. If there are not enough bytes
// available, false is returned.
func ReadKeyBytes(src []byte) ([]byte, []byte, bool) { return readcstringbytes(src) }

// ReadHeader will read a type byte and a key from src. If both of these
// values cannot be read, false is returned.
// On failure the original src is returned as rem, so callers can retry once
// more bytes are available.
func ReadHeader(src []byte) (t bsontype.Type, key string, rem []byte, ok bool) {
	t, rem, ok = ReadType(src)
	if !ok {
		return 0, "", src, false
	}
	key, rem, ok = ReadKey(rem)
	if !ok {
		return 0, "", src, false
	}

	return t, key, rem, true
}
+
// ReadHeaderBytes will read a type and a key from src and the remainder of the bytes
// are returned as rem. If either the type or key cannot be read, ok will be false.
func ReadHeaderBytes(src []byte) (header []byte, rem []byte, ok bool) {
	if len(src) < 1 {
		return nil, src, false
	}
	// The key starts at src[1] and runs to a NUL terminator. idx is relative
	// to src[1:], so the terminator sits at absolute index idx+1 and the full
	// header (type byte + key + terminator) spans src[:idx+2]. The previous
	// code returned src[:idx] and src[idx+1:], which truncated the header by
	// two bytes and left the terminator at the front of rem.
	idx := bytes.IndexByte(src[1:], 0x00)
	if idx == -1 {
		return nil, src, false
	}
	return src[:idx+2], src[idx+2:], true
}
+
// ReadElement reads the next full element from src. It returns the element, the remaining bytes in
// the slice, and a boolean indicating if the read was successful.
func ReadElement(src []byte) (Element, []byte, bool) {
	if len(src) < 1 {
		return nil, src, false
	}
	t := bsontype.Type(src[0])
	// Locate the key's NUL terminator. idx is relative to src[1:], so the
	// terminator's absolute position is idx+1 and the value begins at idx+2.
	idx := bytes.IndexByte(src[1:], 0x00)
	if idx == -1 {
		return nil, src, false
	}
	length, ok := valueLength(src[idx+2:], t) // We add 2 here because we called IndexByte with src[1:]
	if !ok {
		return nil, src, false
	}
	// Total element size: type byte + key bytes + NUL terminator + value.
	elemLength := 1 + idx + 1 + int(length)
	if elemLength > len(src) {
		return nil, src, false
	}
	return src[:elemLength], src[elemLength:], true
}
+
+// ReadValue reads the next value as the provided types and returns a Value, the remaining bytes,
+// and a boolean indicating if the read was successful.
+func ReadValue(src []byte, t bsontype.Type) (Value, []byte, bool) {
+	data, rem, ok := readValue(src, t)
+	if !ok {
+		return Value{}, src, false
+	}
+	return Value{Type: t, Data: data}, rem, true
+}
+
// AppendDouble will append f to dst and return the extended buffer.
// The float64 is written as its IEEE 754 bit pattern in little-endian order.
func AppendDouble(dst []byte, f float64) []byte {
	return appendu64(dst, math.Float64bits(f))
}

// AppendDoubleElement will append a BSON double element using key and f to dst
// and return the extended buffer.
func AppendDoubleElement(dst []byte, key string, f float64) []byte {
	return AppendDouble(AppendHeader(dst, bsontype.Double, key), f)
}

// ReadDouble will read a float64 from src. If there are not enough bytes it
// will return false.
func ReadDouble(src []byte) (float64, []byte, bool) {
	bits, src, ok := readu64(src)
	if !ok {
		return 0, src, false
	}
	return math.Float64frombits(bits), src, true
}

// AppendString will append s to dst and return the extended buffer.
// The string is written as an int32 length (counting the NUL terminator),
// the bytes of s, then a trailing 0x00.
func AppendString(dst []byte, s string) []byte {
	return appendstring(dst, s)
}

// AppendStringElement will append a BSON string element using key and val to dst
// and return the extended buffer.
func AppendStringElement(dst []byte, key, val string) []byte {
	return AppendString(AppendHeader(dst, bsontype.String, key), val)
}

// ReadString will read a string from src. If there are not enough bytes it
// will return false.
func ReadString(src []byte) (string, []byte, bool) {
	return readstring(src)
}

// AppendDocumentStart reserves a document's length and returns the index where the length begins.
// This index can later be used to write the length of the document.
//
// TODO(skriptble): We really need AppendDocumentStart and AppendDocumentEnd.
// AppendDocumentStart would handle calling ReserveLength and providing the index of the start of
// the document. AppendDocumentEnd would handle taking that start index, adding the null byte,
// calculating the length, and filling in the length at the start of the document.
func AppendDocumentStart(dst []byte) (index int32, b []byte) { return ReserveLength(dst) }

// AppendDocumentStartInline functions the same as AppendDocumentStart but takes a pointer to the
// index int32 which allows this function to be used inline.
func AppendDocumentStartInline(dst []byte, index *int32) []byte {
	idx, doc := AppendDocumentStart(dst)
	*index = idx
	return doc
}
+
// AppendDocumentElementStart writes a document element header and then reserves the length bytes.
func AppendDocumentElementStart(dst []byte, key string) (index int32, b []byte) {
	return AppendDocumentStart(AppendHeader(dst, bsontype.EmbeddedDocument, key))
}

// AppendDocumentEnd writes the null byte for a document and updates the length of the document.
// The index should be the beginning of the document's length bytes.
func AppendDocumentEnd(dst []byte, index int32) ([]byte, error) {
	// index must leave room for the four reserved length bytes.
	if int(index) > len(dst)-4 {
		return dst, fmt.Errorf("not enough bytes available after index to write length")
	}
	dst = append(dst, 0x00) // document terminator
	dst = UpdateLength(dst, index, int32(len(dst[index:])))
	return dst, nil
}

// AppendDocument will append doc to dst and return the extended buffer.
// doc must already be a complete, well-formed BSON document.
func AppendDocument(dst []byte, doc []byte) []byte { return append(dst, doc...) }

// AppendDocumentElement will append a BSON embedded document element using key
// and doc to dst and return the extended buffer.
func AppendDocumentElement(dst []byte, key string, doc []byte) []byte {
	return AppendDocument(AppendHeader(dst, bsontype.EmbeddedDocument, key), doc)
}
+
+// BuildDocument will create a document with the given elements and will append it to dst.
+func BuildDocument(dst []byte, elems []byte) []byte {
+	idx, dst := ReserveLength(dst)
+	dst = append(dst, elems...)
+	dst = append(dst, 0x00)
+	dst = UpdateLength(dst, idx, int32(len(dst[idx:])))
+	return dst
+}
+
// ReadDocument will read a document from src. If there are not enough bytes it
// will return false.
func ReadDocument(src []byte) (doc Document, rem []byte, ok bool) { return readLengthBytes(src) }

// AppendArrayStart appends the length bytes to an array and then returns the index of the start
// of those length bytes.
func AppendArrayStart(dst []byte) (index int32, b []byte) { return ReserveLength(dst) }

// AppendArrayElementStart appends an array element header and then the length bytes for an array,
// returning the index where the length starts.
func AppendArrayElementStart(dst []byte, key string) (index int32, b []byte) {
	return AppendArrayStart(AppendHeader(dst, bsontype.Array, key))
}

// AppendArrayEnd appends the null byte to an array and calculates the length, inserting that
// calculated length starting at index.
// Arrays share the document wire format, so this delegates to AppendDocumentEnd.
func AppendArrayEnd(dst []byte, index int32) ([]byte, error) { return AppendDocumentEnd(dst, index) }

// AppendArray will append arr to dst and return the extended buffer.
// arr must already be a complete, well-formed BSON array document.
func AppendArray(dst []byte, arr []byte) []byte { return append(dst, arr...) }

// AppendArrayElement will append a BSON array element using key and arr to dst
// and return the extended buffer.
func AppendArrayElement(dst []byte, key string, arr []byte) []byte {
	return AppendArray(AppendHeader(dst, bsontype.Array, key), arr)
}

// ReadArray will read an array from src. If there are not enough bytes it
// will return false.
func ReadArray(src []byte) (arr Document, rem []byte, ok bool) { return readLengthBytes(src) }
+
// AppendBinary will append subtype and b to dst and return the extended buffer.
func AppendBinary(dst []byte, subtype byte, b []byte) []byte {
	// Subtype 0x02 ("old" binary) embeds a second, inner length prefix, so
	// it has a dedicated encoder.
	if subtype == 0x02 {
		return appendBinarySubtype2(dst, subtype, b)
	}
	dst = append(appendLength(dst, int32(len(b))), subtype)
	return append(dst, b...)
}

// AppendBinaryElement will append a BSON binary element using key, subtype, and
// b to dst and return the extended buffer.
func AppendBinaryElement(dst []byte, key string, subtype byte, b []byte) []byte {
	return AppendBinary(AppendHeader(dst, bsontype.Binary, key), subtype, b)
}
+
// ReadBinary will read a subtype and bin from src. If there are not enough bytes it
// will return false.
func ReadBinary(src []byte) (subtype byte, bin []byte, rem []byte, ok bool) {
	length, rem, ok := ReadLength(src)
	if !ok {
		return 0x00, nil, src, false
	}
	if len(rem) < 1 { // subtype
		return 0x00, nil, src, false
	}
	subtype, rem = rem[0], rem[1:]

	if len(rem) < int(length) {
		return 0x00, nil, src, false
	}

	// Subtype 0x02 ("old" binary) wraps the payload in a second length
	// prefix; re-read length past it so bin contains only the payload.
	if subtype == 0x02 {
		length, rem, ok = ReadLength(rem)
		if !ok || len(rem) < int(length) {
			return 0x00, nil, src, false
		}
	}

	return subtype, rem[:length], rem[length:], true
}
+
// AppendUndefinedElement will append a BSON undefined element using key to dst
// and return the extended buffer.
// Undefined has no value bytes, so only the header is written.
func AppendUndefinedElement(dst []byte, key string) []byte {
	return AppendHeader(dst, bsontype.Undefined, key)
}

// AppendObjectID will append oid to dst and return the extended buffer.
// An ObjectID is always exactly 12 bytes.
func AppendObjectID(dst []byte, oid primitive.ObjectID) []byte { return append(dst, oid[:]...) }

// AppendObjectIDElement will append a BSON ObjectID element using key and oid to dst
// and return the extended buffer.
func AppendObjectIDElement(dst []byte, key string, oid primitive.ObjectID) []byte {
	return AppendObjectID(AppendHeader(dst, bsontype.ObjectID, key), oid)
}

// ReadObjectID will read an ObjectID from src. If there are not enough bytes it
// will return false.
func ReadObjectID(src []byte) (primitive.ObjectID, []byte, bool) {
	if len(src) < 12 {
		return primitive.ObjectID{}, src, false
	}
	var oid primitive.ObjectID
	copy(oid[:], src[0:12])
	return oid, src[12:], true
}
+
// AppendBoolean appends the single-byte encoding of b (0x01 for true, 0x00
// for false) to dst and returns the extended buffer.
func AppendBoolean(dst []byte, b bool) []byte {
	var v byte
	if b {
		v = 0x01
	}
	return append(dst, v)
}
+
// AppendBooleanElement will append a BSON boolean element using key and b to dst
// and return the extended buffer.
func AppendBooleanElement(dst []byte, key string, b bool) []byte {
	return AppendBoolean(AppendHeader(dst, bsontype.Boolean, key), b)
}

// ReadBoolean will read a bool from src. If there are not enough bytes it
// will return false.
// Note: only 0x01 decodes as true; any other byte value decodes as false.
func ReadBoolean(src []byte) (bool, []byte, bool) {
	if len(src) < 1 {
		return false, src, false
	}

	return src[0] == 0x01, src[1:], true
}
+
// AppendDateTime will append dt to dst and return the extended buffer.
// dt is milliseconds since the Unix epoch, stored as a little-endian int64.
func AppendDateTime(dst []byte, dt int64) []byte { return appendi64(dst, dt) }

// AppendDateTimeElement will append a BSON datetime element using key and dt to dst
// and return the extended buffer.
func AppendDateTimeElement(dst []byte, key string, dt int64) []byte {
	return AppendDateTime(AppendHeader(dst, bsontype.DateTime, key), dt)
}

// ReadDateTime will read an int64 datetime from src. If there are not enough bytes it
// will return false.
func ReadDateTime(src []byte) (int64, []byte, bool) { return readi64(src) }

// AppendTime will append time as a BSON DateTime to dst and return the extended buffer.
// t is converted to milliseconds since the Unix epoch; sub-millisecond
// precision is truncated.
func AppendTime(dst []byte, t time.Time) []byte {
	return AppendDateTime(dst, t.Unix()*1000+int64(t.Nanosecond()/1e6))
}

// AppendTimeElement will append a BSON datetime element using key and dt to dst
// and return the extended buffer.
func AppendTimeElement(dst []byte, key string, t time.Time) []byte {
	return AppendTime(AppendHeader(dst, bsontype.DateTime, key), t)
}

// ReadTime will read an time.Time datetime from src. If there are not enough bytes it
// will return false.
// NOTE(review): time.Unix yields a time in the local zone; callers needing
// UTC must convert — confirm this matches caller expectations.
func ReadTime(src []byte) (time.Time, []byte, bool) {
	dt, rem, ok := readi64(src)
	return time.Unix(dt/1e3, dt%1e3*1e6), rem, ok
}
+
// AppendNullElement will append a BSON null element using key to dst
// and return the extended buffer. Null has no value bytes.
func AppendNullElement(dst []byte, key string) []byte { return AppendHeader(dst, bsontype.Null, key) }
+
// AppendRegex will append pattern and options to dst and return the extended
// buffer. Both are encoded as NUL-terminated cstrings, so neither may contain
// a 0x00 byte; this is not validated here.
func AppendRegex(dst []byte, pattern, options string) []byte {
	// Appending the pieces separately avoids building an intermediate
	// concatenated string and removes the vet-flagged string(0x00)
	// integer-to-string conversions of the original.
	dst = append(dst, pattern...)
	dst = append(dst, 0x00)
	dst = append(dst, options...)
	return append(dst, 0x00)
}
+
// AppendRegexElement will append a BSON regex element using key, pattern, and
// options to dst and return the extended buffer.
func AppendRegexElement(dst []byte, key, pattern, options string) []byte {
	return AppendRegex(AppendHeader(dst, bsontype.Regex, key), pattern, options)
}

// ReadRegex will read a pattern and options from src. If there are not enough bytes it
// will return false.
// The pattern and options are two consecutive NUL-terminated cstrings.
func ReadRegex(src []byte) (pattern, options string, rem []byte, ok bool) {
	pattern, rem, ok = readcstring(src)
	if !ok {
		return "", "", src, false
	}
	options, rem, ok = readcstring(rem)
	if !ok {
		return "", "", src, false
	}
	return pattern, options, rem, true
}
+
// AppendDBPointer will append ns and oid to dst and return the extended buffer.
// The namespace is a length-prefixed string followed by the 12-byte ObjectID.
func AppendDBPointer(dst []byte, ns string, oid primitive.ObjectID) []byte {
	return append(appendstring(dst, ns), oid[:]...)
}

// AppendDBPointerElement will append a BSON DBPointer element using key, ns,
// and oid to dst and return the extended buffer.
func AppendDBPointerElement(dst []byte, key, ns string, oid primitive.ObjectID) []byte {
	return AppendDBPointer(AppendHeader(dst, bsontype.DBPointer, key), ns, oid)
}

// ReadDBPointer will read a ns and oid from src. If there are not enough bytes it
// will return false.
func ReadDBPointer(src []byte) (ns string, oid primitive.ObjectID, rem []byte, ok bool) {
	ns, rem, ok = readstring(src)
	if !ok {
		return "", primitive.ObjectID{}, src, false
	}
	oid, rem, ok = ReadObjectID(rem)
	if !ok {
		return "", primitive.ObjectID{}, src, false
	}
	return ns, oid, rem, true
}

// AppendJavaScript will append js to dst and return the extended buffer.
func AppendJavaScript(dst []byte, js string) []byte { return appendstring(dst, js) }

// AppendJavaScriptElement will append a BSON JavaScript element using key and
// js to dst and return the extended buffer.
func AppendJavaScriptElement(dst []byte, key, js string) []byte {
	return AppendJavaScript(AppendHeader(dst, bsontype.JavaScript, key), js)
}

// ReadJavaScript will read a js string from src. If there are not enough bytes it
// will return false.
func ReadJavaScript(src []byte) (js string, rem []byte, ok bool) { return readstring(src) }

// AppendSymbol will append symbol to dst and return the extended buffer.
func AppendSymbol(dst []byte, symbol string) []byte { return appendstring(dst, symbol) }

// AppendSymbolElement will append a BSON symbol element using key and symbol to dst
// and return the extended buffer.
func AppendSymbolElement(dst []byte, key, symbol string) []byte {
	return AppendSymbol(AppendHeader(dst, bsontype.Symbol, key), symbol)
}

// ReadSymbol will read a symbol string from src. If there are not enough bytes it
// will return false.
func ReadSymbol(src []byte) (symbol string, rem []byte, ok bool) { return readstring(src) }

// AppendCodeWithScope will append code and scope to dst and return the extended buffer.
// scope must already be a complete, well-formed BSON document.
func AppendCodeWithScope(dst []byte, code string, scope []byte) []byte {
	// Total length: 4 (this prefix itself) + 4 (code's length prefix) +
	// len(code) + 1 (code's NUL terminator) + len(scope).
	length := int32(4 + 4 + len(code) + 1 + len(scope)) // length of cws, length of code, code, 0x00, scope
	dst = appendLength(dst, length)

	return append(appendstring(dst, code), scope...)
}

// AppendCodeWithScopeElement will append a BSON code with scope element using
// key, code, and scope to dst
// and return the extended buffer.
func AppendCodeWithScopeElement(dst []byte, key, code string, scope []byte) []byte {
	return AppendCodeWithScope(AppendHeader(dst, bsontype.CodeWithScope, key), code, scope)
}
+
// ReadCodeWithScope will read code and scope from src. If there are not enough bytes it
// will return false.
func ReadCodeWithScope(src []byte) (code string, scope []byte, rem []byte, ok bool) {
	// length covers the entire code-with-scope value including these four
	// length bytes, so it is validated against len(src), not len(rem).
	length, rem, ok := ReadLength(src)
	if !ok || len(src) < int(length) {
		return "", nil, src, false
	}

	code, rem, ok = readstring(rem)
	if !ok {
		return "", nil, src, false
	}

	scope, rem, ok = ReadDocument(rem)
	if !ok {
		return "", nil, src, false
	}
	return code, scope, rem, true
}
+
// AppendInt32 will append i32 to dst and return the extended buffer.
func AppendInt32(dst []byte, i32 int32) []byte { return appendi32(dst, i32) }

// AppendInt32Element will append a BSON int32 element using key and i32 to dst
// and return the extended buffer.
func AppendInt32Element(dst []byte, key string, i32 int32) []byte {
	return AppendInt32(AppendHeader(dst, bsontype.Int32, key), i32)
}

// ReadInt32 will read an int32 from src. If there are not enough bytes it
// will return false.
func ReadInt32(src []byte) (int32, []byte, bool) { return readi32(src) }

// AppendTimestamp will append t and i to dst and return the extended buffer.
// t is the seconds component and i the increment component of the timestamp.
func AppendTimestamp(dst []byte, t, i uint32) []byte {
	return appendu32(appendu32(dst, i), t) // i is the lower 4 bytes, t is the higher 4 bytes
}

// AppendTimestampElement will append a BSON timestamp element using key, t, and
// i to dst and return the extended buffer.
func AppendTimestampElement(dst []byte, key string, t, i uint32) []byte {
	return AppendTimestamp(AppendHeader(dst, bsontype.Timestamp, key), t, i)
}

// ReadTimestamp will read t and i from src. If there are not enough bytes it
// will return false.
// Mirrors AppendTimestamp: the increment (low 4 bytes) is read first.
func ReadTimestamp(src []byte) (t, i uint32, rem []byte, ok bool) {
	i, rem, ok = readu32(src)
	if !ok {
		return 0, 0, src, false
	}
	t, rem, ok = readu32(rem)
	if !ok {
		return 0, 0, src, false
	}
	return t, i, rem, true
}

// AppendInt64 will append i64 to dst and return the extended buffer.
func AppendInt64(dst []byte, i64 int64) []byte { return appendi64(dst, i64) }

// AppendInt64Element will append a BSON int64 element using key and i64 to dst
// and return the extended buffer.
func AppendInt64Element(dst []byte, key string, i64 int64) []byte {
	return AppendInt64(AppendHeader(dst, bsontype.Int64, key), i64)
}

// ReadInt64 will read an int64 from src. If there are not enough bytes it
// will return false.
func ReadInt64(src []byte) (int64, []byte, bool) { return readi64(src) }
+
// AppendDecimal128 will append d128 to dst and return the extended buffer.
// The low 8 bytes are written first, then the high 8 bytes.
func AppendDecimal128(dst []byte, d128 primitive.Decimal128) []byte {
	high, low := d128.GetBytes()
	return appendu64(appendu64(dst, low), high)
}

// AppendDecimal128Element will append a BSON primitive.Decimal128 element using
// key and d128 to dst and return the extended buffer.
func AppendDecimal128Element(dst []byte, key string, d128 primitive.Decimal128) []byte {
	return AppendDecimal128(AppendHeader(dst, bsontype.Decimal128, key), d128)
}

// ReadDecimal128 will read a primitive.Decimal128 from src. If there are not enough bytes it
// will return false.
// Mirrors AppendDecimal128: the low word is read first, then the high word.
func ReadDecimal128(src []byte) (primitive.Decimal128, []byte, bool) {
	l, rem, ok := readu64(src)
	if !ok {
		return primitive.Decimal128{}, src, false
	}

	h, rem, ok := readu64(rem)
	if !ok {
		return primitive.Decimal128{}, src, false
	}

	return primitive.NewDecimal128(h, l), rem, true
}

// AppendMaxKeyElement will append a BSON max key element using key to dst
// and return the extended buffer. MaxKey has no value bytes.
func AppendMaxKeyElement(dst []byte, key string) []byte {
	return AppendHeader(dst, bsontype.MaxKey, key)
}

// AppendMinKeyElement will append a BSON min key element using key to dst
// and return the extended buffer. MinKey has no value bytes.
func AppendMinKeyElement(dst []byte, key string) []byte {
	return AppendHeader(dst, bsontype.MinKey, key)
}
+
+// EqualValue will return true if the two values are equal.
+func EqualValue(t1, t2 bsontype.Type, v1, v2 []byte) bool {
+	if t1 != t2 {
+		return false
+	}
+	v1, _, ok := readValue(v1, t1)
+	if !ok {
+		return false
+	}
+	v2, _, ok = readValue(v2, t2)
+	if !ok {
+		return false
+	}
+	return bytes.Equal(v1, v2)
+}
+
// valueLength will determine the length of the next value contained in src as if it
// is type t. The returned bool will be false if there are not enough bytes in src for
// a value of type t.
func valueLength(src []byte, t bsontype.Type) (int32, bool) {
	var length int32
	ok := true
	switch t {
	case bsontype.Array, bsontype.EmbeddedDocument, bsontype.CodeWithScope:
		// These types carry a length prefix that already includes itself.
		length, _, ok = ReadLength(src)
	case bsontype.Binary:
		length, _, ok = ReadLength(src)
		length += 4 + 1 // binary length + subtype byte
	case bsontype.Boolean:
		length = 1
	case bsontype.DBPointer:
		length, _, ok = ReadLength(src)
		length += 4 + 12 // string length + ObjectID length
	case bsontype.DateTime, bsontype.Double, bsontype.Int64, bsontype.Timestamp:
		length = 8
	case bsontype.Decimal128:
		length = 16
	case bsontype.Int32:
		length = 4
	case bsontype.JavaScript, bsontype.String, bsontype.Symbol:
		length, _, ok = ReadLength(src)
		length += 4
	case bsontype.MaxKey, bsontype.MinKey, bsontype.Null, bsontype.Undefined:
		// These types have no value bytes at all.
		length = 0
	case bsontype.ObjectID:
		length = 12
	case bsontype.Regex:
		// A regex has no length prefix: it is two consecutive NUL-terminated
		// cstrings, so both terminators must be located by scanning.
		regex := bytes.IndexByte(src, 0x00)
		if regex < 0 {
			ok = false
			break
		}
		pattern := bytes.IndexByte(src[regex+1:], 0x00)
		if pattern < 0 {
			ok = false
			break
		}
		length = int32(int64(regex) + 1 + int64(pattern) + 1)
	default:
		ok = false
	}

	return length, ok
}
+
// readValue slices the next value of type t out of src, returning the value
// bytes, the remaining bytes, and whether enough bytes were available.
func readValue(src []byte, t bsontype.Type) ([]byte, []byte, bool) {
	length, ok := valueLength(src, t)
	if !ok || int(length) > len(src) {
		return nil, src, false
	}

	return src[:length], src[length:], true
}
+
// ReserveLength appends four zero bytes to dst as a placeholder for a
// little-endian int32 length, returning the index where the placeholder
// begins along with the extended buffer.
func ReserveLength(dst []byte) (int32, []byte) {
	idx := int32(len(dst))
	dst = append(dst, 0x00, 0x00, 0x00, 0x00)
	return idx, dst
}
+
// UpdateLength writes length as a little-endian int32 into dst starting at
// index and returns dst. The four bytes at index must already exist.
func UpdateLength(dst []byte, index, length int32) []byte {
	for i := int32(0); i < 4; i++ {
		dst[index+i] = byte(length >> (8 * i))
	}
	return dst
}
+
// appendLength appends l to dst as a little-endian int32.
func appendLength(dst []byte, l int32) []byte { return appendi32(dst, l) }

// appendi32 appends the four little-endian bytes of i32 to dst.
func appendi32(dst []byte, i32 int32) []byte {
	u := uint32(i32)
	return append(dst, byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
}
+
// ReadLength reads an int32 length from src and returns the length and the
// remaining bytes. If there aren't enough bytes to read a valid length, src
// is returned unmodified and the returned bool will be false.
func ReadLength(src []byte) (int32, []byte, bool) { return readi32(src) }

// readi32 decodes a little-endian int32 from the first four bytes of src.
func readi32(src []byte) (int32, []byte, bool) {
	if len(src) < 4 {
		return 0, src, false
	}

	var u uint32
	for i := uint(0); i < 4; i++ {
		u |= uint32(src[i]) << (8 * i)
	}
	return int32(u), src[4:], true
}
+
// appendi64 appends the eight little-endian bytes of i64 to dst.
func appendi64(dst []byte, i64 int64) []byte {
	u := uint64(i64)
	for shift := uint(0); shift < 64; shift += 8 {
		dst = append(dst, byte(u>>shift))
	}
	return dst
}

// readi64 decodes a little-endian int64 from the first eight bytes of src.
func readi64(src []byte) (int64, []byte, bool) {
	if len(src) < 8 {
		return 0, src, false
	}
	var u uint64
	for i := uint(0); i < 8; i++ {
		u |= uint64(src[i]) << (8 * i)
	}
	return int64(u), src[8:], true
}
+
// appendu32 appends the four little-endian bytes of u32 to dst.
func appendu32(dst []byte, u32 uint32) []byte {
	for shift := uint(0); shift < 32; shift += 8 {
		dst = append(dst, byte(u32>>shift))
	}
	return dst
}

// readu32 decodes a little-endian uint32 from the first four bytes of src.
func readu32(src []byte) (uint32, []byte, bool) {
	if len(src) < 4 {
		return 0, src, false
	}

	var u uint32
	for i := uint(0); i < 4; i++ {
		u |= uint32(src[i]) << (8 * i)
	}
	return u, src[4:], true
}
+
// appendu64 appends the eight little-endian bytes of u64 to dst.
func appendu64(dst []byte, u64 uint64) []byte {
	for shift := uint(0); shift < 64; shift += 8 {
		dst = append(dst, byte(u64>>shift))
	}
	return dst
}

// readu64 decodes a little-endian uint64 from the first eight bytes of src.
func readu64(src []byte) (uint64, []byte, bool) {
	if len(src) < 8 {
		return 0, src, false
	}
	var u uint64
	for i := uint(0); i < 8; i++ {
		u |= uint64(src[i]) << (8 * i)
	}
	return u, src[8:], true
}
+
// readcstring reads a NUL-terminated string from src, returning the string
// (terminator excluded) and the bytes after the terminator. On failure src is
// returned unmodified with ok == false.
func readcstring(src []byte) (string, []byte, bool) {
	b, rem, ok := readcstringbytes(src)
	return string(b), rem, ok
}

// readcstringbytes is the []byte form of readcstring; the returned slice
// aliases src and excludes the NUL terminator.
func readcstringbytes(src []byte) ([]byte, []byte, bool) {
	idx := bytes.IndexByte(src, 0x00)
	if idx < 0 {
		return nil, src, false
	}
	return src[:idx], src[idx+1:], true
}
+
// appendstring appends s in BSON string format: an int32 length counting the
// bytes of s plus the NUL terminator, then s itself, then 0x00.
func appendstring(dst []byte, s string) []byte {
	l := int32(len(s) + 1)
	dst = appendLength(dst, l)
	dst = append(dst, s...)
	return append(dst, 0x00)
}
+
+func readstring(src []byte) (string, []byte, bool) {
+	l, rem, ok := ReadLength(src)
+	if !ok {
+		return "", src, false
+	}
+	if len(src[4:]) < int(l) {
+		return "", src, false
+	}
+
+	return string(rem[:l-1]), rem[l:], true
+}
+
+// readLengthBytes attempts to read a length and that number of bytes. This
+// function requires that the length include the four bytes for itself.
+func readLengthBytes(src []byte) ([]byte, []byte, bool) {
+	l, _, ok := ReadLength(src)
+	if !ok {
+		return nil, src, false
+	}
+	if len(src) < int(l) {
+		return nil, src, false
+	}
+	return src[:l], src[l:], true
+}
+
// appendBinarySubtype2 encodes old-style binary (subtype 0x02), which wraps
// the payload in an extra inner int32 length prefix.
func appendBinarySubtype2(dst []byte, subtype byte, b []byte) []byte {
	dst = appendLength(dst, int32(len(b)+4)) // The bytes we'll encode need to be 4 larger for the length bytes
	dst = append(dst, subtype)
	dst = appendLength(dst, int32(len(b)))
	return append(dst, b...)
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/document.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/document.go
new file mode 100644
index 0000000..e655a51
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/document.go
@@ -0,0 +1,396 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/go-stack/stack"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+// DocumentValidationError is an error type returned when attempting to validate a document.
+// It is a string type so validation errors can be declared as constants.
+type DocumentValidationError string
+
+// Error implements the error interface.
+func (dve DocumentValidationError) Error() string { return string(dve) }
+
+// NewDocumentLengthError creates and returns an error for when the length of a document exceeds the
+// bytes available. length is the declared document length; rem is how many
+// bytes were actually available.
+func NewDocumentLengthError(length, rem int) error {
+	return DocumentValidationError(
+		fmt.Sprintf("document length exceeds available bytes. length=%d remainingBytes=%d", length, rem),
+	)
+}
+
+// InsufficientBytesError indicates that there were not enough bytes to read the next component.
+type InsufficientBytesError struct {
+	Source    []byte          // the full input being parsed
+	Remaining []byte          // the unparsed remainder at the point of failure
+	Stack     stack.CallStack // call stack captured when the error was created
+}
+
+// NewInsufficientBytesError creates a new InsufficientBytesError with the given Document, remaining
+// bytes, and the current call stack (runtime frames trimmed).
+func NewInsufficientBytesError(src, rem []byte) InsufficientBytesError {
+	return InsufficientBytesError{Source: src, Remaining: rem, Stack: stack.Trace().TrimRuntime()}
+}
+
+// Error implements the error interface. Use ErrorStack for a version that
+// includes the captured call stack.
+func (ibe InsufficientBytesError) Error() string {
+	return "too few bytes to read next component"
+}
+
+// ErrorStack returns a string representing the stack at the point where the error occurred.
+func (ibe InsufficientBytesError) ErrorStack() string {
+	buf := bytes.NewBufferString("too few bytes to read next component: [")
+
+	for i, call := range ibe.Stack {
+		if i > 0 {
+			buf.WriteString(", ")
+		}
+
+		// go vet doesn't like %k even though it's part of stack's API, so the
+		// format string lives in a variable to keep vet quiet. (A constant
+		// would still trigger the warning.)
+		callFormat := "%k.%n %v"
+
+		fmt.Fprintf(buf, callFormat, call, call, call)
+	}
+
+	buf.WriteRune(']')
+
+	return buf.String()
+}
+
+// Equal checks that err2 also is an ErrTooSmall.
+func (ibe InsufficientBytesError) Equal(err2 error) bool {
+	switch err2.(type) {
+	case InsufficientBytesError:
+		return true
+	default:
+		return false
+	}
+}
+
+// InvalidDepthTraversalError is returned when attempting a recursive Lookup when one component of
+// the path is neither an embedded document nor an array.
+type InvalidDepthTraversalError struct {
+	Key  string        // the key whose value blocked the traversal
+	Type bsontype.Type // the actual BSON type found at that key
+}
+
+// Error implements the error interface.
+// NOTE(review): the message text (including "it's") is kept as-is in case
+// callers match on it — confirm before correcting the wording.
+func (idte InvalidDepthTraversalError) Error() string {
+	return fmt.Sprintf(
+		"attempt to traverse into %s, but it's type is %s, not %s nor %s",
+		idte.Key, idte.Type, bsontype.EmbeddedDocument, bsontype.Array,
+	)
+}
+
+// ErrMissingNull is returned when a document's last byte is not null. It is a
+// DocumentValidationError constant so it can be compared directly.
+const ErrMissingNull DocumentValidationError = "document end is missing null byte"
+
+// ErrNilReader indicates that an operation was attempted on a nil io.Reader.
+var ErrNilReader = errors.New("nil reader")
+
+// ErrInvalidLength indicates that a length in a binary representation of a BSON document is invalid.
+var ErrInvalidLength = errors.New("document length is invalid")
+
+// ErrEmptyKey indicates that no key was provided to a Lookup method.
+var ErrEmptyKey = errors.New("empty key provided")
+
+// ErrElementNotFound indicates that an Element matching a certain condition does not exist.
+var ErrElementNotFound = errors.New("element not found")
+
+// ErrOutOfBounds indicates that an index provided to access something was invalid.
+var ErrOutOfBounds = errors.New("out of bounds")
+
+// Document is a raw bytes representation of a BSON document: a little-endian
+// int32 length prefix, the encoded elements, and a trailing null byte.
+type Document []byte
+
+// NewDocumentFromReader reads a document from r. This function will only validate the length is
+// correct and that the document ends with a null byte.
+func NewDocumentFromReader(r io.Reader) (Document, error) {
+	if r == nil {
+		return nil, ErrNilReader
+	}
+
+	var lengthBytes [4]byte
+
+	// ReadFull guarantees that we will have read at least len(lengthBytes) if err == nil
+	_, err := io.ReadFull(r, lengthBytes[:])
+	if err != nil {
+		return nil, err
+	}
+
+	length, _, _ := readi32(lengthBytes[:]) // ignore ok since we always have enough bytes to read a length
+	// A length below 4 cannot even cover the length prefix itself; without
+	// this guard the document[4:] slice below would panic for lengths 0-3.
+	if length < 4 {
+		return nil, ErrInvalidLength
+	}
+	document := make([]byte, length)
+
+	copy(document, lengthBytes[:])
+
+	_, err = io.ReadFull(r, document[4:])
+	if err != nil {
+		return nil, err
+	}
+
+	if document[length-1] != 0x00 {
+		return nil, ErrMissingNull
+	}
+
+	return document, nil
+}
+
+// Lookup searches the document, potentially recursively, for the given key. If there are multiple
+// keys provided, this method will recurse down, as long as the top and intermediate nodes are
+// either documents or arrays. If an error occurs or if the value doesn't exist, an empty Value is
+// returned. Use LookupErr when the failure reason matters.
+func (d Document) Lookup(key ...string) Value {
+	val, _ := d.LookupErr(key...)
+	return val
+}
+
+// LookupErr is the same as Lookup, except it returns an error in addition to an empty Value.
+// Possible errors: ErrEmptyKey, InsufficientBytesError, InvalidDepthTraversalError,
+// and ErrElementNotFound.
+func (d Document) LookupErr(key ...string) (Value, error) {
+	if len(key) < 1 {
+		return Value{}, ErrEmptyKey
+	}
+	length, rem, ok := ReadLength(d)
+	if !ok {
+		return Value{}, NewInsufficientBytesError(d, rem)
+	}
+
+	length -= 4 // the length prefix counts its own four bytes
+
+	var elem Element
+	for length > 1 { // stop at 1 to leave the trailing null byte unread
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return Value{}, NewInsufficientBytesError(d, rem)
+		}
+		if elem.Key() != key[0] {
+			continue
+		}
+		if len(key) > 1 {
+			// More path components remain: the matched element must be a
+			// document or array we can recurse into.
+			tt := bsontype.Type(elem[0])
+			switch tt {
+			case bsontype.EmbeddedDocument:
+				val, err := elem.Value().Document().LookupErr(key[1:]...)
+				if err != nil {
+					return Value{}, err
+				}
+				return val, nil
+			case bsontype.Array:
+				val, err := elem.Value().Array().LookupErr(key[1:]...)
+				if err != nil {
+					return Value{}, err
+				}
+				return val, nil
+			default:
+				return Value{}, InvalidDepthTraversalError{Key: elem.Key(), Type: tt}
+			}
+		}
+		return elem.ValueErr()
+	}
+	return Value{}, ErrElementNotFound
+}
+
+// Index searches for and retrieves the element at the given index. This method will panic if
+// the document is invalid or if the index is out of bounds. Use IndexErr for
+// a non-panicking variant.
+func (d Document) Index(index uint) Element {
+	elem, err := d.IndexErr(index)
+	if err != nil {
+		panic(err)
+	}
+	return elem
+}
+
+// IndexErr searches for and retrieves the element at the given index. It returns
+// ErrOutOfBounds when the document contains fewer than index+1 elements.
+func (d Document) IndexErr(index uint) (Element, error) {
+	length, rem, ok := ReadLength(d)
+	if !ok {
+		return nil, NewInsufficientBytesError(d, rem)
+	}
+
+	length -= 4 // the length prefix counts its own four bytes
+
+	var current uint
+	var elem Element
+	for length > 1 { // stop at 1 to leave the trailing null byte unread
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return nil, NewInsufficientBytesError(d, rem)
+		}
+		if current != index {
+			current++
+			continue
+		}
+		return elem, nil
+	}
+	return nil, ErrOutOfBounds
+}
+
+// DebugString outputs a human readable version of Document. It will attempt to stringify the
+// valid components of the document even if the entire document is not valid;
+// malformed sections are rendered as "<malformed ...>".
+func (d Document) DebugString() string {
+	if len(d) < 5 { // 5 bytes is the minimum: length prefix + null terminator
+		return "<malformed>"
+	}
+	var buf bytes.Buffer
+	buf.WriteString("Document")
+	length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length
+	buf.WriteByte('(')
+	buf.WriteString(strconv.Itoa(int(length)))
+	length -= 4
+	buf.WriteString("){")
+	var elem Element
+	var ok bool
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			buf.WriteString(fmt.Sprintf("<malformed (%d)>", length))
+			break
+		}
+		fmt.Fprintf(&buf, "%s ", elem.DebugString())
+	}
+	buf.WriteByte('}')
+
+	return buf.String()
+}
+
+// String outputs an ExtendedJSON version of Document. If the document is not valid, this method
+// returns an empty string.
+func (d Document) String() string {
+	if len(d) < 5 { // 5 bytes is the minimum: length prefix + null terminator
+		return ""
+	}
+	var buf bytes.Buffer
+	buf.WriteByte('{')
+
+	length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length
+
+	length -= 4
+
+	var elem Element
+	var ok bool
+	first := true
+	for length > 1 {
+		// Emit the comma separator before each element except the first.
+		if !first {
+			buf.WriteByte(',')
+		}
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return ""
+		}
+		fmt.Fprintf(&buf, "%s", elem.String())
+		first = false
+	}
+	buf.WriteByte('}')
+
+	return buf.String()
+}
+
+// Elements returns this document as a slice of elements. The returned slice will contain valid
+// elements. If the document is not valid, the elements up to the invalid point will be returned
+// along with an error.
+func (d Document) Elements() ([]Element, error) {
+	length, rem, ok := ReadLength(d)
+	if !ok {
+		return nil, NewInsufficientBytesError(d, rem)
+	}
+
+	length -= 4 // the length prefix counts its own four bytes
+
+	var elem Element
+	var elems []Element
+	for length > 1 { // stop at 1 to leave the trailing null byte unread
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return elems, NewInsufficientBytesError(d, rem)
+		}
+		if err := elem.Validate(); err != nil {
+			return elems, err
+		}
+		elems = append(elems, elem)
+	}
+	return elems, nil
+}
+
+// Values returns this document as a slice of values. The returned slice will contain valid values.
+// If the document is not valid, the values up to the invalid point will be returned along with an
+// error.
+func (d Document) Values() ([]Value, error) {
+	length, rem, ok := ReadLength(d)
+	if !ok {
+		return nil, NewInsufficientBytesError(d, rem)
+	}
+
+	length -= 4 // the length prefix counts its own four bytes
+
+	var elem Element
+	var vals []Value
+	for length > 1 { // stop at 1 to leave the trailing null byte unread
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return vals, NewInsufficientBytesError(d, rem)
+		}
+		// Parse the value once instead of re-parsing it for the append below.
+		val := elem.Value()
+		if err := val.Validate(); err != nil {
+			return vals, err
+		}
+		vals = append(vals, val)
+	}
+	return vals, nil
+}
+
+// Validate validates the document and ensures the elements contained within are valid.
+func (d Document) Validate() error {
+	length, rem, ok := ReadLength(d)
+	if !ok {
+		return NewInsufficientBytesError(d, rem)
+	}
+	// Reject lengths that cannot be valid before indexing with them: a
+	// non-positive length would make the d[length-1] access below panic, and
+	// a negative length would also slip past the > len(d) comparison.
+	if length < 1 {
+		return ErrInvalidLength
+	}
+	if int(length) > len(d) {
+		return NewDocumentLengthError(int(length), len(d))
+	}
+	if d[length-1] != 0x00 {
+		return ErrMissingNull
+	}
+
+	length -= 4
+	var elem Element
+
+	for length > 1 {
+		elem, rem, ok = ReadElement(rem)
+		length -= int32(len(elem))
+		if !ok {
+			return NewInsufficientBytesError(d, rem)
+		}
+		err := elem.Validate()
+		if err != nil {
+			return err
+		}
+	}
+
+	if len(rem) < 1 || rem[0] != 0x00 {
+		return ErrMissingNull
+	}
+	return nil
+}
+
+// lengtherror delegates to NewDocumentLengthError so the error message is
+// defined in exactly one place.
+func (Document) lengtherror(length, rem int) error {
+	return NewDocumentLengthError(length, rem)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/element.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/element.go
new file mode 100644
index 0000000..ccf9075
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/element.go
@@ -0,0 +1,152 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+// MalformedElementError represents a class of errors that RawElement methods return.
+// It is a string type so the errors can be declared as constants.
+type MalformedElementError string
+
+// Error implements the error interface.
+func (mee MalformedElementError) Error() string { return string(mee) }
+
+// ErrElementMissingKey is returned when a RawElement is missing a key (no null
+// terminator follows the type byte).
+const ErrElementMissingKey MalformedElementError = "element is missing key"
+
+// ErrElementMissingType is returned when a RawElement is missing a type (the
+// element is empty).
+const ErrElementMissingType MalformedElementError = "element is missing type"
+
+// Element is a raw bytes representation of a BSON element: a type byte, a
+// null-terminated key, then the encoded value.
+type Element []byte
+
+// Key returns the key for this element. If the element is not valid, this method returns an empty
+// string. If knowing if the element is valid is important, use KeyErr.
+func (e Element) Key() string {
+	key, _ := e.KeyErr()
+	return key
+}
+
+// KeyBytes returns the key for this element as a []byte. If the element is not valid, this method
+// returns nil. If knowing if the element is valid is important, use KeyBytesErr. This method
+// will not include the null byte at the end of the key in the slice of bytes.
+func (e Element) KeyBytes() []byte {
+	key, _ := e.KeyBytesErr()
+	return key
+}
+
+// KeyErr returns the key for this element, returning an error if the element is not valid.
+// The returned string is a copy of the key bytes.
+func (e Element) KeyErr() (string, error) {
+	key, err := e.KeyBytesErr()
+	return string(key), err
+}
+
+// KeyBytesErr returns the key for this element as a []byte, returning an error if the element is
+// not valid. The returned slice aliases e and excludes the trailing null byte.
+func (e Element) KeyBytesErr() ([]byte, error) {
+	if len(e) == 0 {
+		return nil, ErrElementMissingType
+	}
+	end := bytes.IndexByte(e[1:], 0x00)
+	if end < 0 {
+		return nil, ErrElementMissingKey
+	}
+	// end is relative to e[1:], so the key occupies e[1 : end+1].
+	return e[1 : end+1], nil
+}
+
+// Validate ensures the element is a valid BSON element: it has a type byte, a
+// null-terminated key, and a value that validates for that type.
+func (e Element) Validate() error {
+	if len(e) < 1 {
+		return ErrElementMissingType
+	}
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return ErrElementMissingKey
+	}
+	// idx is relative to e[1:], so the value starts at offset idx+2 in e.
+	return Value{Type: bsontype.Type(e[0]), Data: e[idx+2:]}.Validate()
+}
+
+// CompareKey will compare this element's key to key. This method makes it easy to compare keys
+// without needing to allocate a string. The key may be null terminated. If a valid key cannot be
+// read this method will return false.
+func (e Element) CompareKey(key []byte) bool {
+	if len(e) < 2 {
+		return false
+	}
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return false
+	}
+	// Truncate the caller's key at its null terminator, if it has one, so
+	// both operands are compared without terminators.
+	if index := bytes.IndexByte(key, 0x00); index > -1 {
+		key = key[:index]
+	}
+	return bytes.Equal(e[1:idx+1], key)
+}
+
+// Value returns the value of this element. If the element is not valid, this method returns an
+// empty Value. If knowing if the element is valid is important, use ValueErr.
+func (e Element) Value() Value {
+	val, _ := e.ValueErr()
+	return val
+}
+
+// ValueErr returns the value for this element, returning an error if the element is not valid.
+func (e Element) ValueErr() (Value, error) {
+	if len(e) <= 0 {
+		return Value{}, ErrElementMissingType
+	}
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return Value{}, ErrElementMissingKey
+	}
+
+	// idx is relative to e[1:], so the value bytes begin at e[idx+2:].
+	val, rem, exists := ReadValue(e[idx+2:], bsontype.Type(e[0]))
+	if !exists {
+		return Value{}, NewInsufficientBytesError(e, rem)
+	}
+	return val, nil
+}
+
+// String implements the fmt.Stringer interface. The output will be in extended JSON format.
+// If the element is malformed, an empty string is returned.
+func (e Element) String() string {
+	if len(e) <= 0 {
+		return ""
+	}
+	t := bsontype.Type(e[0])
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return ""
+	}
+	key, valBytes := []byte(e[1:idx+1]), []byte(e[idx+2:])
+	val, _, valid := ReadValue(valBytes, t)
+	if !valid {
+		return ""
+	}
+	return fmt.Sprintf(`"%s": %v`, key, val)
+}
+
+// DebugString outputs a human readable version of RawElement. It will attempt to stringify the
+// valid components of the element even if the entire element is not valid,
+// marking the broken part as "<malformed>".
+func (e Element) DebugString() string {
+	if len(e) <= 0 {
+		return "<malformed>"
+	}
+	t := bsontype.Type(e[0])
+	idx := bytes.IndexByte(e[1:], 0x00)
+	if idx == -1 {
+		return fmt.Sprintf(`bson.Element{[%s]<malformed>}`, t)
+	}
+	key, valBytes := []byte(e[1:idx+1]), []byte(e[idx+2:])
+	val, _, valid := ReadValue(valBytes, t)
+	if !valid {
+		return fmt.Sprintf(`bson.Element{[%s]"%s": <malformed>}`, t, key)
+	}
+	return fmt.Sprintf(`bson.Element{[%s]"%s": %v}`, t, key, val)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/tables.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/tables.go
new file mode 100644
index 0000000..9fd903f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/tables.go
@@ -0,0 +1,223 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+//
+// Based on github.com/golang/go by The Go Authors
+// See THIRD-PARTY-NOTICES for original license terms.
+
+package bsoncore
+
+import "unicode/utf8"
+
+// safeSet holds the value true if the ASCII character with the given array
+// position can be represented inside a JSON string without any further
+// escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), and the backslash character ("\"). Kept in sync with
+// htmlSafeSet below, which additionally escapes HTML-significant characters.
+var safeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      true,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      true,
+	'=':      true,
+	'>':      true,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
+
+// htmlSafeSet holds the value true if the ASCII character with the given
+// array position can be safely represented inside a JSON string, embedded
+// inside of HTML <script> tags, without any additional escaping.
+//
+// All values are true except for the ASCII control characters (0-31), the
+// double quote ("), the backslash character ("\"), HTML opening and closing
+// tags ("<" and ">"), and the ampersand ("&"). Kept in sync with safeSet
+// above, which differs only in the HTML-significant entries.
+var htmlSafeSet = [utf8.RuneSelf]bool{
+	' ':      true,
+	'!':      true,
+	'"':      false,
+	'#':      true,
+	'$':      true,
+	'%':      true,
+	'&':      false,
+	'\'':     true,
+	'(':      true,
+	')':      true,
+	'*':      true,
+	'+':      true,
+	',':      true,
+	'-':      true,
+	'.':      true,
+	'/':      true,
+	'0':      true,
+	'1':      true,
+	'2':      true,
+	'3':      true,
+	'4':      true,
+	'5':      true,
+	'6':      true,
+	'7':      true,
+	'8':      true,
+	'9':      true,
+	':':      true,
+	';':      true,
+	'<':      false,
+	'=':      true,
+	'>':      false,
+	'?':      true,
+	'@':      true,
+	'A':      true,
+	'B':      true,
+	'C':      true,
+	'D':      true,
+	'E':      true,
+	'F':      true,
+	'G':      true,
+	'H':      true,
+	'I':      true,
+	'J':      true,
+	'K':      true,
+	'L':      true,
+	'M':      true,
+	'N':      true,
+	'O':      true,
+	'P':      true,
+	'Q':      true,
+	'R':      true,
+	'S':      true,
+	'T':      true,
+	'U':      true,
+	'V':      true,
+	'W':      true,
+	'X':      true,
+	'Y':      true,
+	'Z':      true,
+	'[':      true,
+	'\\':     false,
+	']':      true,
+	'^':      true,
+	'_':      true,
+	'`':      true,
+	'a':      true,
+	'b':      true,
+	'c':      true,
+	'd':      true,
+	'e':      true,
+	'f':      true,
+	'g':      true,
+	'h':      true,
+	'i':      true,
+	'j':      true,
+	'k':      true,
+	'l':      true,
+	'm':      true,
+	'n':      true,
+	'o':      true,
+	'p':      true,
+	'q':      true,
+	'r':      true,
+	's':      true,
+	't':      true,
+	'u':      true,
+	'v':      true,
+	'w':      true,
+	'x':      true,
+	'y':      true,
+	'z':      true,
+	'{':      true,
+	'|':      true,
+	'}':      true,
+	'~':      true,
+	'\u007f': true,
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/value.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/value.go
new file mode 100644
index 0000000..f0593d2
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore/value.go
@@ -0,0 +1,901 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsoncore
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// ElementTypeError indicates that a typed accessor (e.g. Double, StringValue)
+// was called on a Value holding a different BSON type.
+type ElementTypeError struct {
+	Method string        // the accessor that was called
+	Type   bsontype.Type // the actual BSON type of the value
+}
+
+// Error implements the error interface.
+func (ete ElementTypeError) Error() string {
+	return "Call of " + ete.Method + " on " + ete.Type.String() + " type"
+}
+
+// Value represents a BSON value with a type and raw bytes.
+type Value struct {
+	Type bsontype.Type // the BSON type of Data
+	Data []byte        // the raw encoded bytes of the value
+}
+
+// Validate ensures the value is a valid BSON value by attempting to parse
+// Data as the declared Type.
+func (v Value) Validate() error {
+	_, _, valid := readValue(v.Data, v.Type)
+	if !valid {
+		return NewInsufficientBytesError(v.Data, v.Data)
+	}
+	return nil
+}
+
+// IsNumber returns true if the type of v is a numeric BSON type
+// (double, int32, int64, or decimal128).
+func (v Value) IsNumber() bool {
+	return v.Type == bsontype.Double ||
+		v.Type == bsontype.Int32 ||
+		v.Type == bsontype.Int64 ||
+		v.Type == bsontype.Decimal128
+}
+
+// AsInt32 returns a BSON number as an int32. If the BSON type is not a numeric one, this method
+// will panic. Currently an unimplemented stub that always returns 0.
+//
+// TODO(skriptble): Add support for Decimal128.
+func (v Value) AsInt32() int32 { return 0 }
+
+// AsInt32OK functions the same as AsInt32 but returns a boolean instead of panicking. False
+// indicates an error. Currently an unimplemented stub that always returns (0, false).
+//
+// TODO(skriptble): Add support for Decimal128.
+func (v Value) AsInt32OK() (int32, bool) { return 0, false }
+
+// AsInt64 returns a BSON number as an int64. If the BSON type is not a numeric one, this method
+// will panic. Currently an unimplemented stub that always returns 0.
+//
+// TODO(skriptble): Add support for Decimal128.
+func (v Value) AsInt64() int64 { return 0 }
+
+// AsInt64OK functions the same as AsInt64 but returns a boolean instead of panicking. False
+// indicates an error. Currently an unimplemented stub that always returns (0, false).
+//
+// TODO(skriptble): Add support for Decimal128.
+func (v Value) AsInt64OK() (int64, bool) { return 0, false }
+
+// AsFloat64 returns a BSON number as an float64. If the BSON type is not a numeric one, this method
+// will panic. Currently an unimplemented stub that always returns 0.
+//
+// TODO(skriptble): Add support for Decimal128.
+func (v Value) AsFloat64() float64 { return 0 }
+
+// AsFloat64OK functions the same as AsFloat64 but returns a boolean instead of panicking. False
+// indicates an error. Currently an unimplemented stub that always returns (0, false).
+//
+// TODO(skriptble): Add support for Decimal128.
+func (v Value) AsFloat64OK() (float64, bool) { return 0, false }
+
+// Add will add this value to another. This is currently only implemented for strings and numbers.
+// If either value is a string, the other type is coerced into a string and added to the other.
+//
+// This method will alter v and will attempt to reuse the []byte of v. If the []byte is too small,
+// it will be expanded.
+//
+// NOTE(review): the body is a no-op stub that always returns nil; the
+// documented addition behavior is not implemented here.
+func (v *Value) Add(v2 Value) error { return nil }
+
+// Equal compares v to v2 and returns true if they are equal: same BSON type
+// and byte-identical raw data.
+func (v Value) Equal(v2 Value) bool {
+	if v.Type != v2.Type {
+		return false
+	}
+
+	return bytes.Equal(v.Data, v2.Data)
+}
+
+// String implements the fmt.Stringer interface. This method will return values in extended JSON
+// format. If the value is not valid, this returns an empty string.
+func (v Value) String() string {
+	switch v.Type {
+	case bsontype.Double:
+		f64, ok := v.DoubleOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$numberDouble":"%s"}`, formatDouble(f64))
+	case bsontype.String:
+		str, ok := v.StringValueOK()
+		if !ok {
+			return ""
+		}
+		return escapeString(str)
+	case bsontype.EmbeddedDocument:
+		doc, ok := v.DocumentOK()
+		if !ok {
+			return ""
+		}
+		return doc.String()
+	case bsontype.Array:
+		arr, ok := v.ArrayOK()
+		if !ok {
+			return ""
+		}
+		return docAsArray(arr, false)
+	case bsontype.Binary:
+		subtype, data, ok := v.BinaryOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$binary":{"base64":"%s","subType":"%02x"}}`, base64.StdEncoding.EncodeToString(data), subtype)
+	case bsontype.Undefined:
+		return `{"$undefined":true}`
+	case bsontype.ObjectID:
+		oid, ok := v.ObjectIDOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$oid":%s}`, oid.Hex())
+	case bsontype.Boolean:
+		b, ok := v.BooleanOK()
+		if !ok {
+			return ""
+		}
+		return strconv.FormatBool(b)
+	case bsontype.DateTime:
+		dt, ok := v.DateTimeOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$date":{"$numberLong":"%d"}}`, dt)
+	case bsontype.Null:
+		return "null"
+	case bsontype.Regex:
+		pattern, options, ok := v.RegexOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(
+			`{"$regularExpression":{"pattern":%s,"options":"%s"}}`,
+			escapeString(pattern), sortStringAlphebeticAscending(options),
+		)
+	case bsontype.DBPointer:
+		ns, pointer, ok := v.DBPointerOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$dbPointer":{"$ref":%s,"$id":{"$oid":"%s"}}}`, escapeString(ns), pointer.Hex())
+	case bsontype.JavaScript:
+		js, ok := v.JavaScriptOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$code":%s}`, escapeString(js))
+	case bsontype.Symbol:
+		symbol, ok := v.SymbolOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$symbol":%s}`, escapeString(symbol))
+	case bsontype.CodeWithScope:
+		code, scope, ok := v.CodeWithScopeOK()
+		if !ok {
+			return ""
+		}
+		// NOTE: unlike JavaScript above, the code string here is not passed
+		// through escapeString.
+		return fmt.Sprintf(`{"$code":%s,"$scope":%s}`, code, scope)
+	case bsontype.Int32:
+		i32, ok := v.Int32OK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$numberInt":"%d"}`, i32)
+	case bsontype.Timestamp:
+		t, i, ok := v.TimestampOK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$timestamp":{"t":"%s","i":"%s"}}`, strconv.FormatUint(uint64(t), 10), strconv.FormatUint(uint64(i), 10))
+	case bsontype.Int64:
+		i64, ok := v.Int64OK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$numberLong":"%d"}`, i64)
+	case bsontype.Decimal128:
+		d128, ok := v.Decimal128OK()
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$numberDecimal":"%s"}`, d128.String())
+	case bsontype.MinKey:
+		return `{"$minKey":1}`
+	case bsontype.MaxKey:
+		return `{"$maxKey":1}`
+	default:
+		return ""
+	}
+}
+
+// DebugString outputs a human readable version of Document. It will attempt to stringify the
+// valid components of the document even if the entire document is not valid,
+// rendering broken components as "<malformed>".
+func (v Value) DebugString() string {
+	switch v.Type {
+	case bsontype.String:
+		str, ok := v.StringValueOK()
+		if !ok {
+			return "<malformed>"
+		}
+		return escapeString(str)
+	case bsontype.EmbeddedDocument:
+		doc, ok := v.DocumentOK()
+		if !ok {
+			return "<malformed>"
+		}
+		return doc.DebugString()
+	case bsontype.Array:
+		arr, ok := v.ArrayOK()
+		if !ok {
+			return "<malformed>"
+		}
+		return docAsArray(arr, true)
+	case bsontype.CodeWithScope:
+		code, scope, ok := v.CodeWithScopeOK()
+		// NOTE: this branch returns "" rather than "<malformed>" on failure.
+		if !ok {
+			return ""
+		}
+		return fmt.Sprintf(`{"$code":%s,"$scope":%s}`, code, scope.DebugString())
+	default:
+		// All other types delegate to String, mapping its "" failure result
+		// to the debug marker.
+		str := v.String()
+		if str == "" {
+			return "<malformed>"
+		}
+		return str
+	}
+}
+
+// Double returns the float64 value for this element.
+// It panics if e's BSON type is not bsontype.Double.
+func (v Value) Double() float64 {
+	if v.Type != bsontype.Double {
+		panic(ElementTypeError{"bsoncore.Value.Double", v.Type})
+	}
+	f64, _, ok := ReadDouble(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return f64
+}
+
+// DoubleOK is the same as Double, but returns a boolean instead of panicking.
+// False indicates a wrong type or malformed data.
+func (v Value) DoubleOK() (float64, bool) {
+	if v.Type != bsontype.Double {
+		return 0, false
+	}
+	f64, _, ok := ReadDouble(v.Data)
+	if !ok {
+		return 0, false
+	}
+	return f64, true
+}
+
+// StringValue returns the string value for this element.
+// It panics if e's BSON type is not bsontype.String.
+//
+// NOTE: This method is called StringValue to avoid a collision with the String method which
+// implements the fmt.Stringer interface.
+func (v Value) StringValue() string {
+	if v.Type != bsontype.String {
+		panic(ElementTypeError{"bsoncore.Value.StringValue", v.Type})
+	}
+	str, _, ok := ReadString(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return str
+}
+
+// StringValueOK is the same as StringValue, but returns a boolean instead of
+// panicking. False indicates a wrong type or malformed data.
+func (v Value) StringValueOK() (string, bool) {
+	if v.Type != bsontype.String {
+		return "", false
+	}
+	str, _, ok := ReadString(v.Data)
+	if !ok {
+		return "", false
+	}
+	return str, true
+}
+
+// Document returns the BSON document the Value represents as a Document. It panics if the
+// value is a BSON type other than document.
+func (v Value) Document() Document {
+	if v.Type != bsontype.EmbeddedDocument {
+		panic(ElementTypeError{"bsoncore.Value.Document", v.Type})
+	}
+	doc, _, ok := ReadDocument(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return doc
+}
+
+// DocumentOK is the same as Document, except it returns a boolean
+// instead of panicking. False indicates a wrong type or malformed data.
+func (v Value) DocumentOK() (Document, bool) {
+	if v.Type != bsontype.EmbeddedDocument {
+		return nil, false
+	}
+	doc, _, ok := ReadDocument(v.Data)
+	if !ok {
+		return nil, false
+	}
+	return doc, true
+}
+
+// Array returns the BSON array the Value represents as an Array. It panics if the
+// value is a BSON type other than array. (BSON arrays are encoded as
+// documents with numeric-string keys, hence the Document return type.)
+func (v Value) Array() Document {
+	if v.Type != bsontype.Array {
+		panic(ElementTypeError{"bsoncore.Value.Array", v.Type})
+	}
+	arr, _, ok := ReadArray(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return arr
+}
+
+// ArrayOK is the same as Array, except it returns a boolean instead
+// of panicking. False indicates a wrong type or malformed data.
+func (v Value) ArrayOK() (Document, bool) {
+	if v.Type != bsontype.Array {
+		return nil, false
+	}
+	arr, _, ok := ReadArray(v.Data)
+	if !ok {
+		return nil, false
+	}
+	return arr, true
+}
+
+// Binary returns the BSON binary value the Value represents. It panics if the value is a BSON type
+// other than binary.
+func (v Value) Binary() (subtype byte, data []byte) {
+	if v.Type != bsontype.Binary {
+		panic(ElementTypeError{"bsoncore.Value.Binary", v.Type})
+	}
+	subtype, data, _, ok := ReadBinary(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return subtype, data
+}
+
+// BinaryOK is the same as Binary, except it returns a boolean instead of
+// panicking. False indicates a wrong type or malformed data.
+func (v Value) BinaryOK() (subtype byte, data []byte, ok bool) {
+	if v.Type != bsontype.Binary {
+		return 0x00, nil, false
+	}
+	subtype, data, _, ok = ReadBinary(v.Data)
+	if !ok {
+		return 0x00, nil, false
+	}
+	return subtype, data, true
+}
+
+// ObjectID returns the BSON objectid value the Value represents. It panics if the value is a BSON
+// type other than objectid.
+func (v Value) ObjectID() primitive.ObjectID {
+	if v.Type != bsontype.ObjectID {
+		panic(ElementTypeError{"bsoncore.Value.ObjectID", v.Type})
+	}
+	oid, _, ok := ReadObjectID(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return oid
+}
+
+// ObjectIDOK is the same as ObjectID, except it returns a boolean instead of
+// panicking.
+func (v Value) ObjectIDOK() (primitive.ObjectID, bool) {
+	if v.Type != bsontype.ObjectID {
+		return primitive.ObjectID{}, false
+	}
+	oid, _, ok := ReadObjectID(v.Data)
+	if !ok {
+		return primitive.ObjectID{}, false
+	}
+	return oid, true
+}
+
+// Boolean returns the boolean value the Value represents. It panics if the
+// value is a BSON type other than boolean.
+func (v Value) Boolean() bool {
+	if v.Type != bsontype.Boolean {
+		panic(ElementTypeError{"bsoncore.Value.Boolean", v.Type})
+	}
+	b, _, ok := ReadBoolean(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return b
+}
+
+// BooleanOK is the same as Boolean, except it returns a boolean instead of
+// panicking.
+func (v Value) BooleanOK() (bool, bool) {
+	if v.Type != bsontype.Boolean {
+		return false, false
+	}
+	b, _, ok := ReadBoolean(v.Data)
+	if !ok {
+		return false, false
+	}
+	return b, true
+}
+
+// DateTime returns the BSON datetime value the Value represents as a
+// unix timestamp. It panics if the value is a BSON type other than datetime.
+func (v Value) DateTime() int64 {
+	if v.Type != bsontype.DateTime {
+		panic(ElementTypeError{"bsoncore.Value.DateTime", v.Type})
+	}
+	dt, _, ok := ReadDateTime(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return dt
+}
+
+// DateTimeOK is the same as DateTime, except it returns a boolean instead of
+// panicking.
+func (v Value) DateTimeOK() (int64, bool) {
+	if v.Type != bsontype.DateTime {
+		return 0, false
+	}
+	dt, _, ok := ReadDateTime(v.Data)
+	if !ok {
+		return 0, false
+	}
+	return dt, true
+}
+
+// Time returns the BSON datetime value the Value represents. It panics if the value is a BSON
+// type other than datetime.
+func (v Value) Time() time.Time {
+	if v.Type != bsontype.DateTime {
+		panic(ElementTypeError{"bsoncore.Value.Time", v.Type})
+	}
+	dt, _, ok := ReadDateTime(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return time.Unix(int64(dt)/1000, int64(dt)%1000*1000000)
+}
+
+// TimeOK is the same as Time, except it returns a boolean instead of
+// panicking.
+func (v Value) TimeOK() (time.Time, bool) {
+	if v.Type != bsontype.DateTime {
+		return time.Time{}, false
+	}
+	dt, _, ok := ReadDateTime(v.Data)
+	if !ok {
+		return time.Time{}, false
+	}
+	return time.Unix(int64(dt)/1000, int64(dt)%1000*1000000), true
+}
+
+// Regex returns the BSON regex value the Value represents. It panics if the value is a BSON
+// type other than regex.
+func (v Value) Regex() (pattern, options string) {
+	if v.Type != bsontype.Regex {
+		panic(ElementTypeError{"bsoncore.Value.Regex", v.Type})
+	}
+	pattern, options, _, ok := ReadRegex(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return pattern, options
+}
+
+// RegexOK is the same as Regex, except it returns a boolean instead of
+// panicking.
+func (v Value) RegexOK() (pattern, options string, ok bool) {
+	if v.Type != bsontype.Regex {
+		return "", "", false
+	}
+	pattern, options, _, ok = ReadRegex(v.Data)
+	if !ok {
+		return "", "", false
+	}
+	return pattern, options, true
+}
+
+// DBPointer returns the BSON dbpointer value the Value represents. It panics if the value is a BSON
+// type other than DBPointer.
+func (v Value) DBPointer() (string, primitive.ObjectID) {
+	if v.Type != bsontype.DBPointer {
+		panic(ElementTypeError{"bsoncore.Value.DBPointer", v.Type})
+	}
+	ns, pointer, _, ok := ReadDBPointer(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return ns, pointer
+}
+
// DBPointerOK is the same as DBPointer, except that it returns a boolean
// instead of panicking.
func (v Value) DBPointerOK() (string, primitive.ObjectID, bool) {
	if v.Type != bsontype.DBPointer {
		return "", primitive.ObjectID{}, false
	}
	ns, pointer, _, ok := ReadDBPointer(v.Data)
	if !ok {
		return "", primitive.ObjectID{}, false
	}
	return ns, pointer, true
}
+
+// JavaScript returns the BSON JavaScript code value the Value represents. It panics if the value is
+// a BSON type other than JavaScript code.
+func (v Value) JavaScript() string {
+	if v.Type != bsontype.JavaScript {
+		panic(ElementTypeError{"bsoncore.Value.JavaScript", v.Type})
+	}
+	js, _, ok := ReadJavaScript(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return js
+}
+
// JavaScriptOK is the same as JavaScript, except that it returns a boolean
// instead of panicking.
func (v Value) JavaScriptOK() (string, bool) {
	if v.Type != bsontype.JavaScript {
		return "", false
	}
	js, _, ok := ReadJavaScript(v.Data)
	if !ok {
		return "", false
	}
	return js, true
}
+
+// Symbol returns the BSON symbol value the Value represents. It panics if the value is a BSON
+// type other than symbol.
+func (v Value) Symbol() string {
+	if v.Type != bsontype.Symbol {
+		panic(ElementTypeError{"bsoncore.Value.Symbol", v.Type})
+	}
+	symbol, _, ok := ReadSymbol(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return symbol
+}
+
// SymbolOK is the same as Symbol, except that it returns a boolean
// instead of panicking.
func (v Value) SymbolOK() (string, bool) {
	if v.Type != bsontype.Symbol {
		return "", false
	}
	symbol, _, ok := ReadSymbol(v.Data)
	if !ok {
		return "", false
	}
	return symbol, true
}
+
+// CodeWithScope returns the BSON JavaScript code with scope the Value represents.
+// It panics if the value is a BSON type other than JavaScript code with scope.
+func (v Value) CodeWithScope() (string, Document) {
+	if v.Type != bsontype.CodeWithScope {
+		panic(ElementTypeError{"bsoncore.Value.CodeWithScope", v.Type})
+	}
+	code, scope, _, ok := ReadCodeWithScope(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return code, scope
+}
+
+// CodeWithScopeOK is the same as CodeWithScope, except that it returns a boolean instead of
+// panicking.
+func (v Value) CodeWithScopeOK() (string, Document, bool) {
+	if v.Type != bsontype.CodeWithScope {
+		return "", nil, false
+	}
+	code, scope, _, ok := ReadCodeWithScope(v.Data)
+	if !ok {
+		return "", nil, false
+	}
+	return code, scope, true
+}
+
+// Int32 returns the int32 the Value represents. It panics if the value is a BSON type other than
+// int32.
+func (v Value) Int32() int32 {
+	if v.Type != bsontype.Int32 {
+		panic(ElementTypeError{"bsoncore.Value.Int32", v.Type})
+	}
+	i32, _, ok := ReadInt32(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return i32
+}
+
+// Int32OK is the same as Int32, except that it returns a boolean instead of
+// panicking.
+func (v Value) Int32OK() (int32, bool) {
+	if v.Type != bsontype.Int32 {
+		return 0, false
+	}
+	i32, _, ok := ReadInt32(v.Data)
+	if !ok {
+		return 0, false
+	}
+	return i32, true
+}
+
+// Timestamp returns the BSON timestamp value the Value represents. It panics if the value is a
+// BSON type other than timestamp.
+func (v Value) Timestamp() (t, i uint32) {
+	if v.Type != bsontype.Timestamp {
+		panic(ElementTypeError{"bsoncore.Value.Timestamp", v.Type})
+	}
+	t, i, _, ok := ReadTimestamp(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return t, i
+}
+
+// TimestampOK is the same as Timestamp, except that it returns a boolean
+// instead of panicking.
+func (v Value) TimestampOK() (t, i uint32, ok bool) {
+	if v.Type != bsontype.Timestamp {
+		return 0, 0, false
+	}
+	t, i, _, ok = ReadTimestamp(v.Data)
+	if !ok {
+		return 0, 0, false
+	}
+	return t, i, true
+}
+
+// Int64 returns the int64 the Value represents. It panics if the value is a BSON type other than
+// int64.
+func (v Value) Int64() int64 {
+	if v.Type != bsontype.Int64 {
+		panic(ElementTypeError{"bsoncore.Value.Int64", v.Type})
+	}
+	i64, _, ok := ReadInt64(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return i64
+}
+
+// Int64OK is the same as Int64, except that it returns a boolean instead of
+// panicking.
+func (v Value) Int64OK() (int64, bool) {
+	if v.Type != bsontype.Int64 {
+		return 0, false
+	}
+	i64, _, ok := ReadInt64(v.Data)
+	if !ok {
+		return 0, false
+	}
+	return i64, true
+}
+
+// Decimal128 returns the decimal the Value represents. It panics if the value is a BSON type other than
+// decimal.
+func (v Value) Decimal128() primitive.Decimal128 {
+	if v.Type != bsontype.Decimal128 {
+		panic(ElementTypeError{"bsoncore.Value.Decimal128", v.Type})
+	}
+	d128, _, ok := ReadDecimal128(v.Data)
+	if !ok {
+		panic(NewInsufficientBytesError(v.Data, v.Data))
+	}
+	return d128
+}
+
+// Decimal128OK is the same as Decimal128, except that it returns a boolean
+// instead of panicking.
+func (v Value) Decimal128OK() (primitive.Decimal128, bool) {
+	if v.Type != bsontype.Decimal128 {
+		return primitive.Decimal128{}, false
+	}
+	d128, _, ok := ReadDecimal128(v.Data)
+	if !ok {
+		return primitive.Decimal128{}, false
+	}
+	return d128, true
+}
+
+var hexChars = "0123456789abcdef"
+
// escapeString escapes s for inclusion in a JSON document and returns the
// result wrapped in double quotes. Bytes outside htmlSafeSet receive the
// standard JSON escapes (\", \\, \n, \r, \t, \b, \f) or a \u00XX escape;
// invalid UTF-8 sequences are replaced with \ufffd, and U+2028/U+2029 are
// always escaped for JSONP safety.
func escapeString(s string) string {
	// escapeHTML is always true here, so the (!escapeHTML && safeSet[b])
	// branch below is currently dead; it mirrors encoding/json's structure.
	escapeHTML := true
	var buf bytes.Buffer
	buf.WriteByte('"')
	// start marks the beginning of the current run of bytes that need no
	// escaping; the run is flushed whenever an escape must be emitted.
	start := 0
	for i := 0; i < len(s); {
		if b := s[i]; b < utf8.RuneSelf {
			if htmlSafeSet[b] || (!escapeHTML && safeSet[b]) {
				i++
				continue
			}
			if start < i {
				buf.WriteString(s[start:i])
			}
			switch b {
			case '\\', '"':
				buf.WriteByte('\\')
				buf.WriteByte(b)
			case '\n':
				buf.WriteByte('\\')
				buf.WriteByte('n')
			case '\r':
				buf.WriteByte('\\')
				buf.WriteByte('r')
			case '\t':
				buf.WriteByte('\\')
				buf.WriteByte('t')
			case '\b':
				buf.WriteByte('\\')
				buf.WriteByte('b')
			case '\f':
				buf.WriteByte('\\')
				buf.WriteByte('f')
			default:
				// This encodes bytes < 0x20 except for \t, \n and \r.
				// If escapeHTML is set, it also escapes <, >, and &
				// because they can lead to security holes when
				// user-controlled strings are rendered into JSON
				// and served to some browsers.
				buf.WriteString(`\u00`)
				buf.WriteByte(hexChars[b>>4])
				buf.WriteByte(hexChars[b&0xF])
			}
			i++
			start = i
			continue
		}
		c, size := utf8.DecodeRuneInString(s[i:])
		if c == utf8.RuneError && size == 1 {
			// Invalid UTF-8 byte: emit the Unicode replacement character.
			if start < i {
				buf.WriteString(s[start:i])
			}
			buf.WriteString(`\ufffd`)
			i += size
			start = i
			continue
		}
		// U+2028 is LINE SEPARATOR.
		// U+2029 is PARAGRAPH SEPARATOR.
		// They are both technically valid characters in JSON strings,
		// but don't work in JSONP, which has to be evaluated as JavaScript,
		// and can lead to security holes there. It is valid JSON to
		// escape them, so we do so unconditionally.
		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
		if c == '\u2028' || c == '\u2029' {
			if start < i {
				buf.WriteString(s[start:i])
			}
			buf.WriteString(`\u202`)
			buf.WriteByte(hexChars[c&0xF])
			i += size
			start = i
			continue
		}
		i += size
	}
	// Flush any trailing run of unescaped bytes and close the quote.
	if start < len(s) {
		buf.WriteString(s[start:])
	}
	buf.WriteByte('"')
	return buf.String()
}
+
+func formatDouble(f float64) string {
+	var s string
+	if math.IsInf(f, 1) {
+		s = "Infinity"
+	} else if math.IsInf(f, -1) {
+		s = "-Infinity"
+	} else if math.IsNaN(f) {
+		s = "NaN"
+	} else {
+		// Print exactly one decimalType place for integers; otherwise, print as many are necessary to
+		// perfectly represent it.
+		s = strconv.FormatFloat(f, 'G', -1, 64)
+		if !strings.ContainsRune(s, '.') {
+			s += ".0"
+		}
+	}
+
+	return s
+}
+
// sortableString adapts a slice of runes to sort.Interface so the runes can
// be ordered by code point.
type sortableString []rune

// Len returns the number of runes in the slice.
func (ss sortableString) Len() int { return len(ss) }

// Less reports whether the rune at index i sorts before the rune at index j.
func (ss sortableString) Less(i, j int) bool { return ss[i] < ss[j] }

// Swap exchanges the runes at indexes i and j.
func (ss sortableString) Swap(i, j int) { ss[i], ss[j] = ss[j], ss[i] }
+
// sortStringAlphebeticAscending returns s with its runes sorted in ascending
// code-point order.
func sortStringAlphebeticAscending(s string) string {
	runes := []rune(s)
	sort.Slice(runes, func(i, j int) bool { return runes[i] < runes[j] })
	return string(runes)
}
+
// docAsArray renders the BSON document d as a JSON-array-style string, e.g.
// "[v1,v2]". It returns the empty string if d is too short to be a valid
// document or if an element fails to parse. When debug is true, each element
// is rendered with its DebugString form instead of its default String form.
func docAsArray(d Document, debug bool) string {
	// A BSON document is at least 5 bytes: a 4-byte length plus the
	// terminating null byte.
	if len(d) < 5 {
		return ""
	}
	var buf bytes.Buffer
	buf.WriteByte('[')

	length, rem, _ := ReadLength(d) // We know we have enough bytes to read the length

	// length now counts the bytes remaining after the 4-byte header.
	length -= 4

	var elem Element
	var ok bool
	first := true
	// The final remaining byte is the document terminator, hence > 1.
	for length > 1 {
		if !first {
			buf.WriteByte(',')
		}
		elem, rem, ok = ReadElement(rem)
		// NOTE(review): length is decremented before ok is checked; on a
		// parse failure the function returns "" immediately, so the stale
		// value is never observed.
		length -= int32(len(elem))
		if !ok {
			return ""
		}
		if debug {
			fmt.Fprintf(&buf, "%s ", elem.Value().DebugString())
		} else {
			fmt.Fprintf(&buf, "%s", elem.Value())
		}
		first = false
	}
	buf.WriteByte(']')

	return buf.String()
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/constructor.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/constructor.go
new file mode 100644
index 0000000..28374ba
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/constructor.go
@@ -0,0 +1,166 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"encoding/binary"
+	"math"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+)
+
+// IDoc is the interface implemented by Doc and MDoc. It allows either of these types to be provided
+// to the Document function to create a Value.
+type IDoc interface {
+	idoc()
+}
+
+// Double constructs a BSON double Value.
+func Double(f64 float64) Val {
+	v := Val{t: bsontype.Double}
+	binary.LittleEndian.PutUint64(v.bootstrap[0:8], math.Float64bits(f64))
+	return v
+}
+
+// String constructs a BSON string Value.
+func String(str string) Val { return Val{t: bsontype.String}.writestring(str) }
+
+// Document constructs a Value from the given IDoc. If nil is provided, a BSON Null value will be
+// returned.
+func Document(doc IDoc) Val {
+	var v Val
+	switch tt := doc.(type) {
+	case Doc:
+		if tt == nil {
+			v.t = bsontype.Null
+			break
+		}
+		v.t = bsontype.EmbeddedDocument
+		v.primitive = tt
+	case MDoc:
+		if tt == nil {
+			v.t = bsontype.Null
+			break
+		}
+		v.t = bsontype.EmbeddedDocument
+		v.primitive = tt
+	default:
+		v.t = bsontype.Null
+	}
+	return v
+}
+
+// Array constructs a Value from arr. If arr is nil, a BSON Null value is returned.
+func Array(arr Arr) Val {
+	if arr == nil {
+		return Val{t: bsontype.Null}
+	}
+	return Val{t: bsontype.Array, primitive: arr}
+}
+
+// Binary constructs a BSON binary Value.
+func Binary(subtype byte, data []byte) Val {
+	return Val{t: bsontype.Binary, primitive: primitive.Binary{Subtype: subtype, Data: data}}
+}
+
// Undefined constructs a BSON undefined Value.
func Undefined() Val { return Val{t: bsontype.Undefined} }
+
+// ObjectID constructs a BSON objectid Value.
+func ObjectID(oid primitive.ObjectID) Val {
+	v := Val{t: bsontype.ObjectID}
+	copy(v.bootstrap[0:12], oid[:])
+	return v
+}
+
+// Boolean constructs a BSON boolean Value.
+func Boolean(b bool) Val {
+	v := Val{t: bsontype.Boolean}
+	if b {
+		v.bootstrap[0] = 0x01
+	}
+	return v
+}
+
+// DateTime constructs a BSON datetime Value.
+func DateTime(dt int64) Val { return Val{t: bsontype.DateTime}.writei64(dt) }
+
+// Time constructs a BSON datetime Value.
+func Time(t time.Time) Val {
+	return Val{t: bsontype.DateTime}.writei64(t.Unix()*1e3 + int64(t.Nanosecond()/1e6))
+}
+
// Null constructs a BSON null Value.
func Null() Val { return Val{t: bsontype.Null} }
+
+// Regex constructs a BSON regex Value.
+func Regex(pattern, options string) Val {
+	regex := primitive.Regex{Pattern: pattern, Options: options}
+	return Val{t: bsontype.Regex, primitive: regex}
+}
+
+// DBPointer constructs a BSON dbpointer Value.
+func DBPointer(ns string, ptr primitive.ObjectID) Val {
+	dbptr := primitive.DBPointer{DB: ns, Pointer: ptr}
+	return Val{t: bsontype.DBPointer, primitive: dbptr}
+}
+
+// JavaScript constructs a BSON javascript Value.
+func JavaScript(js string) Val {
+	return Val{t: bsontype.JavaScript}.writestring(js)
+}
+
+// Symbol constructs a BSON symbol Value.
+func Symbol(symbol string) Val {
+	return Val{t: bsontype.Symbol}.writestring(symbol)
+}
+
+// CodeWithScope constructs a BSON code with scope Value.
+func CodeWithScope(code string, scope IDoc) Val {
+	cws := primitive.CodeWithScope{Code: primitive.JavaScript(code), Scope: scope}
+	return Val{t: bsontype.CodeWithScope, primitive: cws}
+}
+
+// Int32 constructs a BSON int32 Value.
+func Int32(i32 int32) Val {
+	v := Val{t: bsontype.Int32}
+	v.bootstrap[0] = byte(i32)
+	v.bootstrap[1] = byte(i32 >> 8)
+	v.bootstrap[2] = byte(i32 >> 16)
+	v.bootstrap[3] = byte(i32 >> 24)
+	return v
+}
+
+// Timestamp constructs a BSON timestamp Value.
+func Timestamp(t, i uint32) Val {
+	v := Val{t: bsontype.Timestamp}
+	v.bootstrap[0] = byte(i)
+	v.bootstrap[1] = byte(i >> 8)
+	v.bootstrap[2] = byte(i >> 16)
+	v.bootstrap[3] = byte(i >> 24)
+	v.bootstrap[4] = byte(t)
+	v.bootstrap[5] = byte(t >> 8)
+	v.bootstrap[6] = byte(t >> 16)
+	v.bootstrap[7] = byte(t >> 24)
+	return v
+}
+
+// Int64 constructs a BSON int64 Value.
+func Int64(i64 int64) Val { return Val{t: bsontype.Int64}.writei64(i64) }
+
+// Decimal128 constructs a BSON decimal128 Value.
+func Decimal128(d128 primitive.Decimal128) Val {
+	return Val{t: bsontype.Decimal128, primitive: d128}
+}
+
+// MinKey constructs a BSON minkey Value.
+func MinKey() Val { return Val{t: bsontype.MinKey} }
+
+// MaxKey constructs a BSON maxkey Value.
+func MaxKey() Val { return Val{t: bsontype.MaxKey} }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/document.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/document.go
new file mode 100644
index 0000000..f2209a0
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/document.go
@@ -0,0 +1,305 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// ErrNilDocument indicates that an operation was attempted on a nil *bson.Document.
+var ErrNilDocument = errors.New("document is nil")
+
+// KeyNotFound is an error type returned from the Lookup methods on Document. This type contains
+// information about which key was not found and if it was actually not found or if a component of
+// the key except the last was not a document nor array.
+type KeyNotFound struct {
+	Key   []string      // The keys that were searched for.
+	Depth uint          // Which key either was not found or was an incorrect type.
+	Type  bsontype.Type // The type of the key that was found but was an incorrect type.
+}
+
+func (knf KeyNotFound) Error() string {
+	depth := knf.Depth
+	if depth >= uint(len(knf.Key)) {
+		depth = uint(len(knf.Key)) - 1
+	}
+
+	if len(knf.Key) == 0 {
+		return "no keys were provided for lookup"
+	}
+
+	if knf.Type != bsontype.Type(0) {
+		return fmt.Sprintf(`key "%s" was found but was not valid to traverse BSON type %s`, knf.Key[depth], knf.Type)
+	}
+
+	return fmt.Sprintf(`key "%s" was not found`, knf.Key[depth])
+}
+
+// Doc is a type safe, concise BSON document representation.
+type Doc []Elem
+
+// ReadDoc will create a Document using the provided slice of bytes. If the
+// slice of bytes is not a valid BSON document, this method will return an error.
+func ReadDoc(b []byte) (Doc, error) {
+	doc := make(Doc, 0)
+	err := doc.UnmarshalBSON(b)
+	if err != nil {
+		return nil, err
+	}
+	return doc, nil
+}
+
+// Copy makes a shallow copy of this document.
+func (d Doc) Copy() Doc {
+	d2 := make(Doc, len(d))
+	copy(d2, d)
+	return d2
+}
+
+// Append adds an element to the end of the document, creating it from the key and value provided.
+func (d Doc) Append(key string, val Val) Doc {
+	return append(d, Elem{Key: key, Value: val})
+}
+
+// Prepend adds an element to the beginning of the document, creating it from the key and value provided.
+func (d Doc) Prepend(key string, val Val) Doc {
+	// TODO: should we just modify d itself instead of doing an alloc here?
+	return append(Doc{{Key: key, Value: val}}, d...)
+}
+
+// Set replaces an element of a document. If an element with a matching key is
+// found, the element will be replaced with the one provided. If the document
+// does not have an element with that key, the element is appended to the
+// document instead.
+func (d Doc) Set(key string, val Val) Doc {
+	idx := d.IndexOf(key)
+	if idx == -1 {
+		return append(d, Elem{Key: key, Value: val})
+	}
+	d[idx] = Elem{Key: key, Value: val}
+	return d
+}
+
+// IndexOf returns the index of the first element with a key of key, or -1 if no element with a key
+// was found.
+func (d Doc) IndexOf(key string) int {
+	for i, e := range d {
+		if e.Key == key {
+			return i
+		}
+	}
+	return -1
+}
+
+// Delete removes the element with key if it exists and returns the updated Doc.
+func (d Doc) Delete(key string) Doc {
+	idx := d.IndexOf(key)
+	if idx == -1 {
+		return d
+	}
+	return append(d[:idx], d[idx+1:]...)
+}
+
// Lookup searches the document and potentially subdocuments or arrays for the
// provided key. Each key provided to this method represents a layer of depth.
//
// This method will return an empty Value if the key does not exist. To know if the key actually
// exists, use LookupErr.
func (d Doc) Lookup(key ...string) Val {
	val, _ := d.LookupErr(key...)
	return val
}
+
+// LookupErr searches the document and potentially subdocuments or arrays for the
+// provided key. Each key provided to this method represents a layer of depth.
+func (d Doc) LookupErr(key ...string) (Val, error) {
+	elem, err := d.LookupElementErr(key...)
+	return elem.Value, err
+}
+
// LookupElement searches the document and potentially subdocuments or arrays for the
// provided key. Each key provided to this method represents a layer of depth.
//
// This method will return an empty Element if the key does not exist. To know if the key actually
// exists, use LookupElementErr.
func (d Doc) LookupElement(key ...string) Elem {
	elem, _ := d.LookupElementErr(key...)
	return elem
}
+
// LookupElementErr searches the document and potentially subdocuments for the
// provided key. Each key provided to this method represents a layer of depth.
// It returns a KeyNotFound error when a key in the path is missing or when a
// non-terminal key's value is not an embedded document.
func (d Doc) LookupElementErr(key ...string) (Elem, error) {
	// KeyNotFound operates by being created where the error happens and then the depth is
	// incremented by 1 as each function unwinds. Whenever this function returns, it also assigns
	// the Key slice to the key slice it has. This ensures that the proper depth is identified and
	// the proper keys.
	if len(key) == 0 {
		return Elem{}, KeyNotFound{Key: key}
	}

	var elem Elem
	var err error
	idx := d.IndexOf(key[0])
	if idx == -1 {
		return Elem{}, KeyNotFound{Key: key}
	}

	elem = d[idx]
	// A single remaining key means this element is the lookup target.
	if len(key) == 1 {
		return elem, nil
	}

	// More keys remain, so the current value must be traversable.
	switch elem.Value.Type() {
	case bsontype.EmbeddedDocument:
		// Recurse into whichever concrete document representation is stored.
		switch tt := elem.Value.primitive.(type) {
		case Doc:
			elem, err = tt.LookupElementErr(key[1:]...)
		case MDoc:
			elem, err = tt.LookupElementErr(key[1:]...)
		}
	default:
		return Elem{}, KeyNotFound{Type: elem.Value.Type()}
	}
	// Rewrite any KeyNotFound from the recursive call so its Depth and Key
	// describe the search from this level.
	switch tt := err.(type) {
	case KeyNotFound:
		tt.Depth++
		tt.Key = key
		return Elem{}, tt
	case nil:
		return elem, nil
	default:
		return Elem{}, err // We can't actually hit this.
	}
}
+
+// MarshalBSONValue implements the bsoncodec.ValueMarshaler interface.
+//
+// This method will never return an error.
+func (d Doc) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	if d == nil {
+		// TODO: Should we do this?
+		return bsontype.Null, nil, nil
+	}
+	data, _ := d.MarshalBSON()
+	return bsontype.EmbeddedDocument, data, nil
+}
+
+// MarshalBSON implements the Marshaler interface.
+//
+// This method will never return an error.
+func (d Doc) MarshalBSON() ([]byte, error) { return d.AppendMarshalBSON(nil) }
+
// AppendMarshalBSON marshals Doc to BSON bytes, appending to dst.
//
// This method will never return an error.
func (d Doc) AppendMarshalBSON(dst []byte) ([]byte, error) {
	// Reserve 4 bytes for the document length, patched in below.
	idx, dst := bsoncore.ReserveLength(dst)
	for _, elem := range d {
		t, data, _ := elem.Value.MarshalBSONValue() // Value.MarshalBSONValue never returns an error.
		// Each element is encoded as: type byte, null-terminated key, value bytes.
		dst = append(dst, byte(t))
		dst = append(dst, elem.Key...)
		dst = append(dst, 0x00)
		dst = append(dst, data...)
	}
	// Terminating null byte of the document.
	dst = append(dst, 0x00)
	// The stored length covers the 4 length bytes through the terminator.
	dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:])))
	return dst, nil
}
+
+// UnmarshalBSON implements the Unmarshaler interface.
+func (d *Doc) UnmarshalBSON(b []byte) error {
+	if d == nil {
+		return ErrNilDocument
+	}
+
+	if err := bsoncore.Document(b).Validate(); err != nil {
+		return err
+	}
+
+	elems, err := bsoncore.Document(b).Elements()
+	if err != nil {
+		return err
+	}
+	var val Val
+	for _, elem := range elems {
+		rawv := elem.Value()
+		err = val.UnmarshalBSONValue(rawv.Type, rawv.Data)
+		if err != nil {
+			return err
+		}
+		*d = d.Append(elem.Key(), val)
+	}
+	return nil
+}
+
+// UnmarshalBSONValue implements the bson.ValueUnmarshaler interface.
+func (d *Doc) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
+	if t != bsontype.EmbeddedDocument {
+		return fmt.Errorf("cannot unmarshal %s into a bsonx.Doc", t)
+	}
+	return d.UnmarshalBSON(data)
+}
+
+// Equal compares this document to another, returning true if they are equal.
+func (d Doc) Equal(id IDoc) bool {
+	switch tt := id.(type) {
+	case Doc:
+		d2 := tt
+		if len(d) != len(d2) {
+			return false
+		}
+		for idx := range d {
+			if !d[idx].Equal(d2[idx]) {
+				return false
+			}
+		}
+	case MDoc:
+		unique := make(map[string]struct{}, 0)
+		for _, elem := range d {
+			unique[elem.Key] = struct{}{}
+			val, ok := tt[elem.Key]
+			if !ok {
+				return false
+			}
+			if !val.Equal(elem.Value) {
+				return false
+			}
+		}
+		if len(unique) != len(tt) {
+			return false
+		}
+	case nil:
+		return d == nil
+	default:
+		return false
+	}
+
+	return true
+}
+
+// String implements the fmt.Stringer interface.
+func (d Doc) String() string {
+	var buf bytes.Buffer
+	buf.Write([]byte("bson.Document{"))
+	for idx, elem := range d {
+		if idx > 0 {
+			buf.Write([]byte(", "))
+		}
+		fmt.Fprintf(&buf, "%v", elem)
+	}
+	buf.WriteByte('}')
+
+	return buf.String()
+}
+
+func (Doc) idoc() {}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/element.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/element.go
new file mode 100644
index 0000000..b45dbea
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/element.go
@@ -0,0 +1,53 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+const validateMaxDepthDefault = 2048
+
+// ElementTypeError specifies that a method to obtain a BSON value an incorrect type was called on a bson.Value.
+//
+// TODO: rename this ValueTypeError.
+type ElementTypeError struct {
+	Method string
+	Type   bsontype.Type
+}
+
+// Error implements the error interface.
+func (ete ElementTypeError) Error() string {
+	return "Call of " + ete.Method + " on " + ete.Type.String() + " type"
+}
+
+// Elem represents a BSON element.
+//
+// NOTE: Element cannot be the value of a map nor a property of a struct without special handling.
+// The default encoders and decoders will not process Element correctly. To do so would require
+// information loss since an Element contains a key, but the keys used when encoding a struct are
+// the struct field names. Instead of using an Element, use a Value as a value in a map or a
+// property of a struct.
+type Elem struct {
+	Key   string
+	Value Val
+}
+
+// Equal compares e and e2 and returns true if they are equal.
+func (e Elem) Equal(e2 Elem) bool {
+	if e.Key != e2.Key {
+		return false
+	}
+	return e.Value.Equal(e2.Value)
+}
+
+func (e Elem) String() string {
+	// TODO(GODRIVER-612): When bsoncore has appenders for extended JSON use that here.
+	return fmt.Sprintf(`bson.Element{"%s": %v}`, e.Key, e.Value)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/mdocument.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/mdocument.go
new file mode 100644
index 0000000..e483556
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/mdocument.go
@@ -0,0 +1,231 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// MDoc is an unordered, type safe, concise BSON document representation. This type should not be
+// used if you require ordering of values or duplicate keys. Being a map, element
+// order is not preserved and duplicate keys collapse to a single entry.
+type MDoc map[string]Val
+
+// ReadMDoc will create an MDoc using the provided slice of bytes. If the
+// slice of bytes is not a valid BSON document, this method will return an error.
+func ReadMDoc(b []byte) (MDoc, error) {
+	doc := make(MDoc, 0)
+	err := doc.UnmarshalBSON(b)
+	if err != nil {
+		return nil, err
+	}
+	return doc, nil
+}
+
+// Copy makes a shallow copy of this document: the returned map is new, but
+// the Val entries themselves are not duplicated.
+func (d MDoc) Copy() MDoc {
+	out := make(MDoc, len(d))
+	for key, value := range d {
+		out[key] = value
+	}
+	return out
+}
+
+// Lookup searches the document and potentially subdocuments or arrays for the
+// provided key. Each key provided to this method represents a layer of depth.
+//
+// This method will return an empty Value if the key does not exist. To know if the key actually
+// exists, use LookupErr.
+func (d MDoc) Lookup(key ...string) Val {
+	val, _ := d.LookupErr(key...)
+	return val
+}
+
+// LookupErr searches the document and potentially subdocuments or arrays for the
+// provided key. Each key provided to this method represents a layer of depth.
+// It returns a KeyNotFound error if any key along the path is absent.
+func (d MDoc) LookupErr(key ...string) (Val, error) {
+	elem, err := d.LookupElementErr(key...)
+	return elem.Value, err
+}
+
+// LookupElement searches the document and potentially subdocuments or arrays for the
+// provided key. Each key provided to this method represents a layer of depth.
+//
+// This method will return an empty Element if the key does not exist. To know if the key actually
+// exists, use LookupElementErr.
+func (d MDoc) LookupElement(key ...string) Elem {
+	elem, _ := d.LookupElementErr(key...)
+	return elem
+}
+
+// LookupElementErr searches the document and potentially subdocuments for the
+// provided key. Each key provided to this method represents a layer of depth.
+func (d MDoc) LookupElementErr(key ...string) (Elem, error) {
+	// KeyNotFound operates by being created where the error happens and then the depth is
+	// incremented by 1 as each function unwinds. Whenever this function returns, it also assigns
+	// the Key slice to the key slice it has. This ensures that the proper depth is identified and
+	// the proper keys.
+	if len(key) == 0 {
+		return Elem{}, KeyNotFound{Key: key}
+	}
+
+	var elem Elem
+	var err error
+	val, ok := d[key[0]]
+	if !ok {
+		return Elem{}, KeyNotFound{Key: key}
+	}
+
+	// Exactly one key left: this map entry is the element being looked up.
+	if len(key) == 1 {
+		return Elem{Key: key[0], Value: val}, nil
+	}
+
+	// More path segments remain, so the current value must be a subdocument
+	// (either ordered Doc or unordered MDoc) to recurse into.
+	switch val.Type() {
+	case bsontype.EmbeddedDocument:
+		switch tt := val.primitive.(type) {
+		case Doc:
+			elem, err = tt.LookupElementErr(key[1:]...)
+		case MDoc:
+			elem, err = tt.LookupElementErr(key[1:]...)
+		}
+	default:
+		return Elem{}, KeyNotFound{Type: val.Type()}
+	}
+	// Unwind: bump the recorded depth and re-anchor the key path at this level.
+	switch tt := err.(type) {
+	case KeyNotFound:
+		tt.Depth++
+		tt.Key = key
+		return Elem{}, tt
+	case nil:
+		return elem, nil
+	default:
+		return Elem{}, err // We can't actually hit this.
+	}
+}
+
+// MarshalBSONValue implements the bsoncodec.ValueMarshaler interface.
+//
+// This method will never return an error. A nil MDoc is encoded as BSON Null.
+func (d MDoc) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	if d == nil {
+		// TODO: Should we do this?
+		return bsontype.Null, nil, nil
+	}
+	data, _ := d.MarshalBSON()
+	return bsontype.EmbeddedDocument, data, nil
+}
+
+// MarshalBSON implements the Marshaler interface by appending to a fresh slice.
+//
+// This method will never return an error.
+func (d MDoc) MarshalBSON() ([]byte, error) { return d.AppendMarshalBSON(nil) }
+
+// AppendMarshalBSON marshals MDoc to BSON bytes, appending to dst. Because an
+// MDoc is a map, elements are written in map iteration order, so the byte
+// output is not deterministic across calls.
+//
+// This method will never return an error.
+func (d MDoc) AppendMarshalBSON(dst []byte) ([]byte, error) {
+	idx, dst := bsoncore.ReserveLength(dst)
+	for k, v := range d {
+		t, data, _ := v.MarshalBSONValue() // Value.MarshalBSONValue never returns an error.
+		// BSON element layout: type byte, cstring key, value bytes.
+		dst = append(dst, byte(t))
+		dst = append(dst, k...)
+		dst = append(dst, 0x00)
+		dst = append(dst, data...)
+	}
+	dst = append(dst, 0x00)
+	dst = bsoncore.UpdateLength(dst, idx, int32(len(dst[idx:])))
+	return dst, nil
+}
+
+// UnmarshalBSON implements the Unmarshaler interface.
+//
+// A nil *MDoc receiver returns ErrNilDocument. If the map itself is nil it is
+// allocated here, so unmarshaling into a zero-valued MDoc works; previously
+// this panicked on the first map assignment.
+func (d *MDoc) UnmarshalBSON(b []byte) error {
+	if d == nil {
+		return ErrNilDocument
+	}
+
+	if err := bsoncore.Document(b).Validate(); err != nil {
+		return err
+	}
+
+	elems, err := bsoncore.Document(b).Elements()
+	if err != nil {
+		return err
+	}
+
+	// Writing to a nil map panics; allocate one sized for the elements.
+	if *d == nil {
+		*d = make(MDoc, len(elems))
+	}
+
+	var val Val
+	for _, elem := range elems {
+		rawv := elem.Value()
+		err = val.UnmarshalBSONValue(rawv.Type, rawv.Data)
+		if err != nil {
+			return err
+		}
+		(*d)[elem.Key()] = val
+	}
+	return nil
+}
+
+// Equal compares this document to another, returning true if they are equal.
+// An MDoc compares equal to a Doc when every Doc element is present in the
+// MDoc with an equal value and the Doc's distinct keys exactly cover the MDoc.
+func (d MDoc) Equal(id IDoc) bool {
+	switch tt := id.(type) {
+	case MDoc:
+		d2 := tt
+		if len(d) != len(d2) {
+			return false
+		}
+		for key, value := range d {
+			value2, ok := d2[key]
+			if !ok {
+				return false
+			}
+			if !value.Equal(value2) {
+				return false
+			}
+		}
+	case Doc:
+		// A Doc may contain duplicate keys; track distinct keys seen so the
+		// final length comparison is against the deduplicated key set.
+		unique := make(map[string]struct{}, 0)
+		for _, elem := range tt {
+			unique[elem.Key] = struct{}{}
+			val, ok := d[elem.Key]
+			if !ok {
+				return false
+			}
+			if !val.Equal(elem.Value) {
+				return false
+			}
+		}
+		if len(unique) != len(d) {
+			return false
+		}
+	case nil:
+		// Only a nil MDoc equals a nil IDoc.
+		return d == nil
+	default:
+		return false
+	}
+
+	return true
+}
+
+// String implements the fmt.Stringer interface. Entries appear in map
+// iteration order, so the rendering is not deterministic across calls.
+func (d MDoc) String() string {
+	var out bytes.Buffer
+	out.WriteString("bson.Document{")
+	first := true
+	for key, value := range d {
+		if !first {
+			out.WriteString(", ")
+		}
+		fmt.Fprintf(&out, "%v", Elem{Key: key, Value: value})
+		first = false
+	}
+	out.WriteByte('}')
+
+	return out.String()
+}
+
+// idoc is the unexported marker method that makes MDoc satisfy the IDoc interface.
+func (MDoc) idoc() {}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/primitive_codecs.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/primitive_codecs.go
new file mode 100644
index 0000000..b34f07d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/primitive_codecs.go
@@ -0,0 +1,638 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsonrw"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+)
+
+var primitiveCodecs PrimitiveCodecs
+
+// reflect.Type values for the bsonx types handled by PrimitiveCodecs; used as
+// registry keys and for runtime type checks in the encoder/decoder funcs below.
+var tDocument = reflect.TypeOf((Doc)(nil))
+var tMDoc = reflect.TypeOf((MDoc)(nil))
+var tArray = reflect.TypeOf((Arr)(nil))
+var tValue = reflect.TypeOf(Val{})
+var tElementSlice = reflect.TypeOf(([]Elem)(nil))
+
+// PrimitiveCodecs is a namespace for all of the default bsoncodec.Codecs for the primitive types
+// defined in this package.
+type PrimitiveCodecs struct{}
+
+// RegisterPrimitiveCodecs will register the encode and decode methods attached to PrimitiveCodecs
+// with the provided RegistryBuilder. It panics if rb is nil.
+func (pc PrimitiveCodecs) RegisterPrimitiveCodecs(rb *bsoncodec.RegistryBuilder) {
+	if rb == nil {
+		panic(errors.New("argument to RegisterPrimitiveCodecs must not be nil"))
+	}
+
+	rb.
+		RegisterEncoder(tDocument, bsoncodec.ValueEncoderFunc(pc.DocumentEncodeValue)).
+		RegisterEncoder(tArray, bsoncodec.ValueEncoderFunc(pc.ArrayEncodeValue)).
+		RegisterEncoder(tValue, bsoncodec.ValueEncoderFunc(pc.ValueEncodeValue)).
+		RegisterEncoder(tElementSlice, bsoncodec.ValueEncoderFunc(pc.ElementSliceEncodeValue)).
+		RegisterDecoder(tDocument, bsoncodec.ValueDecoderFunc(pc.DocumentDecodeValue)).
+		RegisterDecoder(tArray, bsoncodec.ValueDecoderFunc(pc.ArrayDecodeValue)).
+		RegisterDecoder(tValue, bsoncodec.ValueDecoderFunc(pc.ValueDecodeValue)).
+		RegisterDecoder(tElementSlice, bsoncodec.ValueDecoderFunc(pc.ElementSliceDecodeValue))
+}
+
+// DocumentEncodeValue is the ValueEncoderFunc for *Document. A nil Doc is
+// written as BSON Null.
+func (pc PrimitiveCodecs) DocumentEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tDocument {
+		return bsoncodec.ValueEncoderError{Name: "DocumentEncodeValue", Types: []reflect.Type{tDocument}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	doc := val.Interface().(Doc)
+
+	dw, err := vw.WriteDocument()
+	if err != nil {
+		return err
+	}
+
+	return pc.encodeDocument(ec, dw, doc)
+}
+
+// DocumentDecodeValue is the ValueDecoderFunc for *Document. val must be a
+// settable reflect.Value holding a Doc.
+func (pc PrimitiveCodecs) DocumentDecodeValue(dctx bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tDocument {
+		return bsoncodec.ValueDecoderError{Name: "DocumentDecodeValue", Types: []reflect.Type{tDocument}, Received: val}
+	}
+
+	return pc.documentDecodeValue(dctx, vr, val.Addr().Interface().(*Doc))
+}
+
+// documentDecodeValue opens a document on vr and decodes its elements into doc.
+func (pc PrimitiveCodecs) documentDecodeValue(dctx bsoncodec.DecodeContext, vr bsonrw.ValueReader, doc *Doc) error {
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+
+	return pc.decodeDocument(dctx, dr, doc)
+}
+
+// ArrayEncodeValue is the ValueEncoderFunc for *Array. A nil Arr is written as
+// BSON Null; otherwise each element is encoded in order.
+func (pc PrimitiveCodecs) ArrayEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tArray {
+		return bsoncodec.ValueEncoderError{Name: "ArrayEncodeValue", Types: []reflect.Type{tArray}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	arr := val.Interface().(Arr)
+
+	aw, err := vw.WriteArray()
+	if err != nil {
+		return err
+	}
+
+	for _, val := range arr {
+		dvw, err := aw.WriteArrayElement()
+		if err != nil {
+			return err
+		}
+
+		err = pc.encodeValue(ec, dvw, val)
+
+		if err != nil {
+			return err
+		}
+	}
+
+	return aw.WriteArrayEnd()
+}
+
+// ArrayDecodeValue is the ValueDecoderFunc for *Array. It resets val to length
+// zero (allocating if nil) and appends one decoded Val per array element.
+func (pc PrimitiveCodecs) ArrayDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tArray {
+		return bsoncodec.ValueDecoderError{Name: "ArrayDecodeValue", Types: []reflect.Type{tArray}, Received: val}
+	}
+
+	ar, err := vr.ReadArray()
+	if err != nil {
+		return err
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(tArray, 0, 0))
+	}
+	val.SetLen(0)
+
+	// Read values until the end-of-array sentinel.
+	for {
+		vr, err := ar.ReadValue()
+		if err == bsonrw.ErrEOA {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		var elem Val
+		err = pc.valueDecodeValue(dc, vr, &elem)
+		if err != nil {
+			return err
+		}
+
+		val.Set(reflect.Append(val, reflect.ValueOf(elem)))
+	}
+
+	return nil
+}
+
+// ElementSliceEncodeValue is the ValueEncoderFunc for []*Element. A []Elem is
+// encoded exactly like a Doc (same underlying type), so this converts and
+// delegates to DocumentEncodeValue.
+func (pc PrimitiveCodecs) ElementSliceEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tElementSlice {
+		return bsoncodec.ValueEncoderError{Name: "ElementSliceEncodeValue", Types: []reflect.Type{tElementSlice}, Received: val}
+	}
+
+	if val.IsNil() {
+		return vw.WriteNull()
+	}
+
+	return pc.DocumentEncodeValue(ec, vw, val.Convert(tDocument))
+}
+
+// ElementSliceDecodeValue is the ValueDecoderFunc for []*Element. It reads a
+// document from vr and appends one Elem per document element to val.
+func (pc PrimitiveCodecs) ElementSliceDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tElementSlice {
+		return bsoncodec.ValueDecoderError{Name: "ElementSliceDecodeValue", Types: []reflect.Type{tElementSlice}, Received: val}
+	}
+
+	if val.IsNil() {
+		val.Set(reflect.MakeSlice(val.Type(), 0, 0))
+	}
+
+	val.SetLen(0)
+
+	dr, err := vr.ReadDocument()
+	if err != nil {
+		return err
+	}
+	elems := make([]reflect.Value, 0)
+	// Read elements until the end-of-document sentinel.
+	for {
+		key, vr, err := dr.ReadElement()
+		if err == bsonrw.ErrEOD {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		var elem Elem
+		err = pc.elementDecodeValue(dc, vr, key, &elem)
+		if err != nil {
+			return err
+		}
+
+		elems = append(elems, reflect.ValueOf(elem))
+	}
+
+	val.Set(reflect.Append(val, elems...))
+	return nil
+}
+
+// ValueEncodeValue is the ValueEncoderFunc for *Value.
+func (pc PrimitiveCodecs) ValueEncodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val reflect.Value) error {
+	if !val.IsValid() || val.Type() != tValue {
+		return bsoncodec.ValueEncoderError{Name: "ValueEncodeValue", Types: []reflect.Type{tValue}, Received: val}
+	}
+
+	v := val.Interface().(Val)
+
+	return pc.encodeValue(ec, vw, v)
+}
+
+// ValueDecodeValue is the ValueDecoderFunc for *Value. val must be a settable
+// reflect.Value holding a Val.
+func (pc PrimitiveCodecs) ValueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val reflect.Value) error {
+	if !val.CanSet() || val.Type() != tValue {
+		return bsoncodec.ValueDecoderError{Name: "ValueDecodeValue", Types: []reflect.Type{tValue}, Received: val}
+	}
+
+	return pc.valueDecodeValue(dc, vr, val.Addr().Interface().(*Val))
+}
+
+// encodeDocument is a separate function that we use because CodeWithScope
+// returns us a DocumentWriter and we need to do the same logic that we would do
+// for a document but cannot use a Codec. It writes each element of doc, in
+// order, to dw and then closes the document.
+func (pc PrimitiveCodecs) encodeDocument(ec bsoncodec.EncodeContext, dw bsonrw.DocumentWriter, doc Doc) error {
+	for _, elem := range doc {
+		dvw, err := dw.WriteDocumentElement(elem.Key)
+		if err != nil {
+			return err
+		}
+
+		err = pc.encodeValue(ec, dvw, elem.Value)
+
+		if err != nil {
+			return err
+		}
+	}
+
+	return dw.WriteDocumentEnd()
+}
+
+// DecodeDocument handles decoding into a Doc from a bsonrw.DocumentReader. It
+// is the exported entry point for the internal decodeDocument helper.
+func (pc PrimitiveCodecs) DecodeDocument(dctx bsoncodec.DecodeContext, dr bsonrw.DocumentReader, pdoc *Doc) error {
+	return pc.decodeDocument(dctx, dr, pdoc)
+}
+
+// decodeDocument reads elements from dr until end-of-document, appending each
+// decoded Elem to *pdoc. The destination is truncated (capacity reused) first.
+func (pc PrimitiveCodecs) decodeDocument(dctx bsoncodec.DecodeContext, dr bsonrw.DocumentReader, pdoc *Doc) error {
+	if *pdoc == nil {
+		*pdoc = make(Doc, 0)
+	}
+	*pdoc = (*pdoc)[:0]
+	for {
+		key, vr, err := dr.ReadElement()
+		if err == bsonrw.ErrEOD {
+			break
+		}
+		if err != nil {
+			return err
+		}
+
+		var elem Elem
+		err = pc.elementDecodeValue(dctx, vr, key, &elem)
+		if err != nil {
+			return err
+		}
+
+		*pdoc = append(*pdoc, elem)
+	}
+	return nil
+}
+
+// elementDecodeValue decodes the next value from vr and stores it, paired with
+// key, into *elem.
+//
+// The per-type decoding is identical to valueDecodeValue, so this delegates to
+// it rather than duplicating the ~140-line type switch (the previous
+// implementation repeated valueDecodeValue's body verbatim).
+func (pc PrimitiveCodecs) elementDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, key string, elem *Elem) error {
+	var val Val
+	if err := pc.valueDecodeValue(dc, vr, &val); err != nil {
+		return err
+	}
+	*elem = Elem{Key: key, Value: val}
+	return nil
+}
+
+// encodeValue does no validation; callers must perform validation on val
+// before calling this method. It writes val to vw using the writer method
+// matching val's BSON type.
+func (pc PrimitiveCodecs) encodeValue(ec bsoncodec.EncodeContext, vw bsonrw.ValueWriter, val Val) error {
+	var err error
+	switch val.Type() {
+	case bsontype.Double:
+		err = vw.WriteDouble(val.Double())
+	case bsontype.String:
+		err = vw.WriteString(val.StringValue())
+	case bsontype.EmbeddedDocument:
+		// Documents and arrays go through the registry so user-registered
+		// codecs are honored.
+		var encoder bsoncodec.ValueEncoder
+		encoder, err = ec.LookupEncoder(tDocument)
+		if err != nil {
+			break
+		}
+		err = encoder.EncodeValue(ec, vw, reflect.ValueOf(val.Document()))
+	case bsontype.Array:
+		var encoder bsoncodec.ValueEncoder
+		encoder, err = ec.LookupEncoder(tArray)
+		if err != nil {
+			break
+		}
+		err = encoder.EncodeValue(ec, vw, reflect.ValueOf(val.Array()))
+	case bsontype.Binary:
+		// TODO: FIX THIS (╯°□°)╯︵ ┻━┻
+		subtype, data := val.Binary()
+		err = vw.WriteBinaryWithSubtype(data, subtype)
+	case bsontype.Undefined:
+		err = vw.WriteUndefined()
+	case bsontype.ObjectID:
+		err = vw.WriteObjectID(val.ObjectID())
+	case bsontype.Boolean:
+		err = vw.WriteBoolean(val.Boolean())
+	case bsontype.DateTime:
+		err = vw.WriteDateTime(val.DateTime())
+	case bsontype.Null:
+		err = vw.WriteNull()
+	case bsontype.Regex:
+		err = vw.WriteRegex(val.Regex())
+	case bsontype.DBPointer:
+		err = vw.WriteDBPointer(val.DBPointer())
+	case bsontype.JavaScript:
+		err = vw.WriteJavascript(val.JavaScript())
+	case bsontype.Symbol:
+		err = vw.WriteSymbol(val.Symbol())
+	case bsontype.CodeWithScope:
+		code, scope := val.CodeWithScope()
+
+		var cwsw bsonrw.DocumentWriter
+		cwsw, err = vw.WriteCodeWithScope(code)
+		if err != nil {
+			break
+		}
+
+		err = pc.encodeDocument(ec, cwsw, scope)
+	case bsontype.Int32:
+		err = vw.WriteInt32(val.Int32())
+	case bsontype.Timestamp:
+		err = vw.WriteTimestamp(val.Timestamp())
+	case bsontype.Int64:
+		err = vw.WriteInt64(val.Int64())
+	case bsontype.Decimal128:
+		err = vw.WriteDecimal128(val.Decimal128())
+	case bsontype.MinKey:
+		err = vw.WriteMinKey()
+	case bsontype.MaxKey:
+		err = vw.WriteMaxKey()
+	default:
+		// Bug fix: %T on a bsontype.Type always printed the literal string
+		// "bsontype.Type"; %v reports the actual offending type value.
+		err = fmt.Errorf("%v is not a valid BSON type to encode", val.Type())
+	}
+
+	return err
+}
+
+// valueDecodeValue reads a single value of any supported BSON type from vr
+// and stores the decoded Val into *val. Unknown types produce an error.
+func (pc PrimitiveCodecs) valueDecodeValue(dc bsoncodec.DecodeContext, vr bsonrw.ValueReader, val *Val) error {
+	switch vr.Type() {
+	case bsontype.Double:
+		f64, err := vr.ReadDouble()
+		if err != nil {
+			return err
+		}
+		*val = Double(f64)
+	case bsontype.String:
+		str, err := vr.ReadString()
+		if err != nil {
+			return err
+		}
+		*val = String(str)
+	case bsontype.EmbeddedDocument:
+		var embeddedDoc Doc
+		err := pc.documentDecodeValue(dc, vr, &embeddedDoc)
+		if err != nil {
+			return err
+		}
+		*val = Document(embeddedDoc)
+	case bsontype.Array:
+		arr := reflect.New(tArray).Elem()
+		err := pc.ArrayDecodeValue(dc, vr, arr)
+		if err != nil {
+			return err
+		}
+		*val = Array(arr.Interface().(Arr))
+	case bsontype.Binary:
+		data, subtype, err := vr.ReadBinary()
+		if err != nil {
+			return err
+		}
+		*val = Binary(subtype, data)
+	case bsontype.Undefined:
+		err := vr.ReadUndefined()
+		if err != nil {
+			return err
+		}
+		*val = Undefined()
+	case bsontype.ObjectID:
+		oid, err := vr.ReadObjectID()
+		if err != nil {
+			return err
+		}
+		*val = ObjectID(oid)
+	case bsontype.Boolean:
+		b, err := vr.ReadBoolean()
+		if err != nil {
+			return err
+		}
+		*val = Boolean(b)
+	case bsontype.DateTime:
+		dt, err := vr.ReadDateTime()
+		if err != nil {
+			return err
+		}
+		*val = DateTime(dt)
+	case bsontype.Null:
+		err := vr.ReadNull()
+		if err != nil {
+			return err
+		}
+		*val = Null()
+	case bsontype.Regex:
+		pattern, options, err := vr.ReadRegex()
+		if err != nil {
+			return err
+		}
+		*val = Regex(pattern, options)
+	case bsontype.DBPointer:
+		ns, pointer, err := vr.ReadDBPointer()
+		if err != nil {
+			return err
+		}
+		*val = DBPointer(ns, pointer)
+	case bsontype.JavaScript:
+		js, err := vr.ReadJavascript()
+		if err != nil {
+			return err
+		}
+		*val = JavaScript(js)
+	case bsontype.Symbol:
+		symbol, err := vr.ReadSymbol()
+		if err != nil {
+			return err
+		}
+		*val = Symbol(symbol)
+	case bsontype.CodeWithScope:
+		// The scope is itself a document and is decoded recursively.
+		code, scope, err := vr.ReadCodeWithScope()
+		if err != nil {
+			return err
+		}
+		var scopeDoc Doc
+		err = pc.decodeDocument(dc, scope, &scopeDoc)
+		if err != nil {
+			return err
+		}
+		*val = CodeWithScope(code, scopeDoc)
+	case bsontype.Int32:
+		i32, err := vr.ReadInt32()
+		if err != nil {
+			return err
+		}
+		*val = Int32(i32)
+	case bsontype.Timestamp:
+		t, i, err := vr.ReadTimestamp()
+		if err != nil {
+			return err
+		}
+		*val = Timestamp(t, i)
+	case bsontype.Int64:
+		i64, err := vr.ReadInt64()
+		if err != nil {
+			return err
+		}
+		*val = Int64(i64)
+	case bsontype.Decimal128:
+		d128, err := vr.ReadDecimal128()
+		if err != nil {
+			return err
+		}
+		*val = Decimal128(d128)
+	case bsontype.MinKey:
+		err := vr.ReadMinKey()
+		if err != nil {
+			return err
+		}
+		*val = MinKey()
+	case bsontype.MaxKey:
+		err := vr.ReadMaxKey()
+		if err != nil {
+			return err
+		}
+		*val = MaxKey()
+	default:
+		return fmt.Errorf("Cannot read unknown BSON type %s", vr.Type())
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/registry.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/registry.go
new file mode 100644
index 0000000..83dfaa5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/registry.go
@@ -0,0 +1,22 @@
+package bsonx
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+)
+
+// DefaultRegistry is the default bsoncodec.Registry. It contains the default codecs and the
+// primitive codecs.
+var DefaultRegistry = NewRegistryBuilder().Build()
+
+// NewRegistryBuilder creates a new RegistryBuilder configured with the default encoders and
+// decoders from the bsoncodec.DefaultValueEncoders and bsoncodec.DefaultValueDecoders types and the
+// PrimitiveCodecs type in this package.
+func NewRegistryBuilder() *bsoncodec.RegistryBuilder {
+	rb := bsoncodec.NewRegistryBuilder()
+	bsoncodec.DefaultValueEncoders{}.RegisterDefaultEncoders(rb)
+	bsoncodec.DefaultValueDecoders{}.RegisterDefaultDecoders(rb)
+	bson.PrimitiveCodecs{}.RegisterPrimitiveCodecs(rb)
+	primitiveCodecs.RegisterPrimitiveCodecs(rb)
+	return rb
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/value.go b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/value.go
new file mode 100644
index 0000000..033d9d8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/bsonx/value.go
@@ -0,0 +1,899 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package bsonx
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
+// Val represents a BSON value.
+type Val struct {
+	// NOTE: The bootstrap is a small amount of space that'll be on the stack. At 15 bytes this
+	// doesn't make this type any larger, since there are 7 bytes of padding and we want an int64 to
+	// store small values (e.g. boolean, double, int64, etc...). The primitive property is where all
+	// of the larger values go. They will use either Go primitives or the primitive.* types.
+	t         bsontype.Type // BSON type tag; the zero Type marks an unset Val
+	bootstrap [15]byte      // inline storage for small values (ints, short strings)
+	primitive interface{}   // overflow storage for values too large for bootstrap
+}
+
+// reset returns a zeroed copy of v: type cleared, inline storage zeroed, and
+// the primitive pointer released so it doesn't keep its referent from being
+// garbage collected.
+func (v Val) reset() Val {
+	v.primitive = nil // clear out any pointers so we don't accidentally stop them from being garbage collected.
+	v.t = bsontype.Type(0)
+	// Zero the inline storage in one assignment instead of 15 per-byte writes.
+	v.bootstrap = [15]byte{}
+	return v
+}
+
+// string returns the stored string value, reading from the overflow primitive
+// when present, otherwise from the inline bootstrap storage.
+func (v Val) string() string {
+	if v.primitive != nil {
+		return v.primitive.(string)
+	}
+	// Inline strings are length-prefixed: bootstrap[0] holds the byte length
+	// and the string bytes follow (see writestring).
+	length := uint8(v.bootstrap[0])
+	return string(v.bootstrap[1 : length+1])
+}
+
+// writestring stores str into the value: strings of at most 14 bytes are kept
+// inline in bootstrap (length byte followed by the bytes); longer strings go
+// into the primitive overflow field.
+func (v Val) writestring(str string) Val {
+	switch {
+	case len(str) < 15:
+		v.bootstrap[0] = uint8(len(str))
+		copy(v.bootstrap[1:], str)
+	default:
+		v.primitive = str
+	}
+	return v
+}
+
+// i64 decodes an int64 from the first 8 bootstrap bytes (little-endian).
+func (v Val) i64() int64 {
+	return int64(v.bootstrap[0]) | int64(v.bootstrap[1])<<8 | int64(v.bootstrap[2])<<16 |
+		int64(v.bootstrap[3])<<24 | int64(v.bootstrap[4])<<32 | int64(v.bootstrap[5])<<40 |
+		int64(v.bootstrap[6])<<48 | int64(v.bootstrap[7])<<56
+}
+
+// writei64 stores i64 into the first 8 bootstrap bytes in little-endian order
+// (the inverse of i64).
+func (v Val) writei64(i64 int64) Val {
+	binary.LittleEndian.PutUint64(v.bootstrap[0:8], uint64(i64))
+	return v
+}
+
+// IsZero returns true if this value is the zero (unset) Val or a BSON null.
+func (v Val) IsZero() bool { return v.t == bsontype.Type(0) || v.t == bsontype.Null }
+
+// String implements the fmt.Stringer interface by formatting the Go value
+// returned from Interface.
+func (v Val) String() string {
+	// TODO(GODRIVER-612): When bsoncore has appenders for extended JSON use that here.
+	return fmt.Sprintf("%v", v.Interface())
+}
+
+// Interface returns the Go value of this Value as an empty interface.
+//
+// This method will return nil if it is empty, otherwise it will return a Go primitive or a
+// primitive.* instance. Unknown or unset types are reported as primitive.Null{}.
+func (v Val) Interface() interface{} {
+	switch v.Type() {
+	case bsontype.Double:
+		return v.Double()
+	case bsontype.String:
+		return v.StringValue()
+	case bsontype.EmbeddedDocument:
+		// Bind the type switch variable instead of re-asserting (the previous
+		// code performed a second, redundant type assertion in each case).
+		switch doc := v.primitive.(type) {
+		case Doc:
+			return doc
+		case MDoc:
+			return doc
+		default:
+			return primitive.Null{}
+		}
+	case bsontype.Array:
+		return v.Array()
+	case bsontype.Binary:
+		return v.primitive.(primitive.Binary)
+	case bsontype.Undefined:
+		return primitive.Undefined{}
+	case bsontype.ObjectID:
+		return v.ObjectID()
+	case bsontype.Boolean:
+		return v.Boolean()
+	case bsontype.DateTime:
+		return v.DateTime()
+	case bsontype.Null:
+		return primitive.Null{}
+	case bsontype.Regex:
+		return v.primitive.(primitive.Regex)
+	case bsontype.DBPointer:
+		return v.primitive.(primitive.DBPointer)
+	case bsontype.JavaScript:
+		return v.JavaScript()
+	case bsontype.Symbol:
+		return v.Symbol()
+	case bsontype.CodeWithScope:
+		return v.primitive.(primitive.CodeWithScope)
+	case bsontype.Int32:
+		return v.Int32()
+	case bsontype.Timestamp:
+		t, i := v.Timestamp()
+		return primitive.Timestamp{T: t, I: i}
+	case bsontype.Int64:
+		return v.Int64()
+	case bsontype.Decimal128:
+		return v.Decimal128()
+	case bsontype.MinKey:
+		return primitive.MinKey{}
+	case bsontype.MaxKey:
+		return primitive.MaxKey{}
+	default:
+		return primitive.Null{}
+	}
+}
+
+// MarshalBSONValue implements the bsoncodec.ValueMarshaler interface by
+// appending to a fresh slice.
+func (v Val) MarshalBSONValue() (bsontype.Type, []byte, error) {
+	return v.MarshalAppendBSONValue(nil)
+}
+
+// MarshalAppendBSONValue is similar to MarshalBSONValue, but allows the caller to specify a slice
+// to add the bytes to. Value-less types (Undefined, Null, MinKey, MaxKey)
+// append no bytes; an invalid type panics.
+func (v Val) MarshalAppendBSONValue(dst []byte) (bsontype.Type, []byte, error) {
+	t := v.Type()
+	switch v.Type() {
+	case bsontype.Double:
+		dst = bsoncore.AppendDouble(dst, v.Double())
+	case bsontype.String:
+		// Bug fix: was v.String(), the fmt.Stringer — which is slated to become
+		// extended JSON (GODRIVER-612) and would then corrupt the marshaled
+		// bytes. StringValue is the raw string accessor.
+		dst = bsoncore.AppendString(dst, v.StringValue())
+	case bsontype.EmbeddedDocument:
+		switch doc := v.primitive.(type) {
+		case Doc:
+			t, dst, _ = doc.MarshalBSONValue() // Doc.MarshalBSONValue never returns an error.
+		case MDoc:
+			t, dst, _ = doc.MarshalBSONValue() // MDoc.MarshalBSONValue never returns an error.
+		}
+	case bsontype.Array:
+		t, dst, _ = v.Array().MarshalBSONValue() // Arr.MarshalBSON never returns an error.
+	case bsontype.Binary:
+		subtype, bindata := v.Binary()
+		dst = bsoncore.AppendBinary(dst, subtype, bindata)
+	case bsontype.Undefined:
+	case bsontype.ObjectID:
+		dst = bsoncore.AppendObjectID(dst, v.ObjectID())
+	case bsontype.Boolean:
+		dst = bsoncore.AppendBoolean(dst, v.Boolean())
+	case bsontype.DateTime:
+		dst = bsoncore.AppendDateTime(dst, int64(v.DateTime()))
+	case bsontype.Null:
+	case bsontype.Regex:
+		pattern, options := v.Regex()
+		dst = bsoncore.AppendRegex(dst, pattern, options)
+	case bsontype.DBPointer:
+		ns, ptr := v.DBPointer()
+		dst = bsoncore.AppendDBPointer(dst, ns, ptr)
+	case bsontype.JavaScript:
+		dst = bsoncore.AppendJavaScript(dst, string(v.JavaScript()))
+	case bsontype.Symbol:
+		dst = bsoncore.AppendSymbol(dst, string(v.Symbol()))
+	case bsontype.CodeWithScope:
+		code, doc := v.CodeWithScope()
+		var scope []byte
+		scope, _ = doc.MarshalBSON() // Doc.MarshalBSON never returns an error.
+		dst = bsoncore.AppendCodeWithScope(dst, code, scope)
+	case bsontype.Int32:
+		dst = bsoncore.AppendInt32(dst, v.Int32())
+	case bsontype.Timestamp:
+		t, i := v.Timestamp()
+		dst = bsoncore.AppendTimestamp(dst, t, i)
+	case bsontype.Int64:
+		dst = bsoncore.AppendInt64(dst, v.Int64())
+	case bsontype.Decimal128:
+		dst = bsoncore.AppendDecimal128(dst, v.Decimal128())
+	case bsontype.MinKey:
+	case bsontype.MaxKey:
+	default:
+		panic(fmt.Errorf("invalid BSON type %v", t))
+	}
+
+	return t, dst, nil
+}
+
+// UnmarshalBSONValue implements the bsoncodec.ValueUnmarshaler interface.
+//
+// The bytes in data are decoded according to t and the decoded value replaces
+// any previous contents of v. An unknown type yields an "invalid BSON type"
+// error; data that is too short for the indicated type yields an
+// insufficient-bytes error.
+func (v *Val) UnmarshalBSONValue(t bsontype.Type, data []byte) error {
+	if v == nil {
+		return errors.New("cannot unmarshal into nil Value")
+	}
+	var err error
+	// ok is set to false by any bsoncore.Read* helper that runs out of bytes;
+	// it is inspected once after the switch.
+	var ok = true
+	var rem []byte
+	switch t {
+	case bsontype.Double:
+		var f64 float64
+		f64, rem, ok = bsoncore.ReadDouble(data)
+		*v = Double(f64)
+	case bsontype.String:
+		var str string
+		str, rem, ok = bsoncore.ReadString(data)
+		*v = String(str)
+	case bsontype.EmbeddedDocument:
+		var raw []byte
+		var doc Doc
+		raw, rem, ok = bsoncore.ReadDocument(data)
+		doc, err = ReadDoc(raw)
+		*v = Document(doc)
+	case bsontype.Array:
+		var raw []byte
+		arr := make(Arr, 0)
+		raw, rem, ok = bsoncore.ReadArray(data)
+		// Delegate element decoding to Arr's own unmarshaler.
+		err = arr.UnmarshalBSONValue(t, raw)
+		*v = Array(arr)
+	case bsontype.Binary:
+		var subtype byte
+		var bindata []byte
+		subtype, bindata, rem, ok = bsoncore.ReadBinary(data)
+		*v = Binary(subtype, bindata)
+	case bsontype.Undefined:
+		*v = Undefined()
+	case bsontype.ObjectID:
+		var oid primitive.ObjectID
+		oid, rem, ok = bsoncore.ReadObjectID(data)
+		*v = ObjectID(oid)
+	case bsontype.Boolean:
+		var b bool
+		b, rem, ok = bsoncore.ReadBoolean(data)
+		*v = Boolean(b)
+	case bsontype.DateTime:
+		var dt int64
+		dt, rem, ok = bsoncore.ReadDateTime(data)
+		*v = DateTime(dt)
+	case bsontype.Null:
+		*v = Null()
+	case bsontype.Regex:
+		var pattern, options string
+		pattern, options, rem, ok = bsoncore.ReadRegex(data)
+		*v = Regex(pattern, options)
+	case bsontype.DBPointer:
+		var ns string
+		var ptr primitive.ObjectID
+		ns, ptr, rem, ok = bsoncore.ReadDBPointer(data)
+		*v = DBPointer(ns, ptr)
+	case bsontype.JavaScript:
+		var js string
+		js, rem, ok = bsoncore.ReadJavaScript(data)
+		*v = JavaScript(js)
+	case bsontype.Symbol:
+		var symbol string
+		symbol, rem, ok = bsoncore.ReadSymbol(data)
+		*v = Symbol(symbol)
+	case bsontype.CodeWithScope:
+		var raw []byte
+		var code string
+		var scope Doc
+		code, raw, rem, ok = bsoncore.ReadCodeWithScope(data)
+		scope, err = ReadDoc(raw)
+		*v = CodeWithScope(code, scope)
+	case bsontype.Int32:
+		var i32 int32
+		i32, rem, ok = bsoncore.ReadInt32(data)
+		*v = Int32(i32)
+	case bsontype.Timestamp:
+		var i, t uint32
+		t, i, rem, ok = bsoncore.ReadTimestamp(data)
+		*v = Timestamp(t, i)
+	case bsontype.Int64:
+		var i64 int64
+		i64, rem, ok = bsoncore.ReadInt64(data)
+		*v = Int64(i64)
+	case bsontype.Decimal128:
+		var d128 primitive.Decimal128
+		d128, rem, ok = bsoncore.ReadDecimal128(data)
+		*v = Decimal128(d128)
+	case bsontype.MinKey:
+		*v = MinKey()
+	case bsontype.MaxKey:
+		*v = MaxKey()
+	default:
+		err = fmt.Errorf("invalid BSON type %v", t)
+	}
+
+	// Translate a short read into an explicit error unless a more specific
+	// error was already recorded above.
+	if !ok && err == nil {
+		err = bsoncore.NewInsufficientBytesError(data, rem)
+	}
+
+	return err
+}
+
+// Type returns the BSON type of this value. A zero (unset) value reports
+// bsontype.Null.
+func (v Val) Type() bsontype.Type {
+	if t := v.t; t != bsontype.Type(0) {
+		return t
+	}
+	return bsontype.Null
+}
+
+// IsNumber returns true if the type of v is a numeric BSON type
+// (double, int32, int64, or decimal128).
+func (v Val) IsNumber() bool {
+	switch v.Type() {
+	case bsontype.Double, bsontype.Int32, bsontype.Int64, bsontype.Decimal128:
+		return true
+	default:
+		return false
+	}
+}
+
+// Double returns the BSON double value the Value represents. It panics if the
+// value is a BSON type other than double.
+func (v Val) Double() float64 {
+	if v.t == bsontype.Double {
+		return math.Float64frombits(binary.LittleEndian.Uint64(v.bootstrap[0:8]))
+	}
+	panic(ElementTypeError{"bson.Value.Double", v.t})
+}
+
+// DoubleOK is the same as Double, but returns a boolean instead of panicking.
+func (v Val) DoubleOK() (float64, bool) {
+	if v.t == bsontype.Double {
+		return math.Float64frombits(binary.LittleEndian.Uint64(v.bootstrap[0:8])), true
+	}
+	return 0, false
+}
+
+// StringValue returns the BSON string the Value represents. It panics if the
+// value is a BSON type other than string.
+//
+// NOTE: This method is called StringValue to avoid it implementing the
+// fmt.Stringer interface.
+func (v Val) StringValue() string {
+	if v.t == bsontype.String {
+		return v.string()
+	}
+	panic(ElementTypeError{"bson.Value.StringValue", v.t})
+}
+
+// StringValueOK is the same as StringValue, but returns a boolean instead of
+// panicking.
+func (v Val) StringValueOK() (string, bool) {
+	if v.t == bsontype.String {
+		return v.string(), true
+	}
+	return "", false
+}
+
+func (v Val) asDoc() Doc {
+	doc, ok := v.primitive.(Doc)
+	if ok {
+		return doc
+	}
+	mdoc := v.primitive.(MDoc)
+	for k, v := range mdoc {
+		doc = append(doc, Elem{k, v})
+	}
+	return doc
+}
+
+// asMDoc returns the underlying document as an MDoc, converting from a Doc
+// representation when necessary.
+func (v Val) asMDoc() MDoc {
+	mdoc, ok := v.primitive.(MDoc)
+	if ok {
+		return mdoc
+	}
+	doc := v.primitive.(Doc)
+	// The map must be allocated before inserting: the zero-value MDoc left by
+	// the failed type assertion above is a nil map, and assigning into a nil
+	// map panics.
+	mdoc = make(MDoc, len(doc))
+	for _, elem := range doc {
+		mdoc[elem.Key] = elem.Value
+	}
+	return mdoc
+}
+
+// Document returns the BSON embedded document value the Value represents. It
+// panics if the value is a BSON type other than embedded document.
+func (v Val) Document() Doc {
+	if v.t == bsontype.EmbeddedDocument {
+		return v.asDoc()
+	}
+	panic(ElementTypeError{"bson.Value.Document", v.t})
+}
+
+// DocumentOK is the same as Document, except it returns a boolean
+// instead of panicking.
+func (v Val) DocumentOK() (Doc, bool) {
+	if v.t == bsontype.EmbeddedDocument {
+		return v.asDoc(), true
+	}
+	return nil, false
+}
+
+// MDocument returns the BSON embedded document value the Value represents. It
+// panics if the value is a BSON type other than embedded document.
+func (v Val) MDocument() MDoc {
+	if v.t == bsontype.EmbeddedDocument {
+		return v.asMDoc()
+	}
+	panic(ElementTypeError{"bson.Value.MDocument", v.t})
+}
+
+// MDocumentOK is the same as MDocument, except it returns a boolean
+// instead of panicking.
+func (v Val) MDocumentOK() (MDoc, bool) {
+	if v.t == bsontype.EmbeddedDocument {
+		return v.asMDoc(), true
+	}
+	return nil, false
+}
+
+// Array returns the BSON array value the Value represents. It panics if the
+// value is a BSON type other than array.
+func (v Val) Array() Arr {
+	if v.t == bsontype.Array {
+		return v.primitive.(Arr)
+	}
+	panic(ElementTypeError{"bson.Value.Array", v.t})
+}
+
+// ArrayOK is the same as Array, except it returns a boolean
+// instead of panicking.
+func (v Val) ArrayOK() (Arr, bool) {
+	if v.t == bsontype.Array {
+		return v.primitive.(Arr), true
+	}
+	return nil, false
+}
+
+// Binary returns the subtype and data of the BSON binary value the Value
+// represents. It panics if the value is a BSON type other than binary.
+func (v Val) Binary() (byte, []byte) {
+	subtype, data, ok := v.BinaryOK()
+	if !ok {
+		panic(ElementTypeError{"bson.Value.Binary", v.t})
+	}
+	return subtype, data
+}
+
+// BinaryOK is the same as Binary, except it returns a boolean instead of
+// panicking.
+func (v Val) BinaryOK() (byte, []byte, bool) {
+	if v.t != bsontype.Binary {
+		return 0x00, nil, false
+	}
+	b := v.primitive.(primitive.Binary)
+	return b.Subtype, b.Data, true
+}
+
+// Undefined panics if the value is a BSON type other than undefined;
+// otherwise it is a no-op.
+func (v Val) Undefined() {
+	if v.t != bsontype.Undefined {
+		panic(ElementTypeError{"bson.Value.Undefined", v.t})
+	}
+}
+
+// UndefinedOK is the same as Undefined, except it returns a boolean instead of
+// panicking.
+func (v Val) UndefinedOK() bool {
+	return v.t == bsontype.Undefined
+}
+
+// ObjectID returns the BSON ObjectID the Value represents. It panics if the
+// value is a BSON type other than ObjectID.
+func (v Val) ObjectID() primitive.ObjectID {
+	oid, ok := v.ObjectIDOK()
+	if !ok {
+		panic(ElementTypeError{"bson.Value.ObjectID", v.t})
+	}
+	return oid
+}
+
+// ObjectIDOK is the same as ObjectID, except it returns a boolean instead of
+// panicking.
+func (v Val) ObjectIDOK() (primitive.ObjectID, bool) {
+	if v.t != bsontype.ObjectID {
+		return primitive.ObjectID{}, false
+	}
+	var oid primitive.ObjectID
+	copy(oid[:], v.bootstrap[0:12])
+	return oid, true
+}
+
+// Boolean returns the BSON boolean the Value represents. It panics if the
+// value is a BSON type other than boolean.
+func (v Val) Boolean() bool {
+	b, ok := v.BooleanOK()
+	if !ok {
+		panic(ElementTypeError{"bson.Value.Boolean", v.t})
+	}
+	return b
+}
+
+// BooleanOK is the same as Boolean, except it returns a boolean instead of
+// panicking.
+func (v Val) BooleanOK() (bool, bool) {
+	if v.t != bsontype.Boolean {
+		return false, false
+	}
+	// The boolean is stored as a single byte; 0x01 means true.
+	return v.bootstrap[0] == 0x01, true
+}
+
+// DateTime returns the BSON datetime (milliseconds since the Unix epoch) the
+// Value represents. It panics if the value is a BSON type other than datetime.
+func (v Val) DateTime() int64 {
+	dt, ok := v.DateTimeOK()
+	if !ok {
+		panic(ElementTypeError{"bson.Value.DateTime", v.t})
+	}
+	return dt
+}
+
+// DateTimeOK is the same as DateTime, except it returns a boolean instead of
+// panicking.
+func (v Val) DateTimeOK() (int64, bool) {
+	if v.t != bsontype.DateTime {
+		return 0, false
+	}
+	return v.i64(), true
+}
+
+// Time returns the BSON datetime the Value represents as a time.Time. It
+// panics if the value is a BSON type other than datetime.
+func (v Val) Time() time.Time {
+	t, ok := v.TimeOK()
+	if !ok {
+		panic(ElementTypeError{"bson.Value.Time", v.t})
+	}
+	return t
+}
+
+// TimeOK is the same as Time, except it returns a boolean instead of
+// panicking.
+func (v Val) TimeOK() (time.Time, bool) {
+	if v.t != bsontype.DateTime {
+		return time.Time{}, false
+	}
+	// The stored value is milliseconds since the Unix epoch; split it into
+	// whole seconds and the remaining nanoseconds.
+	ms := v.i64()
+	return time.Unix(ms/1000, ms%1000*1000000), true
+}
+
+// Null panics if the value is a BSON type other than null (the zero value
+// also counts as null); otherwise it is a no-op.
+func (v Val) Null() {
+	if !v.NullOK() {
+		panic(ElementTypeError{"bson.Value.Null", v.t})
+	}
+}
+
+// NullOK is the same as Null, except it returns a boolean instead of
+// panicking.
+func (v Val) NullOK() bool {
+	return v.t == bsontype.Null || v.t == bsontype.Type(0)
+}
+
+// Regex returns the pattern and options of the BSON regex the Value
+// represents. It panics if the value is a BSON type other than regex.
+func (v Val) Regex() (pattern, options string) {
+	pattern, options, ok := v.RegexOK()
+	if !ok {
+		panic(ElementTypeError{"bson.Value.Regex", v.t})
+	}
+	return pattern, options
+}
+
+// RegexOK is the same as Regex, except that it returns a boolean
+// instead of panicking.
+func (v Val) RegexOK() (pattern, options string, ok bool) {
+	if v.t != bsontype.Regex {
+		return "", "", false
+	}
+	r := v.primitive.(primitive.Regex)
+	return r.Pattern, r.Options, true
+}
+
+// DBPointer returns the namespace and ObjectID of the BSON dbpointer the
+// Value represents. It panics if the value is a BSON type other than
+// dbpointer.
+func (v Val) DBPointer() (string, primitive.ObjectID) {
+	ns, ptr, ok := v.DBPointerOK()
+	if !ok {
+		panic(ElementTypeError{"bson.Value.DBPointer", v.t})
+	}
+	return ns, ptr
+}
+
+// DBPointerOK is the same as DBPointer, except that it returns a boolean
+// instead of panicking.
+func (v Val) DBPointerOK() (string, primitive.ObjectID, bool) {
+	if v.t != bsontype.DBPointer {
+		return "", primitive.ObjectID{}, false
+	}
+	p := v.primitive.(primitive.DBPointer)
+	return p.DB, p.Pointer, true
+}
+
+// JavaScript returns the BSON JavaScript code the Value represents. It panics
+// if the value is a BSON type other than JavaScript.
+func (v Val) JavaScript() string {
+	if v.t == bsontype.JavaScript {
+		return v.string()
+	}
+	panic(ElementTypeError{"bson.Value.JavaScript", v.t})
+}
+
+// JavaScriptOK is the same as JavaScript, except that it returns a boolean
+// instead of panicking.
+func (v Val) JavaScriptOK() (string, bool) {
+	if v.t == bsontype.JavaScript {
+		return v.string(), true
+	}
+	return "", false
+}
+
+// Symbol returns the BSON symbol the Value represents. It panics if the value
+// is a BSON type other than symbol.
+func (v Val) Symbol() string {
+	if v.t == bsontype.Symbol {
+		return v.string()
+	}
+	panic(ElementTypeError{"bson.Value.Symbol", v.t})
+}
+
+// SymbolOK is the same as Symbol, except that it returns a boolean
+// instead of panicking.
+func (v Val) SymbolOK() (string, bool) {
+	if v.t == bsontype.Symbol {
+		return v.string(), true
+	}
+	return "", false
+}
+
+// CodeWithScope returns the code and scope document of the BSON code-with-scope
+// value the Value represents. It panics if the value is a BSON type other than
+// code with scope.
+func (v Val) CodeWithScope() (string, Doc) {
+	code, scope, ok := v.CodeWithScopeOK()
+	if !ok {
+		panic(ElementTypeError{"bson.Value.CodeWithScope", v.t})
+	}
+	return code, scope
+}
+
+// CodeWithScopeOK is the same as CodeWithScope,
+// except that it returns a boolean instead of panicking.
+func (v Val) CodeWithScopeOK() (string, Doc, bool) {
+	if v.t != bsontype.CodeWithScope {
+		return "", nil, false
+	}
+	cws := v.primitive.(primitive.CodeWithScope)
+	return string(cws.Code), cws.Scope.(Doc), true
+}
+
+// Int32 returns the BSON int32 the Value represents. It panics if the value
+// is a BSON type other than int32.
+func (v Val) Int32() int32 {
+	i32, ok := v.Int32OK()
+	if !ok {
+		panic(ElementTypeError{"bson.Value.Int32", v.t})
+	}
+	return i32
+}
+
+// Int32OK is the same as Int32, except that it returns a boolean instead of
+// panicking.
+func (v Val) Int32OK() (int32, bool) {
+	if v.t != bsontype.Int32 {
+		return 0, false
+	}
+	// The int32 is stored little-endian in the first four bootstrap bytes.
+	return int32(binary.LittleEndian.Uint32(v.bootstrap[0:4])), true
+}
+
+// Timestamp returns the timestamp and increment of the BSON timestamp the
+// Value represents. It panics if the value is a BSON type other than
+// timestamp.
+func (v Val) Timestamp() (t, i uint32) {
+	if v.t != bsontype.Timestamp {
+		panic(ElementTypeError{"bson.Value.Timestamp", v.t})
+	}
+	// The increment occupies bootstrap[0:4] and the timestamp component
+	// bootstrap[4:8], both little-endian.
+	return binary.LittleEndian.Uint32(v.bootstrap[4:8]), binary.LittleEndian.Uint32(v.bootstrap[0:4])
+}
+
+// TimestampOK is the same as Timestamp, except that it returns a boolean
+// instead of panicking.
+func (v Val) TimestampOK() (t uint32, i uint32, ok bool) {
+	if v.t != bsontype.Timestamp {
+		return 0, 0, false
+	}
+	t = binary.LittleEndian.Uint32(v.bootstrap[4:8])
+	i = binary.LittleEndian.Uint32(v.bootstrap[0:4])
+	return t, i, true
+}
+
+// Int64 returns the BSON int64 the Value represents. It panics if the value
+// is a BSON type other than int64.
+func (v Val) Int64() int64 {
+	i64, ok := v.Int64OK()
+	if !ok {
+		panic(ElementTypeError{"bson.Value.Int64", v.t})
+	}
+	return i64
+}
+
+// Int64OK is the same as Int64, except that it returns a boolean instead of
+// panicking.
+func (v Val) Int64OK() (int64, bool) {
+	if v.t != bsontype.Int64 {
+		return 0, false
+	}
+	return v.i64(), true
+}
+
+// Decimal128 returns the BSON decimal128 value the Value represents. It
+// panics if the value is a BSON type other than decimal128.
+func (v Val) Decimal128() primitive.Decimal128 {
+	if v.t == bsontype.Decimal128 {
+		return v.primitive.(primitive.Decimal128)
+	}
+	panic(ElementTypeError{"bson.Value.Decimal128", v.t})
+}
+
+// Decimal128OK is the same as Decimal128, except that it returns a boolean
+// instead of panicking.
+func (v Val) Decimal128OK() (primitive.Decimal128, bool) {
+	if v.t == bsontype.Decimal128 {
+		return v.primitive.(primitive.Decimal128), true
+	}
+	return primitive.Decimal128{}, false
+}
+
+// MinKey panics if the value is a BSON type other than minkey; otherwise it
+// is a no-op.
+func (v Val) MinKey() {
+	if v.t != bsontype.MinKey {
+		panic(ElementTypeError{"bson.Value.MinKey", v.t})
+	}
+}
+
+// MinKeyOK is the same as MinKey, except it returns a boolean instead of
+// panicking.
+func (v Val) MinKeyOK() bool {
+	return v.t == bsontype.MinKey
+}
+
+// MaxKey panics if the value is a BSON type other than maxkey; otherwise it
+// is a no-op.
+func (v Val) MaxKey() {
+	if v.t != bsontype.MaxKey {
+		panic(ElementTypeError{"bson.Value.MaxKey", v.t})
+	}
+}
+
+// MaxKeyOK is the same as MaxKey, except it returns a boolean instead of
+// panicking.
+func (v Val) MaxKeyOK() bool {
+	return v.t == bsontype.MaxKey
+}
+
+// Equal compares v to v2 and returns true if they are equal. Unknown BSON types are
+// never equal. Two empty values are equal.
+func (v Val) Equal(v2 Val) bool {
+	if v.Type() != v2.Type() {
+		return false
+	}
+	if v.IsZero() && v2.IsZero() {
+		return true
+	}
+
+	switch v.Type() {
+	case bsontype.Double, bsontype.DateTime, bsontype.Timestamp, bsontype.Int64:
+		// All four of these types store their payload in the first 8
+		// bootstrap bytes, so a byte comparison suffices.
+		return bytes.Equal(v.bootstrap[0:8], v2.bootstrap[0:8])
+	case bsontype.String:
+		return v.string() == v2.string()
+	case bsontype.EmbeddedDocument:
+		// equalDocs handles the Doc/MDoc representation mismatch.
+		return v.equalDocs(v2)
+	case bsontype.Array:
+		return v.Array().Equal(v2.Array())
+	case bsontype.Binary:
+		return v.primitive.(primitive.Binary).Equal(v2.primitive.(primitive.Binary))
+	case bsontype.Undefined:
+		return true
+	case bsontype.ObjectID:
+		return bytes.Equal(v.bootstrap[0:12], v2.bootstrap[0:12])
+	case bsontype.Boolean:
+		return v.bootstrap[0] == v2.bootstrap[0]
+	case bsontype.Null:
+		return true
+	case bsontype.Regex:
+		return v.primitive.(primitive.Regex).Equal(v2.primitive.(primitive.Regex))
+	case bsontype.DBPointer:
+		return v.primitive.(primitive.DBPointer).Equal(v2.primitive.(primitive.DBPointer))
+	case bsontype.JavaScript:
+		return v.JavaScript() == v2.JavaScript()
+	case bsontype.Symbol:
+		return v.Symbol() == v2.Symbol()
+	case bsontype.CodeWithScope:
+		// Scopes are stored as interface{}; compare code strings directly and
+		// defer scope comparison to equalInterfaceDocs.
+		code1, scope1 := v.primitive.(primitive.CodeWithScope).Code, v.primitive.(primitive.CodeWithScope).Scope
+		code2, scope2 := v2.primitive.(primitive.CodeWithScope).Code, v2.primitive.(primitive.CodeWithScope).Scope
+		return code1 == code2 && v.equalInterfaceDocs(scope1, scope2)
+	case bsontype.Int32:
+		return v.Int32() == v2.Int32()
+	case bsontype.Decimal128:
+		h, l := v.Decimal128().GetBytes()
+		h2, l2 := v2.Decimal128().GetBytes()
+		return h == h2 && l == l2
+	case bsontype.MinKey:
+		return true
+	case bsontype.MaxKey:
+		return true
+	default:
+		return false
+	}
+}
+
+// equalDocs compares the embedded documents of v and v2. If either side is
+// stored as an MDoc both are compared as MDocs; otherwise both are compared
+// as ordered Docs.
+func (v Val) equalDocs(v2 Val) bool {
+	_, isM1 := v.primitive.(MDoc)
+	_, isM2 := v2.primitive.(MDoc)
+	if isM1 || isM2 {
+		return v.asMDoc().Equal(v2.asMDoc())
+	}
+	return v.asDoc().Equal(v2.asDoc())
+}
+
+// equalInterfaceDocs compares two scope documents stored as interface{}
+// values; each may be a Doc, an MDoc, or nil. Any other dynamic type is
+// never equal.
+func (Val) equalInterfaceDocs(i, i2 interface{}) bool {
+	switch d := i.(type) {
+	case MDoc:
+		d2, ok := i2.(IDoc)
+		if !ok {
+			return false
+		}
+		return d.Equal(d2)
+	case Doc:
+		d2, ok := i2.(IDoc)
+		if !ok {
+			return false
+		}
+		return d.Equal(d2)
+	case nil:
+		// A nil scope only equals another nil scope.
+		return i2 == nil
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/README.md b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/README.md
new file mode 100644
index 0000000..7588f4a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/README.md
@@ -0,0 +1,11 @@
+MongoDB Go Driver Low-Level Driver Library
+==========================================
+The packages within this library allow users to build applications using a low-level driver
+interface. Knowledge of the internals of a MongoDB driver is assumed, so this library contains
+advanced features. The aim of this library is to provide an easy to use, high performance
+implementation of a low-level driver.
+
+This Library's API is experimental and subject to change. Packages may be changed or removed without
+notice. These APIs are not stable and do not guarantee backward compatibility.
+
+**THIS LIBRARY IS EXPERIMENTAL AND SUBJECT TO CHANGE.**
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/abort_transaction.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/abort_transaction.go
new file mode 100644
index 0000000..4798b05
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/abort_transaction.go
@@ -0,0 +1,71 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// AbortTransaction handles the full cycle dispatch and execution of aborting a transaction
+// against the provided topology. If the first attempt fails with a retryable
+// command error, the command is retried exactly once.
+func AbortTransaction(
+	ctx context.Context,
+	cmd command.AbortTransaction,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+) (result.TransactionResult, error) {
+	res, err := abortTransaction(ctx, cmd, topo, selector, nil)
+	if cerr, ok := err.(command.Error); ok && err != nil {
+		// Retry if appropriate
+		if cerr.Retryable() {
+			res, err = abortTransaction(ctx, cmd, topo, selector, cerr)
+		}
+	}
+	return res, err
+}
+
+// abortTransaction selects a server, obtains a connection, and round-trips the
+// abortTransaction command. oldErr, when non-nil, is the error from a previous
+// attempt; it is returned in preference to newer server-selection or
+// connection errors so the caller sees the original failure.
+func abortTransaction(
+	ctx context.Context,
+	cmd command.AbortTransaction,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	oldErr error,
+) (result.TransactionResult, error) {
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		// If retrying server selection, return the original error if it fails
+		if oldErr != nil {
+			return result.TransactionResult{}, oldErr
+		}
+		return result.TransactionResult{}, err
+	}
+
+	desc := ss.Description()
+
+	if oldErr != nil && (!topo.SupportsSessions() || !description.SessionsSupported(desc.WireVersion)) {
+		// Assuming we are retrying (oldErr != nil),
+		// if server doesn't support retryable writes, return the original error
+		// Conditions for retry write support are the same as that of sessions
+		return result.TransactionResult{}, oldErr
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.TransactionResult{}, oldErr
+		}
+		return result.TransactionResult{}, err
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/aggregate.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/aggregate.go
new file mode 100644
index 0000000..fd6c949
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/aggregate.go
@@ -0,0 +1,207 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Aggregate handles the full cycle dispatch and execution of an aggregate command against the provided
+// topology. Options from opts are merged and translated into command elements;
+// the result is returned as a *BatchCursor. On command failure any implicitly
+// created session is closed before returning.
+func Aggregate(
+	ctx context.Context,
+	cmd command.Aggregate,
+	topo *topology.Topology,
+	readSelector, writeSelector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	registry *bsoncodec.Registry,
+	opts ...*options.AggregateOptions,
+) (*BatchCursor, error) {
+
+	// A pipeline containing $out writes documents, so it must be routed with
+	// the write selector; every other pipeline is a read.
+	var ss *topology.SelectedServer
+	var err error
+	if cmd.HasDollarOut() {
+		ss, err = topo.SelectServer(ctx, writeSelector)
+	} else {
+		ss, err = topo.SelectServer(ctx, readSelector)
+	}
+	if err != nil {
+		return nil, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	defer conn.Close()
+
+	rp, err := getReadPrefBasedOnTransaction(cmd.ReadPref, cmd.Session)
+	if err != nil {
+		return nil, err
+	}
+	cmd.ReadPref = rp
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	aggOpts := options.MergeAggregateOptions(opts...)
+
+	if aggOpts.AllowDiskUse != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"allowDiskUse", bsonx.Boolean(*aggOpts.AllowDiskUse)})
+	}
+	var batchSize int32
+	if aggOpts.BatchSize != nil {
+		// batchSize applies both to the initial command and to subsequent getMores.
+		elem := bsonx.Elem{"batchSize", bsonx.Int32(*aggOpts.BatchSize)}
+		cmd.Opts = append(cmd.Opts, elem)
+		cmd.CursorOpts = append(cmd.CursorOpts, elem)
+		batchSize = *aggOpts.BatchSize
+	}
+	if aggOpts.BypassDocumentValidation != nil && desc.WireVersion.Includes(4) {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"bypassDocumentValidation", bsonx.Boolean(*aggOpts.BypassDocumentValidation)})
+	}
+	if aggOpts.Collation != nil {
+		// Collation requires wire version 5 or newer.
+		if desc.WireVersion.Max < 5 {
+			return nil, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(aggOpts.Collation.ToDocument())})
+	}
+	if aggOpts.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*aggOpts.MaxTime / time.Millisecond))})
+	}
+	if aggOpts.MaxAwaitTime != nil {
+		// specified as maxTimeMS on getMore commands
+		cmd.CursorOpts = append(cmd.CursorOpts, bsonx.Elem{
+			"maxTimeMS", bsonx.Int64(int64(*aggOpts.MaxAwaitTime / time.Millisecond)),
+		})
+	}
+	if aggOpts.Comment != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"comment", bsonx.String(*aggOpts.Comment)})
+	}
+	if aggOpts.Hint != nil {
+		hintElem, err := interfaceToElement("hint", aggOpts.Hint, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.Opts = append(cmd.Opts, hintElem)
+	}
+
+	res, err := cmd.RoundTrip(ctx, desc, conn)
+	if err != nil {
+		// Don't leak an implicitly created session on failure.
+		closeImplicitSession(cmd.Session)
+		return nil, err
+	}
+
+	// Servers older than wire version 4 need the legacy cursor construction.
+	if desc.WireVersion.Max < 4 {
+		return buildLegacyCommandBatchCursor(res, batchSize, ss.Server)
+	}
+
+	return NewBatchCursor(bsoncore.Document(res), cmd.Session, cmd.Clock, ss.Server, cmd.CursorOpts...)
+}
+
+// buildLegacyCommandBatchCursor constructs a BatchCursor from the raw command
+// result returned by a server whose max wire version is below 4 (see the
+// check in Aggregate).
+func buildLegacyCommandBatchCursor(rdr bson.Raw, batchSize int32, server *topology.Server) (*BatchCursor, error) {
+	firstBatchDocs, ns, cursorID, err := getCursorValues(rdr)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewLegacyBatchCursor(ns, cursorID, firstBatchDocs, 0, batchSize, server)
+}
+
+// getCursorValues extracts the firstBatch documents, namespace, and cursor ID
+// from the "cursor" subdocument of a command result. It returns an error if
+// the cursor document or any of its fields has an unexpected BSON type, or if
+// the namespace fails validation.
+//
+// TODO(GODRIVER-617): Change the documents return value into []bsoncore.Document.
+func getCursorValues(result bson.Raw) ([]bson.Raw, command.Namespace, int64, error) {
+	cur, err := result.LookupErr("cursor")
+	if err != nil {
+		return nil, command.Namespace{}, 0, err
+	}
+	if cur.Type != bson.TypeEmbeddedDocument {
+		return nil, command.Namespace{}, 0, fmt.Errorf("cursor should be an embedded document but it is a BSON %s", cur.Type)
+	}
+
+	elems, err := cur.Document().Elements()
+	if err != nil {
+		return nil, command.Namespace{}, 0, err
+	}
+
+	var ok bool
+	var batch []bson.Raw
+	var namespace command.Namespace
+	var cursorID int64
+
+	for _, elem := range elems {
+		switch elem.Key() {
+		case "firstBatch":
+			arr, ok := elem.Value().ArrayOK()
+			if !ok {
+				return nil, command.Namespace{}, 0, fmt.Errorf("firstBatch should be an array but it is a BSON %s", elem.Value().Type)
+			}
+
+			vals, err := arr.Values()
+			if err != nil {
+				return nil, command.Namespace{}, 0, err
+			}
+
+			// Every element of the batch must itself be a document.
+			for _, val := range vals {
+				if val.Type != bsontype.EmbeddedDocument {
+					return nil, command.Namespace{}, 0, fmt.Errorf("element of cursor batch is not a document, but at %s", val.Type)
+				}
+				batch = append(batch, val.Value)
+			}
+		case "ns":
+			if elem.Value().Type != bson.TypeString {
+				return nil, command.Namespace{}, 0, fmt.Errorf("namespace should be a string but it is a BSON %s", elem.Value().Type)
+			}
+			namespace = command.ParseNamespace(elem.Value().StringValue())
+			err = namespace.Validate()
+			if err != nil {
+				return nil, command.Namespace{}, 0, err
+			}
+		case "id":
+			cursorID, ok = elem.Value().Int64OK()
+			if !ok {
+				return nil, command.Namespace{}, 0, fmt.Errorf("id should be an int64 but it is a BSON %s", elem.Value().Type)
+			}
+		}
+	}
+
+	return batch, namespace, cursorID, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/auth.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/auth.go
new file mode 100644
index 0000000..2ea1bfc
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/auth.go
@@ -0,0 +1,169 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// AuthenticatorFactory constructs an authenticator.
+type AuthenticatorFactory func(cred *Cred) (Authenticator, error)
+
+var authFactories = make(map[string]AuthenticatorFactory)
+
+func init() {
+	RegisterAuthenticatorFactory("", newDefaultAuthenticator)
+	RegisterAuthenticatorFactory(SCRAMSHA1, newScramSHA1Authenticator)
+	RegisterAuthenticatorFactory(SCRAMSHA256, newScramSHA256Authenticator)
+	RegisterAuthenticatorFactory(MONGODBCR, newMongoDBCRAuthenticator)
+	RegisterAuthenticatorFactory(PLAIN, newPlainAuthenticator)
+	RegisterAuthenticatorFactory(GSSAPI, newGSSAPIAuthenticator)
+	RegisterAuthenticatorFactory(MongoDBX509, newMongoDBX509Authenticator)
+}
+
+// CreateAuthenticator creates an authenticator.
+func CreateAuthenticator(name string, cred *Cred) (Authenticator, error) {
+	if f, ok := authFactories[name]; ok {
+		return f(cred)
+	}
+
+	return nil, newAuthError(fmt.Sprintf("unknown authenticator: %s", name), nil)
+}
+
+// RegisterAuthenticatorFactory registers the authenticator factory.
+func RegisterAuthenticatorFactory(name string, factory AuthenticatorFactory) {
+	authFactories[name] = factory
+}
+
+// // Opener returns a connection opener that will open and authenticate the connection.
+// func Opener(opener conn.Opener, authenticator Authenticator) conn.Opener {
+// 	return func(ctx context.Context, addr model.Addr, opts ...conn.Option) (conn.Connection, error) {
+// 		return NewConnection(ctx, authenticator, opener, addr, opts...)
+// 	}
+// }
+//
+// // NewConnection opens a connection and authenticates it.
+// func NewConnection(ctx context.Context, authenticator Authenticator, opener conn.Opener, addr model.Addr, opts ...conn.Option) (conn.Connection, error) {
+// 	conn, err := opener(ctx, addr, opts...)
+// 	if err != nil {
+// 		if conn != nil {
+// 			// Ignore any error that occurs since we're already returning a different one.
+// 			_ = conn.Close()
+// 		}
+// 		return nil, err
+// 	}
+//
+// 	err = authenticator.Auth(ctx, conn)
+// 	if err != nil {
+// 		// Ignore any error that occurs since we're already returning a different one.
+// 		_ = conn.Close()
+// 		return nil, err
+// 	}
+//
+// 	return conn, nil
+// }
+
+// Configurer creates a connection configurer for the given authenticator.
+//
+// TODO(skriptble): Fully implement this once this package is moved over to the new connection type.
+// func Configurer(configurer connection.Configurer, authenticator Authenticator) connection.Configurer {
+// 	return connection.ConfigurerFunc(func(ctx context.Context, conn connection.Connection) (connection.Connection, error) {
+// 		err := authenticator.Auth(ctx, conn)
+// 		if err != nil {
+// 			conn.Close()
+// 			return nil, err
+// 		}
+// 		if configurer == nil {
+// 			return conn, nil
+// 		}
+// 		return configurer.Configure(ctx, conn)
+// 	})
+// }
+
+// HandshakeOptions packages options that can be passed to the Handshaker()
+// function.  DBUser is optional but must be of the form <dbname.username>;
+// if non-empty, then the connection will do SASL mechanism negotiation.
+type HandshakeOptions struct {
+	AppName       string
+	Authenticator Authenticator
+	Compressors   []string
+	DBUser        string
+}
+
+// Handshaker creates a connection handshaker for the given authenticator.
+func Handshaker(h connection.Handshaker, options *HandshakeOptions) connection.Handshaker {
+	return connection.HandshakerFunc(func(ctx context.Context, addr address.Address, rw wiremessage.ReadWriter) (description.Server, error) {
+		desc, err := (&command.Handshake{
+			Client:             command.ClientDoc(options.AppName),
+			Compressors:        options.Compressors,
+			SaslSupportedMechs: options.DBUser,
+		}).Handshake(ctx, addr, rw)
+
+		if err != nil {
+			return description.Server{}, newAuthError("handshake failure", err)
+		}
+
+		err = options.Authenticator.Auth(ctx, desc, rw)
+		if err != nil {
+			return description.Server{}, newAuthError("auth error", err)
+		}
+		if h == nil {
+			return desc, nil
+		}
+		return h.Handshake(ctx, addr, rw)
+	})
+}
+
+// Authenticator handles authenticating a connection.
+type Authenticator interface {
+	// Auth authenticates the connection.
+	Auth(context.Context, description.Server, wiremessage.ReadWriter) error
+}
+
+func newAuthError(msg string, inner error) error {
+	return &Error{
+		message: msg,
+		inner:   inner,
+	}
+}
+
+func newError(err error, mech string) error {
+	return &Error{
+		message: fmt.Sprintf("unable to authenticate using mechanism \"%s\"", mech),
+		inner:   err,
+	}
+}
+
+// Error is an error that occurred during authentication.
+type Error struct {
+	message string
+	inner   error
+}
+
+func (e *Error) Error() string {
+	if e.inner == nil {
+		return e.message
+	}
+	return fmt.Sprintf("%s: %s", e.message, e.inner)
+}
+
+// Inner returns the wrapped error.
+func (e *Error) Inner() error {
+	return e.inner
+}
+
+// Message returns the message.
+func (e *Error) Message() string {
+	return e.message
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/cred.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/cred.go
new file mode 100644
index 0000000..7b2b8f1
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/cred.go
@@ -0,0 +1,16 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+// Cred is a user's credential.
+type Cred struct {
+	Source      string
+	Username    string
+	Password    string
+	PasswordSet bool
+	Props       map[string]string
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/default.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/default.go
new file mode 100644
index 0000000..52d07e9
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/default.go
@@ -0,0 +1,67 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+func newDefaultAuthenticator(cred *Cred) (Authenticator, error) {
+	return &DefaultAuthenticator{
+		Cred: cred,
+	}, nil
+}
+
+// DefaultAuthenticator uses SCRAM-SHA-1 or MONGODB-CR depending
+// on the server version.
+type DefaultAuthenticator struct {
+	Cred *Cred
+}
+
+// Auth authenticates the connection.
+func (a *DefaultAuthenticator) Auth(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter) error {
+	var actual Authenticator
+	var err error
+
+	switch chooseAuthMechanism(desc) {
+	case SCRAMSHA256:
+		actual, err = newScramSHA256Authenticator(a.Cred)
+	case SCRAMSHA1:
+		actual, err = newScramSHA1Authenticator(a.Cred)
+	default:
+		actual, err = newMongoDBCRAuthenticator(a.Cred)
+	}
+
+	if err != nil {
+		return newAuthError("error creating authenticator", err)
+	}
+
+	return actual.Auth(ctx, desc, rw)
+}
+
+// If a server provides a list of supported mechanisms, we choose
+// SCRAM-SHA-256 if it exists or else MUST use SCRAM-SHA-1.
+// Otherwise, we decide based on what is supported.
+func chooseAuthMechanism(desc description.Server) string {
+	if desc.SaslSupportedMechs != nil {
+		for _, v := range desc.SaslSupportedMechs {
+			if v == SCRAMSHA256 {
+				return v
+			}
+		}
+		return SCRAMSHA1
+	}
+
+	if err := description.ScramSHA1Supported(desc.WireVersion); err == nil {
+		return SCRAMSHA1
+	}
+
+	return MONGODBCR
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/doc.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/doc.go
new file mode 100644
index 0000000..9db65cf
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/doc.go
@@ -0,0 +1,23 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package auth is not for public use.
+//
+// The API for packages in the 'private' directory have no stability
+// guarantee.
+//
+// The packages within the 'private' directory would normally be put into an
+// 'internal' directory to prohibit their use outside the 'mongo' directory.
+// However, some MongoDB tools require very low-level access to the building
+// blocks of a driver, so we have placed them under 'private' to allow these
+// packages to be imported by projects that need them.
+//
+// These package APIs may be modified in backwards-incompatible ways at any
+// time.
+//
+// You are strongly discouraged from directly using any packages
+// under 'private'.
+package auth
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi.go
new file mode 100644
index 0000000..f324957
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi.go
@@ -0,0 +1,52 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build gssapi
+//+build windows linux darwin
+
+package auth
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// GSSAPI is the mechanism name for GSSAPI.
+const GSSAPI = "GSSAPI"
+
+func newGSSAPIAuthenticator(cred *Cred) (Authenticator, error) {
+	if cred.Source != "" && cred.Source != "$external" {
+		return nil, newAuthError("GSSAPI source must be empty or $external", nil)
+	}
+
+	return &GSSAPIAuthenticator{
+		Username:    cred.Username,
+		Password:    cred.Password,
+		PasswordSet: cred.PasswordSet,
+		Props:       cred.Props,
+	}, nil
+}
+
+// GSSAPIAuthenticator uses the GSSAPI algorithm over SASL to authenticate a connection.
+type GSSAPIAuthenticator struct {
+	Username    string
+	Password    string
+	PasswordSet bool
+	Props       map[string]string
+}
+
+// Auth authenticates the connection.
+func (a *GSSAPIAuthenticator) Auth(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter) error {
+	client, err := gssapi.New(desc.Addr.String(), a.Username, a.Password, a.PasswordSet, a.Props)
+
+	if err != nil {
+		return newAuthError("error creating gssapi", err)
+	}
+	return ConductSaslConversation(ctx, desc, rw, "$external", client)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi_not_enabled.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi_not_enabled.go
new file mode 100644
index 0000000..d88b764
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi_not_enabled.go
@@ -0,0 +1,16 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build !gssapi
+
+package auth
+
+// GSSAPI is the mechanism name for GSSAPI.
+const GSSAPI = "GSSAPI"
+
+func newGSSAPIAuthenticator(cred *Cred) (Authenticator, error) {
+	return nil, newAuthError("GSSAPI support not enabled during build (-tags gssapi)", nil)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi_not_supported.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi_not_supported.go
new file mode 100644
index 0000000..55caa28
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/gssapi_not_supported.go
@@ -0,0 +1,21 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build gssapi,!windows,!linux,!darwin
+
+package auth
+
+import (
+	"fmt"
+	"runtime"
+)
+
+// GSSAPI is the mechanism name for GSSAPI.
+const GSSAPI = "GSSAPI"
+
+func newGSSAPIAuthenticator(cred *Cred) (Authenticator, error) {
+	return nil, newAuthError(fmt.Sprintf("GSSAPI is not supported on %s", runtime.GOOS), nil)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss.go
new file mode 100644
index 0000000..366ab7b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss.go
@@ -0,0 +1,168 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build gssapi
+//+build linux darwin
+
+package gssapi
+
+/*
+#cgo linux CFLAGS: -DGOOS_linux
+#cgo linux LDFLAGS: -lgssapi_krb5 -lkrb5
+#cgo darwin CFLAGS: -DGOOS_darwin
+#cgo darwin LDFLAGS: -framework GSS
+#include "gss_wrapper.h"
+*/
+import "C"
+import (
+	"fmt"
+	"net"
+	"runtime"
+	"strings"
+	"unsafe"
+)
+
+// New creates a new SaslClient.
+func New(target, username, password string, passwordSet bool, props map[string]string) (*SaslClient, error) {
+	serviceName := "mongodb"
+
+	for key, value := range props {
+		switch strings.ToUpper(key) {
+		case "CANONICALIZE_HOST_NAME":
+			return nil, fmt.Errorf("CANONICALIZE_HOST_NAME is not supported when using gssapi on %s", runtime.GOOS)
+		case "SERVICE_REALM":
+			return nil, fmt.Errorf("SERVICE_REALM is not supported when using gssapi on %s", runtime.GOOS)
+		case "SERVICE_NAME":
+			serviceName = value
+		default:
+			return nil, fmt.Errorf("unknown mechanism property %s", key)
+		}
+	}
+
+	hostname, _, err := net.SplitHostPort(target)
+	if err != nil {
+		return nil, fmt.Errorf("invalid endpoint (%s) specified: %s", target, err)
+	}
+
+	servicePrincipalName := fmt.Sprintf("%s@%s", serviceName, hostname)
+
+	return &SaslClient{
+		servicePrincipalName: servicePrincipalName,
+		username:             username,
+		password:             password,
+		passwordSet:          passwordSet,
+	}, nil
+}
+
+type SaslClient struct {
+	servicePrincipalName string
+	username             string
+	password             string
+	passwordSet          bool
+
+	// state
+	state           C.gssapi_client_state
+	contextComplete bool
+	done            bool
+}
+
+func (sc *SaslClient) Close() {
+	C.gssapi_client_destroy(&sc.state)
+}
+
+func (sc *SaslClient) Start() (string, []byte, error) {
+	const mechName = "GSSAPI"
+
+	cservicePrincipalName := C.CString(sc.servicePrincipalName)
+	defer C.free(unsafe.Pointer(cservicePrincipalName))
+	var cusername *C.char
+	var cpassword *C.char
+	if sc.username != "" {
+		cusername = C.CString(sc.username)
+		defer C.free(unsafe.Pointer(cusername))
+		if sc.passwordSet {
+			cpassword = C.CString(sc.password)
+			defer C.free(unsafe.Pointer(cpassword))
+		}
+	}
+	status := C.gssapi_client_init(&sc.state, cservicePrincipalName, cusername, cpassword)
+
+	if status != C.GSSAPI_OK {
+		return mechName, nil, sc.getError("unable to initialize client")
+	}
+
+	return mechName, nil, nil
+}
+
+func (sc *SaslClient) Next(challenge []byte) ([]byte, error) {
+
+	var buf unsafe.Pointer
+	var bufLen C.size_t
+	var outBuf unsafe.Pointer
+	var outBufLen C.size_t
+
+	if sc.contextComplete {
+		if sc.username == "" {
+			var cusername *C.char
+			status := C.gssapi_client_username(&sc.state, &cusername)
+			if status != C.GSSAPI_OK {
+				return nil, sc.getError("unable to acquire username")
+			}
+			defer C.free(unsafe.Pointer(cusername))
+			sc.username = C.GoString((*C.char)(unsafe.Pointer(cusername)))
+		}
+
+		bytes := append([]byte{1, 0, 0, 0}, []byte(sc.username)...)
+		buf = unsafe.Pointer(&bytes[0])
+		bufLen = C.size_t(len(bytes))
+		status := C.gssapi_client_wrap_msg(&sc.state, buf, bufLen, &outBuf, &outBufLen)
+		if status != C.GSSAPI_OK {
+			return nil, sc.getError("unable to wrap authz")
+		}
+
+		sc.done = true
+	} else {
+		if len(challenge) > 0 {
+			buf = unsafe.Pointer(&challenge[0])
+			bufLen = C.size_t(len(challenge))
+		}
+
+		status := C.gssapi_client_negotiate(&sc.state, buf, bufLen, &outBuf, &outBufLen)
+		switch status {
+		case C.GSSAPI_OK:
+			sc.contextComplete = true
+		case C.GSSAPI_CONTINUE:
+		default:
+			return nil, sc.getError("unable to negotiate with server")
+		}
+	}
+
+	if outBuf != nil {
+		defer C.free(outBuf)
+	}
+
+	return C.GoBytes(outBuf, C.int(outBufLen)), nil
+}
+
+func (sc *SaslClient) Completed() bool {
+	return sc.done
+}
+
+func (sc *SaslClient) getError(prefix string) error {
+	var desc *C.char
+
+	status := C.gssapi_error_desc(sc.state.maj_stat, sc.state.min_stat, &desc)
+	if status != C.GSSAPI_OK {
+		if desc != nil {
+			C.free(unsafe.Pointer(desc))
+		}
+
+		return fmt.Errorf("%s: (%v, %v)", prefix, sc.state.maj_stat, sc.state.min_stat)
+	}
+	defer C.free(unsafe.Pointer(desc))
+
+	return fmt.Errorf("%s: %v(%v,%v)", prefix, C.GoString(desc), int32(sc.state.maj_stat), int32(sc.state.min_stat))
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c
new file mode 100644
index 0000000..0ca591f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.c
@@ -0,0 +1,248 @@
+//+build gssapi
+//+build linux darwin
+
+#include <string.h>
+#include <stdio.h>
+#include "gss_wrapper.h"
+
+OM_uint32 gssapi_canonicalize_name(
+    OM_uint32* minor_status, 
+    char *input_name, 
+    gss_OID input_name_type, 
+    gss_name_t *output_name
+)
+{
+    OM_uint32 major_status;
+    gss_name_t imported_name = GSS_C_NO_NAME;
+    gss_buffer_desc buffer = GSS_C_EMPTY_BUFFER;
+
+    buffer.value = input_name;
+    buffer.length = strlen(input_name);
+    major_status = gss_import_name(minor_status, &buffer, input_name_type, &imported_name);
+    if (GSS_ERROR(major_status)) {
+        return major_status;
+    }
+
+    major_status = gss_canonicalize_name(minor_status, imported_name, (gss_OID)gss_mech_krb5, output_name);
+    if (imported_name != GSS_C_NO_NAME) {
+        OM_uint32 ignored;
+        gss_release_name(&ignored, &imported_name);
+    }
+
+    return major_status;
+}
+
+int gssapi_error_desc(
+    OM_uint32 maj_stat, 
+    OM_uint32 min_stat, 
+    char **desc
+)
+{
+    OM_uint32 stat = maj_stat;
+    int stat_type = GSS_C_GSS_CODE;
+    if (min_stat != 0) {
+        stat = min_stat;
+        stat_type = GSS_C_MECH_CODE;
+    }
+
+    OM_uint32 local_maj_stat, local_min_stat;
+    OM_uint32 msg_ctx = 0;
+    gss_buffer_desc desc_buffer;
+    do
+    {
+        local_maj_stat = gss_display_status(
+            &local_min_stat,
+            stat,
+            stat_type,
+            GSS_C_NO_OID,
+            &msg_ctx,
+            &desc_buffer
+        );
+        if (GSS_ERROR(local_maj_stat)) {
+            return GSSAPI_ERROR;
+        }
+
+        if (*desc) {
+            free(*desc);
+        }
+
+        *desc = malloc(desc_buffer.length+1);
+        memcpy(*desc, desc_buffer.value, desc_buffer.length+1);
+
+        gss_release_buffer(&local_min_stat, &desc_buffer);
+    }
+    while(msg_ctx != 0);
+
+    return GSSAPI_OK;
+}
+
+int gssapi_client_init(
+    gssapi_client_state *client,
+    char* spn,
+    char* username,
+    char* password
+)
+{
+    client->cred = GSS_C_NO_CREDENTIAL;
+    client->ctx = GSS_C_NO_CONTEXT;
+
+    client->maj_stat = gssapi_canonicalize_name(&client->min_stat, spn, GSS_C_NT_HOSTBASED_SERVICE, &client->spn);
+    if (GSS_ERROR(client->maj_stat)) {
+        return GSSAPI_ERROR;
+    }
+
+    if (username) {
+        gss_name_t name;
+        client->maj_stat = gssapi_canonicalize_name(&client->min_stat, username, GSS_C_NT_USER_NAME, &name);
+        if (GSS_ERROR(client->maj_stat)) {
+            return GSSAPI_ERROR;
+        }
+
+        if (password) {
+            gss_buffer_desc password_buffer;
+            password_buffer.value = password;
+            password_buffer.length = strlen(password);
+            client->maj_stat = gss_acquire_cred_with_password(&client->min_stat, name, &password_buffer, GSS_C_INDEFINITE, GSS_C_NO_OID_SET, GSS_C_INITIATE, &client->cred, NULL, NULL);
+        } else {
+            client->maj_stat = gss_acquire_cred(&client->min_stat, name, GSS_C_INDEFINITE, GSS_C_NO_OID_SET, GSS_C_INITIATE, &client->cred, NULL, NULL);
+        }
+
+        if (GSS_ERROR(client->maj_stat)) {
+            return GSSAPI_ERROR;
+        }
+
+        OM_uint32 ignored;
+        gss_release_name(&ignored, &name);
+    }
+
+    return GSSAPI_OK;
+}
+
+int gssapi_client_username(
+    gssapi_client_state *client,
+    char** username
+)
+{
+    OM_uint32 ignored;
+    gss_name_t name = GSS_C_NO_NAME;
+
+    client->maj_stat = gss_inquire_context(&client->min_stat, client->ctx, &name, NULL, NULL, NULL, NULL, NULL, NULL);
+    if (GSS_ERROR(client->maj_stat)) {
+        return GSSAPI_ERROR;
+    }
+
+    gss_buffer_desc name_buffer;
+    client->maj_stat = gss_display_name(&client->min_stat, name, &name_buffer, NULL);
+    if (GSS_ERROR(client->maj_stat)) {
+        gss_release_name(&ignored, &name);
+        return GSSAPI_ERROR;
+    }
+
+	*username = malloc(name_buffer.length+1);
+	memcpy(*username, name_buffer.value, name_buffer.length+1);
+
+    gss_release_buffer(&ignored, &name_buffer);
+    gss_release_name(&ignored, &name);
+    return GSSAPI_OK;
+}
+
+int gssapi_client_negotiate(
+    gssapi_client_state *client,
+    void* input,
+    size_t input_length,
+    void** output,
+    size_t* output_length
+)
+{
+    gss_buffer_desc input_buffer = GSS_C_EMPTY_BUFFER;
+    gss_buffer_desc output_buffer = GSS_C_EMPTY_BUFFER;
+
+    if (input) {
+        input_buffer.value = input;
+        input_buffer.length = input_length;
+    }
+
+    client->maj_stat = gss_init_sec_context(
+        &client->min_stat,
+        client->cred,
+        &client->ctx,
+        client->spn,
+        GSS_C_NO_OID,
+        GSS_C_MUTUAL_FLAG | GSS_C_SEQUENCE_FLAG,
+        0,
+        GSS_C_NO_CHANNEL_BINDINGS,
+        &input_buffer,
+        NULL,
+        &output_buffer,
+        NULL,
+        NULL
+    );
+
+    if (output_buffer.length) {
+        *output = malloc(output_buffer.length);
+        *output_length = output_buffer.length;
+        memcpy(*output, output_buffer.value, output_buffer.length);
+
+        OM_uint32 ignored;
+        gss_release_buffer(&ignored, &output_buffer);
+    }
+
+    if (GSS_ERROR(client->maj_stat)) {
+        return GSSAPI_ERROR;
+    } else if (client->maj_stat == GSS_S_CONTINUE_NEEDED) {
+        return GSSAPI_CONTINUE;
+    }
+
+    return GSSAPI_OK;
+}
+
+int gssapi_client_wrap_msg(
+    gssapi_client_state *client,
+    void* input,
+    size_t input_length,
+    void** output,
+    size_t* output_length 
+)
+{
+    gss_buffer_desc input_buffer = GSS_C_EMPTY_BUFFER;
+    gss_buffer_desc output_buffer = GSS_C_EMPTY_BUFFER;
+
+    input_buffer.value = input;
+    input_buffer.length = input_length;
+
+    client->maj_stat = gss_wrap(&client->min_stat, client->ctx, 0, GSS_C_QOP_DEFAULT, &input_buffer, NULL, &output_buffer);
+
+    if (output_buffer.length) {
+        *output = malloc(output_buffer.length);
+        *output_length = output_buffer.length;
+        memcpy(*output, output_buffer.value, output_buffer.length);
+
+        gss_release_buffer(&client->min_stat, &output_buffer);
+    }
+
+    if (GSS_ERROR(client->maj_stat)) {
+        return GSSAPI_ERROR;
+    }
+
+    return GSSAPI_OK;
+}
+
+int gssapi_client_destroy(
+    gssapi_client_state *client
+)
+{
+    OM_uint32 ignored;
+    if (client->ctx != GSS_C_NO_CONTEXT) {
+        gss_delete_sec_context(&ignored, &client->ctx, GSS_C_NO_BUFFER);
+    }
+
+    if (client->spn != GSS_C_NO_NAME) {
+        gss_release_name(&ignored, &client->spn);
+    }
+
+    if (client->cred != GSS_C_NO_CREDENTIAL) {
+        gss_release_cred(&ignored, &client->cred);
+    }
+
+    return GSSAPI_OK;
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h
new file mode 100644
index 0000000..ca7b907
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/gss_wrapper.h
@@ -0,0 +1,66 @@
+//+build gssapi
+//+build linux darwin
+#ifndef GSS_WRAPPER_H
+#define GSS_WRAPPER_H
+
+#include <stdlib.h>
+#ifdef GOOS_linux
+#include <gssapi/gssapi.h>
+#include <gssapi/gssapi_krb5.h>
+#endif
+#ifdef GOOS_darwin
+#include <GSS/GSS.h>
+#endif
+
+#define GSSAPI_OK 0
+#define GSSAPI_CONTINUE 1
+#define GSSAPI_ERROR 2
+
+typedef struct {
+    gss_name_t spn;
+    gss_cred_id_t cred;
+    gss_ctx_id_t ctx;
+
+    OM_uint32 maj_stat;
+    OM_uint32 min_stat;
+} gssapi_client_state;
+
+int gssapi_error_desc(
+    OM_uint32 maj_stat, 
+    OM_uint32 min_stat, 
+    char **desc
+);
+
+int gssapi_client_init(
+    gssapi_client_state *client,
+    char* spn,
+    char* username,
+    char* password
+);
+
+int gssapi_client_username(
+    gssapi_client_state *client,
+    char** username
+);
+
+int gssapi_client_negotiate(
+    gssapi_client_state *client,
+    void* input,
+    size_t input_length,
+    void** output,
+    size_t* output_length
+);
+
+int gssapi_client_wrap_msg(
+    gssapi_client_state *client,
+    void* input,
+    size_t input_length,
+    void** output,
+    size_t* output_length 
+);
+
+int gssapi_client_destroy(
+    gssapi_client_state *client
+);
+
+#endif
\ No newline at end of file
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi.go
new file mode 100644
index 0000000..dd547f3
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi.go
@@ -0,0 +1,345 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build gssapi,windows
+
+package gssapi
+
+// #include "sspi_wrapper.h"
+import "C"
+import (
+	"fmt"
+	"net"
+	"strconv"
+	"strings"
+	"sync"
+	"unsafe"
+)
+
+// New creates a new SaslClient configured for SSPI (Windows Kerberos)
+// authentication against the endpoint named by target ("host:port").
+//
+// Recognized props keys (case-insensitive): SERVICE_NAME (default
+// "mongodb"), SERVICE_REALM, and CANONICALIZE_HOST_NAME (boolean; when
+// true the hostname is resolved to its canonical name via reverse DNS).
+// SSPI itself is initialized lazily, exactly once per process.
+func New(target, username, password string, passwordSet bool, props map[string]string) (*SaslClient, error) {
+	initOnce.Do(initSSPI)
+	if initError != nil {
+		return nil, initError
+	}
+
+	var err error
+	serviceName := "mongodb"
+	serviceRealm := ""
+	canonicalizeHostName := false
+
+	for key, value := range props {
+		switch strings.ToUpper(key) {
+		case "CANONICALIZE_HOST_NAME":
+			canonicalizeHostName, err = strconv.ParseBool(value)
+			if err != nil {
+				return nil, fmt.Errorf("%s must be a boolean (true, false, 0, 1) but got '%s'", key, value)
+			}
+
+		case "SERVICE_REALM":
+			serviceRealm = value
+		case "SERVICE_NAME":
+			serviceName = value
+		}
+	}
+
+	hostname, _, err := net.SplitHostPort(target)
+	if err != nil {
+		return nil, fmt.Errorf("invalid endpoint (%s) specified: %s", target, err)
+	}
+	if canonicalizeHostName {
+		names, err := net.LookupAddr(hostname)
+		if err != nil || len(names) == 0 {
+			return nil, fmt.Errorf("unable to canonicalize hostname: %s", err)
+		}
+		hostname = names[0]
+		// LookupAddr returns fully-qualified names with a trailing dot;
+		// strip it before building the SPN.
+		if hostname[len(hostname)-1] == '.' {
+			hostname = hostname[:len(hostname)-1]
+		}
+	}
+
+	// SPN format: service/host[@REALM], e.g. "mongodb/db1.example.com@EXAMPLE.COM".
+	servicePrincipalName := fmt.Sprintf("%s/%s", serviceName, hostname)
+	if serviceRealm != "" {
+		servicePrincipalName += "@" + serviceRealm
+	}
+
+	return &SaslClient{
+		servicePrincipalName: servicePrincipalName,
+		username:             username,
+		password:             password,
+		passwordSet:          passwordSet,
+	}, nil
+}
+
+// SaslClient is the Windows (SSPI) implementation of the GSSAPI SASL
+// client. It is not safe for concurrent use; one instance drives one
+// authentication conversation.
+type SaslClient struct {
+	servicePrincipalName string
+	username             string
+	password             string
+	passwordSet          bool
+
+	// state
+	state           C.sspi_client_state // C-side SSPI handles and status
+	contextComplete bool                // security context fully established
+	done            bool                // final wrapped message produced
+}
+
+// Close releases the C-side SSPI context and credential handles.
+func (sc *SaslClient) Close() {
+	C.sspi_client_destroy(&sc.state)
+}
+
+// Start begins the SASL conversation by acquiring SSPI credentials for the
+// configured username/password (or the process's default credentials when
+// no username is set). It returns the mechanism name "GSSAPI" and no
+// initial payload; token exchange happens in Next.
+//
+// Fix vs. original: corrected the misspelled error message
+// ("intitialize" -> "initialize").
+func (sc *SaslClient) Start() (string, []byte, error) {
+	const mechName = "GSSAPI"
+
+	var cusername *C.char
+	var cpassword *C.char
+	if sc.username != "" {
+		cusername = C.CString(sc.username)
+		defer C.free(unsafe.Pointer(cusername))
+		// The password is only passed through when explicitly set,
+		// allowing username-only (current logon) credential acquisition.
+		if sc.passwordSet {
+			cpassword = C.CString(sc.password)
+			defer C.free(unsafe.Pointer(cpassword))
+		}
+	}
+	status := C.sspi_client_init(&sc.state, cusername, cpassword)
+
+	if status != C.SSPI_OK {
+		return mechName, nil, sc.getError("unable to initialize client")
+	}
+
+	return mechName, nil, nil
+}
+
+// Next consumes one server challenge and produces the next client payload.
+//
+// While the security context is incomplete it forwards the challenge to
+// InitializeSecurityContext (via the C wrapper) and returns the resulting
+// token. Once the context is complete it performs the final SASL GSSAPI
+// step: wrapping a 4-byte security-layer/length header ({1,0,0,0} = no
+// security layer) followed by the authorization identity, then marks the
+// conversation done.
+func (sc *SaslClient) Next(challenge []byte) ([]byte, error) {
+
+	var outBuf C.PVOID
+	var outBufLen C.ULONG
+
+	if sc.contextComplete {
+		// Resolve the principal name from the credential if the caller
+		// did not supply one; the C side allocates it with malloc.
+		if sc.username == "" {
+			var cusername *C.char
+			status := C.sspi_client_username(&sc.state, &cusername)
+			if status != C.SSPI_OK {
+				return nil, sc.getError("unable to acquire username")
+			}
+			defer C.free(unsafe.Pointer(cusername))
+			sc.username = C.GoString((*C.char)(unsafe.Pointer(cusername)))
+		}
+
+		bytes := append([]byte{1, 0, 0, 0}, []byte(sc.username)...)
+		buf := (C.PVOID)(unsafe.Pointer(&bytes[0]))
+		bufLen := C.ULONG(len(bytes))
+		status := C.sspi_client_wrap_msg(&sc.state, buf, bufLen, &outBuf, &outBufLen)
+		if status != C.SSPI_OK {
+			return nil, sc.getError("unable to wrap authz")
+		}
+
+		sc.done = true
+	} else {
+		var buf C.PVOID
+		var bufLen C.ULONG
+		if len(challenge) > 0 {
+			buf = (C.PVOID)(unsafe.Pointer(&challenge[0]))
+			bufLen = C.ULONG(len(challenge))
+		}
+		cservicePrincipalName := C.CString(sc.servicePrincipalName)
+		defer C.free(unsafe.Pointer(cservicePrincipalName))
+
+		status := C.sspi_client_negotiate(&sc.state, cservicePrincipalName, buf, bufLen, &outBuf, &outBufLen)
+		switch status {
+		case C.SSPI_OK:
+			sc.contextComplete = true
+		case C.SSPI_CONTINUE:
+			// More round trips needed; return this token to the server.
+		default:
+			return nil, sc.getError("unable to negotiate with server")
+		}
+	}
+
+	// The C side malloc'd the output token; free it after copying into
+	// Go-managed memory. GoBytes on a nil pointer with length 0 is safe.
+	if outBuf != C.PVOID(nil) {
+		defer C.free(unsafe.Pointer(outBuf))
+	}
+
+	return C.GoBytes(unsafe.Pointer(outBuf), C.int(outBufLen)), nil
+}
+
+// Completed reports whether the client has produced its final payload.
+func (sc *SaslClient) Completed() bool {
+	return sc.done
+}
+
+// getError converts the last C-side SECURITY_STATUS into a Go error,
+// prefixed with a description of the failing operation.
+func (sc *SaslClient) getError(prefix string) error {
+	return getError(prefix, sc.state.status)
+}
+
+// initOnce/initError guard one-time process-wide SSPI initialization.
+var initOnce sync.Once
+var initError error
+
+// initSSPI loads secur32.dll and resolves the SSPI function table; any
+// failure is recorded in initError and surfaced by New.
+func initSSPI() {
+	rc := C.sspi_init()
+	if rc != 0 {
+		initError = fmt.Errorf("error initializing sspi: %v", rc)
+	}
+}
+
+// getError maps a Windows SECURITY_STATUS code to a human-readable error,
+// prefixed with the failing operation. Unrecognized codes fall through to
+// a generic message carrying the raw status value in hex.
+func getError(prefix string, status C.SECURITY_STATUS) error {
+	var s string
+	switch status {
+	case C.SEC_E_ALGORITHM_MISMATCH:
+		s = "The client and server cannot communicate because they do not possess a common algorithm."
+	case C.SEC_E_BAD_BINDINGS:
+		s = "The SSPI channel bindings supplied by the client are incorrect."
+	case C.SEC_E_BAD_PKGID:
+		s = "The requested package identifier does not exist."
+	case C.SEC_E_BUFFER_TOO_SMALL:
+		s = "The buffers supplied to the function are not large enough to contain the information."
+	case C.SEC_E_CANNOT_INSTALL:
+		s = "The security package cannot initialize successfully and should not be installed."
+	case C.SEC_E_CANNOT_PACK:
+		s = "The package is unable to pack the context."
+	case C.SEC_E_CERT_EXPIRED:
+		s = "The received certificate has expired."
+	case C.SEC_E_CERT_UNKNOWN:
+		s = "An unknown error occurred while processing the certificate."
+	case C.SEC_E_CERT_WRONG_USAGE:
+		s = "The certificate is not valid for the requested usage."
+	case C.SEC_E_CONTEXT_EXPIRED:
+		s = "The application is referencing a context that has already been closed. A properly written application should not receive this error."
+	case C.SEC_E_CROSSREALM_DELEGATION_FAILURE:
+		s = "The server attempted to make a Kerberos-constrained delegation request for a target outside the server's realm."
+	case C.SEC_E_CRYPTO_SYSTEM_INVALID:
+		s = "The cryptographic system or checksum function is not valid because a required function is unavailable."
+	case C.SEC_E_DECRYPT_FAILURE:
+		s = "The specified data could not be decrypted."
+	case C.SEC_E_DELEGATION_REQUIRED:
+		s = "The requested operation cannot be completed. The computer must be trusted for delegation"
+	case C.SEC_E_DOWNGRADE_DETECTED:
+		s = "The system detected a possible attempt to compromise security. Verify that the server that authenticated you can be contacted."
+	case C.SEC_E_ENCRYPT_FAILURE:
+		s = "The specified data could not be encrypted."
+	case C.SEC_E_ILLEGAL_MESSAGE:
+		s = "The message received was unexpected or badly formatted."
+	case C.SEC_E_INCOMPLETE_CREDENTIALS:
+		s = "The credentials supplied were not complete and could not be verified. The context could not be initialized."
+	case C.SEC_E_INCOMPLETE_MESSAGE:
+		s = "The message supplied was incomplete. The signature was not verified."
+	case C.SEC_E_INSUFFICIENT_MEMORY:
+		s = "Not enough memory is available to complete the request."
+	case C.SEC_E_INTERNAL_ERROR:
+		s = "An error occurred that did not map to an SSPI error code."
+	case C.SEC_E_INVALID_HANDLE:
+		s = "The handle passed to the function is not valid."
+	case C.SEC_E_INVALID_TOKEN:
+		s = "The token passed to the function is not valid."
+	case C.SEC_E_ISSUING_CA_UNTRUSTED:
+		s = "An untrusted certification authority (CA) was detected while processing the smart card certificate used for authentication."
+	case C.SEC_E_ISSUING_CA_UNTRUSTED_KDC:
+		s = "An untrusted CA was detected while processing the domain controller certificate used for authentication. The system event log contains additional information."
+	case C.SEC_E_KDC_CERT_EXPIRED:
+		s = "The domain controller certificate used for smart card logon has expired."
+	case C.SEC_E_KDC_CERT_REVOKED:
+		s = "The domain controller certificate used for smart card logon has been revoked."
+	case C.SEC_E_KDC_INVALID_REQUEST:
+		s = "A request that is not valid was sent to the KDC."
+	case C.SEC_E_KDC_UNABLE_TO_REFER:
+		s = "The KDC was unable to generate a referral for the service requested."
+	case C.SEC_E_KDC_UNKNOWN_ETYPE:
+		s = "The requested encryption type is not supported by the KDC."
+	case C.SEC_E_LOGON_DENIED:
+		s = "The logon has been denied"
+	case C.SEC_E_MAX_REFERRALS_EXCEEDED:
+		s = "The number of maximum ticket referrals has been exceeded."
+	case C.SEC_E_MESSAGE_ALTERED:
+		s = "The message supplied for verification has been altered."
+	case C.SEC_E_MULTIPLE_ACCOUNTS:
+		s = "The received certificate was mapped to multiple accounts."
+	case C.SEC_E_MUST_BE_KDC:
+		s = "The local computer must be a Kerberos domain controller (KDC)"
+	case C.SEC_E_NO_AUTHENTICATING_AUTHORITY:
+		s = "No authority could be contacted for authentication."
+	case C.SEC_E_NO_CREDENTIALS:
+		s = "No credentials are available."
+	case C.SEC_E_NO_IMPERSONATION:
+		s = "No impersonation is allowed for this context."
+	case C.SEC_E_NO_IP_ADDRESSES:
+		s = "Unable to accomplish the requested task because the local computer does not have any IP addresses."
+	case C.SEC_E_NO_KERB_KEY:
+		s = "No Kerberos key was found."
+	case C.SEC_E_NO_PA_DATA:
+		s = "Policy administrator (PA) data is needed to determine the encryption type"
+	case C.SEC_E_NO_S4U_PROT_SUPPORT:
+		s = "The Kerberos subsystem encountered an error. A service for user protocol request was made against a domain controller which does not support service for a user."
+	case C.SEC_E_NO_TGT_REPLY:
+		s = "The client is trying to negotiate a context and the server requires a user-to-user connection"
+	case C.SEC_E_NOT_OWNER:
+		s = "The caller of the function does not own the credentials."
+	case C.SEC_E_OK:
+		s = "The operation completed successfully."
+	case C.SEC_E_OUT_OF_SEQUENCE:
+		s = "The message supplied for verification is out of sequence."
+	case C.SEC_E_PKINIT_CLIENT_FAILURE:
+		s = "The smart card certificate used for authentication is not trusted."
+	case C.SEC_E_PKINIT_NAME_MISMATCH:
+		s = "The client certificate does not contain a valid UPN or does not match the client name in the logon request."
+	case C.SEC_E_QOP_NOT_SUPPORTED:
+		s = "The quality of protection attribute is not supported by this package."
+	case C.SEC_E_REVOCATION_OFFLINE_C:
+		s = "The revocation status of the smart card certificate used for authentication could not be determined."
+	case C.SEC_E_REVOCATION_OFFLINE_KDC:
+		s = "The revocation status of the domain controller certificate used for smart card authentication could not be determined. The system event log contains additional information."
+	case C.SEC_E_SECPKG_NOT_FOUND:
+		s = "The security package was not recognized."
+	case C.SEC_E_SECURITY_QOS_FAILED:
+		s = "The security context could not be established due to a failure in the requested quality of service (for example"
+	case C.SEC_E_SHUTDOWN_IN_PROGRESS:
+		s = "A system shutdown is in progress."
+	case C.SEC_E_SMARTCARD_CERT_EXPIRED:
+		s = "The smart card certificate used for authentication has expired."
+	case C.SEC_E_SMARTCARD_CERT_REVOKED:
+		s = "The smart card certificate used for authentication has been revoked. Additional information may exist in the event log."
+	case C.SEC_E_SMARTCARD_LOGON_REQUIRED:
+		s = "Smart card logon is required and was not used."
+	case C.SEC_E_STRONG_CRYPTO_NOT_SUPPORTED:
+		s = "The other end of the security negotiation requires strong cryptography"
+	case C.SEC_E_TARGET_UNKNOWN:
+		s = "The target was not recognized."
+	case C.SEC_E_TIME_SKEW:
+		s = "The clocks on the client and server computers do not match."
+	case C.SEC_E_TOO_MANY_PRINCIPALS:
+		s = "The KDC reply contained more than one principal name."
+	case C.SEC_E_UNFINISHED_CONTEXT_DELETED:
+		s = "A security context was deleted before the context was completed. This is considered a logon failure."
+	case C.SEC_E_UNKNOWN_CREDENTIALS:
+		s = "The credentials provided were not recognized."
+	case C.SEC_E_UNSUPPORTED_FUNCTION:
+		s = "The requested function is not supported."
+	case C.SEC_E_UNSUPPORTED_PREAUTH:
+		s = "An unsupported preauthentication mechanism was presented to the Kerberos package."
+	case C.SEC_E_UNTRUSTED_ROOT:
+		s = "The certificate chain was issued by an authority that is not trusted."
+	case C.SEC_E_WRONG_CREDENTIAL_HANDLE:
+		s = "The supplied credential handle does not match the credential associated with the security context."
+	case C.SEC_E_WRONG_PRINCIPAL:
+		s = "The target principal name is incorrect."
+	case C.SEC_I_COMPLETE_AND_CONTINUE:
+		s = "The function completed successfully"
+	case C.SEC_I_COMPLETE_NEEDED:
+		s = "The function completed successfully"
+	case C.SEC_I_CONTEXT_EXPIRED:
+		s = "The message sender has finished using the connection and has initiated a shutdown. For information about initiating or recognizing a shutdown"
+	case C.SEC_I_CONTINUE_NEEDED:
+		s = "The function completed successfully"
+	case C.SEC_I_INCOMPLETE_CREDENTIALS:
+		s = "The credentials supplied were not complete and could not be verified. Additional information can be returned from the context."
+	case C.SEC_I_LOCAL_LOGON:
+		s = "The logon was completed"
+	case C.SEC_I_NO_LSA_CONTEXT:
+		s = "There is no LSA mode context associated with this context."
+	case C.SEC_I_RENEGOTIATE:
+		s = "The context data must be renegotiated with the peer."
+	default:
+		return fmt.Errorf("%s: 0x%x", prefix, uint32(status))
+	}
+
+	return fmt.Errorf("%s: %s(0x%x)", prefix, s, uint32(status))
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c
new file mode 100644
index 0000000..9d218bd
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.c
@@ -0,0 +1,218 @@
+//+build gssapi,windows
+
+#include "sspi_wrapper.h"
+
+/* Process-wide SSPI state: the loaded secur32.dll and its dispatch table. */
+static HINSTANCE sspi_secur32_dll = NULL;
+static PSecurityFunctionTable sspi_functions = NULL;
+static const LPSTR SSPI_PACKAGE_NAME = "kerberos";
+
+/*
+ * sspi_init loads secur32.dll and resolves the SSPI function table.
+ * Must be called once before any other sspi_* function (the Go side
+ * guards this with sync.Once). Returns SSPI_OK on success, the
+ * GetLastError() value if the DLL cannot be loaded, or -1/-2 if the
+ * security interface cannot be resolved.
+ */
+int sspi_init(
+)
+{
+	sspi_secur32_dll = LoadLibrary("secur32.dll");
+	if (!sspi_secur32_dll) {
+		return GetLastError();
+	}
+
+    INIT_SECURITY_INTERFACE init_security_interface = (INIT_SECURITY_INTERFACE)GetProcAddress(sspi_secur32_dll, SECURITY_ENTRYPOINT);
+    if (!init_security_interface) {
+        return -1;
+    }
+
+    sspi_functions = (*init_security_interface)();
+    if (!sspi_functions) {
+        return -2;
+    }
+
+	return SSPI_OK;
+}
+
+/*
+ * sspi_client_init acquires an outbound Kerberos credential handle into
+ * client->cred. Three cases:
+ *   - username and password given: explicit SEC_WINNT_AUTH_IDENTITY;
+ *   - username only: credentials for that principal from the current logon;
+ *   - neither: the default credentials of the calling security context.
+ * On failure client->status holds the SECURITY_STATUS for error reporting.
+ */
+int sspi_client_init(
+    sspi_client_state *client,
+    char* username,
+    char* password
+)
+{
+	TimeStamp timestamp;
+
+    if (username) {
+        if (password) {
+            SEC_WINNT_AUTH_IDENTITY auth_identity;
+            
+        #ifdef _UNICODE
+            auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_UNICODE;
+        #else
+            auth_identity.Flags = SEC_WINNT_AUTH_IDENTITY_ANSI;
+        #endif
+            auth_identity.User = (LPSTR) username;
+            auth_identity.UserLength = strlen(username);
+            auth_identity.Password = (LPSTR) password;
+            auth_identity.PasswordLength = strlen(password);
+            auth_identity.Domain = NULL;
+            auth_identity.DomainLength = 0;
+            client->status = sspi_functions->AcquireCredentialsHandle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, &auth_identity, NULL, NULL, &client->cred, &timestamp);
+        } else {
+            client->status = sspi_functions->AcquireCredentialsHandle(username, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, NULL, NULL, NULL, &client->cred, &timestamp);
+        }
+    } else {
+        client->status = sspi_functions->AcquireCredentialsHandle(NULL, SSPI_PACKAGE_NAME, SECPKG_CRED_OUTBOUND, NULL, NULL, NULL, NULL, &client->cred, &timestamp);
+    }
+
+    if (client->status != SEC_E_OK) {
+        return SSPI_ERROR;
+    }
+
+    return SSPI_OK;
+}
+
+/*
+ * sspi_client_username queries the credential for its principal name and
+ * returns a malloc'd copy in *username (caller frees, via C.free in Go).
+ * NOTE(review): the malloc result is not checked; an OOM here would crash
+ * in memcpy — consider guarding if this is ever hardened.
+ */
+int sspi_client_username(
+    sspi_client_state *client,
+    char** username
+)
+{
+    SecPkgCredentials_Names names;
+	client->status = sspi_functions->QueryCredentialsAttributes(&client->cred, SECPKG_CRED_ATTR_NAMES, &names);
+
+	if (client->status != SEC_E_OK) {
+		return SSPI_ERROR;
+	}
+
+	/* Copy out including the NUL terminator, then release the SSPI buffer. */
+	int len = strlen(names.sUserName) + 1;
+	*username = malloc(len);
+	memcpy(*username, names.sUserName, len);
+
+	sspi_functions->FreeContextBuffer(names.sUserName);
+
+    return SSPI_OK;
+}
+
+/*
+ * sspi_client_negotiate performs one InitializeSecurityContext step toward
+ * the service named by spn. On the first call (client->has_ctx == 0) no
+ * input token is supplied; on subsequent calls the server's challenge is
+ * passed in input/input_length. The output token to send to the server is
+ * returned in a malloc'd *output (caller frees). Returns SSPI_CONTINUE
+ * while more round trips are needed, SSPI_OK when the context is complete,
+ * SSPI_ERROR otherwise (details in client->status).
+ */
+int sspi_client_negotiate(
+    sspi_client_state *client,
+    char* spn,
+    PVOID input,
+    ULONG input_length,
+    PVOID* output,
+    ULONG* output_length
+)
+{
+    SecBufferDesc inbuf;
+	SecBuffer in_bufs[1];
+	SecBufferDesc outbuf;
+	SecBuffer out_bufs[1];
+
+	/* Only wire up the input token once a context exists (i.e. this is a
+	 * continuation step responding to a server challenge). */
+	if (client->has_ctx > 0) {
+		inbuf.ulVersion = SECBUFFER_VERSION;
+		inbuf.cBuffers = 1;
+		inbuf.pBuffers = in_bufs;
+		in_bufs[0].pvBuffer = input;
+		in_bufs[0].cbBuffer = input_length;
+		in_bufs[0].BufferType = SECBUFFER_TOKEN;
+	}
+
+	/* ISC_REQ_ALLOCATE_MEMORY below makes SSPI allocate the output token;
+	 * it is released with FreeContextBuffer after we copy it out. */
+	outbuf.ulVersion = SECBUFFER_VERSION;
+	outbuf.cBuffers = 1;
+	outbuf.pBuffers = out_bufs;
+	out_bufs[0].pvBuffer = NULL;
+	out_bufs[0].cbBuffer = 0;
+	out_bufs[0].BufferType = SECBUFFER_TOKEN;
+
+	ULONG context_attr = 0;
+
+	client->status = sspi_functions->InitializeSecurityContext(
+        &client->cred,
+        client->has_ctx > 0 ? &client->ctx : NULL,
+        (LPSTR) spn,
+        ISC_REQ_ALLOCATE_MEMORY | ISC_REQ_MUTUAL_AUTH,
+        0,
+        SECURITY_NETWORK_DREP,
+        client->has_ctx > 0 ? &inbuf : NULL,
+        0,
+        &client->ctx,
+        &outbuf,
+        &context_attr,
+        NULL);
+
+    if (client->status != SEC_E_OK && client->status != SEC_I_CONTINUE_NEEDED) {
+        return SSPI_ERROR;
+    }
+
+    client->has_ctx = 1;
+
+	/* NOTE(review): malloc result is unchecked here, as elsewhere in this
+	 * wrapper; cbBuffer may legitimately be 0 on the final step. */
+	*output = malloc(out_bufs[0].cbBuffer);
+	*output_length = out_bufs[0].cbBuffer;
+	memcpy(*output, out_bufs[0].pvBuffer, *output_length);
+    sspi_functions->FreeContextBuffer(out_bufs[0].pvBuffer);
+
+    if (client->status == SEC_I_CONTINUE_NEEDED) {
+        return SSPI_CONTINUE;
+    }
+
+    return SSPI_OK;
+}
+
+/*
+ * sspi_client_wrap_msg integrity-protects (wraps, without encryption) the
+ * input message using the established security context, producing the
+ * token||data||padding concatenation the SASL GSSAPI final step requires.
+ * The result is returned in a malloc'd *output (caller frees). Returns
+ * SSPI_OK on success, SSPI_ERROR otherwise (details in client->status).
+ *
+ * Fixes vs. original: pointer arithmetic was performed directly on the
+ * PVOID (void*) *output, which is a GCC extension and ill-formed under
+ * standard C / MSVC; it now goes through a char* cursor. malloc results
+ * are also checked so an allocation failure cannot crash in memcpy.
+ */
+int sspi_client_wrap_msg(
+    sspi_client_state *client,
+    PVOID input,
+    ULONG input_length,
+    PVOID* output,
+    ULONG* output_length 
+)
+{
+    SecPkgContext_Sizes sizes;
+
+	client->status = sspi_functions->QueryContextAttributes(&client->ctx, SECPKG_ATTR_SIZES, &sizes);
+	if (client->status != SEC_E_OK) {
+		return SSPI_ERROR;
+	}
+
+	/* Scratch buffer laid out as [trailer][input][padding]; EncryptMessage
+	 * fills the trailer and padding regions in place. */
+	char *msg = malloc((sizes.cbSecurityTrailer + input_length + sizes.cbBlockSize) * sizeof(char));
+	if (!msg) {
+		client->status = SEC_E_INSUFFICIENT_MEMORY;
+		return SSPI_ERROR;
+	}
+	memcpy(&msg[sizes.cbSecurityTrailer], input, input_length);
+
+	SecBuffer wrap_bufs[3];
+	SecBufferDesc wrap_buf_desc;
+	wrap_buf_desc.cBuffers = 3;
+	wrap_buf_desc.pBuffers = wrap_bufs;
+	wrap_buf_desc.ulVersion = SECBUFFER_VERSION;
+
+	wrap_bufs[0].cbBuffer = sizes.cbSecurityTrailer;
+	wrap_bufs[0].BufferType = SECBUFFER_TOKEN;
+	wrap_bufs[0].pvBuffer = msg;
+
+	wrap_bufs[1].cbBuffer = input_length;
+	wrap_bufs[1].BufferType = SECBUFFER_DATA;
+	wrap_bufs[1].pvBuffer = msg + sizes.cbSecurityTrailer;
+
+	wrap_bufs[2].cbBuffer = sizes.cbBlockSize;
+	wrap_bufs[2].BufferType = SECBUFFER_PADDING;
+	wrap_bufs[2].pvBuffer = msg + sizes.cbSecurityTrailer + input_length;
+
+	/* SECQOP_WRAP_NO_ENCRYPT: sign/seal the framing only, leave the payload
+	 * in the clear, per the SASL GSSAPI security-layer negotiation. */
+	client->status = sspi_functions->EncryptMessage(&client->ctx, SECQOP_WRAP_NO_ENCRYPT, &wrap_buf_desc, 0);
+	if (client->status != SEC_E_OK) {
+		free(msg);
+		return SSPI_ERROR;
+	}
+
+	/* EncryptMessage may shrink the trailer/padding; use the updated sizes. */
+	*output_length = wrap_bufs[0].cbBuffer + wrap_bufs[1].cbBuffer + wrap_bufs[2].cbBuffer;
+	*output = malloc(*output_length);
+	if (!*output) {
+		free(msg);
+		client->status = SEC_E_INSUFFICIENT_MEMORY;
+		return SSPI_ERROR;
+	}
+
+	char *out = (char *)*output;
+	memcpy(out, wrap_bufs[0].pvBuffer, wrap_bufs[0].cbBuffer);
+	memcpy(out + wrap_bufs[0].cbBuffer, wrap_bufs[1].pvBuffer, wrap_bufs[1].cbBuffer);
+	memcpy(out + wrap_bufs[0].cbBuffer + wrap_bufs[1].cbBuffer, wrap_bufs[2].pvBuffer, wrap_bufs[2].cbBuffer);
+
+	free(msg);
+
+	return SSPI_OK;
+}
+
+/*
+ * sspi_client_destroy releases the security context (if one was ever
+ * established) and the credential handle. Always returns SSPI_OK.
+ * NOTE(review): FreeCredentialsHandle is called unconditionally; this
+ * assumes sspi_client_init succeeded before destroy — verify callers.
+ */
+int sspi_client_destroy(
+    sspi_client_state *client
+)
+{
+    if (client->has_ctx > 0) {
+        sspi_functions->DeleteSecurityContext(&client->ctx);
+    }
+
+    sspi_functions->FreeCredentialsHandle(&client->cred);
+
+    return SSPI_OK;
+}
\ No newline at end of file
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h
new file mode 100644
index 0000000..ee6e9a7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/internal/gssapi/sspi_wrapper.h
@@ -0,0 +1,58 @@
+//+build gssapi,windows
+
+/*
+ * sspi_wrapper.h: thin C shim over Windows SSPI (secur32.dll), called from
+ * Go via cgo to implement GSSAPI/Kerberos SASL authentication on Windows.
+ */
+#ifndef SSPI_WRAPPER_H
+#define SSPI_WRAPPER_H
+
+#define SECURITY_WIN32 1  /* Required for SSPI */
+
+#include <windows.h>
+#include <sspi.h>
+
+/* Return codes shared by every sspi_* wrapper function. */
+#define SSPI_OK 0
+#define SSPI_CONTINUE 1
+#define SSPI_ERROR 2
+
+/* Per-connection SSPI state threaded through the wrapper calls. */
+typedef struct {
+    CredHandle cred; /* acquired outbound credential */
+    CtxtHandle ctx;  /* security context under negotiation */
+
+    int has_ctx;     /* nonzero once ctx is valid */
+
+    SECURITY_STATUS status; /* last status, kept for error reporting */
+} sspi_client_state;
+
+/* Loads secur32.dll and resolves the SSPI dispatch table; call once. */
+int sspi_init();
+
+/* Acquires a credential handle for the (optional) username/password. */
+int sspi_client_init(
+    sspi_client_state *client,
+    char* username,
+    char* password
+);
+
+/* Returns the credential's principal name in a malloc'd *username. */
+int sspi_client_username(
+    sspi_client_state *client,
+    char** username
+);
+
+/* Performs one negotiation step; SSPI_CONTINUE while more are needed. */
+int sspi_client_negotiate(
+    sspi_client_state *client,
+    char* spn,
+    PVOID input,
+    ULONG input_length,
+    PVOID* output,
+    ULONG* output_length
+);
+
+/* Wraps (integrity-protects) a message with the established context. */
+int sspi_client_wrap_msg(
+    sspi_client_state *client,
+    PVOID input,
+    ULONG input_length,
+    PVOID* output,
+    ULONG* output_length 
+);
+
+/* Releases the context and credential held by the client state. */
+int sspi_client_destroy(
+    sspi_client_state *client
+);
+
+#endif
\ No newline at end of file
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/mongodbcr.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/mongodbcr.go
new file mode 100644
index 0000000..89dcf08
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/mongodbcr.go
@@ -0,0 +1,100 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"crypto/md5"
+	"fmt"
+
+	"io"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// MONGODBCR is the mechanism name for MONGODB-CR.
+//
+// The MONGODB-CR authentication mechanism is deprecated in MongoDB 4.0.
+const MONGODBCR = "MONGODB-CR"
+
+// newMongoDBCRAuthenticator builds a MONGODB-CR authenticator from the
+// credential's source database, username, and password.
+func newMongoDBCRAuthenticator(cred *Cred) (Authenticator, error) {
+	return &MongoDBCRAuthenticator{
+		DB:       cred.Source,
+		Username: cred.Username,
+		Password: cred.Password,
+	}, nil
+}
+
+// MongoDBCRAuthenticator uses the MONGODB-CR algorithm to authenticate a connection.
+//
+// The MONGODB-CR authentication mechanism is deprecated in MongoDB 4.0.
+type MongoDBCRAuthenticator struct {
+	DB       string // authentication database; defaults to "admin" when empty
+	Username string
+	Password string
+}
+
+// Auth authenticates the connection.
+//
+// It runs the two-step MONGODB-CR handshake: a getnonce command to fetch a
+// server nonce, followed by an authenticate command carrying an MD5 key
+// derived from the nonce, username, and password digest.
+//
+// The MONGODB-CR authentication mechanism is deprecated in MongoDB 4.0.
+func (a *MongoDBCRAuthenticator) Auth(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter) error {
+
+	// Arbiters cannot be authenticated
+	if desc.Kind == description.RSArbiter {
+		return nil
+	}
+
+	db := a.DB
+	if db == "" {
+		db = defaultAuthDB
+	}
+
+	cmd := command.Read{DB: db, Command: bsonx.Doc{{"getnonce", bsonx.Int32(1)}}}
+	ssdesc := description.SelectedServer{Server: desc}
+	rdr, err := cmd.RoundTrip(ctx, ssdesc, rw)
+	if err != nil {
+		return newError(err, MONGODBCR)
+	}
+
+	var getNonceResult struct {
+		Nonce string `bson:"nonce"`
+	}
+
+	err = bson.Unmarshal(rdr, &getNonceResult)
+	if err != nil {
+		return newAuthError("unmarshal error", err)
+	}
+
+	cmd = command.Read{
+		DB: db,
+		Command: bsonx.Doc{
+			{"authenticate", bsonx.Int32(1)},
+			{"user", bsonx.String(a.Username)},
+			{"nonce", bsonx.String(getNonceResult.Nonce)},
+			{"key", bsonx.String(a.createKey(getNonceResult.Nonce))},
+		},
+	}
+	_, err = cmd.RoundTrip(ctx, ssdesc, rw)
+	if err != nil {
+		return newError(err, MONGODBCR)
+	}
+
+	return nil
+}
+
+// createKey computes the MONGODB-CR proof: the hex MD5 of
+// nonce + username + mongoPasswordDigest(username, password).
+// MD5 is mandated by the (deprecated) protocol, not a choice here.
+func (a *MongoDBCRAuthenticator) createKey(nonce string) string {
+	h := md5.New()
+
+	_, _ = io.WriteString(h, nonce)
+	_, _ = io.WriteString(h, a.Username)
+	_, _ = io.WriteString(h, mongoPasswordDigest(a.Username, a.Password))
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/plain.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/plain.go
new file mode 100644
index 0000000..3f66aee
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/plain.go
@@ -0,0 +1,56 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// PLAIN is the mechanism name for PLAIN.
+const PLAIN = "PLAIN"
+
+// newPlainAuthenticator builds a PLAIN authenticator from the credential's
+// username and password.
+func newPlainAuthenticator(cred *Cred) (Authenticator, error) {
+	return &PlainAuthenticator{
+		Username: cred.Username,
+		Password: cred.Password,
+	}, nil
+}
+
+// PlainAuthenticator uses the PLAIN algorithm over SASL to authenticate a connection.
+type PlainAuthenticator struct {
+	Username string
+	Password string
+}
+
+// Auth authenticates the connection.
+//
+// PLAIN always authenticates against the $external database and completes
+// in a single SASL step.
+func (a *PlainAuthenticator) Auth(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter) error {
+	return ConductSaslConversation(ctx, desc, rw, "$external", &plainSaslClient{
+		username: a.Username,
+		password: a.Password,
+	})
+}
+
+// plainSaslClient implements SaslClient for the one-shot PLAIN mechanism.
+type plainSaslClient struct {
+	username string
+	password string
+}
+
+// Start emits the RFC 4616 initial response: NUL username NUL password
+// (empty authorization identity).
+func (c *plainSaslClient) Start() (string, []byte, error) {
+	b := []byte("\x00" + c.username + "\x00" + c.password)
+	return PLAIN, b, nil
+}
+
+// Next always fails: PLAIN has no server challenge after the first step.
+func (c *plainSaslClient) Next(challenge []byte) ([]byte, error) {
+	return nil, newAuthError("unexpected server challenge", nil)
+}
+
+// Completed always reports true; the entire exchange is in Start.
+func (c *plainSaslClient) Completed() bool {
+	return true
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/sasl.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/sasl.go
new file mode 100644
index 0000000..5357f48
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/sasl.go
@@ -0,0 +1,120 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// SaslClient is the client piece of a sasl conversation.
+//
+// Start returns the mechanism name and the initial payload; Next consumes
+// a server challenge and returns the next payload; Completed reports
+// whether the client has produced its final payload.
+type SaslClient interface {
+	Start() (string, []byte, error)
+	Next(challenge []byte) ([]byte, error)
+	Completed() bool
+}
+
+// SaslClientCloser is a SaslClient that has resources to clean up.
+// ConductSaslConversation calls Close (via defer) when the client
+// implements this interface.
+type SaslClientCloser interface {
+	SaslClient
+	Close()
+}
+
+// ConductSaslConversation handles running a sasl conversation with MongoDB.
+//
+// It drives the client through saslStart/saslContinue round trips against
+// the server described by desc until both the server (Done) and the client
+// (Completed) agree the exchange is finished. db defaults to "admin" when
+// empty. If client implements SaslClientCloser, Close is invoked on return.
+//
+// Fix vs. original: a non-zero saslResp.Code previously returned
+// newError(err, mech) with err == nil, discarding all information about
+// the server-side failure; the code is now reported explicitly.
+func ConductSaslConversation(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter, db string, client SaslClient) error {
+	// Arbiters cannot be authenticated
+	if desc.Kind == description.RSArbiter {
+		return nil
+	}
+
+	if db == "" {
+		db = defaultAuthDB
+	}
+
+	if closer, ok := client.(SaslClientCloser); ok {
+		defer closer.Close()
+	}
+
+	mech, payload, err := client.Start()
+	if err != nil {
+		return newError(err, mech)
+	}
+
+	saslStartCmd := command.Read{
+		DB: db,
+		Command: bsonx.Doc{
+			{"saslStart", bsonx.Int32(1)},
+			{"mechanism", bsonx.String(mech)},
+			{"payload", bsonx.Binary(0x00, payload)},
+		},
+	}
+
+	type saslResponse struct {
+		ConversationID int    `bson:"conversationId"`
+		Code           int    `bson:"code"`
+		Done           bool   `bson:"done"`
+		Payload        []byte `bson:"payload"`
+	}
+
+	var saslResp saslResponse
+
+	ssdesc := description.SelectedServer{Server: desc}
+	rdr, err := saslStartCmd.RoundTrip(ctx, ssdesc, rw)
+	if err != nil {
+		return newError(err, mech)
+	}
+
+	err = bson.Unmarshal(rdr, &saslResp)
+	if err != nil {
+		return newAuthError("unmarshal error", err)
+	}
+
+	cid := saslResp.ConversationID
+
+	for {
+		// A non-zero code indicates a server-side failure; err is nil at
+		// this point, so report the code rather than wrapping nil.
+		if saslResp.Code != 0 {
+			return newAuthError(fmt.Sprintf("%s authentication failed: server returned code %d", mech, saslResp.Code), nil)
+		}
+
+		if saslResp.Done && client.Completed() {
+			return nil
+		}
+
+		payload, err = client.Next(saslResp.Payload)
+		if err != nil {
+			return newError(err, mech)
+		}
+
+		// The client may reach completion while processing the step the
+		// server already marked done; skip the extra saslContinue.
+		if saslResp.Done && client.Completed() {
+			return nil
+		}
+
+		saslContinueCmd := command.Read{
+			DB: db,
+			Command: bsonx.Doc{
+				{"saslContinue", bsonx.Int32(1)},
+				{"conversationId", bsonx.Int32(int32(cid))},
+				{"payload", bsonx.Binary(0x00, payload)},
+			},
+		}
+
+		rdr, err = saslContinueCmd.RoundTrip(ctx, ssdesc, rw)
+		if err != nil {
+			return newError(err, mech)
+		}
+
+		err = bson.Unmarshal(rdr, &saslResp)
+		if err != nil {
+			return newAuthError("unmarshal error", err)
+		}
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/scram.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/scram.go
new file mode 100644
index 0000000..170488d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/scram.go
@@ -0,0 +1,102 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Copyright (C) MongoDB, Inc. 2018-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+	"github.com/xdg/scram"
+	"github.com/xdg/stringprep"
+)
+
+// SCRAMSHA1 holds the mechanism name "SCRAM-SHA-1"
+const SCRAMSHA1 = "SCRAM-SHA-1"
+
+// SCRAMSHA256 holds the mechanism name "SCRAM-SHA-256"
+const SCRAMSHA256 = "SCRAM-SHA-256"
+
+// newScramSHA1Authenticator builds a SCRAM-SHA-1 authenticator. Per the
+// MongoDB spec, SCRAM-SHA-1 hashes the legacy MONGODB-CR password digest
+// rather than the raw password.
+func newScramSHA1Authenticator(cred *Cred) (Authenticator, error) {
+	passdigest := mongoPasswordDigest(cred.Username, cred.Password)
+	client, err := scram.SHA1.NewClientUnprepped(cred.Username, passdigest, "")
+	if err != nil {
+		return nil, newAuthError("error initializing SCRAM-SHA-1 client", err)
+	}
+	client.WithMinIterations(4096)
+	return &ScramAuthenticator{
+		mechanism: SCRAMSHA1,
+		source:    cred.Source,
+		client:    client,
+	}, nil
+}
+
+// newScramSHA256Authenticator builds a SCRAM-SHA-256 authenticator. The
+// raw password is SASLprepped (RFC 4013) before use, as the mechanism
+// requires.
+func newScramSHA256Authenticator(cred *Cred) (Authenticator, error) {
+	passprep, err := stringprep.SASLprep.Prepare(cred.Password)
+	if err != nil {
+		return nil, newAuthError(fmt.Sprintf("error SASLprepping password '%s'", cred.Password), err)
+	}
+	client, err := scram.SHA256.NewClientUnprepped(cred.Username, passprep, "")
+	if err != nil {
+		return nil, newAuthError("error initializing SCRAM-SHA-256 client", err)
+	}
+	client.WithMinIterations(4096)
+	return &ScramAuthenticator{
+		mechanism: SCRAMSHA256,
+		source:    cred.Source,
+		client:    client,
+	}, nil
+}
+
+// ScramAuthenticator uses the SCRAM algorithm over SASL to authenticate a connection.
+type ScramAuthenticator struct {
+	mechanism string        // SCRAMSHA1 or SCRAMSHA256
+	source    string        // authentication database
+	client    *scram.Client // configured xdg/scram client
+}
+
+// Auth authenticates the connection.
+//
+// Each call starts a fresh scram conversation; the authenticator itself
+// is reusable across connections.
+func (a *ScramAuthenticator) Auth(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter) error {
+	adapter := &scramSaslAdapter{conversation: a.client.NewConversation(), mechanism: a.mechanism}
+	err := ConductSaslConversation(ctx, desc, rw, a.source, adapter)
+	if err != nil {
+		return newAuthError("sasl conversation error", err)
+	}
+	return nil
+}
+
+// scramSaslAdapter adapts a scram.ClientConversation to the SaslClient
+// interface expected by ConductSaslConversation.
+type scramSaslAdapter struct {
+	mechanism    string
+	conversation *scram.ClientConversation
+}
+
+// Start produces the client-first message (an empty challenge steps the
+// conversation forward) and returns it with the mechanism name.
+func (a *scramSaslAdapter) Start() (string, []byte, error) {
+	step, err := a.conversation.Step("")
+	if err != nil {
+		return a.mechanism, nil, err
+	}
+	return a.mechanism, []byte(step), nil
+}
+
+// Next feeds the server challenge to the conversation and returns the
+// client's next message.
+func (a *scramSaslAdapter) Next(challenge []byte) ([]byte, error) {
+	step, err := a.conversation.Step(string(challenge))
+	if err != nil {
+		return nil, err
+	}
+	return []byte(step), nil
+}
+
+// Completed reports whether the scram conversation has finished.
+func (a *scramSaslAdapter) Completed() bool {
+	return a.conversation.Done()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/util.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/util.go
new file mode 100644
index 0000000..36b8c07
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/util.go
@@ -0,0 +1,23 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"crypto/md5"
+	"fmt"
+	"io"
+)
+
+// defaultAuthDB is the default database used for authentication commands.
+const defaultAuthDB = "admin"
+
+// mongoPasswordDigest computes the legacy MongoDB password digest: the
+// hex-encoded MD5 of "<username>:mongo:<password>". MD5 is mandated by the
+// server's historical credential format, not chosen for security.
+func mongoPasswordDigest(username, password string) string {
+	h := md5.New()
+	// Writes to a hash.Hash never return an error, so they are deliberately ignored.
+	_, _ = io.WriteString(h, username)
+	_, _ = io.WriteString(h, ":mongo:")
+	_, _ = io.WriteString(h, password)
+	return fmt.Sprintf("%x", h.Sum(nil))
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/x509.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/x509.go
new file mode 100644
index 0000000..219513b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/auth/x509.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package auth
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// MongoDBX509 is the mechanism name for MongoDBX509.
+const MongoDBX509 = "MONGODB-X509"
+
+// newMongoDBX509Authenticator creates an authenticator that relies on the
+// client certificate presented during the TLS handshake; only the username
+// from the credential is used.
+func newMongoDBX509Authenticator(cred *Cred) (Authenticator, error) {
+	return &MongoDBX509Authenticator{User: cred.Username}, nil
+}
+
+// MongoDBX509Authenticator uses X.509 certificates over TLS to authenticate a connection.
+type MongoDBX509Authenticator struct {
+	User string
+}
+
+// Auth implements the Authenticator interface. It issues the "authenticate"
+// command against the $external database.
+func (a *MongoDBX509Authenticator) Auth(ctx context.Context, desc description.Server, rw wiremessage.ReadWriter) error {
+	authRequestDoc := bsonx.Doc{
+		{"authenticate", bsonx.Int32(1)},
+		{"mechanism", bsonx.String(MongoDBX509)},
+	}
+
+	// Servers at wire version < 5 require the user field to be sent explicitly.
+	if desc.WireVersion.Max < 5 {
+		authRequestDoc = append(authRequestDoc, bsonx.Elem{"user", bsonx.String(a.User)})
+	}
+
+	authCmd := command.Read{DB: "$external", Command: authRequestDoc}
+	ssdesc := description.SelectedServer{Server: desc}
+	_, err := authCmd.RoundTrip(ctx, ssdesc, rw)
+	if err != nil {
+		return newAuthError("round trip error", err)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/batch_cursor.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/batch_cursor.go
new file mode 100644
index 0000000..da946c3
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/batch_cursor.go
@@ -0,0 +1,424 @@
+package driver
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// BatchCursor is a batch implementation of a cursor. It returns documents in entire batches instead
+// of one at a time. An individual document cursor can be built on top of this batch cursor.
+type BatchCursor struct {
+	clientSession *session.Client
+	clock         *session.ClusterClock
+	namespace     command.Namespace
+	id            int64 // server-side cursor ID; 0 means the cursor is exhausted/closed
+	err           error // last error encountered; surfaced via Err()
+	server        *topology.Server
+	opts          []bsonx.Elem
+	currentBatch  []byte // concatenated raw BSON documents of the current batch
+	firstBatch    bool   // true until the initial batch has been consumed by Next
+	batchNumber   int
+
+	// legacy server (< 3.2) fields
+	batchSize   int32
+	limit       int32
+	numReturned int32 // number of docs returned by server
+}
+
+// NewBatchCursor creates a new BatchCursor from the provided parameters. The
+// result document must contain a "cursor" subdocument with "firstBatch",
+// "ns", and "id" fields, as returned by commands such as find and aggregate.
+func NewBatchCursor(result bsoncore.Document, clientSession *session.Client, clock *session.ClusterClock, server *topology.Server, opts ...bsonx.Elem) (*BatchCursor, error) {
+	cur, err := result.LookupErr("cursor")
+	if err != nil {
+		return nil, err
+	}
+	if cur.Type != bson.TypeEmbeddedDocument {
+		return nil, fmt.Errorf("cursor should be an embedded document but it is a BSON %s", cur.Type)
+	}
+
+	elems, err := cur.Document().Elements()
+	if err != nil {
+		return nil, err
+	}
+	bc := &BatchCursor{
+		clientSession: clientSession,
+		clock:         clock,
+		server:        server,
+		opts:          opts,
+		firstBatch:    true,
+	}
+
+	// Walk the cursor subdocument and pull out the batch, namespace, and ID.
+	var ok bool
+	for _, elem := range elems {
+		switch elem.Key() {
+		case "firstBatch":
+			arr, ok := elem.Value().ArrayOK()
+			if !ok {
+				return nil, fmt.Errorf("firstBatch should be an array but it is a BSON %s", elem.Value().Type)
+			}
+			vals, err := arr.Values()
+			if err != nil {
+				return nil, err
+			}
+
+			// Concatenate the raw documents into a single contiguous byte slice.
+			for _, val := range vals {
+				if val.Type != bsontype.EmbeddedDocument {
+					return nil, fmt.Errorf("element of cursor batch is not a document, but at %s", val.Type)
+				}
+				bc.currentBatch = append(bc.currentBatch, val.Data...)
+			}
+		case "ns":
+			if elem.Value().Type != bson.TypeString {
+				return nil, fmt.Errorf("namespace should be a string but it is a BSON %s", elem.Value().Type)
+			}
+			namespace := command.ParseNamespace(elem.Value().StringValue())
+			err = namespace.Validate()
+			if err != nil {
+				return nil, err
+			}
+			bc.namespace = namespace
+		case "id":
+			bc.id, ok = elem.Value().Int64OK()
+			if !ok {
+				return nil, fmt.Errorf("id should be an int64 but it is a BSON %s", elem.Value().Type)
+			}
+		}
+	}
+
+	// close session if everything fits in first batch
+	if bc.id == 0 {
+		bc.closeImplicitSession()
+	}
+	return bc, nil
+}
+
+// NewEmptyBatchCursor returns a batch cursor that is empty. Its Next always
+// stops yielding once the (empty) first batch is consumed.
+func NewEmptyBatchCursor() *BatchCursor {
+	return &BatchCursor{}
+}
+
+// NewLegacyBatchCursor creates a new BatchCursor for server versions 3.0 and below from the
+// provided parameters. The limit and batchSize drive the OP_GET_MORE
+// numberToReturn arithmetic used by legacyGetMore.
+//
+// TODO(GODRIVER-617): The batch parameter here should be []bsoncore.Document. Change it to this
+// once we have the new wiremessage package that uses bsoncore instead of bson.
+func NewLegacyBatchCursor(ns command.Namespace, cursorID int64, batch []bson.Raw, limit int32, batchSize int32, server *topology.Server) (*BatchCursor, error) {
+	bc := &BatchCursor{
+		id:          cursorID,
+		server:      server,
+		namespace:   ns,
+		limit:       limit,
+		batchSize:   batchSize,
+		numReturned: int32(len(batch)),
+		firstBatch:  true,
+	}
+
+	// take as many documents from the batch as needed; documents past the
+	// limit are truncated away.
+	firstBatchSize := int32(len(batch))
+	if limit != 0 && limit < firstBatchSize {
+		firstBatchSize = limit
+	}
+	batch = batch[:firstBatchSize]
+	for _, doc := range batch {
+		bc.currentBatch = append(bc.currentBatch, doc...)
+	}
+
+	return bc, nil
+}
+
+// ID returns the cursor ID for this batch cursor.
+func (bc *BatchCursor) ID() int64 {
+	return bc.id
+}
+
+// Next indicates if there is another batch available. Returning false does not necessarily indicate
+// that the cursor is closed. This method will return false when an empty batch is returned.
+//
+// If Next returns true, there is a valid batch of documents available. If Next returns false, there
+// is not a valid batch of documents available.
+func (bc *BatchCursor) Next(ctx context.Context) bool {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	// The first batch was filled at construction time; hand it out once
+	// without issuing a getMore.
+	if bc.firstBatch {
+		bc.firstBatch = false
+		return true
+	}
+
+	// id == 0 means the server-side cursor is exhausted; nil server means
+	// there is nothing to fetch from (e.g. NewEmptyBatchCursor).
+	if bc.id == 0 || bc.server == nil {
+		return false
+	}
+
+	if bc.legacy() {
+		bc.legacyGetMore(ctx)
+	} else {
+		bc.getMore(ctx)
+	}
+
+	return len(bc.currentBatch) > 0
+}
+
+// Batch will append the current batch of documents to dst. RequiredBytes can be called to determine
+// the length of the current batch of documents.
+//
+// If there is no batch available, this method does nothing.
+func (bc *BatchCursor) Batch(dst []byte) []byte { return append(dst, bc.currentBatch...) }
+
+// RequiredBytes returns the number of bytes required for the current batch.
+func (bc *BatchCursor) RequiredBytes() int { return len(bc.currentBatch) }
+
+// Err returns the latest error encountered.
+func (bc *BatchCursor) Err() error { return bc.err }
+
+// Close closes this batch cursor by issuing a killCursors command (or the
+// legacy OP_KILL_CURSORS wire message for old servers) and ends any implicit
+// session attached to the cursor.
+func (bc *BatchCursor) Close(ctx context.Context) error {
+	if ctx == nil {
+		ctx = context.Background()
+	}
+
+	if bc.server == nil {
+		return nil
+	}
+
+	if bc.legacy() {
+		return bc.legacyKillCursor(ctx)
+	}
+
+	defer bc.closeImplicitSession()
+	conn, err := bc.server.Connection(ctx)
+	if err != nil {
+		return err
+	}
+
+	_, err = (&command.KillCursors{
+		Clock: bc.clock,
+		NS:    bc.namespace,
+		IDs:   []int64{bc.id},
+	}).RoundTrip(ctx, bc.server.SelectedDescription(), conn)
+	if err != nil {
+		_ = conn.Close() // The command response error is more important here
+		return err
+	}
+
+	// Mark the cursor as exhausted so later Next calls stop immediately.
+	bc.id = 0
+	return conn.Close()
+}
+
+// closeImplicitSession ends the cursor's session, but only if the driver
+// created it implicitly; explicit (user-owned) sessions are left alone.
+func (bc *BatchCursor) closeImplicitSession() {
+	if bc.clientSession != nil && bc.clientSession.SessionType == session.Implicit {
+		bc.clientSession.EndSession()
+	}
+}
+
+// clearBatch empties the current batch while keeping the backing storage for reuse.
+func (bc *BatchCursor) clearBatch() {
+	bc.currentBatch = bc.currentBatch[:0]
+}
+
+// getMore fetches the next batch via the getMore command, replacing
+// currentBatch. Errors are recorded in bc.err rather than returned.
+func (bc *BatchCursor) getMore(ctx context.Context) {
+	bc.clearBatch()
+	if bc.id == 0 {
+		return
+	}
+
+	conn, err := bc.server.Connection(ctx)
+	if err != nil {
+		bc.err = err
+		return
+	}
+
+	response, err := (&command.GetMore{
+		Clock:   bc.clock,
+		ID:      bc.id,
+		NS:      bc.namespace,
+		Opts:    bc.opts,
+		Session: bc.clientSession,
+	}).RoundTrip(ctx, bc.server.SelectedDescription(), conn)
+	if err != nil {
+		_ = conn.Close() // The command response error is more important here
+		bc.err = err
+		return
+	}
+
+	err = conn.Close()
+	if err != nil {
+		bc.err = err
+		return
+	}
+
+	// The new cursor ID tells us whether the server-side cursor is still open.
+	id, err := response.LookupErr("cursor", "id")
+	if err != nil {
+		bc.err = err
+		return
+	}
+	var ok bool
+	bc.id, ok = id.Int64OK()
+	if !ok {
+		bc.err = fmt.Errorf("BSON Type %s is not %s", id.Type, bson.TypeInt64)
+		return
+	}
+
+	// if this is the last getMore, close the session
+	if bc.id == 0 {
+		bc.closeImplicitSession()
+	}
+
+	batch, err := response.LookupErr("cursor", "nextBatch")
+	if err != nil {
+		bc.err = err
+		return
+	}
+	var arr bson.Raw
+	arr, ok = batch.ArrayOK()
+	if !ok {
+		bc.err = fmt.Errorf("BSON Type %s is not %s", batch.Type, bson.TypeArray)
+		return
+	}
+	vals, err := arr.Values()
+	if err != nil {
+		bc.err = err
+		return
+	}
+
+	// Concatenate the batch documents; on a malformed element, clear the
+	// partial batch so callers never see half a batch.
+	for _, val := range vals {
+		if val.Type != bsontype.EmbeddedDocument {
+			bc.err = fmt.Errorf("element of cursor batch is not a document, but at %s", val.Type)
+			bc.currentBatch = bc.currentBatch[:0] // don't return a batch on error
+			return
+		}
+		bc.currentBatch = append(bc.currentBatch, val.Value...)
+	}
+
+	return
+}
+
+// legacy reports whether the server predates the getMore/killCursors commands
+// (wire version < 4, i.e. server < 3.2) and so needs OP_GET_MORE/OP_KILL_CURSORS.
+func (bc *BatchCursor) legacy() bool {
+	return bc.server.Description().WireVersion == nil || bc.server.Description().WireVersion.Max < 4
+}
+
+// legacyKillCursor closes the cursor on pre-3.2 servers by sending an
+// OP_KILL_CURSORS wire message, which elicits no reply.
+func (bc *BatchCursor) legacyKillCursor(ctx context.Context) error {
+	conn, err := bc.server.Connection(ctx)
+	if err != nil {
+		return err
+	}
+
+	kc := wiremessage.KillCursors{
+		NumberOfCursorIDs: 1,
+		CursorIDs:         []int64{bc.id},
+		CollectionName:    bc.namespace.Collection,
+		DatabaseName:      bc.namespace.DB,
+	}
+
+	err = conn.WriteWireMessage(ctx, kc)
+	if err != nil {
+		_ = conn.Close()
+		return err
+	}
+
+	err = conn.Close() // no reply from OP_KILL_CURSORS
+	if err != nil {
+		return err
+	}
+
+	bc.id = 0
+	bc.clearBatch()
+	return nil
+}
+
+// legacyGetMore fetches the next batch from a pre-3.2 server with an
+// OP_GET_MORE wire message. Errors are recorded in bc.err rather than returned.
+func (bc *BatchCursor) legacyGetMore(ctx context.Context) {
+	bc.clearBatch()
+	if bc.id == 0 {
+		return
+	}
+
+	conn, err := bc.server.Connection(ctx)
+	if err != nil {
+		bc.err = err
+		return
+	}
+
+	// Cap numberToReturn so the total returned never exceeds the limit.
+	numToReturn := bc.batchSize
+	if bc.limit != 0 && bc.numReturned+bc.batchSize > bc.limit {
+		numToReturn = bc.limit - bc.numReturned
+	}
+	gm := wiremessage.GetMore{
+		FullCollectionName: bc.namespace.DB + "." + bc.namespace.Collection,
+		CursorID:           bc.id,
+		NumberToReturn:     numToReturn,
+	}
+
+	err = conn.WriteWireMessage(ctx, gm)
+	if err != nil {
+		_ = conn.Close()
+		bc.err = err
+		return
+	}
+
+	response, err := conn.ReadWireMessage(ctx)
+	if err != nil {
+		_ = conn.Close()
+		bc.err = err
+		return
+	}
+
+	err = conn.Close()
+	if err != nil {
+		bc.err = err
+		return
+	}
+
+	reply, ok := response.(wiremessage.Reply)
+	if !ok {
+		bc.err = errors.New("did not receive OP_REPLY response")
+		return
+	}
+
+	err = validateGetMoreReply(reply)
+	if err != nil {
+		bc.err = err
+		return
+	}
+
+	bc.id = reply.CursorID
+	bc.numReturned += reply.NumberReturned
+	// Once the limit is reached the cursor is done; proactively close it.
+	if bc.limit != 0 && bc.numReturned >= bc.limit {
+		err = bc.Close(ctx)
+		if err != nil {
+			bc.err = err
+			return
+		}
+	}
+
+	for _, doc := range reply.Documents {
+		bc.currentBatch = append(bc.currentBatch, doc...)
+	}
+}
+
+// validateGetMoreReply sanity-checks an OP_REPLY to a legacy getMore:
+// the document count must match NumberReturned, and the CursorNotFound /
+// QueryFailure response flags are converted into errors.
+func validateGetMoreReply(reply wiremessage.Reply) error {
+	if int(reply.NumberReturned) != len(reply.Documents) {
+		return command.NewCommandResponseError("malformed OP_REPLY: NumberReturned does not match number of returned documents", nil)
+	}
+
+	if reply.ResponseFlags&wiremessage.CursorNotFound == wiremessage.CursorNotFound {
+		return command.QueryFailureError{
+			Message: "query failure - cursor not found",
+		}
+	}
+	if reply.ResponseFlags&wiremessage.QueryFailure == wiremessage.QueryFailure {
+		return command.QueryFailureError{
+			Message:  "query failure",
+			// On QueryFailure the server puts the error document first.
+			Response: reply.Documents[0],
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/bulk_write.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/bulk_write.go
new file mode 100644
index 0000000..4d461d5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/bulk_write.go
@@ -0,0 +1,627 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// BulkWriteError is an error from one operation in a bulk write.
+type BulkWriteError struct {
+	result.WriteError
+	Model WriteModel // the write model that produced the error
+}
+
+// BulkWriteException is a collection of errors returned by a bulk write operation.
+type BulkWriteException struct {
+	WriteConcernError *result.WriteConcernError
+	WriteErrors       []BulkWriteError
+}
+
+// Error implements the error interface. The message is intentionally empty;
+// callers inspect WriteErrors/WriteConcernError for details.
+func (BulkWriteException) Error() string {
+	return ""
+}
+
+// bulkWriteBatch is a run of write models that can be dispatched as a single
+// command; canRetry is false when the batch contains multi-document writes.
+type bulkWriteBatch struct {
+	models   []WriteModel
+	canRetry bool
+}
+
+// BulkWrite handles the full dispatch cycle for a bulk write operation: it
+// selects a server, validates option support, groups the models into batches,
+// runs each batch, and merges the per-batch results and errors.
+func BulkWrite(
+	ctx context.Context,
+	ns command.Namespace,
+	models []WriteModel,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	sess *session.Client,
+	writeConcern *writeconcern.WriteConcern,
+	clock *session.ClusterClock,
+	registry *bsoncodec.Registry,
+	opts ...*options.BulkWriteOptions,
+) (result.BulkWrite, error) {
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.BulkWrite{}, err
+	}
+
+	err = verifyOptions(models, ss)
+	if err != nil {
+		return result.BulkWrite{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if sess == nil && topo.SupportsSessions() {
+		sess, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.BulkWrite{}, err
+		}
+
+		defer sess.EndSession()
+	}
+
+	bwOpts := options.MergeBulkWriteOptions(opts...)
+
+	// NOTE(review): assumes MergeBulkWriteOptions always sets Ordered to a
+	// non-nil default — confirm, otherwise this dereference would panic.
+	ordered := *bwOpts.Ordered
+
+	batches := createBatches(models, ordered)
+	bwRes := result.BulkWrite{
+		UpsertedIDs: make(map[int64]interface{}),
+	}
+	bwErr := BulkWriteException{
+		WriteErrors: make([]BulkWriteError, 0),
+	}
+
+	var opIndex int64 // the operation index for the upsertedIDs map
+	continueOnError := !ordered
+	for _, batch := range batches {
+		if len(batch.models) == 0 {
+			continue
+		}
+
+		batchRes, batchErr, err := runBatch(ctx, ns, topo, selector, ss, sess, clock, writeConcern, retryWrite,
+			bwOpts.BypassDocumentValidation, continueOnError, batch, registry)
+
+		mergeResults(&bwRes, batchRes, opIndex)
+		bwErr.WriteConcernError = batchErr.WriteConcernError
+		// Shift per-batch error indexes to positions in the original models slice.
+		for i := range batchErr.WriteErrors {
+			batchErr.WriteErrors[i].Index = batchErr.WriteErrors[i].Index + int(opIndex)
+		}
+		bwErr.WriteErrors = append(bwErr.WriteErrors, batchErr.WriteErrors...)
+
+		// Ordered bulk writes stop at the first failing batch.
+		if !continueOnError && (err != nil || len(batchErr.WriteErrors) > 0 || batchErr.WriteConcernError != nil) {
+			if err != nil {
+				return result.BulkWrite{}, err
+			}
+
+			return result.BulkWrite{}, bwErr
+		}
+
+		opIndex += int64(len(batch.models))
+	}
+
+	// Upserts are counted by the server as matches; subtract to report only true matches.
+	bwRes.MatchedCount -= bwRes.UpsertedCount
+	return bwRes, nil
+}
+
+// runBatch dispatches one homogeneous batch (all inserts, all deletes, or all
+// updates/replaces — decided by the type of the first model) and converts the
+// command result into partial bulk-write results and errors.
+func runBatch(
+	ctx context.Context,
+	ns command.Namespace,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	ss *topology.SelectedServer,
+	sess *session.Client,
+	clock *session.ClusterClock,
+	wc *writeconcern.WriteConcern,
+	retryWrite bool,
+	bypassDocValidation *bool,
+	continueOnError bool,
+	batch bulkWriteBatch,
+	registry *bsoncodec.Registry,
+) (result.BulkWrite, BulkWriteException, error) {
+	batchRes := result.BulkWrite{
+		UpsertedIDs: make(map[int64]interface{}),
+	}
+	batchErr := BulkWriteException{}
+
+	var writeErrors []result.WriteError
+	// createBatches/createOrderedBatches guarantee a batch holds only one
+	// command kind, so the first model's type identifies the whole batch.
+	switch batch.models[0].(type) {
+	case InsertOneModel:
+		res, err := runInsert(ctx, ns, topo, selector, ss, sess, clock, wc, retryWrite, batch, bypassDocValidation,
+			continueOnError, registry)
+		if err != nil {
+			return result.BulkWrite{}, BulkWriteException{}, err
+		}
+
+		batchRes.InsertedCount = int64(res.N)
+		writeErrors = res.WriteErrors
+	case DeleteOneModel, DeleteManyModel:
+		res, err := runDelete(ctx, ns, topo, selector, ss, sess, clock, wc, retryWrite, batch, continueOnError, registry)
+		if err != nil {
+			return result.BulkWrite{}, BulkWriteException{}, err
+		}
+
+		batchRes.DeletedCount = int64(res.N)
+		writeErrors = res.WriteErrors
+	case ReplaceOneModel, UpdateOneModel, UpdateManyModel:
+		res, err := runUpdate(ctx, ns, topo, selector, ss, sess, clock, wc, retryWrite, batch, bypassDocValidation,
+			continueOnError, registry)
+		if err != nil {
+			return result.BulkWrite{}, BulkWriteException{}, err
+		}
+
+		batchRes.MatchedCount = res.MatchedCount
+		batchRes.ModifiedCount = res.ModifiedCount
+		batchRes.UpsertedCount = int64(len(res.Upserted))
+		writeErrors = res.WriteErrors
+		for _, upsert := range res.Upserted {
+			batchRes.UpsertedIDs[upsert.Index] = upsert.ID
+		}
+	}
+
+	// NOTE(review): every write error is attributed to batch.models[0], not to
+	// the model at the error's index — confirm whether this is intentional.
+	batchErr.WriteErrors = make([]BulkWriteError, 0, len(writeErrors))
+	for _, we := range writeErrors {
+		batchErr.WriteErrors = append(batchErr.WriteErrors, BulkWriteError{
+			WriteError: we,
+			Model:      batch.models[0],
+		})
+	}
+
+	return batchRes, batchErr, nil
+}
+
+// runInsert executes an insert batch, retrying once on a retryable error when
+// retryable writes are supported, requested, and the batch allows it.
+func runInsert(
+	ctx context.Context,
+	ns command.Namespace,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	ss *topology.SelectedServer,
+	sess *session.Client,
+	clock *session.ClusterClock,
+	wc *writeconcern.WriteConcern,
+	retryWrite bool,
+	batch bulkWriteBatch,
+	bypassDocValidation *bool,
+	continueOnError bool,
+	registry *bsoncodec.Registry,
+) (result.Insert, error) {
+	docs := make([]bsonx.Doc, len(batch.models))
+	var i int
+	for _, model := range batch.models {
+		converted := model.(InsertOneModel)
+		doc, err := interfaceToDocument(converted.Document, registry)
+		if err != nil {
+			return result.Insert{}, err
+		}
+
+		docs[i] = doc
+		i++
+	}
+
+	cmd := command.Insert{
+		ContinueOnError: continueOnError,
+		NS:              ns,
+		Docs:            docs,
+		Session:         sess,
+		Clock:           clock,
+		WriteConcern:    wc,
+	}
+
+	if bypassDocValidation != nil {
+		cmd.Opts = []bsonx.Elem{{"bypassDocumentValidation", bsonx.Boolean(*bypassDocValidation)}}
+	}
+
+	// No retry possible/requested: make sure the session is not flagged for
+	// retry and run the command once.
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite || !batch.canRetry {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false
+		}
+		return insert(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, origErr := insert(ctx, cmd, ss, nil)
+	if shouldRetry(origErr, res.WriteConcernError) {
+		// One retry on a freshly selected server; fall back to the original
+		// error if reselection fails or retrying is no longer supported.
+		newServer, err := topo.SelectServer(ctx, selector)
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return res, origErr
+		}
+
+		return insert(ctx, cmd, newServer, origErr)
+	}
+
+	return res, origErr
+}
+
+// runDelete executes a delete batch, retrying once on a retryable error when
+// retryable writes are supported, requested, and the batch allows it
+// (DeleteMany batches are never retried).
+func runDelete(
+	ctx context.Context,
+	ns command.Namespace,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	ss *topology.SelectedServer,
+	sess *session.Client,
+	clock *session.ClusterClock,
+	wc *writeconcern.WriteConcern,
+	retryWrite bool,
+	batch bulkWriteBatch,
+	continueOnError bool,
+	registry *bsoncodec.Registry,
+) (result.Delete, error) {
+	docs := make([]bsonx.Doc, len(batch.models))
+	var i int
+
+	for _, model := range batch.models {
+		var doc bsonx.Doc
+		var err error
+
+		// DeleteOne uses limit 1; DeleteMany uses limit 0 (no limit).
+		if dom, ok := model.(DeleteOneModel); ok {
+			doc, err = createDeleteDoc(dom.Filter, dom.Collation, false, registry)
+		} else if dmm, ok := model.(DeleteManyModel); ok {
+			doc, err = createDeleteDoc(dmm.Filter, dmm.Collation, true, registry)
+		}
+
+		if err != nil {
+			return result.Delete{}, err
+		}
+
+		docs[i] = doc
+		i++
+	}
+
+	cmd := command.Delete{
+		ContinueOnError: continueOnError,
+		NS:              ns,
+		Deletes:         docs,
+		Session:         sess,
+		Clock:           clock,
+		WriteConcern:    wc,
+	}
+
+	// delete below is this package's dispatch helper, not the builtin.
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite || !batch.canRetry {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false
+		}
+		return delete(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, origErr := delete(ctx, cmd, ss, nil)
+	if shouldRetry(origErr, res.WriteConcernError) {
+		newServer, err := topo.SelectServer(ctx, selector)
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return res, origErr
+		}
+
+		return delete(ctx, cmd, newServer, origErr)
+	}
+
+	return res, origErr
+}
+
+// runUpdate executes an update/replace batch, retrying once on a retryable
+// error when retryable writes are supported, requested, and the batch allows
+// it (UpdateMany batches are never retried).
+func runUpdate(
+	ctx context.Context,
+	ns command.Namespace,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	ss *topology.SelectedServer,
+	sess *session.Client,
+	clock *session.ClusterClock,
+	wc *writeconcern.WriteConcern,
+	retryWrite bool,
+	batch bulkWriteBatch,
+	bypassDocValidation *bool,
+	continueOnError bool,
+	registry *bsoncodec.Registry,
+) (result.Update, error) {
+	docs := make([]bsonx.Doc, len(batch.models))
+
+	for i, model := range batch.models {
+		var doc bsonx.Doc
+		var err error
+
+		// Replace is an update with multi=false and no array filters.
+		if rom, ok := model.(ReplaceOneModel); ok {
+			doc, err = createUpdateDoc(rom.Filter, rom.Replacement, options.ArrayFilters{}, false, rom.UpdateModel, false,
+				registry)
+		} else if uom, ok := model.(UpdateOneModel); ok {
+			doc, err = createUpdateDoc(uom.Filter, uom.Update, uom.ArrayFilters, uom.ArrayFiltersSet, uom.UpdateModel, false,
+				registry)
+		} else if umm, ok := model.(UpdateManyModel); ok {
+			doc, err = createUpdateDoc(umm.Filter, umm.Update, umm.ArrayFilters, umm.ArrayFiltersSet, umm.UpdateModel, true,
+				registry)
+		}
+
+		if err != nil {
+			return result.Update{}, err
+		}
+
+		docs[i] = doc
+	}
+
+	cmd := command.Update{
+		ContinueOnError: continueOnError,
+		NS:              ns,
+		Docs:            docs,
+		Session:         sess,
+		Clock:           clock,
+		WriteConcern:    wc,
+	}
+	if bypassDocValidation != nil {
+		// TODO this is temporary!
+		cmd.Opts = []bsonx.Elem{{"bypassDocumentValidation", bsonx.Boolean(*bypassDocValidation)}}
+		//cmd.Opts = []option.UpdateOptioner{option.OptBypassDocumentValidation(bypassDocValidation)}
+	}
+
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite || !batch.canRetry {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false
+		}
+		return update(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, origErr := update(ctx, cmd, ss, nil)
+	if shouldRetry(origErr, res.WriteConcernError) {
+		// One retry on a freshly selected server; otherwise surface the
+		// original result and error.
+		newServer, err := topo.SelectServer(ctx, selector)
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return res, origErr
+		}
+
+		return update(ctx, cmd, newServer, origErr)
+	}
+
+	return res, origErr
+}
+
+// verifyOptions rejects models whose options the selected server cannot
+// honor: collation needs wire version >= 5 (server 3.4), arrayFilters needs
+// wire version >= 6 (server 3.6).
+//
+// NOTE(review): assumes ss.Description().WireVersion is non-nil; a nil value
+// would panic here — confirm server selection guarantees this.
+func verifyOptions(models []WriteModel, ss *topology.SelectedServer) error {
+	maxVersion := ss.Description().WireVersion.Max
+	// 3.4 is wire version 5
+	// 3.6 is wire version 6
+
+	for _, model := range models {
+		var collationSet bool
+		var afSet bool // arrayFilters
+
+		switch converted := model.(type) {
+		case DeleteOneModel:
+			collationSet = converted.Collation != nil
+		case DeleteManyModel:
+			collationSet = converted.Collation != nil
+		case ReplaceOneModel:
+			collationSet = converted.Collation != nil
+		case UpdateOneModel:
+			afSet = converted.ArrayFiltersSet
+			collationSet = converted.Collation != nil
+		case UpdateManyModel:
+			afSet = converted.ArrayFiltersSet
+			collationSet = converted.Collation != nil
+		}
+
+		if afSet && maxVersion < 6 {
+			return ErrArrayFilters
+		}
+
+		if collationSet && maxVersion < 5 {
+			return ErrCollation
+		}
+	}
+
+	return nil
+}
+
+// createBatches groups the models into batches. For unordered writes all
+// models of the same command kind (insert/delete/update) are coalesced into
+// at most one batch each — hence the fixed size of 3 — preserving the order
+// in which each kind first appears. Ordered writes delegate to
+// createOrderedBatches.
+func createBatches(models []WriteModel, ordered bool) []bulkWriteBatch {
+	if ordered {
+		return createOrderedBatches(models)
+	}
+
+	batches := make([]bulkWriteBatch, 3)
+	var i int
+	for i = 0; i < 3; i++ {
+		batches[i].canRetry = true
+	}
+
+	var numBatches int // number of batches used. can't use len(batches) because it's set to 3
+	insertInd := -1
+	updateInd := -1
+	deleteInd := -1
+
+	for _, model := range models {
+		switch converted := model.(type) {
+		case InsertOneModel:
+			if insertInd == -1 {
+				// this is the first InsertOneModel
+				insertInd = numBatches
+				numBatches++
+			}
+
+			batches[insertInd].models = append(batches[insertInd].models, model)
+		case DeleteOneModel, DeleteManyModel:
+			if deleteInd == -1 {
+				deleteInd = numBatches
+				numBatches++
+			}
+
+			batches[deleteInd].models = append(batches[deleteInd].models, model)
+			// Multi-document deletes make the whole batch non-retryable.
+			if _, ok := converted.(DeleteManyModel); ok {
+				batches[deleteInd].canRetry = false
+			}
+		case ReplaceOneModel, UpdateOneModel, UpdateManyModel:
+			if updateInd == -1 {
+				updateInd = numBatches
+				numBatches++
+			}
+
+			batches[updateInd].models = append(batches[updateInd].models, model)
+			// Multi-document updates make the whole batch non-retryable.
+			if _, ok := converted.(UpdateManyModel); ok {
+				batches[updateInd].canRetry = false
+			}
+		}
+	}
+
+	return batches
+}
+
+// createOrderedBatches splits the models into maximal runs of consecutive
+// models that share a write command kind, preserving overall order. A batch
+// is retryable only if every model in it is a single-document write.
+func createOrderedBatches(models []WriteModel) []bulkWriteBatch {
+	var batches []bulkWriteBatch
+	var prevKind command.WriteCommandKind = -1
+	i := -1 // batch index
+
+	for _, model := range models {
+		var createNewBatch bool
+		var canRetry bool
+		var newKind command.WriteCommandKind
+
+		switch model.(type) {
+		case InsertOneModel:
+			createNewBatch = prevKind != command.InsertCommand
+			canRetry = true
+			newKind = command.InsertCommand
+		case DeleteOneModel:
+			createNewBatch = prevKind != command.DeleteCommand
+			canRetry = true
+			newKind = command.DeleteCommand
+		case DeleteManyModel:
+			createNewBatch = prevKind != command.DeleteCommand
+			newKind = command.DeleteCommand
+		case ReplaceOneModel, UpdateOneModel:
+			createNewBatch = prevKind != command.UpdateCommand
+			canRetry = true
+			newKind = command.UpdateCommand
+		case UpdateManyModel:
+			createNewBatch = prevKind != command.UpdateCommand
+			newKind = command.UpdateCommand
+		}
+
+		if createNewBatch {
+			batches = append(batches, bulkWriteBatch{
+				models:   []WriteModel{model},
+				canRetry: canRetry,
+			})
+			i++
+		} else {
+			batches[i].models = append(batches[i].models, model)
+			if !canRetry {
+				batches[i].canRetry = false // don't make it true if it was already false
+			}
+		}
+
+		prevKind = newKind
+	}
+
+	return batches
+}
+
+// shouldRetry reports whether a write should be retried: either the command
+// error is a retryable command.Error, or the write concern error is retryable.
+func shouldRetry(cmdErr error, wcErr *result.WriteConcernError) bool {
+	if cerr, ok := cmdErr.(command.Error); ok && cerr.Retryable() ||
+		wcErr != nil && command.IsWriteConcernErrorRetryable(wcErr) {
+		return true
+	}
+
+	return false
+}
+
+// createUpdateDoc builds one element of an update command's "updates" array:
+// {q, u, multi} plus optional arrayFilters, collation, and upsert fields
+// taken from the model's options.
+func createUpdateDoc(
+	filter interface{},
+	update interface{},
+	arrayFilters options.ArrayFilters,
+	arrayFiltersSet bool,
+	updateModel UpdateModel,
+	multi bool,
+	registry *bsoncodec.Registry,
+) (bsonx.Doc, error) {
+	f, err := interfaceToDocument(filter, registry)
+	if err != nil {
+		return nil, err
+	}
+
+	u, err := interfaceToDocument(update, registry)
+	if err != nil {
+		return nil, err
+	}
+
+	doc := bsonx.Doc{
+		{"q", bsonx.Document(f)},
+		{"u", bsonx.Document(u)},
+		{"multi", bsonx.Boolean(multi)},
+	}
+
+	if arrayFiltersSet {
+		arr, err := arrayFilters.ToArray()
+		if err != nil {
+			return nil, err
+		}
+		doc = append(doc, bsonx.Elem{"arrayFilters", bsonx.Array(arr)})
+	}
+
+	if updateModel.Collation != nil {
+		doc = append(doc, bsonx.Elem{"collation", bsonx.Document(updateModel.Collation.ToDocument())})
+	}
+
+	// UpsertSet distinguishes "explicitly set" from the zero value.
+	if updateModel.UpsertSet {
+		doc = append(doc, bsonx.Elem{"upsert", bsonx.Boolean(updateModel.Upsert)})
+	}
+
+	return doc, nil
+}
+
+// createDeleteDoc builds one element of a delete command's "deletes" array:
+// {q, limit} plus an optional collation. limit 1 deletes a single matching
+// document; limit 0 deletes all matches.
+func createDeleteDoc(
+	filter interface{},
+	collation *options.Collation,
+	many bool,
+	registry *bsoncodec.Registry,
+) (bsonx.Doc, error) {
+	f, err := interfaceToDocument(filter, registry)
+	if err != nil {
+		return nil, err
+	}
+
+	var limit int32 = 1
+	if many {
+		limit = 0
+	}
+
+	doc := bsonx.Doc{
+		{"q", bsonx.Document(f)},
+		{"limit", bsonx.Int32(limit)},
+	}
+
+	if collation != nil {
+		doc = append(doc, bsonx.Elem{"collation", bsonx.Document(collation.ToDocument())})
+	}
+
+	return doc, nil
+}
+
+// mergeResults folds one batch's counts into the aggregate result; upserted
+// IDs are re-keyed by opIndex, the batch's offset into the original models.
+func mergeResults(aggResult *result.BulkWrite, newResult result.BulkWrite, opIndex int64) {
+	aggResult.InsertedCount += newResult.InsertedCount
+	aggResult.MatchedCount += newResult.MatchedCount
+	aggResult.ModifiedCount += newResult.ModifiedCount
+	aggResult.DeletedCount += newResult.DeletedCount
+	aggResult.UpsertedCount += newResult.UpsertedCount
+
+	for index, upsertID := range newResult.UpsertedIDs {
+		aggResult.UpsertedIDs[index+opIndex] = upsertID
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/commit_transaction.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/commit_transaction.go
new file mode 100644
index 0000000..a3b3439
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/commit_transaction.go
@@ -0,0 +1,106 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// CommitTransaction handles the full cycle dispatch and execution of committing a transaction
+// against the provided topology.
+func CommitTransaction(
+	ctx context.Context,
+	cmd command.CommitTransaction,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+) (result.TransactionResult, error) {
+	res, err := commitTransaction(ctx, cmd, topo, selector, nil)
+	if cerr, ok := err.(command.Error); ok && err != nil {
+		// Retry if appropriate
+		if cerr.Retryable() {
+			res, err = commitTransaction(ctx, cmd, topo, selector, cerr)
+			if cerr2, ok := err.(command.Error); ok && err != nil {
+				// Retry failures also get label
+				cerr2.Labels = append(cerr2.Labels, command.UnknownTransactionCommitResult)
+			} else if err != nil {
+				err = command.Error{Message: err.Error(), Labels: []string{command.UnknownTransactionCommitResult}}
+			}
+		}
+	}
+	return res, err
+}
+
+// commitTransaction performs one attempt of the commitTransaction command.
+// oldErr, when non-nil, is the error from a previous attempt: it is returned
+// in preference to new server-selection/connection errors, and signals that
+// this call is a retry (so the server must support sessions/retryable writes).
+func commitTransaction(
+	ctx context.Context,
+	cmd command.CommitTransaction,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	oldErr error,
+) (result.TransactionResult, error) {
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		// If retrying server selection, return the original error if it fails
+		if oldErr != nil {
+			return result.TransactionResult{}, oldErr
+		}
+		return result.TransactionResult{}, err
+	}
+
+	desc := ss.Description()
+
+	if oldErr != nil && (!topo.SupportsSessions() || !description.SessionsSupported(desc.WireVersion)) {
+		// Assuming we are retrying (oldErr != nil),
+		// if server doesn't support retryable writes, return the original error
+		// Conditions for retry write support are the same as that of sessions
+		return result.TransactionResult{}, oldErr
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.TransactionResult{}, oldErr
+		}
+		return result.TransactionResult{}, err
+	}
+	defer conn.Close()
+
+	res, err := cmd.RoundTrip(ctx, desc, conn)
+
+	// Add UnknownCommitTransaction Error label where appropriate
+	if err != nil {
+		var newLabels []string
+		if cerr, ok := err.(command.Error); ok {
+			// Replace the label TransientTransactionError with UnknownTransactionCommitResult
+			// if network error, write concern shutdown, or write concern failed errors
+			hasUnknownCommitErr := false
+			for _, label := range cerr.Labels {
+				if label == command.NetworkError {
+					hasUnknownCommitErr = true
+					break
+				}
+			}
+
+			// network error, retryable error, or write concern fail/timeout (64) get the unknown label
+			if hasUnknownCommitErr || cerr.Retryable() || cerr.Code == 64 {
+				for _, label := range cerr.Labels {
+					if label != command.TransientTransactionError {
+						newLabels = append(newLabels, label)
+					}
+				}
+				newLabels = append(newLabels, command.UnknownTransactionCommitResult)
+				cerr.Labels = newLabels
+			}
+			err = cerr
+		}
+	}
+	return res, err
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/count.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/count.go
new file mode 100644
index 0000000..7b7fba4
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/count.go
@@ -0,0 +1,93 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Count handles the full cycle dispatch and execution of a count command against the provided
+// topology.
+func Count(
+	ctx context.Context,
+	cmd command.Count,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	registry *bsoncodec.Registry,
+	opts ...*options.CountOptions,
+) (int64, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return 0, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return 0, err
+	}
+	defer conn.Close()
+
+	// A command in a transaction may need its read preference overridden.
+	rp, err := getReadPrefBasedOnTransaction(cmd.ReadPref, cmd.Session)
+	if err != nil {
+		return 0, err
+	}
+	cmd.ReadPref = rp
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return 0, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	// Translate the merged CountOptions into wire-command elements.
+	countOpts := options.MergeCountOptions(opts...)
+
+	if countOpts.Limit != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"limit", bsonx.Int64(*countOpts.Limit)})
+	}
+	if countOpts.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{
+			"maxTimeMS", bsonx.Int64(int64(*countOpts.MaxTime / time.Millisecond)),
+		})
+	}
+	if countOpts.Skip != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"skip", bsonx.Int64(*countOpts.Skip)})
+	}
+	if countOpts.Collation != nil {
+		// Collation requires wire version >= 5 (server 3.4+).
+		if desc.WireVersion.Max < 5 {
+			return 0, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(countOpts.Collation.ToDocument())})
+	}
+	if countOpts.Hint != nil {
+		hintElem, err := interfaceToElement("hint", countOpts.Hint, registry)
+		if err != nil {
+			return 0, err
+		}
+
+		cmd.Opts = append(cmd.Opts, hintElem)
+	}
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/count_documents.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/count_documents.go
new file mode 100644
index 0000000..b727d53
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/count_documents.go
@@ -0,0 +1,87 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"time"
+)
+
+// CountDocuments handles the full cycle dispatch and execution of a countDocuments command against the provided
+// topology.
+func CountDocuments(
+	ctx context.Context,
+	cmd command.CountDocuments,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	registry *bsoncodec.Registry,
+	opts ...*options.CountOptions,
+) (int64, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return 0, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return 0, err
+	}
+	defer conn.Close()
+
+	// A command in a transaction may need its read preference overridden.
+	rp, err := getReadPrefBasedOnTransaction(cmd.ReadPref, cmd.Session)
+	if err != nil {
+		return 0, err
+	}
+	cmd.ReadPref = rp
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return 0, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	countOpts := options.MergeCountOptions(opts...)
+
+	// ignore Skip and Limit because we already have these options in the pipeline
+	if countOpts.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{
+			"maxTimeMS", bsonx.Int64(int64(*countOpts.MaxTime / time.Millisecond)),
+		})
+	}
+	if countOpts.Collation != nil {
+		// Collation requires wire version >= 5 (server 3.4+).
+		if desc.WireVersion.Max < 5 {
+			return 0, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(countOpts.Collation.ToDocument())})
+	}
+	if countOpts.Hint != nil {
+		hintElem, err := interfaceToElement("hint", countOpts.Hint, registry)
+		if err != nil {
+			return 0, err
+		}
+
+		cmd.Opts = append(cmd.Opts, hintElem)
+	}
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/create_indexes.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/create_indexes.go
new file mode 100644
index 0000000..48b277e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/create_indexes.go
@@ -0,0 +1,77 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// CreateIndexes handles the full cycle dispatch and execution of a createIndexes
+// command against the provided topology.
+func CreateIndexes(
+	ctx context.Context,
+	cmd command.CreateIndexes,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	opts ...*options.CreateIndexesOptions,
+) (result.CreateIndexes, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.CreateIndexes{}, err
+	}
+
+	// Index collation requires wire version >= 5 (server 3.4+).
+	desc := ss.Description()
+	if desc.WireVersion.Max < 5 && hasCollation(cmd) {
+		return result.CreateIndexes{}, ErrCollation
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return result.CreateIndexes{}, err
+	}
+	defer conn.Close()
+
+	cio := options.MergeCreateIndexesOptions(opts...)
+	if cio.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*cio.MaxTime / time.Millisecond))})
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.CreateIndexes{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	return cmd.RoundTrip(ctx, ss.Description(), conn)
+}
+
+// hasCollation reports whether any index model in the createIndexes command
+// specifies a "collation" field (used to gate on server wire version).
+func hasCollation(cmd command.CreateIndexes) bool {
+	for _, ind := range cmd.Indexes {
+		// LookupErr returns nil when the "collation" key is present.
+		if _, err := ind.Document().LookupErr("collation"); err == nil {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/delete.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/delete.go
new file mode 100644
index 0000000..c622f9b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/delete.go
@@ -0,0 +1,116 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// Delete handles the full cycle dispatch and execution of a delete command against the provided
+// topology.
+func Delete(
+	ctx context.Context,
+	cmd command.Delete,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	opts ...*options.DeleteOptions,
+) (result.Delete, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.Delete{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	// Implicit sessions are only used for acknowledged writes.
+	if cmd.Session == nil && topo.SupportsSessions() && writeconcern.AckWrite(cmd.WriteConcern) {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.Delete{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	deleteOpts := options.MergeDeleteOptions(opts...)
+	if deleteOpts.Collation != nil {
+		// Collation requires wire version >= 5 (server 3.4+).
+		if ss.Description().WireVersion.Max < 5 {
+			return result.Delete{}, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(deleteOpts.Collation.ToDocument())})
+	}
+
+	// Execute in a single trip if retry writes not supported, or retry not enabled
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false // explicitly set to false to prevent encoding transaction number
+		}
+		return delete(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, originalErr := delete(ctx, cmd, ss, nil)
+
+	// Retry if appropriate: either a retryable command error or a retryable
+	// write concern error triggers exactly one additional attempt.
+	if cerr, ok := originalErr.(command.Error); ok && cerr.Retryable() ||
+		res.WriteConcernError != nil && command.IsWriteConcernErrorRetryable(res.WriteConcernError) {
+		ss, err := topo.SelectServer(ctx, selector)
+
+		// Return original error if server selection fails or new server does not support retryable writes
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return res, originalErr
+		}
+
+		return delete(ctx, cmd, ss, cerr)
+	}
+	return res, originalErr
+}
+
+// delete performs one attempt of the delete command on the selected server.
+// oldErr, when non-nil, is the error from a previous attempt and is returned
+// in preference to a new connection error.
+func delete(
+	ctx context.Context,
+	cmd command.Delete,
+	ss *topology.SelectedServer,
+	oldErr error,
+) (result.Delete, error) {
+	desc := ss.Description()
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.Delete{}, oldErr
+		}
+		return result.Delete{}, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		// Unacknowledged write: fire and forget on a goroutine and report
+		// ErrUnacknowledgedWrite immediately; the recover guards against
+		// panics from the background round trip.
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return result.Delete{}, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/delete_indexes.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/delete_indexes.go
new file mode 100644
index 0000000..669c797
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/delete_indexes.go
@@ -0,0 +1,62 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// DropIndexes handles the full cycle dispatch and execution of a dropIndexes
+// command against the provided topology.
+func DropIndexes(
+	ctx context.Context,
+	cmd command.DropIndexes,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	opts ...*options.DropIndexesOptions,
+) (bson.Raw, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	dio := options.MergeDropIndexesOptions(opts...)
+	if dio.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*dio.MaxTime / time.Millisecond))})
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	return cmd.RoundTrip(ctx, ss.Description(), conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/dispatch.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/dispatch.go
new file mode 100644
index 0000000..2ec642a
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/dispatch.go
@@ -0,0 +1,67 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+)
+
+// ErrCollation is caused if a collation is given for an invalid server version.
+var ErrCollation = errors.New("collation cannot be set for server versions < 3.4")
+
+// ErrArrayFilters is caused if array filters are given for an invalid server version.
+var ErrArrayFilters = errors.New("array filters cannot be set for server versions < 3.6")
+
+// interfaceToDocument marshals an arbitrary value into a bsonx.Doc using the
+// given registry (falling back to bson.DefaultRegistry when nil). A nil value
+// yields an empty document.
+func interfaceToDocument(val interface{}, registry *bsoncodec.Registry) (bsonx.Doc, error) {
+	if val == nil {
+		return bsonx.Doc{}, nil
+	}
+
+	if registry == nil {
+		registry = bson.DefaultRegistry
+	}
+
+	if bs, ok := val.([]byte); ok {
+		// Slight optimization so we'll just use MarshalBSON and not go through the codec machinery.
+		val = bson.Raw(bs)
+	}
+
+	// TODO(skriptble): Use a pool of these instead.
+	buf := make([]byte, 0, 256)
+	b, err := bson.MarshalAppendWithRegistry(registry, buf, val)
+	if err != nil {
+		return nil, err
+	}
+	return bsonx.ReadDoc(b)
+}
+
+// interfaceToElement converts an arbitrary value into a bsonx.Elem under the
+// given key. Strings and bsonx.Doc values are used directly; anything else is
+// marshalled to a document via interfaceToDocument.
+func interfaceToElement(key string, i interface{}, registry *bsoncodec.Registry) (bsonx.Elem, error) {
+	switch conv := i.(type) {
+	case string:
+		return bsonx.Elem{key, bsonx.String(conv)}, nil
+	case bsonx.Doc:
+		return bsonx.Elem{key, bsonx.Document(conv)}, nil
+	default:
+		doc, err := interfaceToDocument(i, registry)
+		if err != nil {
+			return bsonx.Elem{}, err
+		}
+
+		return bsonx.Elem{key, bsonx.Document(doc)}, nil
+	}
+}
+
+// closeImplicitSession ends sess only when it is a driver-created implicit
+// session; explicit (user-owned) sessions are left untouched.
+func closeImplicitSession(sess *session.Client) {
+	if sess != nil && sess.SessionType == session.Implicit {
+		sess.EndSession()
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/distinct.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/distinct.go
new file mode 100644
index 0000000..eed7390
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/distinct.go
@@ -0,0 +1,77 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"time"
+)
+
+// Distinct handles the full cycle dispatch and execution of a distinct command against the provided
+// topology.
+func Distinct(
+	ctx context.Context,
+	cmd command.Distinct,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	opts ...*options.DistinctOptions,
+) (result.Distinct, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.Distinct{}, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return result.Distinct{}, err
+	}
+	defer conn.Close()
+
+	// A command in a transaction may need its read preference overridden.
+	rp, err := getReadPrefBasedOnTransaction(cmd.ReadPref, cmd.Session)
+	if err != nil {
+		return result.Distinct{}, err
+	}
+	cmd.ReadPref = rp
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.Distinct{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	distinctOpts := options.MergeDistinctOptions(opts...)
+
+	if distinctOpts.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{
+			"maxTimeMS", bsonx.Int64(int64(*distinctOpts.MaxTime / time.Millisecond)),
+		})
+	}
+	if distinctOpts.Collation != nil {
+		// Collation requires wire version >= 5 (server 3.4+).
+		if desc.WireVersion.Max < 5 {
+			return result.Distinct{}, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(distinctOpts.Collation.ToDocument())})
+	}
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/drop_collection.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/drop_collection.go
new file mode 100644
index 0000000..657ba3f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/drop_collection.go
@@ -0,0 +1,52 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// DropCollection handles the full cycle dispatch and execution of a dropCollection
+// command against the provided topology.
+func DropCollection(
+	ctx context.Context,
+	cmd command.DropCollection,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+) (bson.Raw, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	return cmd.RoundTrip(ctx, ss.Description(), conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/drop_database.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/drop_database.go
new file mode 100644
index 0000000..7c9422f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/drop_database.go
@@ -0,0 +1,52 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// DropDatabase handles the full cycle dispatch and execution of a dropDatabase
+// command against the provided topology.
+func DropDatabase(
+	ctx context.Context,
+	cmd command.DropDatabase,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+) (bson.Raw, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	return cmd.RoundTrip(ctx, ss.Description(), conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/end_sessions.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/end_sessions.go
new file mode 100644
index 0000000..46bec84
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/end_sessions.go
@@ -0,0 +1,40 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// EndSessions handles the full cycle dispatch and execution of an endSessions command against the provided
+// topology.
+// EndSessions returns a slice of per-batch results and a slice of errors;
+// server-selection or connection failures are reported as a single-error slice
+// with a nil result.
+func EndSessions(
+	ctx context.Context,
+	cmd command.EndSessions,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+) ([]result.EndSessions, []error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, []error{err}
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, []error{err}
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find.go
new file mode 100644
index 0000000..0287ca7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find.go
@@ -0,0 +1,517 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Find handles the full cycle dispatch and execution of a find command against the provided
+// topology.
+func Find(
+	ctx context.Context,
+	cmd command.Find,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	registry *bsoncodec.Registry,
+	opts ...*options.FindOptions,
+) (*BatchCursor, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	if desc.WireVersion.Max < 4 {
+		return legacyFind(ctx, cmd, registry, ss, conn, opts...)
+	}
+
+	rp, err := getReadPrefBasedOnTransaction(cmd.ReadPref, cmd.Session)
+	if err != nil {
+		return nil, err
+	}
+	cmd.ReadPref = rp
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	fo := options.MergeFindOptions(opts...)
+	if fo.AllowPartialResults != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"allowPartialResults", bsonx.Boolean(*fo.AllowPartialResults)})
+	}
+	if fo.BatchSize != nil {
+		elem := bsonx.Elem{"batchSize", bsonx.Int32(*fo.BatchSize)}
+		cmd.Opts = append(cmd.Opts, elem)
+		cmd.CursorOpts = append(cmd.CursorOpts, elem)
+
+		if fo.Limit != nil && *fo.BatchSize != 0 && *fo.Limit <= int64(*fo.BatchSize) {
+			cmd.Opts = append(cmd.Opts, bsonx.Elem{"singleBatch", bsonx.Boolean(true)})
+		}
+	}
+	if fo.Collation != nil {
+		if desc.WireVersion.Max < 5 {
+			return nil, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(fo.Collation.ToDocument())})
+	}
+	if fo.Comment != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"comment", bsonx.String(*fo.Comment)})
+	}
+	if fo.CursorType != nil {
+		switch *fo.CursorType {
+		case options.Tailable:
+			cmd.Opts = append(cmd.Opts, bsonx.Elem{"tailable", bsonx.Boolean(true)})
+		case options.TailableAwait:
+			cmd.Opts = append(cmd.Opts, bsonx.Elem{"tailable", bsonx.Boolean(true)}, bsonx.Elem{"awaitData", bsonx.Boolean(true)})
+		}
+	}
+	if fo.Hint != nil {
+		hintElem, err := interfaceToElement("hint", fo.Hint, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.Opts = append(cmd.Opts, hintElem)
+	}
+	if fo.Limit != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"limit", bsonx.Int64(*fo.Limit)})
+	}
+	if fo.Max != nil {
+		maxElem, err := interfaceToElement("max", fo.Max, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.Opts = append(cmd.Opts, maxElem)
+	}
+	if fo.MaxAwaitTime != nil {
+		// Specified as maxTimeMS on the in the getMore command and not given in initial find command.
+		cmd.CursorOpts = append(cmd.CursorOpts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*fo.MaxAwaitTime / time.Millisecond))})
+	}
+	if fo.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*fo.MaxTime / time.Millisecond))})
+	}
+	if fo.Min != nil {
+		minElem, err := interfaceToElement("min", fo.Min, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.Opts = append(cmd.Opts, minElem)
+	}
+	if fo.NoCursorTimeout != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"noCursorTimeout", bsonx.Boolean(*fo.NoCursorTimeout)})
+	}
+	if fo.OplogReplay != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"oplogReplay", bsonx.Boolean(*fo.OplogReplay)})
+	}
+	if fo.Projection != nil {
+		projElem, err := interfaceToElement("projection", fo.Projection, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.Opts = append(cmd.Opts, projElem)
+	}
+	if fo.ReturnKey != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"returnKey", bsonx.Boolean(*fo.ReturnKey)})
+	}
+	if fo.ShowRecordID != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"showRecordId", bsonx.Boolean(*fo.ShowRecordID)})
+	}
+	if fo.Skip != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"skip", bsonx.Int64(*fo.Skip)})
+	}
+	if fo.Snapshot != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"snapshot", bsonx.Boolean(*fo.Snapshot)})
+	}
+	if fo.Sort != nil {
+		sortElem, err := interfaceToElement("sort", fo.Sort, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.Opts = append(cmd.Opts, sortElem)
+	}
+
+	res, err := cmd.RoundTrip(ctx, desc, conn)
+	if err != nil {
+		closeImplicitSession(cmd.Session)
+		return nil, err
+	}
+
+	return NewBatchCursor(bsoncore.Document(res), cmd.Session, cmd.Clock, ss.Server, cmd.CursorOpts...)
+}
+
+// legacyFind handles the dispatch and execution of a find operation against a pre-3.2 server.
+func legacyFind(
+	ctx context.Context,
+	cmd command.Find,
+	registry *bsoncodec.Registry,
+	ss *topology.SelectedServer,
+	conn connection.Connection,
+	opts ...*options.FindOptions,
+) (*BatchCursor, error) {
+	query := wiremessage.Query{
+		FullCollectionName: cmd.NS.DB + "." + cmd.NS.Collection,
+	}
+
+	fo := options.MergeFindOptions(opts...)
+	optsDoc, err := createLegacyOptionsDoc(fo, registry)
+	if err != nil {
+		return nil, err
+	}
+	if fo.Projection != nil {
+		projDoc, err := interfaceToDocument(fo.Projection, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		projRaw, err := projDoc.MarshalBSON()
+		if err != nil {
+			return nil, err
+		}
+		query.ReturnFieldsSelector = projRaw
+	}
+	if fo.Skip != nil {
+		query.NumberToSkip = int32(*fo.Skip)
+		query.SkipSet = true
+	}
+	// batch size of 1 not possible with OP_QUERY because the cursor will be closed immediately
+	if fo.BatchSize != nil && *fo.BatchSize == 1 {
+		query.NumberToReturn = 2
+	} else {
+		query.NumberToReturn = calculateNumberToReturn(fo)
+	}
+	query.Flags = calculateLegacyFlags(fo)
+
+	query.BatchSize = fo.BatchSize
+	if fo.Limit != nil {
+		i := int32(*fo.Limit)
+		query.Limit = &i
+	}
+
+	// set read preference and/or slaveOK flag
+	desc := ss.SelectedDescription()
+	if slaveOkNeeded(cmd.ReadPref, desc) {
+		query.Flags |= wiremessage.SlaveOK
+	}
+	optsDoc = addReadPref(cmd.ReadPref, desc.Server.Kind, optsDoc)
+
+	if cmd.Filter == nil {
+		cmd.Filter = bsonx.Doc{}
+	}
+
+	// filter must be wrapped in $query if other $modifiers are used
+	var queryDoc bsonx.Doc
+	if len(optsDoc) == 0 {
+		queryDoc = cmd.Filter
+	} else {
+		filterDoc := bsonx.Doc{
+			{"$query", bsonx.Document(cmd.Filter)},
+		}
+		// $query should go first
+		queryDoc = append(filterDoc, optsDoc...)
+	}
+
+	queryRaw, err := queryDoc.MarshalBSON()
+	if err != nil {
+		return nil, err
+	}
+	query.Query = queryRaw
+
+	reply, err := roundTripQuery(ctx, query, conn)
+	if err != nil {
+		return nil, err
+	}
+
+	var cursorLimit int32
+	var cursorBatchSize int32
+	if query.Limit != nil {
+		cursorLimit = int32(*query.Limit)
+		if cursorLimit < 0 {
+			cursorLimit *= -1
+		}
+	}
+	if query.BatchSize != nil {
+		cursorBatchSize = int32(*query.BatchSize)
+	}
+
+	return NewLegacyBatchCursor(cmd.NS, reply.CursorID, reply.Documents, cursorLimit, cursorBatchSize, ss.Server)
+}
+
+func createLegacyOptionsDoc(fo *options.FindOptions, registry *bsoncodec.Registry) (bsonx.Doc, error) {
+	var optsDoc bsonx.Doc
+
+	if fo.Collation != nil {
+		return nil, ErrCollation
+	}
+	if fo.Comment != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"$comment", bsonx.String(*fo.Comment)})
+	}
+	if fo.Hint != nil {
+		hintElem, err := interfaceToElement("$hint", fo.Hint, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, hintElem)
+	}
+	if fo.Max != nil {
+		maxElem, err := interfaceToElement("$max", fo.Max, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, maxElem)
+	}
+	if fo.MaxTime != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"$maxTimeMS", bsonx.Int64(int64(*fo.MaxTime / time.Millisecond))})
+	}
+	if fo.Min != nil {
+		minElem, err := interfaceToElement("$min", fo.Min, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, minElem)
+	}
+	if fo.ReturnKey != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"$returnKey", bsonx.Boolean(*fo.ReturnKey)})
+	}
+	if fo.ShowRecordID != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"$showDiskLoc", bsonx.Boolean(*fo.ShowRecordID)})
+	}
+	if fo.Snapshot != nil {
+		optsDoc = append(optsDoc, bsonx.Elem{"$snapshot", bsonx.Boolean(*fo.Snapshot)})
+	}
+	if fo.Sort != nil {
+		sortElem, err := interfaceToElement("$orderby", fo.Sort, registry)
+		if err != nil {
+			return nil, err
+		}
+
+		optsDoc = append(optsDoc, sortElem)
+	}
+
+	return optsDoc, nil
+}
+
+func calculateLegacyFlags(fo *options.FindOptions) wiremessage.QueryFlag {
+	var flags wiremessage.QueryFlag
+
+	if fo.AllowPartialResults != nil {
+		flags |= wiremessage.Partial
+	}
+	if fo.CursorType != nil {
+		switch *fo.CursorType {
+		case options.Tailable:
+			flags |= wiremessage.TailableCursor
+		case options.TailableAwait:
+			flags |= wiremessage.TailableCursor
+			flags |= wiremessage.AwaitData
+		}
+	}
+	if fo.NoCursorTimeout != nil {
+		flags |= wiremessage.NoCursorTimeout
+	}
+	if fo.OplogReplay != nil {
+		flags |= wiremessage.OplogReplay
+	}
+
+	return flags
+}
+
+// calculate the number to return for the first find query
+func calculateNumberToReturn(opts *options.FindOptions) int32 {
+	var numReturn int32
+	var limit int32
+	var batchSize int32
+
+	if opts.Limit != nil {
+		limit = int32(*opts.Limit)
+	}
+	if opts.BatchSize != nil {
+		batchSize = int32(*opts.BatchSize)
+	}
+
+	if limit < 0 {
+		numReturn = limit
+	} else if limit == 0 {
+		numReturn = batchSize
+	} else if limit < batchSize {
+		numReturn = limit
+	} else {
+		numReturn = batchSize
+	}
+
+	return numReturn
+}
+
+func slaveOkNeeded(rp *readpref.ReadPref, desc description.SelectedServer) bool {
+	if desc.Kind == description.Single && desc.Server.Kind != description.Mongos {
+		return true
+	}
+	if rp == nil {
+		// assume primary
+		return false
+	}
+
+	return rp.Mode() != readpref.PrimaryMode
+}
+
+func addReadPref(rp *readpref.ReadPref, kind description.ServerKind, query bsonx.Doc) bsonx.Doc {
+	if !readPrefNeeded(rp, kind) {
+		return query
+	}
+
+	doc := createReadPref(rp)
+	if doc == nil {
+		return query
+	}
+
+	return query.Append("$readPreference", bsonx.Document(doc))
+}
+
+func readPrefNeeded(rp *readpref.ReadPref, kind description.ServerKind) bool {
+	if kind != description.Mongos || rp == nil {
+		return false
+	}
+
+	// simple Primary or SecondaryPreferred is communicated via slaveOk to Mongos.
+	if rp.Mode() == readpref.PrimaryMode || rp.Mode() == readpref.SecondaryPreferredMode {
+		if _, ok := rp.MaxStaleness(); !ok && len(rp.TagSets()) == 0 {
+			return false
+		}
+	}
+
+	return true
+}
+
+func createReadPref(rp *readpref.ReadPref) bsonx.Doc {
+	if rp == nil {
+		return nil
+	}
+
+	doc := bsonx.Doc{}
+
+	switch rp.Mode() {
+	case readpref.PrimaryMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("primary")})
+	case readpref.PrimaryPreferredMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("primaryPreferred")})
+	case readpref.SecondaryPreferredMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("secondaryPreferred")})
+	case readpref.SecondaryMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("secondary")})
+	case readpref.NearestMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("nearest")})
+	}
+
+	sets := make([]bsonx.Val, 0, len(rp.TagSets()))
+	for _, ts := range rp.TagSets() {
+		if len(ts) == 0 {
+			continue
+		}
+		set := bsonx.Doc{}
+		for _, t := range ts {
+			set = append(set, bsonx.Elem{t.Name, bsonx.String(t.Value)})
+		}
+		sets = append(sets, bsonx.Document(set))
+	}
+	if len(sets) > 0 {
+		doc = append(doc, bsonx.Elem{"tags", bsonx.Array(sets)})
+	}
+	if d, ok := rp.MaxStaleness(); ok {
+		doc = append(doc, bsonx.Elem{"maxStalenessSeconds", bsonx.Int32(int32(d.Seconds()))})
+	}
+
+	return doc
+}
+
+func roundTripQuery(ctx context.Context, query wiremessage.Query, conn connection.Connection) (wiremessage.Reply, error) {
+	err := conn.WriteWireMessage(ctx, query)
+	if err != nil {
+		if _, ok := err.(command.Error); ok {
+			return wiremessage.Reply{}, err
+		}
+		return wiremessage.Reply{}, command.Error{
+			Message: err.Error(),
+			Labels:  []string{command.NetworkError},
+		}
+	}
+
+	wm, err := conn.ReadWireMessage(ctx)
+	if err != nil {
+		if _, ok := err.(command.Error); ok {
+			return wiremessage.Reply{}, err
+		}
+		// Connection errors are transient
+		return wiremessage.Reply{}, command.Error{
+			Message: err.Error(),
+			Labels:  []string{command.NetworkError},
+		}
+	}
+
+	reply, ok := wm.(wiremessage.Reply)
+	if !ok {
+		return wiremessage.Reply{}, errors.New("did not receive OP_REPLY response")
+	}
+
+	err = validateOpReply(reply)
+	if err != nil {
+		return wiremessage.Reply{}, err
+	}
+
+	return reply, nil
+}
+
+func validateOpReply(reply wiremessage.Reply) error {
+	if int(reply.NumberReturned) != len(reply.Documents) {
+		return command.NewCommandResponseError(command.ReplyDocumentMismatch, nil)
+	}
+
+	if reply.ResponseFlags&wiremessage.QueryFailure == wiremessage.QueryFailure {
+		return command.QueryFailureError{
+			Message:  "query failure",
+			Response: reply.Documents[0],
+		}
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_delete.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_delete.go
new file mode 100644
index 0000000..8e45e88
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_delete.go
@@ -0,0 +1,137 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// FindOneAndDelete handles the full cycle dispatch and execution of a FindOneAndDelete command against the provided
+// topology.
+func FindOneAndDelete(
+	ctx context.Context,
+	cmd command.FindOneAndDelete,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	registry *bsoncodec.Registry,
+	opts ...*options.FindOneAndDeleteOptions,
+) (result.FindAndModify, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	do := options.MergeFindOneAndDeleteOptions(opts...)
+	if do.Collation != nil {
+		if ss.Description().WireVersion.Max < 5 {
+			return result.FindAndModify{}, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(do.Collation.ToDocument())})
+	}
+	if do.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMs", bsonx.Int64(int64(*do.MaxTime / time.Millisecond))})
+	}
+	if do.Projection != nil {
+		projElem, err := interfaceToElement("fields", do.Projection, registry)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, projElem)
+	}
+	if do.Sort != nil {
+		sortElem, err := interfaceToElement("sort", do.Sort, registry)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, sortElem)
+	}
+
+	// Execute in a single trip if retry writes not supported, or retry not enabled
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false // explicitly set to false to prevent encoding transaction number
+		}
+		return findOneAndDelete(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, originalErr := findOneAndDelete(ctx, cmd, ss, nil)
+
+	// Retry if appropriate
+	if cerr, ok := originalErr.(command.Error); ok && cerr.Retryable() {
+		ss, err := topo.SelectServer(ctx, selector)
+
+		// Return original error if server selection fails or new server does not support retryable writes
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return result.FindAndModify{}, originalErr
+		}
+
+		return findOneAndDelete(ctx, cmd, ss, cerr)
+	}
+
+	return res, originalErr
+}
+
+func findOneAndDelete(
+	ctx context.Context,
+	cmd command.FindOneAndDelete,
+	ss *topology.SelectedServer,
+	oldErr error,
+) (result.FindAndModify, error) {
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.FindAndModify{}, oldErr
+		}
+		return result.FindAndModify{}, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return result.FindAndModify{}, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_replace.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_replace.go
new file mode 100644
index 0000000..cde5bf5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_replace.go
@@ -0,0 +1,146 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// FindOneAndReplace handles the full cycle dispatch and execution of a FindOneAndReplace command against the provided
+// topology.
+func FindOneAndReplace(
+	ctx context.Context,
+	cmd command.FindOneAndReplace,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	registry *bsoncodec.Registry,
+	opts ...*options.FindOneAndReplaceOptions,
+) (result.FindAndModify, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	ro := options.MergeFindOneAndReplaceOptions(opts...)
+	if ro.BypassDocumentValidation != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"byapssDocumentValidation", bsonx.Boolean(*ro.BypassDocumentValidation)})
+	}
+	if ro.Collation != nil {
+		if ss.Description().WireVersion.Max < 5 {
+			return result.FindAndModify{}, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(ro.Collation.ToDocument())})
+	}
+	if ro.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*ro.MaxTime / time.Millisecond))})
+	}
+	if ro.Projection != nil {
+		maxElem, err := interfaceToElement("fields", ro.Projection, registry)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, maxElem)
+	}
+	if ro.ReturnDocument != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"new", bsonx.Boolean(*ro.ReturnDocument == options.After)})
+	}
+	if ro.Sort != nil {
+		sortElem, err := interfaceToElement("sort", ro.Sort, registry)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, sortElem)
+	}
+	if ro.Upsert != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"upsert", bsonx.Boolean(*ro.Upsert)})
+	}
+
+	// Execute in a single trip if retry writes not supported, or retry not enabled
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false // explicitly set to false to prevent encoding transaction number
+		}
+		return findOneAndReplace(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, originalErr := findOneAndReplace(ctx, cmd, ss, nil)
+
+	// Retry if appropriate
+	if cerr, ok := originalErr.(command.Error); ok && cerr.Retryable() {
+		ss, err := topo.SelectServer(ctx, selector)
+
+		// Return original error if server selection fails or new server does not support retryable writes
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return result.FindAndModify{}, originalErr
+		}
+
+		return findOneAndReplace(ctx, cmd, ss, cerr)
+	}
+
+	return res, originalErr
+}
+
+func findOneAndReplace(
+	ctx context.Context,
+	cmd command.FindOneAndReplace,
+	ss *topology.SelectedServer,
+	oldErr error,
+) (result.FindAndModify, error) {
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.FindAndModify{}, oldErr
+		}
+		return result.FindAndModify{}, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return result.FindAndModify{}, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_update.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_update.go
new file mode 100644
index 0000000..d6c8e67
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/find_one_and_update.go
@@ -0,0 +1,154 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// FindOneAndUpdate handles the full cycle dispatch and execution of a FindOneAndUpdate command against the provided
+// topology.
+func FindOneAndUpdate(
+	ctx context.Context,
+	cmd command.FindOneAndUpdate,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	registry *bsoncodec.Registry,
+	opts ...*options.FindOneAndUpdateOptions,
+) (result.FindAndModify, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	uo := options.MergeFindOneAndUpdateOptions(opts...)
+	if uo.ArrayFilters != nil {
+		arr, err := uo.ArrayFilters.ToArray()
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"arrayFilters", bsonx.Array(arr)})
+	}
+	if uo.BypassDocumentValidation != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"bypassDocumentValidation", bsonx.Boolean(*uo.BypassDocumentValidation)})
+	}
+	if uo.Collation != nil {
+		if ss.Description().WireVersion.Max < 5 {
+			return result.FindAndModify{}, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(uo.Collation.ToDocument())})
+	}
+	if uo.MaxTime != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*uo.MaxTime / time.Millisecond))})
+	}
+	if uo.Projection != nil {
+		projElem, err := interfaceToElement("fields", uo.Projection, registry)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, projElem)
+	}
+	if uo.ReturnDocument != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"new", bsonx.Boolean(*uo.ReturnDocument == options.After)})
+	}
+	if uo.Sort != nil {
+		sortElem, err := interfaceToElement("sort", uo.Sort, registry)
+		if err != nil {
+			return result.FindAndModify{}, err
+		}
+
+		cmd.Opts = append(cmd.Opts, sortElem)
+	}
+	if uo.Upsert != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"upsert", bsonx.Boolean(*uo.Upsert)})
+	}
+
+	// Execute in a single trip if retry writes not supported, or retry not enabled
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false // explicitly set to false to prevent encoding transaction number
+		}
+		return findOneAndUpdate(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, originalErr := findOneAndUpdate(ctx, cmd, ss, nil)
+
+	// Retry if appropriate
+	if cerr, ok := originalErr.(command.Error); ok && cerr.Retryable() {
+		ss, err := topo.SelectServer(ctx, selector)
+
+		// Return original error if server selection fails or new server does not support retryable writes
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return result.FindAndModify{}, originalErr
+		}
+
+		return findOneAndUpdate(ctx, cmd, ss, cerr)
+	}
+
+	return res, originalErr
+}
+
+func findOneAndUpdate(
+	ctx context.Context,
+	cmd command.FindOneAndUpdate,
+	ss *topology.SelectedServer,
+	oldErr error,
+) (result.FindAndModify, error) {
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.FindAndModify{}, oldErr
+		}
+		return result.FindAndModify{}, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return result.FindAndModify{}, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/insert.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/insert.go
new file mode 100644
index 0000000..214774c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/insert.go
@@ -0,0 +1,118 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// Insert handles the full cycle dispatch and execution of an insert command against the provided
+// topology.
+func Insert(
+	ctx context.Context,
+	cmd command.Insert,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	opts ...*options.InsertManyOptions,
+) (result.Insert, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.Insert{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.Insert{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	insertOpts := options.MergeInsertManyOptions(opts...)
+
+	if insertOpts.BypassDocumentValidation != nil && ss.Description().WireVersion.Includes(4) {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"bypassDocumentValidation", bsonx.Boolean(*insertOpts.BypassDocumentValidation)})
+	}
+	if insertOpts.Ordered != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"ordered", bsonx.Boolean(*insertOpts.Ordered)})
+	}
+
+	// Execute in a single trip if retry writes not supported, or retry not enabled
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false // explicitly set to false to prevent encoding transaction number
+		}
+		return insert(ctx, cmd, ss, nil)
+	}
+
+	// TODO figure out best place to put retry write.  Command shouldn't have to know about this field.
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, originalErr := insert(ctx, cmd, ss, nil)
+
+	// Retry if appropriate
+	if cerr, ok := originalErr.(command.Error); ok && cerr.Retryable() ||
+		res.WriteConcernError != nil && command.IsWriteConcernErrorRetryable(res.WriteConcernError) {
+		ss, err := topo.SelectServer(ctx, selector)
+
+		// Return original error if server selection fails or new server does not support retryable writes
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return res, originalErr
+		}
+
+		return insert(ctx, cmd, ss, cerr)
+	}
+
+	return res, originalErr
+}
+
+func insert(
+	ctx context.Context,
+	cmd command.Insert,
+	ss *topology.SelectedServer,
+	oldErr error,
+) (result.Insert, error) {
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.Insert{}, oldErr
+		}
+		return result.Insert{}, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return result.Insert{}, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/kill_cursors.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/kill_cursors.go
new file mode 100644
index 0000000..574d027
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/kill_cursors.go
@@ -0,0 +1,37 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
// KillCursors handles the full cycle dispatch and execution of a killCursors command against the provided
// topology.
func KillCursors(
	ctx context.Context,
	cmd command.KillCursors,
	topo *topology.Topology,
	selector description.ServerSelector,
) (result.KillCursors, error) {
	ss, err := topo.SelectServer(ctx, selector)
	if err != nil {
		return result.KillCursors{}, err
	}
	desc := ss.Description()
	conn, err := ss.Connection(ctx)
	if err != nil {
		return result.KillCursors{}, err
	}
	defer conn.Close()
	return cmd.RoundTrip(ctx, desc, conn)
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_collections.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_collections.go
new file mode 100644
index 0000000..c52df2d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_collections.go
@@ -0,0 +1,133 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
// ErrFilterType is returned when a non-string filter is specified.
var ErrFilterType = errors.New("filter must be a string")
+
// ListCollections handles the full cycle dispatch and execution of a listCollections command against the provided
// topology.
func ListCollections(
	ctx context.Context,
	cmd command.ListCollections,
	topo *topology.Topology,
	selector description.ServerSelector,
	clientID uuid.UUID,
	pool *session.Pool,
	opts ...*options.ListCollectionsOptions,
) (*ListCollectionsBatchCursor, error) {

	ss, err := topo.SelectServer(ctx, selector)
	if err != nil {
		return nil, err
	}

	conn, err := ss.Connection(ctx)
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	// Servers below wire version 3 do not support the listCollections command;
	// fall back to querying system.namespaces directly.
	if ss.Description().WireVersion.Max < 3 {
		return legacyListCollections(ctx, cmd, ss, conn)
	}

	// Inside a running transaction the transaction's read preference wins.
	rp, err := getReadPrefBasedOnTransaction(cmd.ReadPref, cmd.Session)
	if err != nil {
		return nil, err
	}
	cmd.ReadPref = rp

	// If no explicit session and deployment supports sessions, start implicit session.
	if cmd.Session == nil && topo.SupportsSessions() {
		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
		if err != nil {
			return nil, err
		}
	}

	lc := options.MergeListCollectionsOptions(opts...)
	if lc.NameOnly != nil {
		cmd.Opts = append(cmd.Opts, bsonx.Elem{"nameOnly", bsonx.Boolean(*lc.NameOnly)})
	}

	res, err := cmd.RoundTrip(ctx, ss.Description(), conn)
	if err != nil {
		// On success the cursor takes ownership of any implicit session; on
		// failure it must be released here.
		closeImplicitSession(cmd.Session)
		return nil, err
	}

	batchCursor, err := NewBatchCursor(bsoncore.Document(res), cmd.Session, cmd.Clock, ss.Server, cmd.CursorOpts...)
	if err != nil {
		closeImplicitSession(cmd.Session)
		return nil, err
	}

	return NewListCollectionsBatchCursor(batchCursor)
}
+
+func legacyListCollections(
+	ctx context.Context,
+	cmd command.ListCollections,
+	ss *topology.SelectedServer,
+	conn connection.Connection,
+) (*ListCollectionsBatchCursor, error) {
+	filter, err := transformFilter(cmd.Filter, cmd.DB)
+	if err != nil {
+		return nil, err
+	}
+
+	findCmd := command.Find{
+		NS:       command.NewNamespace(cmd.DB, "system.namespaces"),
+		ReadPref: cmd.ReadPref,
+		Filter:   filter,
+	}
+
+	// don't need registry because it's used to create BSON docs for find options that don't exist in this case
+	batchCursor, err := legacyFind(ctx, findCmd, nil, ss, conn)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewLegacyListCollectionsBatchCursor(batchCursor)
+}
+
+// modify the user-supplied filter to prefix the "name" field with the database name.
+// returns the original filter if the name field is not present or a copy with the modified name field if it is
+func transformFilter(filter bsonx.Doc, dbName string) (bsonx.Doc, error) {
+	if filter == nil {
+		return filter, nil
+	}
+
+	if nameVal, err := filter.LookupErr("name"); err == nil {
+		name, ok := nameVal.StringValueOK()
+		if !ok {
+			return nil, ErrFilterType
+		}
+
+		filterCopy := filter.Copy()
+		filterCopy.Set("name", bsonx.String(dbName+"."+name))
+		return filterCopy, nil
+	}
+	return filter, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_collections_batch_cursor.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_collections_batch_cursor.go
new file mode 100644
index 0000000..11fa77d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_collections_batch_cursor.go
@@ -0,0 +1,121 @@
+package driver
+
+import (
+	"context"
+	"errors"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+)
+
// ListCollectionsBatchCursor is a special batch cursor returned from ListCollections that properly
// handles current and legacy ListCollections operations.
type ListCollectionsBatchCursor struct {
	legacy       bool         // true when results come from system.namespaces on a pre-3.0 server
	bc           *BatchCursor // underlying cursor supplying raw batches
	currentBatch []byte       // current (possibly rewritten) batch of documents
	err          error        // sticky error recorded while rewriting a legacy batch
}
+
+// NewListCollectionsBatchCursor creates a new non-legacy ListCollectionsCursor.
+func NewListCollectionsBatchCursor(bc *BatchCursor) (*ListCollectionsBatchCursor, error) {
+	if bc == nil {
+		return nil, errors.New("batch cursor must not be nil")
+	}
+	return &ListCollectionsBatchCursor{bc: bc}, nil
+}
+
+// NewLegacyListCollectionsBatchCursor creates a new legacy ListCollectionsCursor.
+func NewLegacyListCollectionsBatchCursor(bc *BatchCursor) (*ListCollectionsBatchCursor, error) {
+	if bc == nil {
+		return nil, errors.New("batch cursor must not be nil")
+	}
+	return &ListCollectionsBatchCursor{legacy: true, bc: bc}, nil
+}
+
+// ID returns the cursor ID for this batch cursor.
+func (lcbc *ListCollectionsBatchCursor) ID() int64 {
+	return lcbc.bc.ID()
+}
+
// Next indicates if there is another batch available. Returning false does not necessarily indicate
// that the cursor is closed. This method will return false when an empty batch is returned.
//
// If Next returns true, there is a valid batch of documents available. If Next returns false, there
// is not a valid batch of documents available.
func (lcbc *ListCollectionsBatchCursor) Next(ctx context.Context) bool {
	if !lcbc.bc.Next(ctx) {
		return false
	}

	// Modern servers return documents in the expected shape; pass the batch
	// through untouched.
	if !lcbc.legacy {
		lcbc.currentBatch = lcbc.bc.currentBatch
		return true
	}

	// Legacy path: rewrite each document so its "name" field no longer carries
	// the "<db>." prefix used by system.namespaces.
	batch := lcbc.bc.currentBatch
	lcbc.currentBatch = lcbc.currentBatch[:0] // reuse our buffer's capacity
	var doc bsoncore.Document
	var ok bool
	for {
		doc, batch, ok = bsoncore.ReadDocument(batch)
		if !ok {
			break
		}

		doc, lcbc.err = lcbc.projectNameElement(doc)
		if lcbc.err != nil {
			// Record the error for Err() and report no batch available.
			return false
		}
		lcbc.currentBatch = append(lcbc.currentBatch, doc...)
	}

	return true
}
+
+// Batch will append the current batch of documents to dst. RequiredBytes can be called to determine
+// the length of the current batch of documents.
+//
+// If there is no batch available, this method does nothing.
+func (lcbc *ListCollectionsBatchCursor) Batch(dst []byte) []byte {
+	return append(dst, lcbc.currentBatch...)
+}
+
+// RequiredBytes returns the number of bytes required for the current batch.
+func (lcbc *ListCollectionsBatchCursor) RequiredBytes() int { return len(lcbc.currentBatch) }
+
+// Err returns the latest error encountered.
+func (lcbc *ListCollectionsBatchCursor) Err() error {
+	if lcbc.err != nil {
+		return lcbc.err
+	}
+	return lcbc.bc.Err()
+}
+
+// Close closes this batch cursor.
+func (lcbc *ListCollectionsBatchCursor) Close(ctx context.Context) error { return lcbc.bc.Close(ctx) }
+
// project out the database name for a legacy server: rewrites the "name"
// element of rawDoc by dropping everything up to and including the first "."
// (the "<db>." prefix used by system.namespaces) and returns a new document
// with every other element copied unchanged.
func (*ListCollectionsBatchCursor) projectNameElement(rawDoc bsoncore.Document) (bsoncore.Document, error) {
	elems, err := rawDoc.Elements()
	if err != nil {
		return nil, err
	}

	var filteredElems []byte
	for _, elem := range elems {
		key := elem.Key()
		if key != "name" {
			filteredElems = append(filteredElems, elem...)
			continue
		}

		// NOTE(review): StringValue panics if "name" is not a BSON string —
		// presumably legacy servers always return a string here; confirm.
		name := elem.Value().StringValue()
		collName := name[strings.Index(name, ".")+1:]
		filteredElems = bsoncore.AppendStringElement(filteredElems, "name", collName)
	}

	var filteredDoc []byte
	filteredDoc = bsoncore.BuildDocument(filteredDoc, filteredElems)
	return filteredDoc, nil
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_databases.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_databases.go
new file mode 100644
index 0000000..608269b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_databases.go
@@ -0,0 +1,60 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
// ListDatabases handles the full cycle dispatch and execution of a listDatabases command against the provided
// topology.
func ListDatabases(
	ctx context.Context,
	cmd command.ListDatabases,
	topo *topology.Topology,
	selector description.ServerSelector,
	clientID uuid.UUID,
	pool *session.Pool,
	opts ...*options.ListDatabasesOptions,
) (result.ListDatabases, error) {

	ss, err := topo.SelectServer(ctx, selector)
	if err != nil {
		return result.ListDatabases{}, err
	}

	conn, err := ss.Connection(ctx)
	if err != nil {
		return result.ListDatabases{}, err
	}
	defer conn.Close()

	// If no explicit session and deployment supports sessions, start implicit session.
	// The implicit session can be ended when this function returns because the
	// full result is returned directly rather than through a cursor.
	if cmd.Session == nil && topo.SupportsSessions() {
		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
		if err != nil {
			return result.ListDatabases{}, err
		}
		defer cmd.Session.EndSession()
	}

	ld := options.MergeListDatabasesOptions(opts...)
	if ld.NameOnly != nil {
		cmd.Opts = append(cmd.Opts, bsonx.Elem{"nameOnly", bsonx.Boolean(*ld.NameOnly)})
	}

	return cmd.RoundTrip(ctx, ss.Description(), conn)
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_indexes.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_indexes.go
new file mode 100644
index 0000000..d40ef96
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/list_indexes.go
@@ -0,0 +1,105 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
// ListIndexes handles the full cycle dispatch and execution of a listIndexes command against the provided
// topology.
func ListIndexes(
	ctx context.Context,
	cmd command.ListIndexes,
	topo *topology.Topology,
	selector description.ServerSelector,
	clientID uuid.UUID,
	pool *session.Pool,
	opts ...*options.ListIndexesOptions,
) (*BatchCursor, error) {

	ss, err := topo.SelectServer(ctx, selector)
	if err != nil {
		return nil, err
	}

	conn, err := ss.Connection(ctx)
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	// Servers below wire version 3 do not support the listIndexes command;
	// fall back to querying system.indexes directly.
	if ss.Description().WireVersion.Max < 3 {
		return legacyListIndexes(ctx, cmd, ss, conn, opts...)
	}

	lio := options.MergeListIndexesOptions(opts...)
	if lio.BatchSize != nil {
		// Batch size applies both to the initial command and to getMores.
		elem := bsonx.Elem{"batchSize", bsonx.Int32(*lio.BatchSize)}
		cmd.Opts = append(cmd.Opts, elem)
		cmd.CursorOpts = append(cmd.CursorOpts, elem)
	}
	if lio.MaxTime != nil {
		cmd.Opts = append(cmd.Opts, bsonx.Elem{"maxTimeMS", bsonx.Int64(int64(*lio.MaxTime / time.Millisecond))})
	}

	// If no explicit session and deployment supports sessions, start implicit session.
	if cmd.Session == nil && topo.SupportsSessions() {
		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
		if err != nil {
			return nil, err
		}
	}

	res, err := cmd.RoundTrip(ctx, ss.Description(), conn)
	if err != nil {
		// On success the cursor takes ownership of any implicit session; on
		// failure it must be released here.
		closeImplicitSession(cmd.Session)
		return nil, err
	}

	return NewBatchCursor(bsoncore.Document(res), cmd.Session, cmd.Clock, ss.Server, cmd.CursorOpts...)
}
+
+func legacyListIndexes(
+	ctx context.Context,
+	cmd command.ListIndexes,
+	ss *topology.SelectedServer,
+	conn connection.Connection,
+	opts ...*options.ListIndexesOptions,
+) (*BatchCursor, error) {
+	lio := options.MergeListIndexesOptions(opts...)
+	ns := cmd.NS.DB + "." + cmd.NS.Collection
+
+	findCmd := command.Find{
+		NS: command.NewNamespace(cmd.NS.DB, "system.indexes"),
+		Filter: bsonx.Doc{
+			{"ns", bsonx.String(ns)},
+		},
+	}
+
+	findOpts := options.Find()
+	if lio.BatchSize != nil {
+		findOpts.SetBatchSize(*lio.BatchSize)
+	}
+	if lio.MaxTime != nil {
+		findOpts.SetMaxTime(*lio.MaxTime)
+	}
+
+	return legacyFind(ctx, findCmd, nil, ss, conn, findOpts)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/models.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/models.go
new file mode 100644
index 0000000..17ceb60
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/models.go
@@ -0,0 +1,81 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+)
+
// WriteModel is the interface satisfied by all models for bulk writes.
type WriteModel interface {
	writeModel()
}

// InsertOneModel is the write model for insert operations.
type InsertOneModel struct {
	Document interface{} // the document to insert
}

func (InsertOneModel) writeModel() {}

// DeleteOneModel is the write model for delete operations.
type DeleteOneModel struct {
	Filter    interface{}        // selects the document to delete
	Collation *options.Collation // optional collation for matching
}

func (DeleteOneModel) writeModel() {}

// DeleteManyModel is the write model for deleteMany operations.
type DeleteManyModel struct {
	Filter    interface{}        // selects the documents to delete
	Collation *options.Collation // optional collation for matching
}

func (DeleteManyModel) writeModel() {}

// UpdateModel contains the fields that are shared between the ReplaceOneModel, UpdateOneModel, and UpdateManyModel types
type UpdateModel struct {
	Collation *options.Collation
	Upsert    bool
	// UpsertSet distinguishes an explicit Upsert=false from an unset value —
	// presumably so the option is only sent when set; confirm at call sites.
	UpsertSet bool
}

// ReplaceOneModel is the write model for replace operations.
type ReplaceOneModel struct {
	Filter      interface{}
	Replacement interface{}
	UpdateModel
}

func (ReplaceOneModel) writeModel() {}

// UpdateOneModel is the write model for update operations.
type UpdateOneModel struct {
	Filter interface{}
	Update interface{}
	// default is to not send a value. for servers < 3.6, error raised if value given. for unack writes using opcodes,
	// error raised if value given
	ArrayFilters    options.ArrayFilters
	ArrayFiltersSet bool
	UpdateModel
}

func (UpdateOneModel) writeModel() {}

// UpdateManyModel is the write model for updateMany operations.
type UpdateManyModel struct {
	Filter interface{}
	Update interface{}
	// default is to not send a value. for servers < 3.6, error raised if value given. for unack writes using opcodes,
	// error raised if value given
	ArrayFilters    options.ArrayFilters
	ArrayFiltersSet bool
	UpdateModel
}

func (UpdateManyModel) writeModel() {}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/read.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/read.go
new file mode 100644
index 0000000..e31877c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/read.go
@@ -0,0 +1,85 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
// Read handles the full cycle dispatch and execution of a read command against the provided
// topology.
func Read(
	ctx context.Context,
	cmd command.Read,
	topo *topology.Topology,
	selector description.ServerSelector,
	clientID uuid.UUID,
	pool *session.Pool,
) (bson.Raw, error) {

	ss, err := topo.SelectServer(ctx, selector)
	if err != nil {
		return nil, err
	}

	conn, err := ss.Connection(ctx)
	if err != nil {
		return nil, err
	}
	defer conn.Close()

	if cmd.Session != nil && cmd.Session.TransactionRunning() {
		// When command.read is directly used, this implies an operation level
		// read preference, so we do not override it with the transaction read pref.
		err = checkTransactionReadPref(cmd.ReadPref)

		if err != nil {
			return nil, err
		}
	}

	// If no explicit session and deployment supports sessions, start implicit session.
	// It can be ended when this function returns because the full result is
	// returned directly rather than through a cursor.
	if cmd.Session == nil && topo.SupportsSessions() {
		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
		if err != nil {
			return nil, err
		}
		defer cmd.Session.EndSession()
	}

	return cmd.RoundTrip(ctx, ss.Description(), conn)
}
+
+func getReadPrefBasedOnTransaction(current *readpref.ReadPref, sess *session.Client) (*readpref.ReadPref, error) {
+	if sess != nil && sess.TransactionRunning() {
+		// Transaction's read preference always takes priority
+		current = sess.CurrentRp
+		err := checkTransactionReadPref(current)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return current, nil
+}
+
+func checkTransactionReadPref(pref *readpref.ReadPref) error {
+	if pref != nil && (pref.Mode() == readpref.SecondaryMode ||
+		pref.Mode() == readpref.SecondaryPreferredMode ||
+		pref.Mode() == readpref.NearestMode ||
+		pref.Mode() == readpref.PrimaryPreferredMode) {
+		return command.ErrNonPrimaryRP
+	}
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/read_cursor.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/read_cursor.go
new file mode 100644
index 0000000..fdc792c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/read_cursor.go
@@ -0,0 +1,69 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// ReadCursor handles the full dispatch cycle and execution of a read command against the provided topology and returns
+// a Cursor over the resulting BSON reader.
+func ReadCursor(
+	ctx context.Context,
+	cmd command.Read,
+	topo *topology.Topology,
+	selecctor description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	cursorOpts ...bsonx.Elem,
+) (*BatchCursor, error) {
+
+	ss, err := topo.SelectServer(ctx, selecctor)
+	if err != nil {
+		return nil, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer conn.Close()
+
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, conn)
+	if err != nil {
+		if cmd.Session != nil && cmd.Session.SessionType == session.Implicit {
+			cmd.Session.EndSession()
+		}
+		return nil, err
+	}
+
+	cursor, err := NewBatchCursor(bsoncore.Document(rdr), cmd.Session, cmd.Clock, ss.Server, cursorOpts...)
+	if err != nil {
+		if cmd.Session != nil && cmd.Session.SessionType == session.Implicit {
+			cmd.Session.EndSession()
+		}
+		return nil, err
+	}
+
+	return cursor, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/client_session.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/client_session.go
new file mode 100644
index 0000000..405d507
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/client_session.go
@@ -0,0 +1,347 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+)
+
// ErrSessionEnded is returned when a client session is used after a call to endSession().
var ErrSessionEnded = errors.New("ended session was used")

// ErrNoTransactStarted is returned if a transaction operation is called when no transaction has started.
var ErrNoTransactStarted = errors.New("no transaction started")

// ErrTransactInProgress is returned if startTransaction() is called when a transaction is in progress.
var ErrTransactInProgress = errors.New("transaction already in progress")

// ErrAbortAfterCommit is returned when abort is called after a commit.
var ErrAbortAfterCommit = errors.New("cannot call abortTransaction after calling commitTransaction")

// ErrAbortTwice is returned if abort is called after transaction is already aborted.
var ErrAbortTwice = errors.New("cannot call abortTransaction twice")

// ErrCommitAfterAbort is returned if commit is called after an abort.
var ErrCommitAfterAbort = errors.New("cannot call commitTransaction after calling abortTransaction")

// ErrUnackWCUnsupported is returned if an unacknowledged write concern is used for a transaction.
var ErrUnackWCUnsupported = errors.New("transactions do not support unacknowledged write concerns")
+
// Type describes the type of the session
type Type uint8

// These constants are the valid types for a client session.
const (
	Explicit Type = iota // session started explicitly by the user
	Implicit             // session started internally for a single operation
)
+
// State indicates the state of the FSM.
type state uint8

// Client Session states
const (
	None       state = iota // no transaction in progress
	Starting                // StartTransaction called, no command run yet
	InProgress              // at least one command has run in the transaction
	Committed               // the transaction was committed
	Aborted                 // the transaction was aborted
)
+
// Client is a session for clients to run commands.
type Client struct {
	*Server
	ClientID       uuid.UUID
	ClusterTime    bson.Raw
	Consistent     bool // causal consistency
	OperationTime  *primitive.Timestamp
	SessionType    Type // Explicit or Implicit
	Terminated     bool // set by EndSession; any further use returns ErrSessionEnded
	RetryingCommit bool
	Committing     bool
	Aborting       bool
	RetryWrite     bool // when true, a transaction number is encoded for retryable writes

	// options for the current transaction
	// most recently set by transactionopt
	CurrentRc *readconcern.ReadConcern
	CurrentRp *readpref.ReadPref
	CurrentWc *writeconcern.WriteConcern

	// default transaction options
	transactionRc *readconcern.ReadConcern
	transactionRp *readpref.ReadPref
	transactionWc *writeconcern.WriteConcern

	pool  *Pool // pool the server session is returned to on EndSession
	state state // transaction FSM state
}
+
+func getClusterTime(clusterTime bson.Raw) (uint32, uint32) {
+	if clusterTime == nil {
+		return 0, 0
+	}
+
+	clusterTimeVal, err := clusterTime.LookupErr("$clusterTime")
+	if err != nil {
+		return 0, 0
+	}
+
+	timestampVal, err := bson.Raw(clusterTimeVal.Value).LookupErr("clusterTime")
+	if err != nil {
+		return 0, 0
+	}
+
+	return timestampVal.Timestamp()
+}
+
+// MaxClusterTime compares 2 clusterTime documents and returns the document representing the highest cluster time.
+func MaxClusterTime(ct1, ct2 bson.Raw) bson.Raw {
+	epoch1, ord1 := getClusterTime(ct1)
+	epoch2, ord2 := getClusterTime(ct2)
+
+	if epoch1 > epoch2 {
+		return ct1
+	} else if epoch1 < epoch2 {
+		return ct2
+	} else if ord1 > ord2 {
+		return ct1
+	} else if ord1 < ord2 {
+		return ct2
+	}
+
+	return ct1
+}
+
+// NewClientSession creates a Client.
+func NewClientSession(pool *Pool, clientID uuid.UUID, sessionType Type, opts ...*ClientOptions) (*Client, error) {
+	c := &Client{
+		Consistent:  true, // set default
+		ClientID:    clientID,
+		SessionType: sessionType,
+		pool:        pool,
+	}
+
+	mergedOpts := mergeClientOptions(opts...)
+	if mergedOpts.CausalConsistency != nil {
+		c.Consistent = *mergedOpts.CausalConsistency
+	}
+	if mergedOpts.DefaultReadPreference != nil {
+		c.transactionRp = mergedOpts.DefaultReadPreference
+	}
+	if mergedOpts.DefaultReadConcern != nil {
+		c.transactionRc = mergedOpts.DefaultReadConcern
+	}
+	if mergedOpts.DefaultWriteConcern != nil {
+		c.transactionWc = mergedOpts.DefaultWriteConcern
+	}
+
+	servSess, err := pool.GetSession()
+	if err != nil {
+		return nil, err
+	}
+
+	c.Server = servSess
+
+	return c, nil
+}
+
+// AdvanceClusterTime updates the session's cluster time.
+func (c *Client) AdvanceClusterTime(clusterTime bson.Raw) error {
+	if c.Terminated {
+		return ErrSessionEnded
+	}
+	c.ClusterTime = MaxClusterTime(c.ClusterTime, clusterTime)
+	return nil
+}
+
+// AdvanceOperationTime updates the session's operation time.
+func (c *Client) AdvanceOperationTime(opTime *primitive.Timestamp) error {
+	if c.Terminated {
+		return ErrSessionEnded
+	}
+
+	if c.OperationTime == nil {
+		c.OperationTime = opTime
+		return nil
+	}
+
+	if opTime.T > c.OperationTime.T {
+		c.OperationTime = opTime
+	} else if (opTime.T == c.OperationTime.T) && (opTime.I > c.OperationTime.I) {
+		c.OperationTime = opTime
+	}
+
+	return nil
+}
+
// UpdateUseTime updates the session's last used time.
// Must be called whenever this session is used to send a command to the server.
func (c *Client) UpdateUseTime() error {
	if c.Terminated {
		return ErrSessionEnded
	}
	c.updateUseTime()
	return nil
}
+
+// EndSession ends the session.
+func (c *Client) EndSession() {
+	if c.Terminated {
+		return
+	}
+
+	c.Terminated = true
+	c.pool.ReturnSession(c.Server)
+
+	return
+}
+
+// TransactionInProgress returns true if the client session is in an active transaction.
+func (c *Client) TransactionInProgress() bool {
+	return c.state == InProgress
+}
+
+// TransactionStarting returns true if the client session is starting a transaction.
+func (c *Client) TransactionStarting() bool {
+	return c.state == Starting
+}
+
+// TransactionRunning returns true if the client session has started the transaction
+// and it hasn't been committed or aborted
+func (c *Client) TransactionRunning() bool {
+	return c.state == Starting || c.state == InProgress
+}
+
// TransactionCommitted returns true if the client session just committed a transaction.
func (c *Client) TransactionCommitted() bool {
	return c.state == Committed
}
+
+// CheckStartTransaction checks to see if allowed to start transaction and returns
+// an error if not allowed
+func (c *Client) CheckStartTransaction() error {
+	if c.state == InProgress || c.state == Starting {
+		return ErrTransactInProgress
+	}
+	return nil
+}
+
// StartTransaction initializes the transaction options and advances the state machine.
// It does not contact the server to start the transaction.
func (c *Client) StartTransaction(opts *TransactionOptions) error {
	err := c.CheckStartTransaction()
	if err != nil {
		return err
	}

	c.IncrementTxnNumber()
	c.RetryingCommit = false

	if opts != nil {
		c.CurrentRc = opts.ReadConcern
		c.CurrentRp = opts.ReadPreference
		c.CurrentWc = opts.WriteConcern
	}

	// Fall back to the session-level defaults for any option not supplied.
	if c.CurrentRc == nil {
		c.CurrentRc = c.transactionRc
	}

	if c.CurrentRp == nil {
		c.CurrentRp = c.transactionRp
	}

	if c.CurrentWc == nil {
		c.CurrentWc = c.transactionWc
	}

	// Transactions require an acknowledged write concern.
	if !writeconcern.AckWrite(c.CurrentWc) {
		c.clearTransactionOpts()
		return ErrUnackWCUnsupported
	}

	c.state = Starting
	return nil
}
+
+// CheckCommitTransaction checks to see if allowed to commit transaction and returns
+// an error if not allowed.
+func (c *Client) CheckCommitTransaction() error {
+	if c.state == None {
+		return ErrNoTransactStarted
+	} else if c.state == Aborted {
+		return ErrCommitAfterAbort
+	}
+	return nil
+}
+
+// CommitTransaction updates the state for a successfully committed transaction and returns
+// an error if not permissible.  It does not actually perform the commit.
+func (c *Client) CommitTransaction() error {
+	err := c.CheckCommitTransaction()
+	if err != nil {
+		return err
+	}
+	c.state = Committed
+	return nil
+}
+
+// CheckAbortTransaction checks to see if allowed to abort transaction and returns
+// an error if not allowed.
+func (c *Client) CheckAbortTransaction() error {
+	if c.state == None {
+		return ErrNoTransactStarted
+	} else if c.state == Committed {
+		return ErrAbortAfterCommit
+	} else if c.state == Aborted {
+		return ErrAbortTwice
+	}
+	return nil
+}
+
// AbortTransaction updates the state for a successfully aborted transaction and returns
// an error if not permissible.  It does not actually perform the abort.
func (c *Client) AbortTransaction() error {
	err := c.CheckAbortTransaction()
	if err != nil {
		return err
	}
	c.state = Aborted
	c.clearTransactionOpts()
	return nil
}
+
// ApplyCommand advances the state machine upon command execution.
func (c *Client) ApplyCommand() {
	if c.Committing {
		// Do not change state if committing after already committed
		return
	}
	if c.state == Starting {
		c.state = InProgress
	} else if c.state == Committed || c.state == Aborted {
		// The transaction is finished; drop its options and return to idle.
		c.clearTransactionOpts()
		c.state = None
	}
}
+
+func (c *Client) clearTransactionOpts() {
+	c.RetryingCommit = false
+	c.Aborting = false
+	c.Committing = false
+	c.CurrentWc = nil
+	c.CurrentRp = nil
+	c.CurrentRc = nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/cluster_clock.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/cluster_clock.go
new file mode 100644
index 0000000..85376bf
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/cluster_clock.go
@@ -0,0 +1,36 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+)
+
+// ClusterClock represents a logical clock for keeping track of cluster time.
+type ClusterClock struct {
+	clusterTime bson.Raw
+	lock        sync.Mutex
+}
+
+// GetClusterTime returns the cluster's current time.
+func (cc *ClusterClock) GetClusterTime() bson.Raw {
+	var ct bson.Raw
+	cc.lock.Lock()
+	ct = cc.clusterTime
+	cc.lock.Unlock()
+
+	return ct
+}
+
+// AdvanceClusterTime updates the cluster's current time.
+func (cc *ClusterClock) AdvanceClusterTime(clusterTime bson.Raw) {
+	cc.lock.Lock()
+	cc.clusterTime = MaxClusterTime(cc.clusterTime, clusterTime)
+	cc.lock.Unlock()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/options.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/options.go
new file mode 100644
index 0000000..ac10518
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/options.go
@@ -0,0 +1,51 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+)
+
+// ClientOptions represents all possible options for creating a client session.
+type ClientOptions struct {
+	CausalConsistency     *bool
+	DefaultReadConcern    *readconcern.ReadConcern
+	DefaultWriteConcern   *writeconcern.WriteConcern
+	DefaultReadPreference *readpref.ReadPref
+}
+
+// TransactionOptions represents all possible options for starting a transaction in a session.
+type TransactionOptions struct {
+	ReadConcern    *readconcern.ReadConcern
+	WriteConcern   *writeconcern.WriteConcern
+	ReadPreference *readpref.ReadPref
+}
+
+func mergeClientOptions(opts ...*ClientOptions) *ClientOptions {
+	c := &ClientOptions{}
+	for _, opt := range opts {
+		if opt == nil {
+			continue
+		}
+		if opt.CausalConsistency != nil {
+			c.CausalConsistency = opt.CausalConsistency
+		}
+		if opt.DefaultReadConcern != nil {
+			c.DefaultReadConcern = opt.DefaultReadConcern
+		}
+		if opt.DefaultReadPreference != nil {
+			c.DefaultReadPreference = opt.DefaultReadPreference
+		}
+		if opt.DefaultWriteConcern != nil {
+			c.DefaultWriteConcern = opt.DefaultWriteConcern
+		}
+	}
+
+	return c
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/server_session.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/server_session.go
new file mode 100644
index 0000000..06aa51f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/server_session.go
@@ -0,0 +1,63 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"time"
+
+	"crypto/rand"
+
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+)
+
+var rander = rand.Reader
+
+// Server is an open session with the server.
+type Server struct {
+	SessionID bsonx.Doc
+	TxnNumber int64
+	LastUsed  time.Time
+}
+
+// returns whether or not a session has expired given a timeout in minutes
+// a session is considered expired if it has less than 1 minute left before becoming stale
+func (ss *Server) expired(timeoutMinutes uint32) bool {
+	if timeoutMinutes <= 0 {
+		return true
+	}
+	timeUnused := time.Since(ss.LastUsed).Minutes()
+	return timeUnused > float64(timeoutMinutes-1)
+}
+
+// update the last used time for this session.
+// must be called whenever this server session is used to send a command to the server.
+func (ss *Server) updateUseTime() {
+	ss.LastUsed = time.Now()
+}
+
+func newServerSession() (*Server, error) {
+	id, err := uuid.New()
+	if err != nil {
+		return nil, err
+	}
+
+	idDoc := bsonx.Doc{{"id", bsonx.Binary(UUIDSubtype, id[:])}}
+
+	return &Server{
+		SessionID: idDoc,
+		LastUsed:  time.Now(),
+	}, nil
+}
+
+// IncrementTxnNumber increments the transaction number.
+func (ss *Server) IncrementTxnNumber() {
+	ss.TxnNumber++
+}
+
+// UUIDSubtype is the BSON binary subtype that a UUID should be encoded as
+const UUIDSubtype byte = 4
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/session_pool.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/session_pool.go
new file mode 100644
index 0000000..af749da
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/session/session_pool.go
@@ -0,0 +1,175 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package session
+
+import (
+	"sync"
+
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Node represents a server session in a linked list
+type Node struct {
+	*Server
+	next *Node
+	prev *Node
+}
+
+// Pool is a pool of server sessions that can be reused.
+type Pool struct {
+	descChan <-chan description.Topology
+	head     *Node
+	tail     *Node
+	timeout  uint32
+	mutex    sync.Mutex // mutex to protect list and sessionTimeout
+
+	checkedOut int // number of sessions checked out of pool
+}
+
+func (p *Pool) createServerSession() (*Server, error) {
+	s, err := newServerSession()
+	if err != nil {
+		return nil, err
+	}
+
+	p.checkedOut++
+	return s, nil
+}
+
+// NewPool creates a new server session pool
+func NewPool(descChan <-chan description.Topology) *Pool {
+	p := &Pool{
+		descChan: descChan,
+	}
+
+	return p
+}
+
+// assumes caller has mutex to protect the pool
+func (p *Pool) updateTimeout() {
+	select {
+	case newDesc := <-p.descChan:
+		p.timeout = newDesc.SessionTimeoutMinutes
+	default:
+		// no new description waiting
+	}
+}
+
+// GetSession retrieves an unexpired session from the pool.
+func (p *Pool) GetSession() (*Server, error) {
+	p.mutex.Lock() // prevent changing the linked list while seeing if sessions have expired
+	defer p.mutex.Unlock()
+
+	// empty pool
+	if p.head == nil && p.tail == nil {
+		return p.createServerSession()
+	}
+
+	p.updateTimeout()
+	for p.head != nil {
+		// pull session from head of queue and return if it is valid for at least 1 more minute
+		if p.head.expired(p.timeout) {
+			p.head = p.head.next
+			continue
+		}
+
+		// found unexpired session
+		session := p.head.Server
+		if p.head.next != nil {
+			p.head.next.prev = nil
+		}
+		if p.tail == p.head {
+			p.tail = nil
+			p.head = nil
+		} else {
+			p.head = p.head.next
+		}
+
+		p.checkedOut++
+		return session, nil
+	}
+
+	// no valid session found
+	p.tail = nil // empty list
+	return p.createServerSession()
+}
+
+// ReturnSession returns a session to the pool if it has not expired.
+func (p *Pool) ReturnSession(ss *Server) {
+	if ss == nil {
+		return
+	}
+
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	p.checkedOut--
+	p.updateTimeout()
+	// check sessions at end of queue for expired
+	// stop checking after hitting the first valid session
+	for p.tail != nil && p.tail.expired(p.timeout) {
+		if p.tail.prev != nil {
+			p.tail.prev.next = nil
+		}
+		p.tail = p.tail.prev
+	}
+
+	// session expired
+	if ss.expired(p.timeout) {
+		return
+	}
+
+	newNode := &Node{
+		Server: ss,
+		next:   nil,
+		prev:   nil,
+	}
+
+	// empty list
+	if p.tail == nil {
+		p.head = newNode
+		p.tail = newNode
+		return
+	}
+
+	// at least 1 valid session in list
+	newNode.next = p.head
+	p.head.prev = newNode
+	p.head = newNode
+}
+
+// IDSlice returns a slice of session IDs for each session in the pool
+func (p *Pool) IDSlice() []bsonx.Doc {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	ids := []bsonx.Doc{}
+	for node := p.head; node != nil; node = node.next {
+		ids = append(ids, node.SessionID)
+	}
+
+	return ids
+}
+
+// String implements the Stringer interface
+func (p *Pool) String() string {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	s := ""
+	for head := p.head; head != nil; head = head.next {
+		s += head.SessionID.String() + "\n"
+	}
+
+	return s
+}
+
+// CheckedOut returns number of sessions checked out from pool.
+func (p *Pool) CheckedOut() int {
+	return p.checkedOut
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/connection.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/connection.go
new file mode 100644
index 0000000..d59f5b5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/connection.go
@@ -0,0 +1,96 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"context"
+	"net"
+
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// sconn is a wrapper around a connection.Connection. This type is returned by
+// a Server so that it can track network errors and when a non-timeout network
+// error is returned, the pool on the server can be cleared.
+type sconn struct {
+	connection.Connection
+	s  *Server
+	id uint64
+}
+
+var notMasterCodes = []int32{10107, 13435}
+var recoveringCodes = []int32{11600, 11602, 13436, 189, 91}
+
+func (sc *sconn) ReadWireMessage(ctx context.Context) (wiremessage.WireMessage, error) {
+	wm, err := sc.Connection.ReadWireMessage(ctx)
+	if err != nil {
+		sc.processErr(err)
+	} else {
+		e := command.DecodeError(wm)
+		sc.processErr(e)
+	}
+	return wm, err
+}
+
+func (sc *sconn) WriteWireMessage(ctx context.Context, wm wiremessage.WireMessage) error {
+	err := sc.Connection.WriteWireMessage(ctx, wm)
+	sc.processErr(err)
+	return err
+}
+
+func (sc *sconn) processErr(err error) {
+	// TODO(GODRIVER-524) handle the rest of sdam error handling
+	// Invalidate server description if not master or node recovering error occurs
+	if cerr, ok := err.(command.Error); ok && (isRecoveringError(cerr) || isNotMasterError(cerr)) {
+		desc := sc.s.Description()
+		desc.Kind = description.Unknown
+		desc.LastError = err
+		// updates description to unknown
+		sc.s.updateDescription(desc, false)
+	}
+
+	ne, ok := err.(connection.NetworkError)
+	if !ok {
+		return
+	}
+
+	if netErr, ok := ne.Wrapped.(net.Error); ok && netErr.Timeout() {
+		return
+	}
+	if ne.Wrapped == context.Canceled || ne.Wrapped == context.DeadlineExceeded {
+		return
+	}
+
+	desc := sc.s.Description()
+	desc.Kind = description.Unknown
+	desc.LastError = err
+	// updates description to unknown
+	sc.s.updateDescription(desc, false)
+}
+
+func isRecoveringError(err command.Error) bool {
+	for _, c := range recoveringCodes {
+		if c == err.Code {
+			return true
+		}
+	}
+	return strings.Contains(err.Error(), "node is recovering")
+}
+
+func isNotMasterError(err command.Error) bool {
+	for _, c := range notMasterCodes {
+		if c == err.Code {
+			return true
+		}
+	}
+	return strings.Contains(err.Error(), "not master")
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/fsm.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/fsm.go
new file mode 100644
index 0000000..3682b57
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/fsm.go
@@ -0,0 +1,350 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"bytes"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+var supportedWireVersions = description.NewVersionRange(2, 6)
+var minSupportedMongoDBVersion = "2.6"
+
+type fsm struct {
+	description.Topology
+	SetName       string
+	maxElectionID primitive.ObjectID
+	maxSetVersion uint32
+}
+
+func newFSM() *fsm {
+	return new(fsm)
+}
+
+// apply should operate on immutable TopologyDescriptions and Descriptions. This way we don't have to
+// lock for the entire time we're applying server description.
+func (f *fsm) apply(s description.Server) (description.Topology, error) {
+
+	newServers := make([]description.Server, len(f.Servers))
+	copy(newServers, f.Servers)
+
+	oldMinutes := f.SessionTimeoutMinutes
+	f.Topology = description.Topology{
+		Kind:    f.Kind,
+		Servers: newServers,
+	}
+
+	// For data bearing servers, set SessionTimeoutMinutes to the lowest among them
+	if oldMinutes == 0 {
+		// If timeout currently 0, check all servers to see if any still don't have a timeout
+		// If they all have timeout, pick the lowest.
+		timeout := s.SessionTimeoutMinutes
+		for _, server := range f.Servers {
+			if server.DataBearing() && server.SessionTimeoutMinutes < timeout {
+				timeout = server.SessionTimeoutMinutes
+			}
+		}
+		f.SessionTimeoutMinutes = timeout
+	} else {
+		if s.DataBearing() && oldMinutes > s.SessionTimeoutMinutes {
+			f.SessionTimeoutMinutes = s.SessionTimeoutMinutes
+		} else {
+			f.SessionTimeoutMinutes = oldMinutes
+		}
+	}
+
+	if _, ok := f.findServer(s.Addr); !ok {
+		return f.Topology, nil
+	}
+
+	if s.WireVersion != nil {
+		if s.WireVersion.Max < supportedWireVersions.Min {
+			return description.Topology{}, fmt.Errorf(
+				"server at %s reports wire version %d, but this version of the Go driver requires "+
+					"at least %d (MongoDB %s)",
+				s.Addr.String(),
+				s.WireVersion.Max,
+				supportedWireVersions.Min,
+				minSupportedMongoDBVersion,
+			)
+		}
+
+		if s.WireVersion.Min > supportedWireVersions.Max {
+			return description.Topology{}, fmt.Errorf(
+				"server at %s requires wire version %d, but this version of the Go driver only "+
+					"supports up to %d",
+				s.Addr.String(),
+				s.WireVersion.Min,
+				supportedWireVersions.Max,
+			)
+		}
+	}
+
+	switch f.Kind {
+	case description.Unknown:
+		f.applyToUnknown(s)
+	case description.Sharded:
+		f.applyToSharded(s)
+	case description.ReplicaSetNoPrimary:
+		f.applyToReplicaSetNoPrimary(s)
+	case description.ReplicaSetWithPrimary:
+		f.applyToReplicaSetWithPrimary(s)
+	case description.Single:
+		f.applyToSingle(s)
+	}
+
+	return f.Topology, nil
+}
+
+func (f *fsm) applyToReplicaSetNoPrimary(s description.Server) {
+	switch s.Kind {
+	case description.Standalone, description.Mongos:
+		f.removeServerByAddr(s.Addr)
+	case description.RSPrimary:
+		f.updateRSFromPrimary(s)
+	case description.RSSecondary, description.RSArbiter, description.RSMember:
+		f.updateRSWithoutPrimary(s)
+	case description.Unknown, description.RSGhost:
+		f.replaceServer(s)
+	}
+}
+
+func (f *fsm) applyToReplicaSetWithPrimary(s description.Server) {
+	switch s.Kind {
+	case description.Standalone, description.Mongos:
+		f.removeServerByAddr(s.Addr)
+		f.checkIfHasPrimary()
+	case description.RSPrimary:
+		f.updateRSFromPrimary(s)
+	case description.RSSecondary, description.RSArbiter, description.RSMember:
+		f.updateRSWithPrimaryFromMember(s)
+	case description.Unknown, description.RSGhost:
+		f.replaceServer(s)
+		f.checkIfHasPrimary()
+	}
+}
+
+func (f *fsm) applyToSharded(s description.Server) {
+	switch s.Kind {
+	case description.Mongos, description.Unknown:
+		f.replaceServer(s)
+	case description.Standalone, description.RSPrimary, description.RSSecondary, description.RSArbiter, description.RSMember, description.RSGhost:
+		f.removeServerByAddr(s.Addr)
+	}
+}
+
+func (f *fsm) applyToSingle(s description.Server) {
+	switch s.Kind {
+	case description.Unknown:
+		f.replaceServer(s)
+	case description.Standalone, description.Mongos:
+		if f.SetName != "" {
+			f.removeServerByAddr(s.Addr)
+			return
+		}
+
+		f.replaceServer(s)
+	case description.RSPrimary, description.RSSecondary, description.RSArbiter, description.RSMember, description.RSGhost:
+		if f.SetName != "" && f.SetName != s.SetName {
+			f.removeServerByAddr(s.Addr)
+			return
+		}
+
+		f.replaceServer(s)
+	}
+}
+
+func (f *fsm) applyToUnknown(s description.Server) {
+	switch s.Kind {
+	case description.Mongos:
+		f.setKind(description.Sharded)
+		f.replaceServer(s)
+	case description.RSPrimary:
+		f.updateRSFromPrimary(s)
+	case description.RSSecondary, description.RSArbiter, description.RSMember:
+		f.setKind(description.ReplicaSetNoPrimary)
+		f.updateRSWithoutPrimary(s)
+	case description.Standalone:
+		f.updateUnknownWithStandalone(s)
+	case description.Unknown, description.RSGhost:
+		f.replaceServer(s)
+	}
+}
+
+func (f *fsm) checkIfHasPrimary() {
+	if _, ok := f.findPrimary(); ok {
+		f.setKind(description.ReplicaSetWithPrimary)
+	} else {
+		f.setKind(description.ReplicaSetNoPrimary)
+	}
+}
+
+func (f *fsm) updateRSFromPrimary(s description.Server) {
+	if f.SetName == "" {
+		f.SetName = s.SetName
+	} else if f.SetName != s.SetName {
+		f.removeServerByAddr(s.Addr)
+		f.checkIfHasPrimary()
+		return
+	}
+
+	if s.SetVersion != 0 && !bytes.Equal(s.ElectionID[:], primitive.NilObjectID[:]) {
+		if f.maxSetVersion > s.SetVersion || bytes.Compare(f.maxElectionID[:], s.ElectionID[:]) == 1 {
+			f.replaceServer(description.Server{
+				Addr:      s.Addr,
+				LastError: fmt.Errorf("was a primary, but its set version or election id is stale"),
+			})
+			f.checkIfHasPrimary()
+			return
+		}
+
+		f.maxElectionID = s.ElectionID
+	}
+
+	if s.SetVersion > f.maxSetVersion {
+		f.maxSetVersion = s.SetVersion
+	}
+
+	if j, ok := f.findPrimary(); ok {
+		f.setServer(j, description.Server{
+			Addr:      f.Servers[j].Addr,
+			LastError: fmt.Errorf("was a primary, but a new primary was discovered"),
+		})
+	}
+
+	f.replaceServer(s)
+
+	for j := len(f.Servers) - 1; j >= 0; j-- {
+		found := false
+		for _, member := range s.Members {
+			if member == f.Servers[j].Addr {
+				found = true
+				break
+			}
+		}
+		if !found {
+			f.removeServer(j)
+		}
+	}
+
+	for _, member := range s.Members {
+		if _, ok := f.findServer(member); !ok {
+			f.addServer(member)
+		}
+	}
+
+	f.checkIfHasPrimary()
+}
+
+func (f *fsm) updateRSWithPrimaryFromMember(s description.Server) {
+	if f.SetName != s.SetName {
+		f.removeServerByAddr(s.Addr)
+		f.checkIfHasPrimary()
+		return
+	}
+
+	if s.Addr != s.CanonicalAddr {
+		f.removeServerByAddr(s.Addr)
+		f.checkIfHasPrimary()
+		return
+	}
+
+	f.replaceServer(s)
+
+	if _, ok := f.findPrimary(); !ok {
+		f.setKind(description.ReplicaSetNoPrimary)
+	}
+}
+
+func (f *fsm) updateRSWithoutPrimary(s description.Server) {
+	if f.SetName == "" {
+		f.SetName = s.SetName
+	} else if f.SetName != s.SetName {
+		f.removeServerByAddr(s.Addr)
+		return
+	}
+
+	for _, member := range s.Members {
+		if _, ok := f.findServer(member); !ok {
+			f.addServer(member)
+		}
+	}
+
+	if s.Addr != s.CanonicalAddr {
+		f.removeServerByAddr(s.Addr)
+		return
+	}
+
+	f.replaceServer(s)
+}
+
+func (f *fsm) updateUnknownWithStandalone(s description.Server) {
+	if len(f.Servers) > 1 {
+		f.removeServerByAddr(s.Addr)
+		return
+	}
+
+	f.setKind(description.Single)
+	f.replaceServer(s)
+}
+
+func (f *fsm) addServer(addr address.Address) {
+	f.Servers = append(f.Servers, description.Server{
+		Addr: addr.Canonicalize(),
+	})
+}
+
+func (f *fsm) findPrimary() (int, bool) {
+	for i, s := range f.Servers {
+		if s.Kind == description.RSPrimary {
+			return i, true
+		}
+	}
+
+	return 0, false
+}
+
+func (f *fsm) findServer(addr address.Address) (int, bool) {
+	canon := addr.Canonicalize()
+	for i, s := range f.Servers {
+		if canon == s.Addr {
+			return i, true
+		}
+	}
+
+	return 0, false
+}
+
+func (f *fsm) removeServer(i int) {
+	f.Servers = append(f.Servers[:i], f.Servers[i+1:]...)
+}
+
+func (f *fsm) removeServerByAddr(addr address.Address) {
+	if i, ok := f.findServer(addr); ok {
+		f.removeServer(i)
+	}
+}
+
+func (f *fsm) replaceServer(s description.Server) bool {
+	if i, ok := f.findServer(s.Addr); ok {
+		f.setServer(i, s)
+		return true
+	}
+	return false
+}
+
+func (f *fsm) setServer(i int, s description.Server) {
+	f.Servers[i] = s
+}
+
+func (f *fsm) setKind(k description.TopologyKind) {
+	f.Kind = k
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/server.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/server.go
new file mode 100644
index 0000000..3a7ace2
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/server.go
@@ -0,0 +1,506 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/event"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/auth"
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+const minHeartbeatInterval = 500 * time.Millisecond
+const connectionSemaphoreSize = math.MaxInt64
+
+// ErrServerClosed occurs when an attempt to get a connection is made after
+// the server has been closed.
+var ErrServerClosed = errors.New("server is closed")
+
+// ErrServerConnected occurs when at attempt to connect is made after a server
+// has already been connected.
+var ErrServerConnected = errors.New("server is connected")
+
+// SelectedServer represents a specific server that was selected during server selection.
+// It contains the kind of the typology it was selected from.
+type SelectedServer struct {
+	*Server
+
+	Kind description.TopologyKind
+}
+
+// Description returns a description of the server as of the last heartbeat.
+func (ss *SelectedServer) Description() description.SelectedServer {
+	sdesc := ss.Server.Description()
+	return description.SelectedServer{
+		Server: sdesc,
+		Kind:   ss.Kind,
+	}
+}
+
+// These constants represent the connection states of a server.
+const (
+	disconnected int32 = iota
+	disconnecting
+	connected
+	connecting
+)
+
+func connectionStateString(state int32) string {
+	switch state {
+	case 0:
+		return "Disconnected"
+	case 1:
+		return "Disconnecting"
+	case 2:
+		return "Connected"
+	case 3:
+		return "Connecting"
+	}
+
+	return ""
+}
+
+// Server is a single server within a topology.
+type Server struct {
+	cfg     *serverConfig
+	address address.Address
+
+	connectionstate int32
+	done            chan struct{}
+	checkNow        chan struct{}
+	closewg         sync.WaitGroup
+	pool            connection.Pool
+
+	desc atomic.Value // holds a description.Server
+
+	averageRTTSet bool
+	averageRTT    time.Duration
+
+	subLock             sync.Mutex
+	subscribers         map[uint64]chan description.Server
+	currentSubscriberID uint64
+
+	subscriptionsClosed bool
+}
+
+// ConnectServer creates a new Server and then initializes it using the
+// Connect method.
+func ConnectServer(ctx context.Context, addr address.Address, opts ...ServerOption) (*Server, error) {
+	srvr, err := NewServer(addr, opts...)
+	if err != nil {
+		return nil, err
+	}
+	err = srvr.Connect(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return srvr, nil
+}
+
+// NewServer creates a new server. The mongodb server at the address will be monitored
+// on an internal monitoring goroutine.
+func NewServer(addr address.Address, opts ...ServerOption) (*Server, error) {
+	cfg, err := newServerConfig(opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	s := &Server{
+		cfg:     cfg,
+		address: addr,
+
+		done:     make(chan struct{}),
+		checkNow: make(chan struct{}, 1),
+
+		subscribers: make(map[uint64]chan description.Server),
+	}
+	s.desc.Store(description.Server{Addr: addr})
+
+	var maxConns uint64
+	if cfg.maxConns == 0 {
+		maxConns = math.MaxInt64
+	} else {
+		maxConns = uint64(cfg.maxConns)
+	}
+
+	s.pool, err = connection.NewPool(addr, uint64(cfg.maxIdleConns), maxConns, cfg.connectionOpts...)
+	if err != nil {
+		return nil, err
+	}
+
+	return s, nil
+}
+
+// Connect initialzies the Server by starting background monitoring goroutines.
+// This method must be called before a Server can be used.
+func (s *Server) Connect(ctx context.Context) error {
+	if !atomic.CompareAndSwapInt32(&s.connectionstate, disconnected, connected) {
+		return ErrServerConnected
+	}
+	s.desc.Store(description.Server{Addr: s.address})
+	go s.update()
+	s.closewg.Add(1)
+	return s.pool.Connect(ctx)
+}
+
+// Disconnect closes sockets to the server referenced by this Server.
+// Subscriptions to this Server will be closed. Disconnect will shutdown
+// any monitoring goroutines, close the idle connection pool, and will
+// wait until all the in use connections have been returned to the connection
+// pool and are closed before returning. If the context expires via
+// cancellation, deadline, or timeout before the in use connections have been
+// returned, the in use connections will be closed, resulting in the failure of
+// any in flight read or write operations. If this method returns with no
+// errors, all connections associated with this Server have been closed.
+func (s *Server) Disconnect(ctx context.Context) error {
+	if !atomic.CompareAndSwapInt32(&s.connectionstate, connected, disconnecting) {
+		return ErrServerClosed
+	}
+
+	// For every call to Connect there must be at least 1 goroutine that is
+	// waiting on the done channel.
+	s.done <- struct{}{}
+	err := s.pool.Disconnect(ctx)
+	if err != nil {
+		return err
+	}
+
+	s.closewg.Wait()
+	atomic.StoreInt32(&s.connectionstate, disconnected)
+
+	return nil
+}
+
+// Connection gets a connection to the server.
+func (s *Server) Connection(ctx context.Context) (connection.Connection, error) {
+	if atomic.LoadInt32(&s.connectionstate) != connected {
+		return nil, ErrServerClosed
+	}
+	conn, desc, err := s.pool.Get(ctx)
+	if err != nil {
+		if _, ok := err.(*auth.Error); ok {
+			// authentication error --> drain connection
+			_ = s.pool.Drain()
+		}
+		if _, ok := err.(*connection.NetworkError); ok {
+			// update description to unknown and clears the connection pool
+			if desc != nil {
+				desc.Kind = description.Unknown
+				desc.LastError = err
+				s.updateDescription(*desc, false)
+			} else {
+				_ = s.pool.Drain()
+			}
+		}
+		return nil, err
+	}
+	if desc != nil {
+		go s.updateDescription(*desc, false)
+	}
+	sc := &sconn{Connection: conn, s: s}
+	return sc, nil
+}
+
+// Description returns a description of the server as of the last heartbeat.
+func (s *Server) Description() description.Server {
+	return s.desc.Load().(description.Server)
+}
+
+// SelectedDescription returns a description.SelectedServer with a Kind of
+// Single. This can be used when performing tasks like monitoring a batch
+// of servers and you want to run one off commands against those servers.
+func (s *Server) SelectedDescription() description.SelectedServer {
+	sdesc := s.Description()
+	return description.SelectedServer{
+		Server: sdesc,
+		Kind:   description.Single,
+	}
+}
+
+// Subscribe returns a ServerSubscription which has a channel on which all
+// updated server descriptions will be sent. The channel will have a buffer
+// size of one, and will be pre-populated with the current description.
+func (s *Server) Subscribe() (*ServerSubscription, error) {
+	if atomic.LoadInt32(&s.connectionstate) != connected {
+		return nil, ErrSubscribeAfterClosed
+	}
+	ch := make(chan description.Server, 1)
+	ch <- s.desc.Load().(description.Server)
+
+	s.subLock.Lock()
+	defer s.subLock.Unlock()
+	if s.subscriptionsClosed {
+		return nil, ErrSubscribeAfterClosed
+	}
+	id := s.currentSubscriberID
+	s.subscribers[id] = ch
+	s.currentSubscriberID++
+
+	ss := &ServerSubscription{
+		C:  ch,
+		s:  s,
+		id: id,
+	}
+
+	return ss, nil
+}
+
+// RequestImmediateCheck will cause the server to send a heartbeat immediately
+// instead of waiting for the heartbeat timeout.
+func (s *Server) RequestImmediateCheck() {
+	select {
+	case s.checkNow <- struct{}{}:
+	default:
+	}
+}
+
+// update handles performing heartbeats and updating any subscribers of the
+// newest description.Server retrieved.
+func (s *Server) update() {
+	defer s.closewg.Done()
+	heartbeatTicker := time.NewTicker(s.cfg.heartbeatInterval)
+	rateLimiter := time.NewTicker(minHeartbeatInterval)
+	defer heartbeatTicker.Stop()
+	defer rateLimiter.Stop()
+	checkNow := s.checkNow
+	done := s.done
+
+	var doneOnce bool
+	defer func() {
+		if r := recover(); r != nil {
+			if doneOnce {
+				return
+			}
+			// We keep this goroutine alive attempting to read from the done channel.
+			<-done
+		}
+	}()
+
+	var conn connection.Connection
+	var desc description.Server
+
+	desc, conn = s.heartbeat(nil)
+	s.updateDescription(desc, true)
+
+	closeServer := func() {
+		doneOnce = true
+		s.subLock.Lock()
+		for id, c := range s.subscribers {
+			close(c)
+			delete(s.subscribers, id)
+		}
+		s.subscriptionsClosed = true
+		s.subLock.Unlock()
+		if conn == nil {
+			return
+		}
+		conn.Close()
+	}
+	for {
+		select {
+		case <-heartbeatTicker.C:
+		case <-checkNow:
+		case <-done:
+			closeServer()
+			return
+		}
+
+		select {
+		case <-rateLimiter.C:
+		case <-done:
+			closeServer()
+			return
+		}
+
+		desc, conn = s.heartbeat(conn)
+		s.updateDescription(desc, false)
+	}
+}
+
+// updateDescription handles updating the description on the Server, notifying
+// subscribers, and potentially draining the connection pool. The initial
+// parameter is used to determine if this is the first description from the
+// server.
+func (s *Server) updateDescription(desc description.Server, initial bool) {
+	defer func() {
+		//  ¯\_(ツ)_/¯
+		_ = recover()
+	}()
+	s.desc.Store(desc)
+
+	s.subLock.Lock()
+	for _, c := range s.subscribers {
+		select {
+		// drain the channel if it isn't empty
+		case <-c:
+		default:
+		}
+		c <- desc
+	}
+	s.subLock.Unlock()
+
+	if initial {
+		// We don't clear the pool on the first update on the description.
+		return
+	}
+
+	switch desc.Kind {
+	case description.Unknown:
+		_ = s.pool.Drain()
+	}
+}
+
+// heartbeat sends a heartbeat to the server using the given connection. The connection can be nil.
+func (s *Server) heartbeat(conn connection.Connection) (description.Server, connection.Connection) {
+	const maxRetry = 2
+	var saved error
+	var desc description.Server
+	var set bool
+	var err error
+	ctx := context.Background()
+
+	for i := 1; i <= maxRetry; i++ {
+		if conn != nil && conn.Expired() {
+			conn.Close()
+			conn = nil
+		}
+
+		if conn == nil {
+			opts := []connection.Option{
+				connection.WithConnectTimeout(func(time.Duration) time.Duration { return s.cfg.heartbeatTimeout }),
+				connection.WithReadTimeout(func(time.Duration) time.Duration { return s.cfg.heartbeatTimeout }),
+				connection.WithWriteTimeout(func(time.Duration) time.Duration { return s.cfg.heartbeatTimeout }),
+			}
+			opts = append(opts, s.cfg.connectionOpts...)
+			// We override whatever handshaker is currently attached to the options with an empty
+			// one because need to make sure we don't do auth.
+			opts = append(opts, connection.WithHandshaker(func(h connection.Handshaker) connection.Handshaker {
+				return nil
+			}))
+
+			// Override any command monitors specified in options with nil to avoid monitoring heartbeats.
+			opts = append(opts, connection.WithMonitor(func(*event.CommandMonitor) *event.CommandMonitor {
+				return nil
+			}))
+			conn, _, err = connection.New(ctx, s.address, opts...)
+			if err != nil {
+				saved = err
+				if conn != nil {
+					conn.Close()
+				}
+				conn = nil
+				continue
+			}
+		}
+
+		now := time.Now()
+
+		isMasterCmd := &command.IsMaster{Compressors: s.cfg.compressionOpts}
+		isMaster, err := isMasterCmd.RoundTrip(ctx, conn)
+		if err != nil {
+			saved = err
+			conn.Close()
+			conn = nil
+			continue
+		}
+
+		clusterTime := isMaster.ClusterTime
+		if s.cfg.clock != nil {
+			s.cfg.clock.AdvanceClusterTime(clusterTime)
+		}
+
+		delay := time.Since(now)
+		desc = description.NewServer(s.address, isMaster).SetAverageRTT(s.updateAverageRTT(delay))
+		desc.HeartbeatInterval = s.cfg.heartbeatInterval
+		set = true
+
+		break
+	}
+
+	if !set {
+		desc = description.Server{
+			Addr:      s.address,
+			LastError: saved,
+		}
+	}
+
+	return desc, conn
+}
+
+func (s *Server) updateAverageRTT(delay time.Duration) time.Duration {
+	if !s.averageRTTSet {
+		s.averageRTT = delay
+	} else {
+		alpha := 0.2
+		s.averageRTT = time.Duration(alpha*float64(delay) + (1-alpha)*float64(s.averageRTT))
+	}
+	return s.averageRTT
+}
+
+// Drain will drain the connection pool of this server. This is mainly here so the
+// pool for the server doesn't need to be directly exposed and so that when an error
+// is returned from reading or writing, a client can drain the pool for this server.
+// This is exposed here so we don't have to wrap the Connection type and sniff responses
+// for errors that would cause the pool to be drained, which can in turn centralize the
+// logic for handling errors in the Client type.
+func (s *Server) Drain() error { return s.pool.Drain() }
+
+// String implements the Stringer interface.
+func (s *Server) String() string {
+	desc := s.Description()
+	str := fmt.Sprintf("Addr: %s, Type: %s, State: %s",
+		s.address, desc.Kind, connectionStateString(s.connectionstate))
+	if len(desc.Tags) != 0 {
+		str += fmt.Sprintf(", Tag sets: %s", desc.Tags)
+	}
+	if s.connectionstate == connected {
+		str += fmt.Sprintf(", Avergage RTT: %d", s.averageRTT)
+	}
+	if desc.LastError != nil {
+		str += fmt.Sprintf(", Last error: %s", desc.LastError)
+	}
+
+	return str
+}
+
+// ServerSubscription represents a subscription to the description.Server updates for
+// a specific server.
+type ServerSubscription struct {
+	C  <-chan description.Server
+	s  *Server
+	id uint64
+}
+
+// Unsubscribe unsubscribes this ServerSubscription from updates and closes the
+// subscription channel.
+func (ss *ServerSubscription) Unsubscribe() error {
+	ss.s.subLock.Lock()
+	defer ss.s.subLock.Unlock()
+	if ss.s.subscriptionsClosed {
+		return nil
+	}
+
+	ch, ok := ss.s.subscribers[ss.id]
+	if !ok {
+		return nil
+	}
+
+	close(ch)
+	delete(ss.s.subscribers, ss.id)
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/server_options.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/server_options.go
new file mode 100644
index 0000000..0ebbecf
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/server_options.go
@@ -0,0 +1,121 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+)
+
+// defaultRegistry is the BSON codec registry used when none is supplied via WithRegistry.
+var defaultRegistry = bson.NewRegistryBuilder().Build()
+
+// serverConfig holds the settings for a single Server; it is populated by
+// applying ServerOption functions in newServerConfig.
+type serverConfig struct {
+	clock             *session.ClusterClock // cluster clock set via WithClock
+	compressionOpts   []string              // wire compressor names set via WithCompressionOptions
+	connectionOpts    []connection.Option   // options applied to the server's connections
+	appname           string
+	heartbeatInterval time.Duration // interval between monitoring heartbeats
+	heartbeatTimeout  time.Duration // timeout for a heartbeat socket to connect
+	maxConns          uint16        // maximum connections; 0 means no upper limit (see WithMaxConnections)
+	maxIdleConns      uint16        // maximum idle connections
+	registry          *bsoncodec.Registry // registry used when creating cursors (see WithRegistry)
+}
+
+// newServerConfig builds a serverConfig with default values and then applies
+// each ServerOption in order. The first option returning an error aborts
+// construction.
+func newServerConfig(opts ...ServerOption) (*serverConfig, error) {
+	cfg := serverConfig{
+		heartbeatInterval: 10 * time.Second,
+		heartbeatTimeout:  10 * time.Second,
+		maxConns:          100,
+		maxIdleConns:      100,
+		registry:          defaultRegistry,
+	}
+
+	for _, apply := range opts {
+		if err := apply(&cfg); err != nil {
+			return nil, err
+		}
+	}
+
+	return &cfg, nil
+}
+
+// ServerOption configures a server. Options are applied in order by
+// newServerConfig; an option returning an error aborts configuration.
+type ServerOption func(*serverConfig) error
+
+// WithConnectionOptions configures the server's connections. The callback
+// receives the currently configured options and returns the replacement set.
+func WithConnectionOptions(fn func(...connection.Option) []connection.Option) ServerOption {
+	return func(cfg *serverConfig) error {
+		updated := fn(cfg.connectionOpts...)
+		cfg.connectionOpts = updated
+		return nil
+	}
+}
+
+// WithCompressionOptions configures the server's compressors. The callback
+// receives the currently configured compressor names and returns the
+// replacement set.
+func WithCompressionOptions(fn func(...string) []string) ServerOption {
+	return func(cfg *serverConfig) error {
+		updated := fn(cfg.compressionOpts...)
+		cfg.compressionOpts = updated
+		return nil
+	}
+}
+
+// WithHeartbeatInterval configures a server's heartbeat interval. The callback
+// receives the current interval and returns the replacement value.
+func WithHeartbeatInterval(fn func(time.Duration) time.Duration) ServerOption {
+	return func(cfg *serverConfig) error {
+		interval := fn(cfg.heartbeatInterval)
+		cfg.heartbeatInterval = interval
+		return nil
+	}
+}
+
+// WithHeartbeatTimeout configures how long to wait for a heartbeat socket to
+// connect. The callback receives the current timeout and returns the
+// replacement value.
+func WithHeartbeatTimeout(fn func(time.Duration) time.Duration) ServerOption {
+	return func(cfg *serverConfig) error {
+		timeout := fn(cfg.heartbeatTimeout)
+		cfg.heartbeatTimeout = timeout
+		return nil
+	}
+}
+
+// WithMaxConnections configures the maximum number of connections to allow for
+// a given server. If max is 0, then there is no upper limit to the number of
+// connections.
+func WithMaxConnections(fn func(uint16) uint16) ServerOption {
+	return func(cfg *serverConfig) error {
+		limit := fn(cfg.maxConns)
+		cfg.maxConns = limit
+		return nil
+	}
+}
+
+// WithMaxIdleConnections configures the maximum number of idle connections
+// allowed for the server.
+func WithMaxIdleConnections(fn func(uint16) uint16) ServerOption {
+	return func(cfg *serverConfig) error {
+		limit := fn(cfg.maxIdleConns)
+		cfg.maxIdleConns = limit
+		return nil
+	}
+}
+
+// WithClock configures the ClusterClock for the server to use. The callback
+// receives the current clock and returns the replacement.
+func WithClock(fn func(clock *session.ClusterClock) *session.ClusterClock) ServerOption {
+	return func(cfg *serverConfig) error {
+		clk := fn(cfg.clock)
+		cfg.clock = clk
+		return nil
+	}
+}
+
+// WithRegistry configures the registry for the server to use when creating
+// cursors. The callback receives the current registry and returns the
+// replacement.
+func WithRegistry(fn func(*bsoncodec.Registry) *bsoncodec.Registry) ServerOption {
+	return func(cfg *serverConfig) error {
+		reg := fn(cfg.registry)
+		cfg.registry = reg
+		return nil
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/topology.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/topology.go
new file mode 100644
index 0000000..09a319c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/topology.go
@@ -0,0 +1,471 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package topology contains types that handles the discovery, monitoring, and selection
+// of servers. This package is designed to expose enough inner workings of service discovery
+// and monitoring to allow low level applications to have fine grained control, while hiding
+// most of the detailed implementation of the algorithms.
+package topology
+
+import (
+	"context"
+	"errors"
+	"math/rand"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsoncodec"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// ErrSubscribeAfterClosed is returned when a user attempts to subscribe to a
+// closed Server or Topology.
+var ErrSubscribeAfterClosed = errors.New("cannot subscribe after close")
+
+// ErrTopologyClosed is returned when a user attempts to call a method on a
+// closed Topology.
+var ErrTopologyClosed = errors.New("topology is closed")
+
+// ErrTopologyConnected is returned when a user attempts to connect to an
+// already connected Topology.
+var ErrTopologyConnected = errors.New("topology is connected or connecting")
+
+// ErrServerSelectionTimeout is returned from server selection when the server
+// selection process took longer than allowed by the timeout.
+var ErrServerSelectionTimeout = errors.New("server selection timeout")
+
+// MonitorMode represents the way in which a server is monitored.
+type MonitorMode uint8
+
+// These constants are the available monitoring modes.
+const (
+	// AutomaticMode is the default mode; the topology is built from the
+	// configured seed list (see New and Connect).
+	AutomaticMode MonitorMode = iota
+	// SingleMode forces the topology kind to description.Single (see New).
+	SingleMode
+)
+
+// Topology represents a MongoDB deployment.
+type Topology struct {
+	registry *bsoncodec.Registry
+
+	connectionstate int32 // disconnected/connecting/connected/disconnecting; accessed atomically
+
+	cfg *config
+
+	desc atomic.Value // holds a description.Topology
+
+	done chan struct{} // signals the update loop (see Disconnect/update) to exit
+
+	fsm       *fsm
+	changes   chan description.Server // server description updates forwarded from per-server subscriptions
+	changeswg sync.WaitGroup          // tracks the update goroutine
+
+	SessionPool *session.Pool
+
+	// This should really be encapsulated into its own type. This will likely
+	// require a redesign so we can share a minimum of data between the
+	// subscribers and the topology.
+	subscribers         map[uint64]chan description.Topology
+	currentSubscriberID uint64
+	subscriptionsClosed bool
+	subLock             sync.Mutex
+
+	// We should redesign how we connect and handle individual servers. This is
+	// too difficult to maintain and it's rather easy to accidentally access
+	// the servers without acquiring the lock or checking if the servers are
+	// closed. This lock should also be an RWMutex.
+	serversLock   sync.Mutex
+	serversClosed bool
+	servers       map[address.Address]*Server
+
+	wg sync.WaitGroup // tracks the per-server change-forwarding goroutines (see addServer)
+}
+
+// New creates a new topology. The returned Topology is idle; call Connect to
+// begin monitoring.
+func New(opts ...Option) (*Topology, error) {
+	cfg, err := newConfig(opts...)
+	if err != nil {
+		return nil, err
+	}
+
+	topo := &Topology{
+		cfg:         cfg,
+		done:        make(chan struct{}),
+		fsm:         newFSM(),
+		changes:     make(chan description.Server),
+		subscribers: make(map[uint64]chan description.Topology),
+		servers:     make(map[address.Address]*Server),
+	}
+	topo.desc.Store(description.Topology{})
+
+	// A configured replica set name seeds the FSM as a replica set without a
+	// known primary; single-server mode overrides the kind entirely.
+	if name := cfg.replicaSetName; name != "" {
+		topo.fsm.SetName = name
+		topo.fsm.Kind = description.ReplicaSetNoPrimary
+	}
+	if cfg.mode == SingleMode {
+		topo.fsm.Kind = description.Single
+	}
+
+	return topo, nil
+}
+
+// Connect initializes a Topology and starts the monitoring process. This function
+// must be called to properly monitor the topology.
+func (t *Topology) Connect(ctx context.Context) error {
+	// Only one Connect may proceed; a concurrent or repeated call fails fast.
+	if !atomic.CompareAndSwapInt32(&t.connectionstate, disconnected, connecting) {
+		return ErrTopologyConnected
+	}
+
+	t.desc.Store(description.Topology{})
+	var err error
+	t.serversLock.Lock()
+	for _, a := range t.cfg.seedList {
+		addr := address.Address(a).Canonicalize()
+		t.fsm.Servers = append(t.fsm.Servers, description.Server{Addr: addr})
+		// NOTE(review): err is overwritten on every iteration and again by
+		// Subscribe below, so per-seed connect errors are discarded here.
+		err = t.addServer(ctx, addr)
+	}
+	t.serversLock.Unlock()
+
+	go t.update()
+	t.changeswg.Add(1)
+
+	t.subscriptionsClosed = false // explicitly set in case topology was disconnected and then reconnected
+
+	atomic.StoreInt32(&t.connectionstate, connected)
+
+	// After connection, make a subscription to keep the pool updated
+	// (assumes Subscribe succeeds here since the state was just set to
+	// connected; a nil sub would panic on sub.C).
+	sub, err := t.Subscribe()
+	t.SessionPool = session.NewPool(sub.C)
+	return err
+}
+
+// Disconnect closes the topology. It stops the monitoring thread and
+// closes all open subscriptions.
+func (t *Topology) Disconnect(ctx context.Context) error {
+	if !atomic.CompareAndSwapInt32(&t.connectionstate, connected, disconnecting) {
+		return ErrTopologyClosed
+	}
+
+	// Mark the server map closed and tear down every server while holding the
+	// lock so apply cannot concurrently add or remove servers.
+	t.serversLock.Lock()
+	t.serversClosed = true
+	for addr, server := range t.servers {
+		t.removeServer(ctx, addr, server)
+	}
+	t.serversLock.Unlock()
+
+	// Wait for the per-server forwarding goroutines to drain, then signal the
+	// update loop to exit and wait for it to finish closing subscriptions.
+	t.wg.Wait()
+	t.done <- struct{}{}
+	t.changeswg.Wait()
+
+	t.desc.Store(description.Topology{})
+
+	atomic.StoreInt32(&t.connectionstate, disconnected)
+	return nil
+}
+
+// Description returns the current description of the topology. It returns the
+// zero description if none has been stored yet.
+func (t *Topology) Description() description.Topology {
+	if td, ok := t.desc.Load().(description.Topology); ok {
+		return td
+	}
+	return description.Topology{}
+}
+
+// Subscribe returns a Subscription on which all updated description.Topologys
+// will be sent. The channel of the subscription will have a buffer size of one,
+// and will be pre-populated with the current description.Topology.
+func (t *Topology) Subscribe() (*Subscription, error) {
+	if atomic.LoadInt32(&t.connectionstate) != connected {
+		return nil, errors.New("cannot subscribe to Topology that is not connected")
+	}
+
+	// Pre-populate the buffered channel so the subscriber immediately sees
+	// the current description.
+	current, ok := t.desc.Load().(description.Topology)
+	if !ok {
+		current = description.Topology{}
+	}
+	ch := make(chan description.Topology, 1)
+	ch <- current
+
+	t.subLock.Lock()
+	defer t.subLock.Unlock()
+	if t.subscriptionsClosed {
+		return nil, ErrSubscribeAfterClosed
+	}
+	id := t.currentSubscriberID
+	t.currentSubscriberID++
+	t.subscribers[id] = ch
+
+	return &Subscription{C: ch, t: t, id: id}, nil
+}
+
+// RequestImmediateCheck will send heartbeats to all the servers in the
+// topology right away, instead of waiting for the heartbeat timeout. It is a
+// no-op when the topology is not connected.
+func (t *Topology) RequestImmediateCheck() {
+	if atomic.LoadInt32(&t.connectionstate) != connected {
+		return
+	}
+
+	t.serversLock.Lock()
+	defer t.serversLock.Unlock()
+	for _, srv := range t.servers {
+		srv.RequestImmediateCheck()
+	}
+}
+
+// SupportsSessions returns true if the topology supports sessions.
+func (t *Topology) SupportsSessions() bool {
+	// Load the description once so both checks observe the same snapshot;
+	// the original called Description() twice, which could read two
+	// different topology states if an update landed in between.
+	desc := t.Description()
+	return desc.SessionTimeoutMinutes != 0 && desc.Kind != description.Single
+}
+
+// SelectServer selects a server given a selector. SelectServer complies with the
+// server selection spec, and will time out after serverSelectionTimeout or when the
+// parent context is done.
+func (t *Topology) SelectServer(ctx context.Context, ss description.ServerSelector) (*SelectedServer, error) {
+	if atomic.LoadInt32(&t.connectionstate) != connected {
+		return nil, ErrTopologyClosed
+	}
+	var ssTimeoutCh <-chan time.Time
+
+	// A non-positive timeout means no server selection timeout; the nil
+	// channel below never fires.
+	if t.cfg.serverSelectionTimeout > 0 {
+		ssTimeout := time.NewTimer(t.cfg.serverSelectionTimeout)
+		ssTimeoutCh = ssTimeout.C
+		defer ssTimeout.Stop()
+	}
+
+	sub, err := t.Subscribe()
+	if err != nil {
+		return nil, err
+	}
+	defer sub.Unsubscribe()
+
+	for {
+		suitable, err := t.selectServer(ctx, sub.C, ss, ssTimeoutCh)
+		if err != nil {
+			return nil, err
+		}
+
+		// selectServer only returns a non-empty list (or an error), so the
+		// random pick below is safe.
+		selected := suitable[rand.Intn(len(suitable))]
+		selectedS, err := t.FindServer(selected)
+		switch {
+		case err != nil:
+			return nil, err
+		case selectedS != nil:
+			return selectedS, nil
+		default:
+			// We don't have an actual server for the provided description.
+			// This could happen for a number of reasons, including that the
+			// server has since stopped being a part of this topology, or that
+			// the server selector returned no suitable servers.
+		}
+	}
+}
+
+// FindServer will attempt to find a server that fits the given server description.
+// This method will return nil, nil if a matching server could not be found.
+func (t *Topology) FindServer(selected description.Server) (*SelectedServer, error) {
+	if atomic.LoadInt32(&t.connectionstate) != connected {
+		return nil, ErrTopologyClosed
+	}
+
+	t.serversLock.Lock()
+	defer t.serversLock.Unlock()
+	srv, found := t.servers[selected.Addr]
+	if !found {
+		return nil, nil
+	}
+
+	return &SelectedServer{
+		Server: srv,
+		Kind:   t.Description().Kind,
+	}, nil
+}
+
+// wrapServerSelectionError annotates a server selection failure with the
+// current topology state to aid debugging.
+func wrapServerSelectionError(err error, t *Topology) error {
+	return fmt.Errorf("server selection error: %v\ncurrent topology: %s", err, t.String())
+}
+
+// selectServer is the core piece of server selection. It handles getting
+// topology descriptions and running server selection on those descriptions.
+// It loops until the selector yields at least one server, the timeout fires,
+// or the context is cancelled.
+func (t *Topology) selectServer(ctx context.Context, subscriptionCh <-chan description.Topology, ss description.ServerSelector, timeoutCh <-chan time.Time) ([]description.Server, error) {
+	var current description.Topology
+	for {
+		select {
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		case <-timeoutCh:
+			return nil, wrapServerSelectionError(ErrServerSelectionTimeout, t)
+		case current = <-subscriptionCh:
+		}
+
+		// Exclude servers of Unknown kind before running the selector.
+		var allowed []description.Server
+		for _, s := range current.Servers {
+			if s.Kind != description.Unknown {
+				allowed = append(allowed, s)
+			}
+		}
+
+		suitable, err := ss.SelectServer(current, allowed)
+		if err != nil {
+			return nil, wrapServerSelectionError(err, t)
+		}
+
+		if len(suitable) > 0 {
+			return suitable, nil
+		}
+
+		// Nothing suitable yet: ask all servers to heartbeat now and wait
+		// for the next topology description.
+		t.RequestImmediateCheck()
+	}
+}
+
+// update is the topology's monitoring loop: it consumes server description
+// changes from t.changes, applies them via apply, and fans the resulting
+// topology description out to all subscribers. It exits when t.done is
+// signalled (from Disconnect).
+func (t *Topology) update() {
+	defer t.changeswg.Done()
+	defer func() {
+		//  ¯\_(ツ)_/¯
+		// If the loop panics, still consume the done signal so Disconnect's
+		// send on t.done does not block forever.
+		if r := recover(); r != nil {
+			<-t.done
+		}
+	}()
+
+	for {
+		select {
+		case change := <-t.changes:
+			current, err := t.apply(context.TODO(), change)
+			if err != nil {
+				continue
+			}
+
+			t.desc.Store(current)
+			t.subLock.Lock()
+			for _, ch := range t.subscribers {
+				// We drain the description if there's one in the channel,
+				// so the size-1 buffered send below cannot block.
+				select {
+				case <-ch:
+				default:
+				}
+				ch <- current
+			}
+			t.subLock.Unlock()
+		case <-t.done:
+			// Shutdown: close and remove every subscription, then exit.
+			t.subLock.Lock()
+			for id, ch := range t.subscribers {
+				close(ch)
+				delete(t.subscribers, id)
+			}
+			t.subscriptionsClosed = true
+			t.subLock.Unlock()
+			return
+		}
+	}
+}
+
+// apply runs desc through the topology FSM to produce a new topology
+// description, then reconciles the server map against the diff between the
+// previous and new descriptions (removing and adding servers as needed).
+func (t *Topology) apply(ctx context.Context, desc description.Server) (description.Topology, error) {
+	var err error
+	prev := t.fsm.Topology
+
+	current, err := t.fsm.apply(desc)
+	if err != nil {
+		return description.Topology{}, err
+	}
+
+	diff := description.DiffTopology(prev, current)
+	t.serversLock.Lock()
+	if t.serversClosed {
+		// Disconnect has started; do not touch the server map.
+		t.serversLock.Unlock()
+		return description.Topology{}, nil
+	}
+
+	for _, removed := range diff.Removed {
+		if s, ok := t.servers[removed.Addr]; ok {
+			t.removeServer(ctx, removed.Addr, s)
+		}
+	}
+
+	for _, added := range diff.Added {
+		// NOTE(review): errors from addServer are explicitly discarded here.
+		_ = t.addServer(ctx, added.Addr)
+	}
+	t.serversLock.Unlock()
+	return current, nil
+}
+
+// addServer connects a Server for addr, registers it in the server map, and
+// starts a goroutine that forwards the server's description updates onto
+// t.changes. It is a no-op if the address is already present. Callers must
+// hold t.serversLock (both Connect and apply do).
+func (t *Topology) addServer(ctx context.Context, addr address.Address) error {
+	if _, ok := t.servers[addr]; ok {
+		return nil
+	}
+
+	svr, err := ConnectServer(ctx, addr, t.cfg.serverOpts...)
+	if err != nil {
+		return err
+	}
+
+	t.servers[addr] = svr
+	var sub *ServerSubscription
+	sub, err = svr.Subscribe()
+	if err != nil {
+		return err
+	}
+
+	// Forward server description changes to the update loop until the
+	// server's subscription channel is closed.
+	t.wg.Add(1)
+	go func() {
+		for c := range sub.C {
+			t.changes <- c
+		}
+
+		t.wg.Done()
+	}()
+
+	return nil
+}
+
+// removeServer disconnects the server (ignoring any disconnect error) and
+// deletes it from the server map. Callers must hold t.serversLock.
+func (t *Topology) removeServer(ctx context.Context, addr address.Address, server *Server) {
+	_ = server.Disconnect(ctx)
+	delete(t.servers, addr)
+}
+
+// String implements the Stringer interface, listing the topology kind
+// followed by one line per known server.
+// NOTE(review): t.servers is read here without serversLock — confirm callers
+// tolerate this.
+func (t *Topology) String() string {
+	out := fmt.Sprintf("Type: %s\nServers:\n", t.Description().Kind)
+	for _, srv := range t.servers {
+		out = out + srv.String() + "\n"
+	}
+	return out
+}
+
+// Subscription is a subscription to updates to the description of the Topology that created this
+// Subscription.
+type Subscription struct {
+	C  <-chan description.Topology // receives updated topology descriptions
+	t  *Topology                   // topology that owns this subscription
+	id uint64                      // key into the topology's subscribers map
+}
+
+// Unsubscribe removes this subscription from the topology's subscriber map and
+// closes its update channel. Calling it after the topology has closed all
+// subscriptions, or calling it twice, is a harmless no-op.
+func (s *Subscription) Unsubscribe() error {
+	s.t.subLock.Lock()
+	defer s.t.subLock.Unlock()
+
+	if s.t.subscriptionsClosed {
+		return nil
+	}
+	if ch, ok := s.t.subscribers[s.id]; ok {
+		close(ch)
+		delete(s.t.subscribers, s.id)
+	}
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/topology_options.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/topology_options.go
new file mode 100644
index 0000000..9fa98e6
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/topology/topology_options.go
@@ -0,0 +1,269 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package topology
+
+import (
+	"bytes"
+	"strings"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/auth"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/compressor"
+	"github.com/mongodb/mongo-go-driver/x/network/connection"
+	"github.com/mongodb/mongo-go-driver/x/network/connstring"
+)
+
+// Option is a configuration option for a topology. Options are applied in
+// order by newConfig; an option returning an error aborts configuration.
+type Option func(*config) error
+
+// config holds the settings for a Topology, populated via Option functions.
+type config struct {
+	mode                   MonitorMode           // monitoring mode; SingleMode forces a single-server topology
+	replicaSetName         string                // expected replica set name, if any
+	seedList               []string              // initial host list; defaults to localhost:27017
+	serverOpts             []ServerOption        // options applied to each server the topology creates
+	cs                     connstring.ConnString // last connection string applied via WithConnString
+	serverSelectionTimeout time.Duration         // 0 means no server selection timeout (see SelectServer)
+}
+
+// newConfig builds a config with default values and then applies each Option
+// in order. The first option returning an error aborts construction.
+func newConfig(opts ...Option) (*config, error) {
+	cfg := config{
+		seedList:               []string{"localhost:27017"},
+		serverSelectionTimeout: 30 * time.Second,
+	}
+
+	for _, apply := range opts {
+		if err := apply(&cfg); err != nil {
+			return nil, err
+		}
+	}
+
+	return &cfg, nil
+}
+
+// WithConnString configures the topology using the connection string. It
+// translates connection-string settings into topology, server, and connection
+// options: timeouts, pool sizes, TLS, authentication, and compression.
+func WithConnString(fn func(connstring.ConnString) connstring.ConnString) Option {
+	return func(c *config) error {
+		cs := fn(c.cs)
+		c.cs = cs
+
+		if cs.ServerSelectionTimeoutSet {
+			c.serverSelectionTimeout = cs.ServerSelectionTimeout
+		}
+
+		var connOpts []connection.Option
+
+		if cs.AppName != "" {
+			connOpts = append(connOpts, connection.WithAppName(func(string) string { return cs.AppName }))
+		}
+
+		switch cs.Connect {
+		case connstring.SingleConnect:
+			c.mode = SingleMode
+		}
+
+		c.seedList = cs.Hosts
+
+		// Connect timeout applies both to heartbeat sockets and data sockets.
+		if cs.ConnectTimeout > 0 {
+			c.serverOpts = append(c.serverOpts, WithHeartbeatTimeout(func(time.Duration) time.Duration { return cs.ConnectTimeout }))
+			connOpts = append(connOpts, connection.WithConnectTimeout(func(time.Duration) time.Duration { return cs.ConnectTimeout }))
+		}
+
+		if cs.SocketTimeoutSet {
+			connOpts = append(
+				connOpts,
+				connection.WithReadTimeout(func(time.Duration) time.Duration { return cs.SocketTimeout }),
+				connection.WithWriteTimeout(func(time.Duration) time.Duration { return cs.SocketTimeout }),
+			)
+		}
+
+		if cs.HeartbeatInterval > 0 {
+			c.serverOpts = append(c.serverOpts, WithHeartbeatInterval(func(time.Duration) time.Duration { return cs.HeartbeatInterval }))
+		}
+
+		if cs.MaxConnIdleTime > 0 {
+			connOpts = append(connOpts, connection.WithIdleTimeout(func(time.Duration) time.Duration { return cs.MaxConnIdleTime }))
+		}
+
+		// maxPoolSize bounds both total and idle connections.
+		if cs.MaxPoolSizeSet {
+			c.serverOpts = append(c.serverOpts, WithMaxConnections(func(uint16) uint16 { return cs.MaxPoolSize }))
+			c.serverOpts = append(c.serverOpts, WithMaxIdleConnections(func(uint16) uint16 { return cs.MaxPoolSize }))
+		}
+
+		if cs.ReplicaSet != "" {
+			c.replicaSetName = cs.ReplicaSet
+		}
+
+		// TLS configuration; x509Username is derived from the client
+		// certificate subject for MONGODB-X509 auth below.
+		var x509Username string
+		if cs.SSL {
+			tlsConfig := connection.NewTLSConfig()
+
+			if cs.SSLCaFileSet {
+				err := tlsConfig.AddCACertFromFile(cs.SSLCaFile)
+				if err != nil {
+					return err
+				}
+			}
+
+			if cs.SSLInsecure {
+				tlsConfig.SetInsecure(true)
+			}
+
+			if cs.SSLClientCertificateKeyFileSet {
+				if cs.SSLClientCertificateKeyPasswordSet && cs.SSLClientCertificateKeyPassword != nil {
+					tlsConfig.SetClientCertDecryptPassword(cs.SSLClientCertificateKeyPassword)
+				}
+				s, err := tlsConfig.AddClientCertFromFile(cs.SSLClientCertificateKeyFile)
+				if err != nil {
+					return err
+				}
+
+				// The Go x509 package gives the subject with the pairs in reverse order that we want.
+				pairs := strings.Split(s, ",")
+				b := bytes.NewBufferString("")
+
+				for i := len(pairs) - 1; i >= 0; i-- {
+					b.WriteString(pairs[i])
+
+					if i > 0 {
+						b.WriteString(",")
+					}
+				}
+
+				x509Username = b.String()
+			}
+
+			connOpts = append(connOpts, connection.WithTLSConfig(func(*connection.TLSConfig) *connection.TLSConfig { return tlsConfig }))
+		}
+
+		// Authentication: a handshaker with an authenticator when credentials
+		// (or a credential-less mechanism like X509/GSSAPI) are configured,
+		// otherwise a plain handshake.
+		if cs.Username != "" || cs.AuthMechanism == auth.MongoDBX509 || cs.AuthMechanism == auth.GSSAPI {
+			cred := &auth.Cred{
+				Source:      "admin",
+				Username:    cs.Username,
+				Password:    cs.Password,
+				PasswordSet: cs.PasswordSet,
+				Props:       cs.AuthMechanismProperties,
+			}
+
+			if cs.AuthSource != "" {
+				cred.Source = cs.AuthSource
+			} else {
+				switch cs.AuthMechanism {
+				case auth.MongoDBX509:
+					if cred.Username == "" {
+						cred.Username = x509Username
+					}
+					fallthrough
+				case auth.GSSAPI, auth.PLAIN:
+					cred.Source = "$external"
+				default:
+					cred.Source = cs.Database
+				}
+			}
+
+			authenticator, err := auth.CreateAuthenticator(cs.AuthMechanism, cred)
+			if err != nil {
+				return err
+			}
+
+			connOpts = append(connOpts, connection.WithHandshaker(func(h connection.Handshaker) connection.Handshaker {
+				options := &auth.HandshakeOptions{
+					AppName:       cs.AppName,
+					Authenticator: authenticator,
+					Compressors:   cs.Compressors,
+				}
+				if cs.AuthMechanism == "" {
+					// Required for SASL mechanism negotiation during handshake
+					options.DBUser = cred.Source + "." + cred.Username
+				}
+				return auth.Handshaker(h, options)
+			}))
+		} else {
+			// We need to add a non-auth Handshaker to the connection options
+			connOpts = append(connOpts, connection.WithHandshaker(func(h connection.Handshaker) connection.Handshaker {
+				return &command.Handshake{Client: command.ClientDoc(cs.AppName), Compressors: cs.Compressors}
+			}))
+		}
+
+		// Compression: only "snappy" and "zlib" are recognized here.
+		if len(cs.Compressors) > 0 {
+			comp := make([]compressor.Compressor, 0, len(cs.Compressors))
+
+			for _, c := range cs.Compressors {
+				switch c {
+				case "snappy":
+					comp = append(comp, compressor.CreateSnappy())
+				case "zlib":
+					zlibComp, err := compressor.CreateZlib(cs.ZlibLevel)
+					if err != nil {
+						return err
+					}
+
+					comp = append(comp, zlibComp)
+				}
+			}
+
+			connOpts = append(connOpts, connection.WithCompressors(func(compressors []compressor.Compressor) []compressor.Compressor {
+				return append(compressors, comp...)
+			}))
+
+			c.serverOpts = append(c.serverOpts, WithCompressionOptions(func(opts ...string) []string {
+				return append(opts, cs.Compressors...)
+			}))
+		}
+
+		if len(connOpts) > 0 {
+			c.serverOpts = append(c.serverOpts, WithConnectionOptions(func(opts ...connection.Option) []connection.Option {
+				return append(opts, connOpts...)
+			}))
+		}
+
+		return nil
+	}
+}
+
+// WithMode configures the topology's monitor mode. The callback receives the
+// current mode and returns the replacement.
+func WithMode(fn func(MonitorMode) MonitorMode) Option {
+	return func(cfg *config) error {
+		mode := fn(cfg.mode)
+		cfg.mode = mode
+		return nil
+	}
+}
+
+// WithReplicaSetName configures the topology's default replica set name. The
+// callback receives the current name and returns the replacement.
+func WithReplicaSetName(fn func(string) string) Option {
+	return func(cfg *config) error {
+		name := fn(cfg.replicaSetName)
+		cfg.replicaSetName = name
+		return nil
+	}
+}
+
+// WithSeedList configures a topology's seed list. The callback receives the
+// current seeds and returns the replacement set.
+func WithSeedList(fn func(...string) []string) Option {
+	return func(cfg *config) error {
+		seeds := fn(cfg.seedList...)
+		cfg.seedList = seeds
+		return nil
+	}
+}
+
+// WithServerOptions configures a topology's server options for when a new server
+// needs to be created. The callback receives the current options and returns
+// the replacement set.
+func WithServerOptions(fn func(...ServerOption) []ServerOption) Option {
+	return func(cfg *config) error {
+		updated := fn(cfg.serverOpts...)
+		cfg.serverOpts = updated
+		return nil
+	}
+}
+
+// WithServerSelectionTimeout configures a topology's server selection timeout.
+// A server selection timeout of 0 means there is no timeout for server selection.
+func WithServerSelectionTimeout(fn func(time.Duration) time.Duration) Option {
+	return func(cfg *config) error {
+		timeout := fn(cfg.serverSelectionTimeout)
+		cfg.serverSelectionTimeout = timeout
+		return nil
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/update.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/update.go
new file mode 100644
index 0000000..d8d8d16
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/update.go
@@ -0,0 +1,134 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/mongo/options"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// Update handles the full cycle dispatch and execution of an update command against the provided
+// topology: server selection, implicit session creation, option translation,
+// and (when supported and enabled) a single retry of retryable write errors.
+func Update(
+	ctx context.Context,
+	cmd command.Update,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+	retryWrite bool,
+	opts ...*options.UpdateOptions,
+) (result.Update, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return result.Update{}, err
+	}
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return result.Update{}, err
+		}
+		defer cmd.Session.EndSession()
+	}
+
+	// Translate UpdateOptions into command elements, rejecting options the
+	// selected server's wire version does not support.
+	updateOpts := options.MergeUpdateOptions(opts...)
+
+	if updateOpts.ArrayFilters != nil {
+		// arrayFilters requires wire version 6+.
+		if ss.Description().WireVersion.Max < 6 {
+			return result.Update{}, ErrArrayFilters
+		}
+		arr, err := updateOpts.ArrayFilters.ToArray()
+		if err != nil {
+			return result.Update{}, err
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"arrayFilters", bsonx.Array(arr)})
+	}
+	if updateOpts.BypassDocumentValidation != nil && ss.Description().WireVersion.Includes(4) {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"bypassDocumentValidation", bsonx.Boolean(*updateOpts.BypassDocumentValidation)})
+	}
+	if updateOpts.Collation != nil {
+		// collation requires wire version 5+.
+		if ss.Description().WireVersion.Max < 5 {
+			return result.Update{}, ErrCollation
+		}
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"collation", bsonx.Document(updateOpts.Collation.ToDocument())})
+	}
+	if updateOpts.Upsert != nil {
+		cmd.Opts = append(cmd.Opts, bsonx.Elem{"upsert", bsonx.Boolean(*updateOpts.Upsert)})
+	}
+
+	// Execute in a single trip if retry writes not supported, or retry not enabled
+	if !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) || !retryWrite {
+		if cmd.Session != nil {
+			cmd.Session.RetryWrite = false // explicitly set to false to prevent encoding transaction number
+		}
+		return update(ctx, cmd, ss, nil)
+	}
+
+	cmd.Session.RetryWrite = retryWrite
+	cmd.Session.IncrementTxnNumber()
+
+	res, originalErr := update(ctx, cmd, ss, nil)
+
+	// Retry if appropriate. Precedence: (ok && retryable command error) OR
+	// (retryable write concern error).
+	// NOTE(review): if only the write-concern branch matches, cerr is the
+	// zero command.Error yet is still passed as oldErr below — confirm that
+	// is intended.
+	if cerr, ok := originalErr.(command.Error); ok && cerr.Retryable() ||
+		res.WriteConcernError != nil && command.IsWriteConcernErrorRetryable(res.WriteConcernError) {
+		ss, err := topo.SelectServer(ctx, selector)
+
+		// Return original error if server selection fails or new server does not support retryable writes
+		if err != nil || !retrySupported(topo, ss.Description(), cmd.Session, cmd.WriteConcern) {
+			return res, originalErr
+		}
+
+		return update(ctx, cmd, ss, cerr)
+	}
+	return res, originalErr
+
+}
+
+// update executes the update command against the selected server. If oldErr is
+// non-nil (a previous attempt's error) and obtaining a connection fails, oldErr
+// is returned instead of the new connection error. Unacknowledged writes are
+// fired in a background goroutine and reported as ErrUnacknowledgedWrite.
+func update(
+	ctx context.Context,
+	cmd command.Update,
+	ss *topology.SelectedServer,
+	oldErr error,
+) (result.Update, error) {
+	desc := ss.Description()
+
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		if oldErr != nil {
+			return result.Update{}, oldErr
+		}
+		return result.Update{}, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		// Fire-and-forget: run the round trip in the background and suppress
+		// any panic; the caller only learns the write was unacknowledged.
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return result.Update{}, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid/uuid.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid/uuid.go
new file mode 100644
index 0000000..7ad5eea
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid/uuid.go
@@ -0,0 +1,37 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package uuid
+
+import (
+	"bytes"
+	"crypto/rand"
+	"io"
+)
+
+// UUID represents a UUID.
+type UUID [16]byte
+
+var rander = rand.Reader
+
+// New generates a new random (version 4) uuid, reading its bytes from the
+// package-level random source.
+func New() (UUID, error) {
+	var id UUID
+	if _, err := io.ReadFull(rander, id[:]); err != nil {
+		return UUID{}, err
+	}
+
+	// Stamp the version (4) and variant (10) bits.
+	id[6] = (id[6] & 0x0f) | 0x40
+	id[8] = (id[8] & 0x3f) | 0x80
+
+	return id, nil
+}
+
+// Equal returns true if two UUIDs are equal.
+func Equal(a, b UUID) bool {
+	// a[:] and b[:] are already []byte; the original's extra []byte(...)
+	// conversions copied both slices needlessly.
+	return bytes.Equal(a[:], b[:])
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/write.go b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/write.go
new file mode 100644
index 0000000..6447a49
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/mongo/driver/write.go
@@ -0,0 +1,79 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package driver
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/topology"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/uuid"
+	"github.com/mongodb/mongo-go-driver/x/network/command"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+)
+
+// Write handles the full cycle dispatch and execution of a write command against the provided
+// topology. It selects a server, checks out a connection, optionally starts
+// an implicit session, and round-trips the command, returning the raw BSON
+// response.
+func Write(
+	ctx context.Context,
+	cmd command.Write,
+	topo *topology.Topology,
+	selector description.ServerSelector,
+	clientID uuid.UUID,
+	pool *session.Pool,
+) (bson.Raw, error) {
+
+	ss, err := topo.SelectServer(ctx, selector)
+	if err != nil {
+		return nil, err
+	}
+
+	desc := ss.Description()
+	conn, err := ss.Connection(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if !writeconcern.AckWrite(cmd.WriteConcern) {
+		// Unacknowledged write: fire and forget on a background goroutine.
+		// The recover swallows any panic so it cannot take down the caller,
+		// and the goroutine owns (and closes) the connection.
+		go func() {
+			defer func() { _ = recover() }()
+			defer conn.Close()
+
+			_, _ = cmd.RoundTrip(ctx, desc, conn)
+		}()
+
+		return nil, command.ErrUnacknowledgedWrite
+	}
+	defer conn.Close()
+
+	// If no explicit session and deployment supports sessions, start implicit session.
+	if cmd.Session == nil && topo.SupportsSessions() {
+		cmd.Session, err = session.NewClientSession(pool, clientID, session.Implicit)
+		if err != nil {
+			return nil, err
+		}
+		// The implicit session is scoped to this single operation.
+		defer cmd.Session.EndSession()
+	}
+
+	return cmd.RoundTrip(ctx, desc, conn)
+}
+
+// retrySupported reports whether a retryable write can be attempted.
+// Retryable writes are supported if the server supports sessions, the operation is not
+// within a transaction, and the write is acknowledged.
+func retrySupported(
+	topo *topology.Topology,
+	desc description.SelectedServer,
+	sess *session.Client,
+	wc *writeconcern.WriteConcern,
+) bool {
+	return topo.SupportsSessions() &&
+		description.SessionsSupported(desc.WireVersion) &&
+		!(sess.TransactionInProgress() || sess.TransactionStarting()) &&
+		writeconcern.AckWrite(wc)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/address/addr.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/address/addr.go
new file mode 100644
index 0000000..c2a3bb7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/address/addr.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package address
+
+import (
+	"net"
+	"strings"
+)
+
+// defaultPort is appended to host-only addresses; 27017 is MongoDB's
+// standard port.
+const defaultPort = "27017"
+
+// Address is a network address. It can either be an IP address or a DNS name.
+type Address string
+
+// Network is the network protocol for this address. In most cases this will be
+// "tcp" or "unix". The "sock" suffix heuristic treats paths like
+// "/tmp/mongodb-27017.sock" as unix domain sockets.
+func (a Address) Network() string {
+	if strings.HasSuffix(string(a), "sock") {
+		return "unix"
+	}
+	return "tcp"
+}
+
+// String is the canonical version of this address, e.g. localhost:27017,
+// 1.2.3.4:27017, example.com:27017. The address is lowercased and, for
+// non-unix addresses missing a port, ":27017" is appended.
+func (a Address) String() string {
+	// TODO: unicode case folding?
+	s := strings.ToLower(string(a))
+	if len(s) == 0 {
+		return ""
+	}
+	if a.Network() != "unix" {
+		// NOTE(review): matching on the error text is fragile but mirrors
+		// the only signal net.SplitHostPort exposes for a missing port.
+		_, _, err := net.SplitHostPort(s)
+		if err != nil && strings.Contains(err.Error(), "missing port in address") {
+			s += ":" + defaultPort
+		}
+	}
+
+	return s
+}
+
+// Canonicalize creates a canonicalized address.
+func (a Address) Canonicalize() Address {
+	return Address(a.String())
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/abort_transaction.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/abort_transaction.go
new file mode 100644
index 0000000..54e5cca
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/abort_transaction.go
@@ -0,0 +1,89 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// AbortTransaction represents the abortTransaction() command
+type AbortTransaction struct {
+	Session *session.Client
+	err     error                    // deferred decode error, surfaced by Result/Err
+	result  result.TransactionResult // decoded server response
+}
+
+// Encode will encode this command into a wiremessage for the given server description.
+func (at *AbortTransaction) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd := at.encode(desc)
+	return cmd.Encode(desc)
+}
+
+// encode builds the underlying Write command. abortTransaction always runs
+// against the admin database and uses the session's current write concern.
+func (at *AbortTransaction) encode(desc description.SelectedServer) *Write {
+	cmd := bsonx.Doc{{"abortTransaction", bsonx.Int32(1)}}
+	return &Write{
+		DB:           "admin",
+		Command:      cmd,
+		Session:      at.Session,
+		WriteConcern: at.Session.CurrentWc,
+	}
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding are deferred until
+// either the Result or Err methods are called.
+func (at *AbortTransaction) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *AbortTransaction {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		at.err = err
+		return at
+	}
+
+	return at.decode(desc, rdr)
+}
+
+// decode unmarshals the raw response and promotes a write concern error,
+// if present, to the command-level error.
+func (at *AbortTransaction) decode(desc description.SelectedServer, rdr bson.Raw) *AbortTransaction {
+	at.err = bson.Unmarshal(rdr, &at.result)
+	if at.err == nil && at.result.WriteConcernError != nil {
+		at.err = Error{
+			Code:    int32(at.result.WriteConcernError.Code),
+			Message: at.result.WriteConcernError.ErrMsg,
+		}
+	}
+	return at
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (at *AbortTransaction) Result() (result.TransactionResult, error) {
+	if at.err != nil {
+		return result.TransactionResult{}, at.err
+	}
+
+	return at.result, nil
+}
+
+// Err returns the error set on this command
+func (at *AbortTransaction) Err() error {
+	return at.err
+}
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter
+func (at *AbortTransaction) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.TransactionResult, error) {
+	cmd := at.encode(desc)
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.TransactionResult{}, err
+	}
+
+	return at.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/aggregate.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/aggregate.go
new file mode 100644
index 0000000..106f583
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/aggregate.go
@@ -0,0 +1,159 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Aggregate represents the aggregate command.
+//
+// The aggregate command performs an aggregation.
+type Aggregate struct {
+	NS           Namespace
+	Pipeline     bsonx.Arr
+	CursorOpts   []bsonx.Elem
+	Opts         []bsonx.Elem
+	ReadPref     *readpref.ReadPref
+	WriteConcern *writeconcern.WriteConcern
+	ReadConcern  *readconcern.ReadConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	// result and err hold the decoded response until Result or Err is called.
+	result bson.Raw
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (a *Aggregate) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := a.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+// encode builds the underlying Read command, splitting options between the
+// command document and its nested "cursor" sub-document.
+func (a *Aggregate) encode(desc description.SelectedServer) (*Read, error) {
+	if err := a.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	command := bsonx.Doc{
+		{"aggregate", bsonx.String(a.NS.Collection)},
+		{"pipeline", bsonx.Array(a.Pipeline)},
+	}
+
+	cursor := bsonx.Doc{}
+	hasOutStage := a.HasDollarOut()
+
+	for _, opt := range a.Opts {
+		switch opt.Key {
+		case "batchSize":
+			// A batchSize of 0 is dropped when the pipeline ends in $out —
+			// presumably to avoid requesting an empty first batch from a
+			// stage that writes its output; TODO confirm against server docs.
+			if opt.Value.Int32() == 0 && hasOutStage {
+				continue
+			}
+			cursor = append(cursor, opt)
+		default:
+			command = append(command, opt)
+		}
+	}
+	command = append(command, bsonx.Elem{"cursor", bsonx.Document(cursor)})
+
+	// add write concern because it won't be added by the Read command's Encode()
+	// (only wire version 5+ servers accept a write concern on aggregate).
+	if desc.WireVersion.Max >= 5 && hasOutStage && a.WriteConcern != nil {
+		t, data, err := a.WriteConcern.MarshalBSONValue()
+		if err != nil {
+			return nil, err
+		}
+		var xval bsonx.Val
+		err = xval.UnmarshalBSONValue(t, data)
+		if err != nil {
+			return nil, err
+		}
+		command = append(command, bsonx.Elem{Key: "writeConcern", Value: xval})
+	}
+
+	return &Read{
+		DB:          a.NS.DB,
+		Command:     command,
+		ReadPref:    a.ReadPref,
+		ReadConcern: a.ReadConcern,
+		Clock:       a.Clock,
+		Session:     a.Session,
+	}, nil
+}
+
+// HasDollarOut returns true if the Pipeline field contains a $out stage.
+// Only the final pipeline stage is inspected, since $out is only valid there.
+func (a *Aggregate) HasDollarOut() bool {
+	if a.Pipeline == nil {
+		return false
+	}
+	if len(a.Pipeline) == 0 {
+		return false
+	}
+
+	val := a.Pipeline[len(a.Pipeline)-1]
+
+	doc, ok := val.DocumentOK()
+	if !ok || len(doc) != 1 {
+		return false
+	}
+	return doc[0].Key == "$out"
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (a *Aggregate) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Aggregate {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		a.err = err
+		return a
+	}
+
+	return a.decode(desc, rdr)
+}
+
+// decode stores the raw response; no further parsing is performed here.
+func (a *Aggregate) decode(desc description.SelectedServer, rdr bson.Raw) *Aggregate {
+	a.result = rdr
+	return a
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (a *Aggregate) Result() (bson.Raw, error) {
+	if a.err != nil {
+		return nil, a.err
+	}
+	return a.result, nil
+}
+
+// Err returns the error set on this command.
+func (a *Aggregate) Err() error { return a.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (a *Aggregate) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := a.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+
+	return a.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/buildinfo.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/buildinfo.go
new file mode 100644
index 0000000..6bd09fa
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/buildinfo.go
@@ -0,0 +1,95 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// BuildInfo represents the buildInfo command.
+//
+// The buildInfo command is used for getting the build information for a
+// MongoDB server.
+type BuildInfo struct {
+	err error            // deferred decode error, surfaced by Result/Err
+	res result.BuildInfo // decoded server response
+}
+
+// Encode will encode this command into a wire message for the given server description.
+// buildInfo is sent as a legacy OP_QUERY against admin.$cmd rather than OP_MSG,
+// since it may be issued before the server's wire version is known.
+func (bi *BuildInfo) Encode() (wiremessage.WireMessage, error) {
+	// This can probably just be a global variable that we reuse.
+	cmd := bsonx.Doc{{"buildInfo", bsonx.Int32(1)}}
+	rdr, err := cmd.MarshalBSON()
+	if err != nil {
+		return nil, err
+	}
+	query := wiremessage.Query{
+		MsgHeader:          wiremessage.Header{RequestID: wiremessage.NextRequestID()},
+		FullCollectionName: "admin.$cmd",
+		Flags:              wiremessage.SlaveOK,
+		NumberToReturn:     -1,
+		Query:              rdr,
+	}
+	return query, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (bi *BuildInfo) Decode(wm wiremessage.WireMessage) *BuildInfo {
+	reply, ok := wm.(wiremessage.Reply)
+	if !ok {
+		bi.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return bi
+	}
+	rdr, err := decodeCommandOpReply(reply)
+	if err != nil {
+		bi.err = err
+		return bi
+	}
+	err = bson.Unmarshal(rdr, &bi.res)
+	if err != nil {
+		bi.err = err
+		return bi
+	}
+	return bi
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (bi *BuildInfo) Result() (result.BuildInfo, error) {
+	if bi.err != nil {
+		return result.BuildInfo{}, bi.err
+	}
+
+	return bi.res, nil
+}
+
+// Err returns the error set on this command.
+func (bi *BuildInfo) Err() error { return bi.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (bi *BuildInfo) RoundTrip(ctx context.Context, rw wiremessage.ReadWriter) (result.BuildInfo, error) {
+	wm, err := bi.Encode()
+	if err != nil {
+		return result.BuildInfo{}, err
+	}
+
+	err = rw.WriteWireMessage(ctx, wm)
+	if err != nil {
+		return result.BuildInfo{}, err
+	}
+	wm, err = rw.ReadWireMessage(ctx)
+	if err != nil {
+		return result.BuildInfo{}, err
+	}
+	return bi.Decode(wm).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/command.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/command.go
new file mode 100644
index 0000000..859f797
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/command.go
@@ -0,0 +1,708 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"errors"
+
+	"context"
+
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// WriteBatch represents a single batch for a write operation.
+type WriteBatch struct {
+	*Write
+	numDocs int // number of documents in this batch; used to offset upsert indexes
+}
+
+// DecodeError attempts to decode the wiremessage as an error. It returns
+// nil when the message does not carry a valid server error document.
+func DecodeError(wm wiremessage.WireMessage) error {
+	var rdr bson.Raw
+	switch msg := wm.(type) {
+	case wiremessage.Msg:
+		// The command response, if any, is in the single body section.
+		for _, section := range msg.Sections {
+			switch converted := section.(type) {
+			case wiremessage.SectionBody:
+				rdr = converted.Document
+			}
+		}
+	case wiremessage.Reply:
+		if msg.ResponseFlags&wiremessage.QueryFailure != wiremessage.QueryFailure {
+			return nil
+		}
+		rdr = msg.Documents[0]
+	}
+
+	err := rdr.Validate()
+	if err != nil {
+		return nil
+	}
+
+	extractedError := extractError(rdr)
+
+	// If parsed successfully return the extracted error. (Previously this
+	// returned err, which is guaranteed nil here, so a decoded server
+	// error was always swallowed.)
+	if _, ok := extractedError.(Error); ok {
+		return extractedError
+	}
+
+	return nil
+}
+
+// helper method to extract an error from a reader if there is one; first returned item is the
+// error if it exists, the second holds parsing errors
+func extractError(rdr bson.Raw) error {
+	var errmsg, codeName string
+	var code int32
+	var labels []string
+	elems, err := rdr.Elements()
+	if err != nil {
+		return err
+	}
+
+	for _, elem := range elems {
+		switch elem.Key() {
+		case "ok":
+			// ok == 1 (in any numeric BSON encoding) means success: no error.
+			switch elem.Value().Type {
+			case bson.TypeInt32:
+				if elem.Value().Int32() == 1 {
+					return nil
+				}
+			case bson.TypeInt64:
+				if elem.Value().Int64() == 1 {
+					return nil
+				}
+			case bson.TypeDouble:
+				if elem.Value().Double() == 1 {
+					return nil
+				}
+			}
+		case "errmsg":
+			if str, okay := elem.Value().StringValueOK(); okay {
+				errmsg = str
+			}
+		case "codeName":
+			if str, okay := elem.Value().StringValueOK(); okay {
+				codeName = str
+			}
+		case "code":
+			if c, okay := elem.Value().Int32OK(); okay {
+				code = c
+			}
+		case "errorLabels":
+			// Collect string labels; malformed arrays are skipped silently.
+			if arr, okay := elem.Value().ArrayOK(); okay {
+				elems, err := arr.Elements()
+				if err != nil {
+					continue
+				}
+				for _, elem := range elems {
+					if str, ok := elem.Value().StringValueOK(); ok {
+						labels = append(labels, str)
+					}
+				}
+
+			}
+		}
+	}
+
+	// Fall back to a generic message when the server supplied none.
+	if errmsg == "" {
+		errmsg = "command failed"
+	}
+
+	return Error{
+		Code:    code,
+		Message: errmsg,
+		Name:    codeName,
+		Labels:  labels,
+	}
+}
+
+// responseClusterTime extracts the $clusterTime value from a server response
+// and re-wraps it as a standalone document, or returns nil when absent.
+func responseClusterTime(response bson.Raw) bson.Raw {
+	clusterTime, err := response.LookupErr("$clusterTime")
+	if err != nil {
+		// $clusterTime not included by the server
+		return nil
+	}
+	idx, doc := bsoncore.AppendDocumentStart(nil)
+	doc = bsoncore.AppendHeader(doc, clusterTime.Type, "$clusterTime")
+	doc = append(doc, clusterTime.Value...)
+	doc, _ = bsoncore.AppendDocumentEnd(doc, idx)
+	return doc
+}
+
+// updateClusterTimes advances both the session's and the cluster clock's
+// cluster time from a server response. Either sess or clock may be nil.
+func updateClusterTimes(sess *session.Client, clock *session.ClusterClock, response bson.Raw) error {
+	clusterTime := responseClusterTime(response)
+	if clusterTime == nil {
+		return nil
+	}
+
+	if sess != nil {
+		err := sess.AdvanceClusterTime(clusterTime)
+		if err != nil {
+			return err
+		}
+	}
+
+	if clock != nil {
+		clock.AdvanceClusterTime(clusterTime)
+	}
+
+	return nil
+}
+
+// updateOperationTime advances the session's operation time from the
+// response's operationTime timestamp, if both are present.
+func updateOperationTime(sess *session.Client, response bson.Raw) error {
+	if sess == nil {
+		return nil
+	}
+
+	opTimeElem, err := response.LookupErr("operationTime")
+	if err != nil {
+		// operationTime not included by the server
+		return nil
+	}
+
+	t, i := opTimeElem.Timestamp()
+	return sess.AdvanceOperationTime(&primitive.Timestamp{
+		T: t,
+		I: i,
+	})
+}
+
+// marshalCommand marshals cmd to raw BSON; a nil command becomes the empty
+// document ({} is 5 bytes: length 5, terminator 0).
+func marshalCommand(cmd bsonx.Doc) (bson.Raw, error) {
+	if cmd == nil {
+		return bson.Raw{5, 0, 0, 0, 0}, nil
+	}
+
+	return cmd.MarshalBSON()
+}
+
+// addSessionFields adds session related fields (lsid and, when a transaction
+// is running, the transaction fields) to a BSON doc representing a command.
+// It is a no-op when the client is nil or the deployment does not support
+// sessions; it errors when the session has already been ended.
+func addSessionFields(cmd bsonx.Doc, desc description.SelectedServer, client *session.Client) (bsonx.Doc, error) {
+	if client == nil || !description.SessionsSupported(desc.WireVersion) || desc.SessionTimeoutMinutes == 0 {
+		return cmd, nil
+	}
+
+	if client.Terminated {
+		return cmd, session.ErrSessionEnded
+	}
+
+	// Remove any pre-existing lsid so the append below cannot produce a
+	// duplicate field. (The previous condition was inverted — err != nil —
+	// so it only "deleted" when the field was absent, a no-op, and left an
+	// existing lsid in place alongside the appended one.)
+	if _, err := cmd.LookupElementErr("lsid"); err == nil {
+		cmd = cmd.Delete("lsid")
+	}
+
+	cmd = append(cmd, bsonx.Elem{"lsid", bsonx.Document(client.SessionID)})
+
+	if client.TransactionRunning() ||
+		client.RetryingCommit {
+		cmd = addTransaction(cmd, client)
+	}
+
+	client.ApplyCommand() // advance the state machine based on a command executing
+
+	return cmd, nil
+}
+
+// addTransaction appends the transaction fields (txnNumber, optional
+// startTransaction, autocommit=false) to a command while in a transaction.
+func addTransaction(cmd bsonx.Doc, client *session.Client) bsonx.Doc {
+	cmd = append(cmd, bsonx.Elem{"txnNumber", bsonx.Int64(client.TxnNumber)})
+	if client.TransactionStarting() {
+		// When starting transaction, always transition to the next state, even on error
+		cmd = append(cmd, bsonx.Elem{"startTransaction", bsonx.Boolean(true)})
+	}
+	return append(cmd, bsonx.Elem{"autocommit", bsonx.Boolean(false)})
+}
+
+// addClusterTime gossips the highest known $clusterTime (the max of the
+// clock's and the session's) into the command, when sessions are supported.
+func addClusterTime(cmd bsonx.Doc, desc description.SelectedServer, sess *session.Client, clock *session.ClusterClock) bsonx.Doc {
+	if (clock == nil && sess == nil) || !description.SessionsSupported(desc.WireVersion) {
+		return cmd
+	}
+
+	var clusterTime bson.Raw
+	if clock != nil {
+		clusterTime = clock.GetClusterTime()
+	}
+
+	if sess != nil {
+		if clusterTime == nil {
+			clusterTime = sess.ClusterTime
+		} else {
+			clusterTime = session.MaxClusterTime(clusterTime, sess.ClusterTime)
+		}
+	}
+
+	if clusterTime == nil {
+		return cmd
+	}
+
+	d, err := bsonx.ReadDoc(clusterTime)
+	if err != nil {
+		return cmd // broken clusterTime
+	}
+
+	// Replace rather than duplicate any existing $clusterTime field.
+	cmd = cmd.Delete("$clusterTime")
+
+	return append(cmd, d...)
+}
+
+// add a read concern to a BSON doc representing a command
+func addReadConcern(cmd bsonx.Doc, desc description.SelectedServer, rc *readconcern.ReadConcern, sess *session.Client) (bsonx.Doc, error) {
+	// Starting transaction's read concern overrides all others
+	if sess != nil && sess.TransactionStarting() && sess.CurrentRc != nil {
+		rc = sess.CurrentRc
+	}
+
+	// start transaction must append afterclustertime IF causally consistent and operation time exists
+	if rc == nil && sess != nil && sess.TransactionStarting() && sess.Consistent && sess.OperationTime != nil {
+		rc = readconcern.New()
+	}
+
+	if rc == nil {
+		return cmd, nil
+	}
+
+	// Round-trip the read concern through its BSON value form to obtain a
+	// bsonx.Doc we can extend with afterClusterTime.
+	t, data, err := rc.MarshalBSONValue()
+	if err != nil {
+		return cmd, err
+	}
+
+	var rcDoc bsonx.Doc
+	err = rcDoc.UnmarshalBSONValue(t, data)
+	if err != nil {
+		return cmd, err
+	}
+	if description.SessionsSupported(desc.WireVersion) && sess != nil && sess.Consistent && sess.OperationTime != nil {
+		rcDoc = append(rcDoc, bsonx.Elem{"afterClusterTime", bsonx.Timestamp(sess.OperationTime.T, sess.OperationTime.I)})
+	}
+
+	// Replace rather than duplicate any existing readConcern field.
+	cmd = cmd.Delete("readConcern")
+
+	if len(rcDoc) != 0 {
+		cmd = append(cmd, bsonx.Elem{"readConcern", bsonx.Document(rcDoc)})
+	}
+	return cmd, nil
+}
+
+// add a write concern to a BSON doc representing a command
+func addWriteConcern(cmd bsonx.Doc, wc *writeconcern.WriteConcern) (bsonx.Doc, error) {
+	if wc == nil {
+		return cmd, nil
+	}
+
+	t, data, err := wc.MarshalBSONValue()
+	if err != nil {
+		// An empty write concern means "use the server default": omit it.
+		if err == writeconcern.ErrEmptyWriteConcern {
+			return cmd, nil
+		}
+		return cmd, err
+	}
+
+	var xval bsonx.Val
+	err = xval.UnmarshalBSONValue(t, data)
+	if err != nil {
+		return cmd, err
+	}
+
+	// delete if doc already has write concern
+	cmd = cmd.Delete("writeConcern")
+
+	return append(cmd, bsonx.Elem{Key: "writeConcern", Value: xval}), nil
+}
+
+// getErrorLabels returns the errorLabels strings from a command response.
+// A response without an errorLabels field yields (nil, nil).
+func getErrorLabels(rdr *bson.Raw) ([]string, error) {
+	var labels []string
+	labelsElem, err := rdr.LookupErr("errorLabels")
+	// A missing field is not an error; any other lookup failure is.
+	// (The previous check — err != bsoncore.ErrElementNotFound — returned
+	// early with a nil error on a SUCCESSFUL lookup, so labels were never
+	// extracted, and fell through on the not-found case instead.)
+	if err == bsoncore.ErrElementNotFound {
+		return nil, nil
+	}
+	if err != nil {
+		return nil, err
+	}
+	if labelsElem.Type == bsontype.Array {
+		labelsIt, err := labelsElem.Array().Elements()
+		if err != nil {
+			return nil, err
+		}
+		for _, elem := range labelsIt {
+			labels = append(labels, elem.Value().StringValue())
+		}
+	}
+	return labels, nil
+}
+
+// Remove command arguments for insert, update, and delete commands from the BSON document so they can be encoded
+// as a Section 1 payload in OP_MSG. Returns the stripped command, the removed
+// array, and the key ("documents"/"updates"/"deletes") it was found under.
+func opmsgRemoveArray(cmd bsonx.Doc) (bsonx.Doc, bsonx.Arr, string) {
+	var array bsonx.Arr
+	var id string
+
+	keys := []string{"documents", "updates", "deletes"}
+
+	for _, key := range keys {
+		val, err := cmd.LookupErr(key)
+		if err != nil {
+			continue
+		}
+
+		array = val.Array()
+		cmd = cmd.Delete(key)
+		id = key
+		break
+	}
+
+	return cmd, array, id
+}
+
+// Add the $db and $readPreference keys to the command
+// If the command has no read preference, pass nil for rpDoc
+func opmsgAddGlobals(cmd bsonx.Doc, dbName string, rpDoc bsonx.Doc) (bson.Raw, error) {
+	cmd = append(cmd, bsonx.Elem{"$db", bsonx.String(dbName)})
+	if rpDoc != nil {
+		cmd = append(cmd, bsonx.Elem{"$readPreference", bsonx.Document(rpDoc)})
+	}
+
+	return cmd.MarshalBSON() // bsonx.Doc.MarshalBSON never returns an error.
+}
+
+// opmsgCreateDocSequence packs the array's documents into an OP_MSG
+// Section 1 (document sequence) payload under the given identifier.
+func opmsgCreateDocSequence(arr bsonx.Arr, identifier string) (wiremessage.SectionDocumentSequence, error) {
+	docSequence := wiremessage.SectionDocumentSequence{
+		PayloadType: wiremessage.DocumentSequence,
+		Identifier:  identifier,
+		Documents:   make([]bson.Raw, 0, len(arr)),
+	}
+
+	for _, val := range arr {
+		d, _ := val.Document().MarshalBSON()
+		docSequence.Documents = append(docSequence.Documents, d)
+	}
+
+	docSequence.Size = int32(docSequence.PayloadLen())
+	return docSequence, nil
+}
+
+// splitBatches partitions docs into batches of at most maxCount documents
+// whose serialized size stays under targetBatchSize (less a reserved margin
+// for command overhead). A single document larger than the target is an error.
+func splitBatches(docs []bsonx.Doc, maxCount, targetBatchSize int) ([][]bsonx.Doc, error) {
+	batches := [][]bsonx.Doc{}
+
+	// Reserve room for the command envelope around the documents.
+	if targetBatchSize > reservedCommandBufferBytes {
+		targetBatchSize -= reservedCommandBufferBytes
+	}
+
+	if maxCount <= 0 {
+		maxCount = 1
+	}
+
+	startAt := 0
+splitInserts:
+	for {
+		size := 0
+		batch := []bsonx.Doc{}
+	assembleBatch:
+		for idx := startAt; idx < len(docs); idx++ {
+			raw, _ := docs[idx].MarshalBSON()
+
+			if len(raw) > targetBatchSize {
+				return nil, ErrDocumentTooLarge
+			}
+			// Close the batch before exceeding the size target.
+			if size+len(raw) > targetBatchSize {
+				break assembleBatch
+			}
+
+			size += len(raw)
+			batch = append(batch, docs[idx])
+			startAt++
+			if len(batch) == maxCount {
+				break assembleBatch
+			}
+		}
+		batches = append(batches, batch)
+		if startAt == len(docs) {
+			break splitInserts
+		}
+	}
+
+	return batches, nil
+}
+
+// encodeBatch builds the command document for one batch: the command name
+// keyed to the collection, the documents array under the kind-specific key,
+// and any extra options.
+func encodeBatch(
+	docs []bsonx.Doc,
+	opts []bsonx.Elem,
+	cmdKind WriteCommandKind,
+	collName string,
+) (bsonx.Doc, error) {
+	var cmdName string
+	var docString string
+
+	switch cmdKind {
+	case InsertCommand:
+		cmdName = "insert"
+		docString = "documents"
+	case UpdateCommand:
+		cmdName = "update"
+		docString = "updates"
+	case DeleteCommand:
+		cmdName = "delete"
+		docString = "deletes"
+	}
+
+	cmd := bsonx.Doc{{cmdName, bsonx.String(collName)}}
+
+	vals := make(bsonx.Arr, 0, len(docs))
+	for _, doc := range docs {
+		vals = append(vals, bsonx.Document(doc))
+	}
+	cmd = append(cmd, bsonx.Elem{docString, bsonx.Array(vals)})
+	cmd = append(cmd, opts...)
+
+	return cmd, nil
+}
+
+// batchesToWireMessage converts batches of write commands to wire messages.
+func batchesToWireMessage(batches []*WriteBatch, desc description.SelectedServer) ([]wiremessage.WireMessage, error) {
+	// Allocate with length 0 and capacity len(batches). The previous
+	// make([]wiremessage.WireMessage, len(batches)) combined with append
+	// produced a slice of twice the intended length whose first half were
+	// nil wire messages.
+	wms := make([]wiremessage.WireMessage, 0, len(batches))
+	for _, cmd := range batches {
+		wm, err := cmd.Encode(desc)
+		if err != nil {
+			return nil, err
+		}
+
+		wms = append(wms, wm)
+	}
+
+	return wms, nil
+}
+
+// Roundtrips the write batches, returning the result structs (as interface),
+// the write batches that weren't round tripped and any errors
+func roundTripBatches(
+	ctx context.Context,
+	desc description.SelectedServer,
+	rw wiremessage.ReadWriter,
+	batches []*WriteBatch,
+	continueOnError bool,
+	sess *session.Client,
+	cmdKind WriteCommandKind,
+) (interface{}, []*WriteBatch, error) {
+	var res interface{}
+	var upsertIndex int64 // the operation index for the upserted IDs map
+
+	// hold onto txnNumber, reset it when loop exits to ensure reuse of same
+	// transaction number if retry is needed
+	var txnNumber int64
+	if sess != nil && sess.RetryWrite {
+		txnNumber = sess.TxnNumber
+	}
+	for j, cmd := range batches {
+		rdr, err := cmd.RoundTrip(ctx, desc, rw)
+		if err != nil {
+			// Rewind the transaction number to the failed batch so a retry
+			// reuses it.
+			if sess != nil && sess.RetryWrite {
+				sess.TxnNumber = txnNumber + int64(j)
+			}
+			return res, batches, err
+		}
+
+		// TODO can probably DRY up this code
+		switch cmdKind {
+		case InsertCommand:
+			if res == nil {
+				res = result.Insert{}
+			}
+
+			conv, _ := res.(result.Insert)
+			insertCmd := &Insert{}
+			r, err := insertCmd.decode(desc, rdr).Result()
+			if err != nil {
+				return res, batches, err
+			}
+
+			conv.WriteErrors = append(conv.WriteErrors, r.WriteErrors...)
+
+			if r.WriteConcernError != nil {
+				conv.WriteConcernError = r.WriteConcernError
+				if sess != nil && sess.RetryWrite {
+					sess.TxnNumber = txnNumber
+					return conv, batches, nil // report writeconcernerror for retry
+				}
+			}
+
+			conv.N += r.N
+
+			if !continueOnError && len(conv.WriteErrors) > 0 {
+				return conv, batches, nil
+			}
+
+			res = conv
+		case UpdateCommand:
+			if res == nil {
+				res = result.Update{}
+			}
+
+			conv, _ := res.(result.Update)
+			updateCmd := &Update{}
+			r, err := updateCmd.decode(desc, rdr).Result()
+			if err != nil {
+				return conv, batches, err
+			}
+
+			conv.WriteErrors = append(conv.WriteErrors, r.WriteErrors...)
+
+			if r.WriteConcernError != nil {
+				conv.WriteConcernError = r.WriteConcernError
+				if sess != nil && sess.RetryWrite {
+					sess.TxnNumber = txnNumber
+					return conv, batches, nil // report writeconcernerror for retry
+				}
+			}
+
+			conv.MatchedCount += r.MatchedCount
+			conv.ModifiedCount += r.ModifiedCount
+			// Shift per-batch upsert indexes to overall operation indexes.
+			for _, upsert := range r.Upserted {
+				conv.Upserted = append(conv.Upserted, result.Upsert{
+					Index: upsert.Index + upsertIndex,
+					ID:    upsert.ID,
+				})
+			}
+
+			if !continueOnError && len(conv.WriteErrors) > 0 {
+				return conv, batches, nil
+			}
+
+			res = conv
+			upsertIndex += int64(cmd.numDocs)
+		case DeleteCommand:
+			if res == nil {
+				res = result.Delete{}
+			}
+
+			conv, _ := res.(result.Delete)
+			deleteCmd := &Delete{}
+			r, err := deleteCmd.decode(desc, rdr).Result()
+			if err != nil {
+				return conv, batches, err
+			}
+
+			conv.WriteErrors = append(conv.WriteErrors, r.WriteErrors...)
+
+			if r.WriteConcernError != nil {
+				conv.WriteConcernError = r.WriteConcernError
+				if sess != nil && sess.RetryWrite {
+					sess.TxnNumber = txnNumber
+					return conv, batches, nil // report writeconcernerror for retry
+				}
+			}
+
+			conv.N += r.N
+
+			if !continueOnError && len(conv.WriteErrors) > 0 {
+				return conv, batches, nil
+			}
+
+			res = conv
+		}
+
+		// Increment txnNumber for each batch
+		if sess != nil && sess.RetryWrite {
+			sess.IncrementTxnNumber()
+			batches = batches[1:] // if batch encoded successfully, remove it from the slice
+		}
+	}
+
+	if sess != nil && sess.RetryWrite {
+		// if retryable write succeeded, transaction number will be incremented one extra time,
+		// so we decrement it here
+		sess.TxnNumber--
+	}
+
+	return res, batches, nil
+}
+
+// get the firstBatch, cursor ID, and namespace from a bson.Raw
+func getCursorValues(result bson.Raw) ([]bson.RawValue, Namespace, int64, error) {
+	cur, err := result.LookupErr("cursor")
+	if err != nil {
+		return nil, Namespace{}, 0, err
+	}
+	if cur.Type != bson.TypeEmbeddedDocument {
+		return nil, Namespace{}, 0, fmt.Errorf("cursor should be an embedded document but it is a BSON %s", cur.Type)
+	}
+
+	elems, err := cur.Document().Elements()
+	if err != nil {
+		return nil, Namespace{}, 0, err
+	}
+
+	var ok bool
+	var arr bson.Raw
+	var namespace Namespace
+	var cursorID int64
+
+	for _, elem := range elems {
+		switch elem.Key() {
+		case "firstBatch":
+			arr, ok = elem.Value().ArrayOK()
+			if !ok {
+				return nil, Namespace{}, 0, fmt.Errorf("firstBatch should be an array but it is a BSON %s", elem.Value().Type)
+			}
+			// NOTE(review): err is unchanged in this branch, so this check
+			// is dead code; kept as-is to match upstream vendor content.
+			if err != nil {
+				return nil, Namespace{}, 0, err
+			}
+		case "ns":
+			if elem.Value().Type != bson.TypeString {
+				return nil, Namespace{}, 0, fmt.Errorf("namespace should be a string but it is a BSON %s", elem.Value().Type)
+			}
+			namespace = ParseNamespace(elem.Value().StringValue())
+			err = namespace.Validate()
+			if err != nil {
+				return nil, Namespace{}, 0, err
+			}
+		case "id":
+			cursorID, ok = elem.Value().Int64OK()
+			if !ok {
+				return nil, Namespace{}, 0, fmt.Errorf("id should be an int64 but it is a BSON %s", elem.Value().Type)
+			}
+		}
+	}
+
+	vals, err := arr.Values()
+	if err != nil {
+		return nil, Namespace{}, 0, err
+	}
+
+	return vals, namespace, cursorID, nil
+}
+
+// getBatchSize returns the batchSize option's value, or 0 when unset.
+func getBatchSize(opts []bsonx.Elem) int32 {
+	for _, opt := range opts {
+		if opt.Key == "batchSize" {
+			return opt.Value.Int32()
+		}
+	}
+
+	return 0
+}
+
+// ErrUnacknowledgedWrite is returned from functions that have an unacknowledged
+// write concern.
+var ErrUnacknowledgedWrite = errors.New("unacknowledged write")
+
+// WriteCommandKind is the type of command represented by a Write.
+type WriteCommandKind int8
+
+// These constants represent the valid types of write commands.
+const (
+	InsertCommand WriteCommandKind = iota
+	UpdateCommand
+	DeleteCommand
+)
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/commit_transaction.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/commit_transaction.go
new file mode 100644
index 0000000..e04bd6f
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/commit_transaction.go
@@ -0,0 +1,89 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// CommitTransaction represents the commitTransaction() command
+type CommitTransaction struct {
+	Session *session.Client
+	err     error
+	result  result.TransactionResult
+}
+
+// Encode will encode this command into a wiremessage for the given server description.
+func (ct *CommitTransaction) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	return ct.encode(desc).Encode(desc)
+}
+
+// encode builds the Write command that carries commitTransaction. The command
+// always runs against the admin database with the session's write concern.
+func (ct *CommitTransaction) encode(desc description.SelectedServer) *Write {
+	return &Write{
+		DB:           "admin",
+		Command:      bsonx.Doc{{"commitTransaction", bsonx.Int32(1)}},
+		Session:      ct.Session,
+		WriteConcern: ct.Session.CurrentWc,
+	}
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding are deferred until
+// either the Result or Err methods are called.
+func (ct *CommitTransaction) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *CommitTransaction {
+	reply, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		ct.err = err
+		return ct
+	}
+	return ct.decode(desc, reply)
+}
+
+// decode unmarshals the server reply and promotes a write concern error, if
+// one was returned, into the command's error.
+func (ct *CommitTransaction) decode(desc description.SelectedServer, rdr bson.Raw) *CommitTransaction {
+	if err := bson.Unmarshal(rdr, &ct.result); err != nil {
+		ct.err = err
+		return ct
+	}
+	if wce := ct.result.WriteConcernError; wce != nil {
+		ct.err = Error{
+			Code:    int32(wce.Code),
+			Message: wce.ErrMsg,
+		}
+	}
+	return ct
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (ct *CommitTransaction) Result() (result.TransactionResult, error) {
+	if ct.err != nil {
+		return result.TransactionResult{}, ct.err
+	}
+	return ct.result, nil
+}
+
+// Err returns the error set on this command
+func (ct *CommitTransaction) Err() error {
+	return ct.err
+}
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter
+func (ct *CommitTransaction) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.TransactionResult, error) {
+	reply, err := ct.encode(desc).RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.TransactionResult{}, err
+	}
+	return ct.decode(desc, reply).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/count.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/count.go
new file mode 100644
index 0000000..419a78b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/count.go
@@ -0,0 +1,128 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Count represents the count command.
+//
+// The count command counts how many documents in a collection match the given query.
+type Count struct {
+	NS          Namespace
+	Query       bsonx.Doc
+	Opts        []bsonx.Elem
+	ReadPref    *readpref.ReadPref
+	ReadConcern *readconcern.ReadConcern
+	Clock       *session.ClusterClock
+	Session     *session.Client
+
+	result int64
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (c *Count) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	readCmd, err := c.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	return readCmd.Encode(desc)
+}
+
+// encode validates the namespace and builds the Read command carrying the
+// count request along with any extra options.
+func (c *Count) encode(desc description.SelectedServer) (*Read, error) {
+	if err := c.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	cmd := append(
+		bsonx.Doc{{"count", bsonx.String(c.NS.Collection)}, {"query", bsonx.Document(c.Query)}},
+		c.Opts...,
+	)
+
+	return &Read{
+		Clock:       c.Clock,
+		DB:          c.NS.DB,
+		ReadPref:    c.ReadPref,
+		Command:     cmd,
+		ReadConcern: c.ReadConcern,
+		Session:     c.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (c *Count) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Count {
+	reply, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		c.err = err
+		return c
+	}
+	return c.decode(desc, reply)
+}
+
+// decode extracts the numeric "n" field from the server reply, accepting
+// int32, int64, or double representations.
+func (c *Count) decode(desc description.SelectedServer, rdr bson.Raw) *Count {
+	val, err := rdr.LookupErr("n")
+	if err != nil {
+		if err == bsoncore.ErrElementNotFound {
+			c.err = errors.New("invalid response from server, no 'n' field")
+		} else {
+			c.err = err
+		}
+		return c
+	}
+
+	switch val.Type {
+	case bson.TypeInt32:
+		c.result = int64(val.Int32())
+	case bson.TypeInt64:
+		c.result = val.Int64()
+	case bson.TypeDouble:
+		c.result = int64(val.Double())
+	default:
+		c.err = errors.New("invalid response from server, value field is not a number")
+	}
+
+	return c
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (c *Count) Result() (int64, error) {
+	if c.err != nil {
+		return 0, c.err
+	}
+	return c.result, nil
+}
+
+// Err returns the error set on this command.
+func (c *Count) Err() error { return c.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (c *Count) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (int64, error) {
+	readCmd, err := c.encode(desc)
+	if err != nil {
+		return 0, err
+	}
+	reply, err := readCmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return 0, err
+	}
+	return c.decode(desc, reply).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/count_documents.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/count_documents.go
new file mode 100644
index 0000000..a9a27f1
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/count_documents.go
@@ -0,0 +1,122 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// CountDocuments represents the CountDocuments command.
+//
+// The countDocuments command counts how many documents in a collection match the given query.
+type CountDocuments struct {
+	NS          Namespace
+	Pipeline    bsonx.Arr
+	Opts        []bsonx.Elem
+	ReadPref    *readpref.ReadPref
+	ReadConcern *readconcern.ReadConcern
+	Clock       *session.ClusterClock
+	Session     *session.Client
+
+	result int64
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (c *CountDocuments) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	if err := c.NS.Validate(); err != nil {
+		return nil, err
+	}
+	command := bsonx.Doc{{"aggregate", bsonx.String(c.NS.Collection)}, {"pipeline", bsonx.Array(c.Pipeline)}}
+
+	command = append(command, bsonx.Elem{"cursor", bsonx.Document(bsonx.Doc{})})
+	command = append(command, c.Opts...)
+
+	// Forward Clock, ReadConcern, and Session so the aggregate runs with the
+	// same guarantees as the other read commands in this package (Count,
+	// Distinct); previously these configured fields were silently dropped.
+	return (&Read{
+		Clock:       c.Clock,
+		DB:          c.NS.DB,
+		ReadPref:    c.ReadPref,
+		Command:     command,
+		ReadConcern: c.ReadConcern,
+		Session:     c.Session,
+	}).Encode(desc)
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+//
+// The ctx parameter is unused here; it is kept to preserve the method signature.
+func (c *CountDocuments) Decode(ctx context.Context, desc description.SelectedServer, wm wiremessage.WireMessage) *CountDocuments {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		c.err = err
+		return c
+	}
+
+	// The count is carried as the "n" field of the first document in the
+	// cursor's firstBatch. An empty batch means no documents matched.
+	cursor, err := rdr.LookupErr("cursor")
+	if err != nil || cursor.Type != bsontype.EmbeddedDocument {
+		c.err = errors.New("Invalid response from server, no 'cursor' field")
+		return c
+	}
+	batch, err := cursor.Document().LookupErr("firstBatch")
+	if err != nil || batch.Type != bsontype.Array {
+		c.err = errors.New("Invalid response from server, no 'firstBatch' field")
+		return c
+	}
+
+	elem, err := batch.Array().IndexErr(0)
+	if err != nil || elem.Value().Type != bsontype.EmbeddedDocument {
+		c.result = 0
+		return c
+	}
+
+	val, err := elem.Value().Document().LookupErr("n")
+	if err != nil {
+		c.err = errors.New("Invalid response from server, no 'n' field")
+		return c
+	}
+
+	switch val.Type {
+	case bsontype.Int32:
+		c.result = int64(val.Int32())
+	case bsontype.Int64:
+		c.result = val.Int64()
+	default:
+		c.err = errors.New("Invalid response from server, value field is not a number")
+	}
+
+	return c
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (c *CountDocuments) Result() (int64, error) {
+	if c.err != nil {
+		return 0, c.err
+	}
+	return c.result, nil
+}
+
+// Err returns the error set on this command.
+func (c *CountDocuments) Err() error { return c.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (c *CountDocuments) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (int64, error) {
+	wm, err := c.Encode(desc)
+	if err != nil {
+		return 0, err
+	}
+
+	err = rw.WriteWireMessage(ctx, wm)
+	if err != nil {
+		return 0, err
+	}
+	wm, err = rw.ReadWireMessage(ctx)
+	if err != nil {
+		return 0, err
+	}
+	return c.Decode(ctx, desc, wm).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/create_indexes.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/create_indexes.go
new file mode 100644
index 0000000..e2b33c3
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/create_indexes.go
@@ -0,0 +1,106 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// CreateIndexes represents the createIndexes command.
+//
+// The createIndexes command creates indexes for a namespace.
+type CreateIndexes struct {
+	NS           Namespace
+	Indexes      bsonx.Arr
+	Opts         []bsonx.Elem
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result result.CreateIndexes
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (ci *CreateIndexes) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	writeCmd, err := ci.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	return writeCmd.Encode(desc)
+}
+
+// encode builds the Write command carrying createIndexes plus any options.
+func (ci *CreateIndexes) encode(desc description.SelectedServer) (*Write, error) {
+	cmd := append(bsonx.Doc{
+		{"createIndexes", bsonx.String(ci.NS.Collection)},
+		{"indexes", bsonx.Array(ci.Indexes)},
+	}, ci.Opts...)
+
+	write := &Write{
+		Clock:   ci.Clock,
+		DB:      ci.NS.DB,
+		Command: cmd,
+		Session: ci.Session,
+	}
+	// The write concern is only attached for servers that support it
+	// (wire version 5 and newer).
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 5 {
+		write.WriteConcern = ci.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (ci *CreateIndexes) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *CreateIndexes {
+	reply, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		ci.err = err
+		return ci
+	}
+	return ci.decode(desc, reply)
+}
+
+// decode unmarshals the raw server reply into the command's result.
+func (ci *CreateIndexes) decode(desc description.SelectedServer, rdr bson.Raw) *CreateIndexes {
+	ci.err = bson.Unmarshal(rdr, &ci.result)
+	return ci
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (ci *CreateIndexes) Result() (result.CreateIndexes, error) {
+	if ci.err != nil {
+		return result.CreateIndexes{}, ci.err
+	}
+	return ci.result, nil
+}
+
+// Err returns the error set on this command.
+func (ci *CreateIndexes) Err() error { return ci.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (ci *CreateIndexes) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.CreateIndexes, error) {
+	writeCmd, err := ci.encode(desc)
+	if err != nil {
+		return result.CreateIndexes{}, err
+	}
+	reply, err := writeCmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.CreateIndexes{}, err
+	}
+	return ci.decode(desc, reply).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/delete.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/delete.go
new file mode 100644
index 0000000..de20dd7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/delete.go
@@ -0,0 +1,154 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Delete represents the delete command.
+//
+// The delete command executes a delete with a given set of delete documents
+// and options.
+type Delete struct {
+	ContinueOnError bool
+	NS              Namespace
+	Deletes         []bsonx.Doc
+	Opts            []bsonx.Elem
+	WriteConcern    *writeconcern.WriteConcern
+	Clock           *session.ClusterClock
+	Session         *session.Client
+
+	// batches caches the encoded write commands; result and err hold the
+	// outcome of the most recent decode.
+	batches []*WriteBatch
+	result  result.Delete
+	err     error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (d *Delete) Encode(desc description.SelectedServer) ([]wiremessage.WireMessage, error) {
+	err := d.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return batchesToWireMessage(d.batches, desc)
+}
+
+// encode splits d.Deletes into batches that respect the server's maximum
+// batch count and document size, and appends the resulting write commands
+// to d.batches.
+func (d *Delete) encode(desc description.SelectedServer) error {
+	batches, err := splitBatches(d.Deletes, int(desc.MaxBatchCount), int(desc.MaxDocumentSize))
+	if err != nil {
+		return err
+	}
+
+	for _, docs := range batches {
+		cmd, err := d.encodeBatch(docs, desc)
+		if err != nil {
+			return err
+		}
+
+		d.batches = append(d.batches, cmd)
+	}
+
+	return nil
+}
+
+// encodeBatch builds one delete write command from a batch of delete
+// documents. The documents are copied first so the caller's originals are
+// never mutated.
+func (d *Delete) encodeBatch(docs []bsonx.Doc, desc description.SelectedServer) (*WriteBatch, error) {
+	copyDocs := make([]bsonx.Doc, 0, len(docs))
+	for _, doc := range docs {
+		copyDocs = append(copyDocs, doc.Copy())
+	}
+
+	var options []bsonx.Elem
+	for _, opt := range d.Opts {
+		if opt.Key == "collation" {
+			// The collation option is appended to every delete document
+			// rather than the top-level command.
+			for idx := range copyDocs {
+				copyDocs[idx] = append(copyDocs[idx], opt)
+			}
+		} else {
+			options = append(options, opt)
+		}
+	}
+
+	command, err := encodeBatch(copyDocs, options, DeleteCommand, d.NS.Collection)
+	if err != nil {
+		return nil, err
+	}
+
+	return &WriteBatch{
+		&Write{
+			Clock:        d.Clock,
+			DB:           d.NS.DB,
+			Command:      command,
+			WriteConcern: d.WriteConcern,
+			Session:      d.Session,
+		},
+		len(docs),
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (d *Delete) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Delete {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		d.err = err
+		return d
+	}
+
+	return d.decode(desc, rdr)
+}
+
+// decode unmarshals the raw server reply into the command's result.
+func (d *Delete) decode(desc description.SelectedServer, rdr bson.Raw) *Delete {
+	d.err = bson.Unmarshal(rdr, &d.result)
+	return d
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (d *Delete) Result() (result.Delete, error) {
+	if d.err != nil {
+		return result.Delete{}, d.err
+	}
+	return d.result, nil
+}
+
+// Err returns the error set on this command.
+func (d *Delete) Err() error { return d.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (d *Delete) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.Delete, error) {
+	if d.batches == nil {
+		if err := d.encode(desc); err != nil {
+			return result.Delete{}, err
+		}
+	}
+
+	r, batches, err := roundTripBatches(
+		ctx, desc, rw,
+		d.batches,
+		d.ContinueOnError,
+		d.Session,
+		DeleteCommand,
+	)
+
+	// NOTE(review): any batches returned replace the cached ones — this
+	// looks intended to let a retry resume with the remaining batches;
+	// confirm against roundTripBatches' contract.
+	if batches != nil {
+		d.batches = batches
+	}
+
+	if err != nil {
+		return result.Delete{}, err
+	}
+
+	return r.(result.Delete), nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/distinct.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/distinct.go
new file mode 100644
index 0000000..ba793e1
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/distinct.go
@@ -0,0 +1,115 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Distinct represents the distinct command.
+//
+// The distinct command returns the distinct values for a specified field
+// across a single collection.
+type Distinct struct {
+	NS          Namespace
+	Field       string
+	Query       bsonx.Doc
+	Opts        []bsonx.Elem
+	ReadPref    *readpref.ReadPref
+	ReadConcern *readconcern.ReadConcern
+	Clock       *session.ClusterClock
+	Session     *session.Client
+
+	result result.Distinct
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (d *Distinct) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	readCmd, err := d.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	return readCmd.Encode(desc)
+}
+
+// encode validates the namespace and builds the Read command carrying the
+// distinct request, including the optional query and extra options.
+func (d *Distinct) encode(desc description.SelectedServer) (*Read, error) {
+	if err := d.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	cmd := bsonx.Doc{{"distinct", bsonx.String(d.NS.Collection)}, {"key", bsonx.String(d.Field)}}
+	if d.Query != nil {
+		cmd = append(cmd, bsonx.Elem{"query", bsonx.Document(d.Query)})
+	}
+	cmd = append(cmd, d.Opts...)
+
+	return &Read{
+		Clock:       d.Clock,
+		DB:          d.NS.DB,
+		ReadPref:    d.ReadPref,
+		Command:     cmd,
+		ReadConcern: d.ReadConcern,
+		Session:     d.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (d *Distinct) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Distinct {
+	reply, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		d.err = err
+		return d
+	}
+	return d.decode(desc, reply)
+}
+
+// decode unmarshals the raw server reply into the command's result.
+func (d *Distinct) decode(desc description.SelectedServer, rdr bson.Raw) *Distinct {
+	d.err = bson.Unmarshal(rdr, &d.result)
+	return d
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (d *Distinct) Result() (result.Distinct, error) {
+	if d.err != nil {
+		return result.Distinct{}, d.err
+	}
+	return d.result, nil
+}
+
+// Err returns the error set on this command.
+func (d *Distinct) Err() error { return d.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (d *Distinct) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.Distinct, error) {
+	readCmd, err := d.encode(desc)
+	if err != nil {
+		return result.Distinct{}, err
+	}
+	reply, err := readCmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.Distinct{}, err
+	}
+	return d.decode(desc, reply).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/doc.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/doc.go
new file mode 100644
index 0000000..ea7a308
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/doc.go
@@ -0,0 +1,16 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package command contains abstractions for operations that can be performed against a MongoDB
+// deployment. The types in this package are meant to provide a general set of commands that a
+// user can run against a MongoDB database without knowing the version of the database.
+//
+// Each type consists of two levels of interaction. The lowest level are the Encode and Decode
+// methods. These are meant to be symmetric eventually, but currently only support the driver
+// side of commands. The higher level is the RoundTrip method. This only makes sense from the
+// driver side of commands and this method handles the encoding of the request and decoding of
+// the response using the given wiremessage.ReadWriter.
+package command
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_collection.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_collection.go
new file mode 100644
index 0000000..c067ccc
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_collection.go
@@ -0,0 +1,101 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// DropCollection represents the drop command.
+//
+// The dropCollections command drops collection for a database.
+type DropCollection struct {
+	DB           string
+	Collection   string
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (dc *DropCollection) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	writeCmd, err := dc.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	return writeCmd.Encode(desc)
+}
+
+// encode builds the Write command carrying the drop request.
+func (dc *DropCollection) encode(desc description.SelectedServer) (*Write, error) {
+	write := &Write{
+		Clock:   dc.Clock,
+		DB:      dc.DB,
+		Command: bsonx.Doc{{"drop", bsonx.String(dc.Collection)}},
+		Session: dc.Session,
+	}
+	// The write concern is only attached for servers that support it
+	// (wire version 5 and newer).
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 5 {
+		write.WriteConcern = dc.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (dc *DropCollection) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *DropCollection {
+	reply, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		dc.err = err
+		return dc
+	}
+	return dc.decode(desc, reply)
+}
+
+// decode stores the raw server reply as the command's result.
+func (dc *DropCollection) decode(desc description.SelectedServer, rdr bson.Raw) *DropCollection {
+	dc.result = rdr
+	return dc
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (dc *DropCollection) Result() (bson.Raw, error) {
+	if dc.err != nil {
+		return nil, dc.err
+	}
+	return dc.result, nil
+}
+
+// Err returns the error set on this command.
+func (dc *DropCollection) Err() error { return dc.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (dc *DropCollection) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	writeCmd, err := dc.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	reply, err := writeCmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+	return dc.decode(desc, reply).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_database.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_database.go
new file mode 100644
index 0000000..e3b3f7c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_database.go
@@ -0,0 +1,100 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// DropDatabase represents the DropDatabase command.
+//
+// The DropDatabases command drops database.
+type DropDatabase struct {
+	DB           string
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (dd *DropDatabase) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	writeCmd, err := dd.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	return writeCmd.Encode(desc)
+}
+
+// encode builds the Write command carrying dropDatabase.
+func (dd *DropDatabase) encode(desc description.SelectedServer) (*Write, error) {
+	write := &Write{
+		Clock:   dd.Clock,
+		DB:      dd.DB,
+		Command: bsonx.Doc{{"dropDatabase", bsonx.Int32(1)}},
+		Session: dd.Session,
+	}
+	// The write concern is only attached for servers that support it
+	// (wire version 5 and newer).
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 5 {
+		write.WriteConcern = dd.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (dd *DropDatabase) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *DropDatabase {
+	reply, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		dd.err = err
+		return dd
+	}
+	return dd.decode(desc, reply)
+}
+
+// decode stores the raw server reply as the command's result.
+func (dd *DropDatabase) decode(desc description.SelectedServer, rdr bson.Raw) *DropDatabase {
+	dd.result = rdr
+	return dd
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (dd *DropDatabase) Result() (bson.Raw, error) {
+	if dd.err != nil {
+		return nil, dd.err
+	}
+	return dd.result, nil
+}
+
+// Err returns the error set on this command.
+func (dd *DropDatabase) Err() error { return dd.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (dd *DropDatabase) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	writeCmd, err := dd.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	reply, err := writeCmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+	return dd.decode(desc, reply).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_indexes.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_indexes.go
new file mode 100644
index 0000000..bf6a7d6
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/drop_indexes.go
@@ -0,0 +1,106 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// DropIndexes represents the dropIndexes command.
+//
+// The dropIndexes command drops indexes for a namespace.
+type DropIndexes struct {
+	NS           Namespace
+	Index        string
+	Opts         []bsonx.Elem
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result bson.Raw // raw server response, set by decode
+	err    error    // deferred error, surfaced via Result/Err
+}
+
+// Encode will encode this command into a wire message for the given server description.
+// It builds the command document first, then serializes it for the wire.
+func (di *DropIndexes) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := di.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+// encode builds the Write command for dropIndexes. Caller-supplied options
+// are appended after the fixed fields. The write concern is attached only
+// for wire version >= 5 (NOTE(review): threshold assumed from this gate —
+// confirm against server compatibility docs).
+func (di *DropIndexes) encode(desc description.SelectedServer) (*Write, error) {
+	cmd := bsonx.Doc{
+		{"dropIndexes", bsonx.String(di.NS.Collection)},
+		{"index", bsonx.String(di.Index)},
+	}
+	cmd = append(cmd, di.Opts...)
+
+	write := &Write{
+		Clock:   di.Clock,
+		DB:      di.NS.DB,
+		Command: cmd,
+		Session: di.Session,
+	}
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 5 {
+		write.WriteConcern = di.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (di *DropIndexes) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *DropIndexes {
+	// Wire-level decoding is delegated to a throwaway Write command.
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		di.err = err
+		return di
+	}
+
+	return di.decode(desc, rdr)
+}
+
+// decode stores the raw server response; the server description is unused
+// but kept for signature symmetry with the other commands in this package.
+func (di *DropIndexes) decode(desc description.SelectedServer, rdr bson.Raw) *DropIndexes {
+	di.result = rdr
+	return di
+}
+
+// Result returns the result of a decoded wire message and server description.
+// Any error deferred during Decode is returned here instead of a result.
+func (di *DropIndexes) Result() (bson.Raw, error) {
+	if di.err != nil {
+		return nil, di.err
+	}
+
+	return di.result, nil
+}
+
+// Err returns the error set on this command, if any (populated by Decode).
+func (di *DropIndexes) Err() error { return di.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+// Unlike the sibling commands, it assigns di.result directly rather than
+// going through decode; the effect is identical since decode only stores rdr.
+func (di *DropIndexes) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := di.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	di.result, err = cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+
+	return di.Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/end_sessions.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/end_sessions.go
new file mode 100644
index 0000000..11ac140
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/end_sessions.go
@@ -0,0 +1,138 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// must be sent to admin db
+// { endSessions: [ {id: uuid}, ... ], $clusterTime: ... }
+// only send $clusterTime when gossiping the cluster time
+// send 10k sessions at a time
+
+// EndSessions represents an endSessions command.
+type EndSessions struct {
+	Clock      *session.ClusterClock
+	SessionIDs []bsonx.Doc // session ID documents to end; split into batches of BatchSize
+
+	results []result.EndSessions // one entry per decoded batch response
+	errors  []error              // one entry per failed batch decode
+}
+
+// BatchSize is the max number of sessions to be included in 1 endSessions
+// command document (per the 10k-at-a-time note at the top of this file).
+const BatchSize = 10000
+
+// split partitions es.SessionIDs into batches of at most BatchSize IDs each.
+// An empty SessionIDs slice yields no batches.
+//
+// The previous implementation broke out of its batching loop (labeled
+// break) before appending the final partial batch, so any session count
+// that was not an exact multiple of BatchSize silently dropped the
+// trailing session IDs — for fewer than BatchSize sessions, no batch was
+// produced at all. This version slices the input directly, which keeps
+// every ID; the sub-slices alias es.SessionIDs, which is safe because
+// encodeBatch only reads them.
+func (es *EndSessions) split() [][]bsonx.Doc {
+	batches := [][]bsonx.Doc{}
+	total := len(es.SessionIDs)
+
+	for start := 0; start < total; start += BatchSize {
+		end := start + BatchSize
+		if end > total {
+			end = total
+		}
+		batches = append(batches, es.SessionIDs[start:end])
+	}
+
+	return batches
+}
+
+// encodeBatch wraps one batch of session ID documents into an endSessions
+// Write command targeted at the admin database (endSessions must be sent
+// to admin; see the note at the top of this file).
+// NOTE(review): the Write carries no Session field — presumably the
+// endSessions command itself is sessionless; confirm.
+func (es *EndSessions) encodeBatch(batch []bsonx.Doc, desc description.SelectedServer) *Write {
+	vals := make(bsonx.Arr, 0, len(batch))
+	for _, doc := range batch {
+		vals = append(vals, bsonx.Document(doc))
+	}
+
+	cmd := bsonx.Doc{{"endSessions", bsonx.Array(vals)}}
+
+	return &Write{
+		Clock:   es.Clock,
+		DB:      "admin",
+		Command: cmd,
+	}
+}
+
+// Encode will encode this command into a series of wire messages for the
+// given server description, one per batch of session IDs. Encoding stops
+// at the first batch that fails to encode.
+func (es *EndSessions) Encode(desc description.SelectedServer) ([]wiremessage.WireMessage, error) {
+	cmds := es.encode(desc)
+	// Allocate capacity only. The previous make([]wiremessage.WireMessage,
+	// len(cmds)) combined with append produced a slice of length
+	// 2*len(cmds) whose first half was nil wire messages.
+	wms := make([]wiremessage.WireMessage, 0, len(cmds))
+
+	for _, cmd := range cmds {
+		wm, err := cmd.Encode(desc)
+		if err != nil {
+			return nil, err
+		}
+
+		wms = append(wms, wm)
+	}
+
+	return wms, nil
+}
+
+// encode splits the session IDs into batches and builds one Write command
+// per batch.
+func (es *EndSessions) encode(desc description.SelectedServer) []*Write {
+	out := []*Write{}
+	batches := es.split()
+
+	for _, batch := range batches {
+		out = append(out, es.encodeBatch(batch, desc))
+	}
+
+	return out
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (es *EndSessions) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *EndSessions {
+	// Each call decodes one batch response; errors accumulate in es.errors
+	// rather than replacing each other.
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		es.errors = append(es.errors, err)
+		return es
+	}
+
+	return es.decode(desc, rdr)
+}
+
+// decode unmarshals one batch response, appending both the (possibly nil)
+// unmarshal error and the result so the two slices stay index-aligned.
+func (es *EndSessions) decode(desc description.SelectedServer, rdr bson.Raw) *EndSessions {
+	var res result.EndSessions
+	es.errors = append(es.errors, bson.Unmarshal(rdr, &res))
+	es.results = append(es.results, res)
+	return es
+}
+
+// Result returns the results of the decoded wire messages, one result and
+// one (possibly nil) error per batch processed.
+func (es *EndSessions) Result() ([]result.EndSessions, []error) {
+	return es.results, es.errors
+}
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter
+func (es *EndSessions) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) ([]result.EndSessions, []error) {
+	cmds := es.encode(desc)
+
+	for _, cmd := range cmds {
+		rdr, _ := cmd.RoundTrip(ctx, desc, rw) // ignore any errors returned by the command
+		// On a failed round trip rdr is nil; decode then records the
+		// resulting bson.Unmarshal error instead of aborting the loop.
+		es.decode(desc, rdr)
+	}
+
+	return es.Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/errors.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/errors.go
new file mode 100644
index 0000000..5ecb48c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/errors.go
@@ -0,0 +1,141 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"errors"
+	"fmt"
+
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+var (
+	// ErrUnknownCommandFailure occurs when a command fails for an unknown reason.
+	ErrUnknownCommandFailure = errors.New("unknown command failure")
+	// ErrNoCommandResponse occurs when the server sent no response document to a command.
+	ErrNoCommandResponse = errors.New("no command response document")
+	// ErrMultiDocCommandResponse occurs when the server sent multiple documents in response to a command.
+	ErrMultiDocCommandResponse = errors.New("command returned multiple documents")
+	// ErrNoDocCommandResponse occurs when the server indicated a response existed, but none was found.
+	ErrNoDocCommandResponse = errors.New("command returned no documents")
+	// ErrDocumentTooLarge occurs when a document that is larger than the maximum size accepted by a
+	// server is passed to an insert command.
+	ErrDocumentTooLarge = errors.New("an inserted document is too large")
+	// ErrNonPrimaryRP occurs when a nonprimary read preference is used with a transaction.
+	ErrNonPrimaryRP = errors.New("read preference in a transaction must be primary")
+	// UnknownTransactionCommitResult is an error label for unknown transaction commit results.
+	UnknownTransactionCommitResult = "UnknownTransactionCommitResult"
+	// TransientTransactionError is an error label for transient errors with transactions.
+	TransientTransactionError = "TransientTransactionError"
+	// NetworkError is an error label for network errors.
+	NetworkError = "NetworkError"
+	// ReplyDocumentMismatch is an error label for OP_QUERY field mismatch errors.
+	// NOTE(review): unlike the other labels above, this value is a full error
+	// message rather than a short label name — confirm the intended usage.
+	ReplyDocumentMismatch = "malformed OP_REPLY: NumberReturned does not match number of documents returned"
+)
+
+// retryableCodes lists server error codes treated as retryable by
+// Error.Retryable and IsWriteConcernErrorRetryable (presumably the
+// shutdown / not-master / network error classes — confirm against the
+// MongoDB retryable-writes specification).
+var retryableCodes = []int32{11600, 11602, 10107, 13435, 13436, 189, 91, 7, 6, 89, 9001}
+
+// QueryFailureError is an error representing a command failure as a document.
+type QueryFailureError struct {
+	Message  string   // human-readable failure description
+	Response bson.Raw // raw server response document accompanying the failure
+}
+
+// Error implements the error interface, combining the message with the
+// raw response document.
+func (e QueryFailureError) Error() string {
+	return fmt.Sprintf("%s: %v", e.Message, e.Response)
+}
+
+// ResponseError is an error parsing the response to a command.
+type ResponseError struct {
+	Message string // context for where parsing failed
+	Wrapped error  // underlying error, if any
+}
+
+// NewCommandResponseError creates a CommandResponseError.
+// NOTE(review): despite the name, the returned type is ResponseError;
+// the constructor name is kept for backward compatibility.
+func NewCommandResponseError(msg string, err error) ResponseError {
+	return ResponseError{Message: msg, Wrapped: err}
+}
+
+// Error implements the error interface. When a wrapped error is present it
+// is appended to the message; otherwise the message is returned as-is.
+func (e ResponseError) Error() string {
+	if e.Wrapped != nil {
+		return fmt.Sprintf("%s: %s", e.Message, e.Wrapped)
+	}
+	// Return the string directly; fmt.Sprintf("%s", s) on a plain string
+	// is redundant (staticcheck S1025).
+	return e.Message
+}
+
+// Error is a command execution error from the database.
+type Error struct {
+	Code    int32    // server error code
+	Message string   // server error message
+	Labels  []string // error labels attached by the server
+	Name    string   // code name, when provided by the server
+}
+
+// Error implements the error interface, prefixing the message with the
+// code name when one is set.
+func (e Error) Error() string {
+	if e.Name != "" {
+		return fmt.Sprintf("(%v) %v", e.Name, e.Message)
+	}
+	return e.Message
+}
+
+// HasErrorLabel returns true if the error contains the specified label.
+func (e Error) HasErrorLabel(label string) bool {
+	// Ranging over a nil slice is a no-op, so the previous explicit nil
+	// check was redundant and has been removed.
+	for _, l := range e.Labels {
+		if l == label {
+			return true
+		}
+	}
+	return false
+}
+
+// Retryable returns true if the error is retryable: either it carries the
+// NetworkError label, has a code in retryableCodes, or its message contains
+// a legacy "not master" / "node is recovering" phrase (used by servers that
+// predate error codes in these situations).
+func (e Error) Retryable() bool {
+	for _, label := range e.Labels {
+		if label == NetworkError {
+			return true
+		}
+	}
+	for _, code := range retryableCodes {
+		if e.Code == code {
+			return true
+		}
+	}
+	if strings.Contains(e.Message, "not master") || strings.Contains(e.Message, "node is recovering") {
+		return true
+	}
+
+	return false
+}
+
+// IsWriteConcernErrorRetryable returns true if the write concern error is
+// retryable, using the same code list and legacy message checks as
+// Error.Retryable.
+func IsWriteConcernErrorRetryable(wce *result.WriteConcernError) bool {
+	for _, code := range retryableCodes {
+		if int32(wce.Code) == code {
+			return true
+		}
+	}
+	if strings.Contains(wce.ErrMsg, "not master") || strings.Contains(wce.ErrMsg, "node is recovering") {
+		return true
+	}
+
+	return false
+}
+
+// IsNotFound indicates if the error is from a namespace not being found.
+// A non-command.Error always yields false.
+func IsNotFound(err error) bool {
+	e, ok := err.(Error)
+	// need message check because legacy servers don't include the error code
+	return ok && (e.Code == 26 || e.Message == "ns not found")
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find.go
new file mode 100644
index 0000000..e9d135e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find.go
@@ -0,0 +1,113 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Find represents the find command.
+//
+// The find command finds documents within a collection that match a filter.
+type Find struct {
+	NS          Namespace
+	Filter      bsonx.Doc
+	CursorOpts  []bsonx.Elem
+	Opts        []bsonx.Elem
+	ReadPref    *readpref.ReadPref
+	ReadConcern *readconcern.ReadConcern
+	Clock       *session.ClusterClock
+	Session     *session.Client
+
+	result bson.Raw // raw server response, set by decode
+	err    error    // deferred error, surfaced via Result/Err
+}
+
+// Encode will encode this command into a wire message for the given server description.
+// It builds the command document first, then serializes it for the wire.
+func (f *Find) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+// encode validates the namespace and builds the Read command for find.
+// The filter is included only when set; caller-supplied options are
+// appended after it.
+func (f *Find) encode(desc description.SelectedServer) (*Read, error) {
+	if err := f.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	command := bsonx.Doc{{"find", bsonx.String(f.NS.Collection)}}
+
+	if f.Filter != nil {
+		command = append(command, bsonx.Elem{"filter", bsonx.Document(f.Filter)})
+	}
+
+	command = append(command, f.Opts...)
+
+	return &Read{
+		Clock:       f.Clock,
+		DB:          f.NS.DB,
+		ReadPref:    f.ReadPref,
+		Command:     command,
+		ReadConcern: f.ReadConcern,
+		Session:     f.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (f *Find) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Find {
+	// Wire-level decoding is delegated to a throwaway Read command.
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		f.err = err
+		return f
+	}
+
+	return f.decode(desc, rdr)
+}
+
+// decode stores the raw server response; the server description is unused
+// but kept for signature symmetry with the other commands in this package.
+func (f *Find) decode(desc description.SelectedServer, rdr bson.Raw) *Find {
+	f.result = rdr
+	return f
+}
+
+// Result returns the result of a decoded wire message and server description.
+// Any error deferred during Decode is returned here instead of a result.
+func (f *Find) Result() (bson.Raw, error) {
+	if f.err != nil {
+		return nil, f.err
+	}
+
+	return f.result, nil
+}
+
+// Err returns the error set on this command, if any (populated by Decode).
+func (f *Find) Err() error { return f.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+// It encodes, sends, and decodes the command in one call, returning the raw response.
+func (f *Find) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+
+	return f.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_and_modify.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_and_modify.go
new file mode 100644
index 0000000..260ee37
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_and_modify.go
@@ -0,0 +1,51 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
+// unmarshalFindAndModifyResult turns the provided bson.Raw into a
+// findAndModify result. The "value" field is required and must be either
+// null (no document matched) or an embedded document; anything else is an
+// invalid server response. The lastErrorObject sub-fields are extracted
+// best-effort: missing or mistyped entries are silently skipped.
+func unmarshalFindAndModifyResult(rdr bson.Raw) (result.FindAndModify, error) {
+	var res result.FindAndModify
+
+	val, err := rdr.LookupErr("value")
+	switch {
+	case err == bsoncore.ErrElementNotFound:
+		return result.FindAndModify{}, errors.New("invalid response from server, no value field")
+	case err != nil:
+		return result.FindAndModify{}, err
+	}
+
+	switch val.Type {
+	case bson.TypeNull:
+	case bson.TypeEmbeddedDocument:
+		res.Value = val.Document()
+	default:
+		return result.FindAndModify{}, errors.New("invalid response from server, 'value' field is not a document")
+	}
+
+	if val, err := rdr.LookupErr("lastErrorObject", "updatedExisting"); err == nil {
+		b, ok := val.BooleanOK()
+		if ok {
+			res.LastErrorObject.UpdatedExisting = b
+		}
+	}
+
+	if val, err := rdr.LookupErr("lastErrorObject", "upserted"); err == nil {
+		oid, ok := val.ObjectIDOK()
+		if ok {
+			res.LastErrorObject.Upserted = oid
+		}
+	}
+	return res, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_delete.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_delete.go
new file mode 100644
index 0000000..f5c36d2
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_delete.go
@@ -0,0 +1,111 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// FindOneAndDelete represents the findOneAndDelete operation.
+//
+// The findOneAndDelete command deletes a single document that matches a query and returns it.
+type FindOneAndDelete struct {
+	NS           Namespace
+	Query        bsonx.Doc
+	Opts         []bsonx.Elem
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result result.FindAndModify // decoded server response, set by decode
+	err    error                // deferred error, surfaced via Result/Err
+}
+
+// Encode will encode this command into a wire message for the given server description.
+// It builds the command document first, then serializes it for the wire.
+func (f *FindOneAndDelete) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+// encode validates the namespace and builds the findAndModify command with
+// remove:true. The write concern is attached only for wire version >= 4
+// (NOTE(review): threshold assumed from this gate — confirm against the
+// server compatibility matrix; note drops use >= 5).
+func (f *FindOneAndDelete) encode(desc description.SelectedServer) (*Write, error) {
+	if err := f.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	command := bsonx.Doc{
+		{"findAndModify", bsonx.String(f.NS.Collection)},
+		{"query", bsonx.Document(f.Query)},
+		{"remove", bsonx.Boolean(true)},
+	}
+	command = append(command, f.Opts...)
+
+	write := &Write{
+		Clock:   f.Clock,
+		DB:      f.NS.DB,
+		Command: command,
+		Session: f.Session,
+	}
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 4 {
+		write.WriteConcern = f.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (f *FindOneAndDelete) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *FindOneAndDelete {
+	// Wire-level decoding is delegated to a throwaway Write command.
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		f.err = err
+		return f
+	}
+
+	return f.decode(desc, rdr)
+}
+
+// decode parses the raw response into a FindAndModify result; any parse
+// error is deferred to Result/Err.
+func (f *FindOneAndDelete) decode(desc description.SelectedServer, rdr bson.Raw) *FindOneAndDelete {
+	f.result, f.err = unmarshalFindAndModifyResult(rdr)
+	return f
+}
+
+// Result returns the result of a decoded wire message and server description.
+// Any error deferred during Decode is returned here instead of a result.
+func (f *FindOneAndDelete) Result() (result.FindAndModify, error) {
+	if f.err != nil {
+		return result.FindAndModify{}, f.err
+	}
+	return f.result, nil
+}
+
+// Err returns the error set on this command, if any (populated by Decode).
+func (f *FindOneAndDelete) Err() error { return f.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+// It encodes, sends, and decodes the command in one call.
+func (f *FindOneAndDelete) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.FindAndModify, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	return f.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_replace.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_replace.go
new file mode 100644
index 0000000..b3139f8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_replace.go
@@ -0,0 +1,112 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// FindOneAndReplace represents the findOneAndReplace operation.
+//
+// The findOneAndReplace command modifies and returns a single document.
+type FindOneAndReplace struct {
+	NS           Namespace
+	Query        bsonx.Doc
+	Replacement  bsonx.Doc
+	Opts         []bsonx.Elem
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result result.FindAndModify // decoded server response, set by decode
+	err    error                // deferred error, surfaced via Result/Err
+}
+
+// Encode will encode this command into a wire message for the given server description.
+// It builds the command document first, then serializes it for the wire.
+func (f *FindOneAndReplace) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+// encode validates the namespace and builds the findAndModify command; the
+// replacement document is sent in the "update" field. The write concern is
+// attached only for wire version >= 4 (NOTE(review): threshold assumed
+// from this gate — confirm against the server compatibility matrix).
+func (f *FindOneAndReplace) encode(desc description.SelectedServer) (*Write, error) {
+	if err := f.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	command := bsonx.Doc{
+		{"findAndModify", bsonx.String(f.NS.Collection)},
+		{"query", bsonx.Document(f.Query)},
+		{"update", bsonx.Document(f.Replacement)},
+	}
+	command = append(command, f.Opts...)
+
+	write := &Write{
+		Clock:   f.Clock,
+		DB:      f.NS.DB,
+		Command: command,
+		Session: f.Session,
+	}
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 4 {
+		write.WriteConcern = f.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (f *FindOneAndReplace) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *FindOneAndReplace {
+	// Wire-level decoding is delegated to a throwaway Write command.
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		f.err = err
+		return f
+	}
+
+	return f.decode(desc, rdr)
+}
+
+// decode parses the raw response into a FindAndModify result; any parse
+// error is deferred to Result/Err.
+func (f *FindOneAndReplace) decode(desc description.SelectedServer, rdr bson.Raw) *FindOneAndReplace {
+	f.result, f.err = unmarshalFindAndModifyResult(rdr)
+	return f
+}
+
+// Result returns the result of a decoded wire message and server description.
+// Any error deferred during Decode is returned here instead of a result.
+func (f *FindOneAndReplace) Result() (result.FindAndModify, error) {
+	if f.err != nil {
+		return result.FindAndModify{}, f.err
+	}
+	return f.result, nil
+}
+
+// Err returns the error set on this command, if any (populated by Decode).
+func (f *FindOneAndReplace) Err() error { return f.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+// It encodes, sends, and decodes the command in one call.
+func (f *FindOneAndReplace) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.FindAndModify, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	return f.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_update.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_update.go
new file mode 100644
index 0000000..b90c7d0
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/find_one_update.go
@@ -0,0 +1,112 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// FindOneAndUpdate represents the findOneAndUpdate operation.
+//
+// The findOneAndUpdate command modifies and returns a single document.
+type FindOneAndUpdate struct {
+	NS           Namespace
+	Query        bsonx.Doc
+	Update       bsonx.Doc
+	Opts         []bsonx.Elem
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result result.FindAndModify // decoded server response, set by decode
+	err    error                // deferred error, surfaced via Result/Err
+}
+
+// Encode will encode this command into a wire message for the given server description.
+// It builds the command document first, then serializes it for the wire.
+func (f *FindOneAndUpdate) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+// encode validates the namespace and builds the findAndModify command with
+// the update document. The write concern is attached only for wire
+// version >= 4 (NOTE(review): threshold assumed from this gate — confirm
+// against the server compatibility matrix).
+func (f *FindOneAndUpdate) encode(desc description.SelectedServer) (*Write, error) {
+	if err := f.NS.Validate(); err != nil {
+		return nil, err
+	}
+
+	command := bsonx.Doc{
+		{"findAndModify", bsonx.String(f.NS.Collection)},
+		{"query", bsonx.Document(f.Query)},
+		{"update", bsonx.Document(f.Update)},
+	}
+	command = append(command, f.Opts...)
+
+	write := &Write{
+		Clock:   f.Clock,
+		DB:      f.NS.DB,
+		Command: command,
+		Session: f.Session,
+	}
+	if desc.WireVersion != nil && desc.WireVersion.Max >= 4 {
+		write.WriteConcern = f.WriteConcern
+	}
+	return write, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (f *FindOneAndUpdate) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *FindOneAndUpdate {
+	// Wire-level decoding is delegated to a throwaway Write command.
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		f.err = err
+		return f
+	}
+
+	return f.decode(desc, rdr)
+}
+
+// decode parses the raw response into a FindAndModify result; any parse
+// error is deferred to Result/Err.
+func (f *FindOneAndUpdate) decode(desc description.SelectedServer, rdr bson.Raw) *FindOneAndUpdate {
+	f.result, f.err = unmarshalFindAndModifyResult(rdr)
+	return f
+}
+
+// Result returns the result of a decoded wire message and server description.
+// Any error deferred during Decode is returned here instead of a result.
+func (f *FindOneAndUpdate) Result() (result.FindAndModify, error) {
+	if f.err != nil {
+		return result.FindAndModify{}, f.err
+	}
+	return f.result, nil
+}
+
+// Err returns the error set on this command, if any (populated by Decode).
+func (f *FindOneAndUpdate) Err() error { return f.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+// It encodes, sends, and decodes the command in one call.
+func (f *FindOneAndUpdate) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.FindAndModify, error) {
+	cmd, err := f.encode(desc)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.FindAndModify{}, err
+	}
+
+	return f.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/get_more.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/get_more.go
new file mode 100644
index 0000000..ce016b1
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/get_more.go
@@ -0,0 +1,108 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// GetMore represents the getMore command.
+//
+// The getMore command retrieves additional documents from a cursor.
+type GetMore struct {
+	ID      int64 // cursor ID returned by the originating command
+	NS      Namespace
+	Opts    []bsonx.Elem
+	Clock   *session.ClusterClock
+	Session *session.Client
+
+	result bson.Raw // raw server response, set by decode
+	err    error    // deferred error, surfaced via Result/Err
+}
+
+// Encode will encode this command into a wire message for the given server description.
+// It builds the command document first, then serializes it for the wire.
+func (gm *GetMore) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd, err := gm.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return cmd.Encode(desc)
+}
+
+// encode builds the Read command for getMore. The driver-level
+// maxAwaitTimeMS option is translated into the server's maxTimeMS field;
+// all other options are passed through unchanged.
+func (gm *GetMore) encode(desc description.SelectedServer) (*Read, error) {
+	cmd := bsonx.Doc{
+		{"getMore", bsonx.Int64(gm.ID)},
+		{"collection", bsonx.String(gm.NS.Collection)},
+	}
+
+	for _, opt := range gm.Opts {
+		switch opt.Key {
+		case "maxAwaitTimeMS":
+			// The server option is spelled "maxTimeMS"; the previous
+			// "maxTimeMs" casing would not match the server's field name.
+			cmd = append(cmd, bsonx.Elem{"maxTimeMS", opt.Value})
+		default:
+			cmd = append(cmd, opt)
+		}
+	}
+
+	return &Read{
+		Clock:   gm.Clock,
+		DB:      gm.NS.DB,
+		Command: cmd,
+		Session: gm.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (gm *GetMore) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *GetMore {
+	// Wire-level decoding is delegated to a throwaway Read command.
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		gm.err = err
+		return gm
+	}
+
+	return gm.decode(desc, rdr)
+}
+
+// decode stores the raw server response; the server description is unused
+// but kept for signature symmetry with the other commands in this package.
+func (gm *GetMore) decode(desc description.SelectedServer, rdr bson.Raw) *GetMore {
+	gm.result = rdr
+	return gm
+}
+
+// Result returns the result of a decoded wire message and server description.
+// Any error deferred during Decode is returned here instead of a result.
+func (gm *GetMore) Result() (bson.Raw, error) {
+	if gm.err != nil {
+		return nil, gm.err
+	}
+
+	return gm.result, nil
+}
+
+// Err returns the error set on this command, if any (populated by Decode).
+func (gm *GetMore) Err() error { return gm.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+// It encodes, sends, and decodes the command in one call, returning the raw response.
+func (gm *GetMore) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := gm.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+
+	return gm.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/getlasterror.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/getlasterror.go
new file mode 100644
index 0000000..4f68c3c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/getlasterror.go
@@ -0,0 +1,111 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// GetLastError represents the getLastError command.
+//
+// The getLastError command is used for getting the last
+// error from the last command on a connection.
+//
+// Since GetLastError only makes sense in the context of
+// a single connection, there is no Dispatch method.
+type GetLastError struct {
+	Clock   *session.ClusterClock
+	Session *session.Client
+
+	err error // deferred error, surfaced by Result/Err
+	res result.GetLastError
+}
+
+// Encode will encode this command into a wire message for the given server description.
+// An empty SelectedServer is used because getLastError is connection-scoped and
+// needs no server selection.
+func (gle *GetLastError) Encode() (wiremessage.WireMessage, error) {
+	encoded, err := gle.encode()
+	if err != nil {
+		return nil, err
+	}
+	return encoded.Encode(description.SelectedServer{})
+}
+
+// encode builds the {getLastError: 1} command as a Read against the admin
+// database with a secondary read preference.
+func (gle *GetLastError) encode() (*Read, error) {
+	// This can probably just be a global variable that we reuse.
+	cmd := bsonx.Doc{{"getLastError", bsonx.Int32(1)}}
+
+	return &Read{
+		Clock:    gle.Clock,
+		DB:       "admin",
+		ReadPref: readpref.Secondary(),
+		Session:  gle.Session,
+		Command:  cmd,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (gle *GetLastError) Decode(wm wiremessage.WireMessage) *GetLastError {
+	reply, ok := wm.(wiremessage.Reply)
+	if !ok {
+		gle.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return gle
+	}
+	rdr, err := decodeCommandOpReply(reply)
+	if err != nil {
+		gle.err = err
+		return gle
+	}
+	return gle.decode(rdr)
+}
+
+// decode unmarshals the raw reply into the result struct; it is split from
+// Decode so RoundTrip can reuse it with an already-decoded reply.
+func (gle *GetLastError) decode(rdr bson.Raw) *GetLastError {
+	err := bson.Unmarshal(rdr, &gle.res)
+	if err != nil {
+		gle.err = err
+		return gle
+	}
+
+	return gle
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (gle *GetLastError) Result() (result.GetLastError, error) {
+	if gle.err != nil {
+		return result.GetLastError{}, gle.err
+	}
+
+	return gle.res, nil
+}
+
+// Err returns the error set on this command.
+func (gle *GetLastError) Err() error { return gle.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (gle *GetLastError) RoundTrip(ctx context.Context, rw wiremessage.ReadWriter) (result.GetLastError, error) {
+	cmd, err := gle.encode()
+	if err != nil {
+		return result.GetLastError{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, description.SelectedServer{}, rw)
+	if err != nil {
+		return result.GetLastError{}, err
+	}
+
+	return gle.decode(rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/handshake.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/handshake.go
new file mode 100644
index 0000000..29eb103
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/handshake.go
@@ -0,0 +1,117 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"runtime"
+
+	"github.com/mongodb/mongo-go-driver/version"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Handshake represents a generic MongoDB Handshake. It calls isMaster and
+// buildInfo.
+//
+// The isMaster and buildInfo commands are used to build a server description.
+//
+// NOTE(review): despite the doc above, Encode below only emits an isMaster
+// message — no buildInfo is sent in this implementation; confirm against
+// upstream before relying on buildInfo behavior.
+type Handshake struct {
+	Client             bsonx.Doc
+	Compressors        []string
+	SaslSupportedMechs string
+
+	ismstr result.IsMaster
+	err    error
+}
+
+// Encode will encode the handshake commands into a wire message containing isMaster
+func (h *Handshake) Encode() (wiremessage.WireMessage, error) {
+	var wm wiremessage.WireMessage
+	ismstr, err := (&IsMaster{
+		Client:             h.Client,
+		Compressors:        h.Compressors,
+		SaslSupportedMechs: h.SaslSupportedMechs,
+	}).Encode()
+	if err != nil {
+		return wm, err
+	}
+
+	wm = ismstr
+	return wm, nil
+}
+
+// Decode will decode the wire messages.
+// Errors during decoding are deferred until either the Result or Err methods
+// are called.
+func (h *Handshake) Decode(wm wiremessage.WireMessage) *Handshake {
+	h.ismstr, h.err = (&IsMaster{}).Decode(wm).Result()
+	// Both branches return h; the check is kept only to mirror the usual
+	// deferred-error shape used throughout this package.
+	if h.err != nil {
+		return h
+	}
+	return h
+}
+
+// Result returns the result of decoded wire messages, building a server
+// description from the isMaster response and the given address.
+func (h *Handshake) Result(addr address.Address) (description.Server, error) {
+	if h.err != nil {
+		return description.Server{}, h.err
+	}
+	return description.NewServer(addr, h.ismstr), nil
+}
+
+// Err returns the error set on this Handshake.
+func (h *Handshake) Err() error { return h.err }
+
+// Handshake implements the connection.Handshaker interface. It is identical
+// to the RoundTrip methods on other types in this package. It will execute
+// the isMaster command.
+func (h *Handshake) Handshake(ctx context.Context, addr address.Address, rw wiremessage.ReadWriter) (description.Server, error) {
+	wm, err := h.Encode()
+	if err != nil {
+		return description.Server{}, err
+	}
+
+	err = rw.WriteWireMessage(ctx, wm)
+	if err != nil {
+		return description.Server{}, err
+	}
+
+	wm, err = rw.ReadWireMessage(ctx)
+	if err != nil {
+		return description.Server{}, err
+	}
+	return h.Decode(wm).Result(addr)
+}
+
+// ClientDoc creates a client information document for use in an isMaster
+// command. It reports the driver name/version, the OS (GOOS/GOARCH), the
+// Go runtime version, and — when app is non-empty — the application name.
+func ClientDoc(app string) bsonx.Doc {
+	doc := bsonx.Doc{
+		{"driver",
+			bsonx.Document(bsonx.Doc{
+				{"name", bsonx.String("mongo-go-driver")},
+				{"version", bsonx.String(version.Driver)},
+			}),
+		},
+		{"os",
+			bsonx.Document(bsonx.Doc{
+				{"type", bsonx.String(runtime.GOOS)},
+				{"architecture", bsonx.String(runtime.GOARCH)},
+			}),
+		},
+		{"platform", bsonx.String(runtime.Version())},
+	}
+
+	if app != "" {
+		doc = append(doc, bsonx.Elem{"application", bsonx.Document(bsonx.Doc{{"name", bsonx.String(app)}})})
+	}
+
+	return doc
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/insert.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/insert.go
new file mode 100644
index 0000000..5059630
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/insert.go
@@ -0,0 +1,158 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// this is the amount of reserved buffer space in a message that the
+// driver reserves for command overhead.
+// NOTE(review): 16 * 10 * 10 * 10 = 16000 bytes; if 16KiB was intended this
+// would be 16 * 1024 — confirm against upstream before changing.
+const reservedCommandBufferBytes = 16 * 10 * 10 * 10
+
+// Insert represents the insert command.
+//
+// The insert command inserts a set of documents into the database.
+//
+// Since the Insert command does not return any value other than ok or
+// an error, this type has no Err method.
+type Insert struct {
+	ContinueOnError bool
+	Clock           *session.ClusterClock
+	NS              Namespace
+	Docs            []bsonx.Doc
+	Opts            []bsonx.Elem
+	WriteConcern    *writeconcern.WriteConcern
+	Session         *session.Client
+
+	batches []*WriteBatch // split batches, kept for retry after partial failure
+	result  result.Insert
+	err     error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+// Documents are split into batches first, then each batch becomes one wire message.
+func (i *Insert) Encode(desc description.SelectedServer) ([]wiremessage.WireMessage, error) {
+	err := i.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return batchesToWireMessage(i.batches, desc)
+}
+
+// encodeBatch builds a single WriteBatch for the given documents. As a side
+// effect it sets ContinueOnError when the "ordered" option is explicitly false.
+func (i *Insert) encodeBatch(docs []bsonx.Doc, desc description.SelectedServer) (*WriteBatch, error) {
+	command, err := encodeBatch(docs, i.Opts, InsertCommand, i.NS.Collection)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, opt := range i.Opts {
+		if opt.Key == "ordered" && !opt.Value.Boolean() {
+			i.ContinueOnError = true
+			break
+		}
+	}
+
+	return &WriteBatch{
+		&Write{
+			Clock:        i.Clock,
+			DB:           i.NS.DB,
+			Command:      command,
+			WriteConcern: i.WriteConcern,
+			Session:      i.Session,
+		},
+		len(docs),
+	}, nil
+}
+
+// encode splits i.Docs by the server's batch-count and document-size limits
+// and stores the resulting batches on the receiver.
+func (i *Insert) encode(desc description.SelectedServer) error {
+	batches, err := splitBatches(i.Docs, int(desc.MaxBatchCount), int(desc.MaxDocumentSize))
+	if err != nil {
+		return err
+	}
+
+	for _, docs := range batches {
+		cmd, err := i.encodeBatch(docs, desc)
+		if err != nil {
+			return err
+		}
+
+		i.batches = append(i.batches, cmd)
+	}
+	return nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (i *Insert) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Insert {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		i.err = err
+		return i
+	}
+
+	return i.decode(desc, rdr)
+}
+
+// decode unmarshals the raw server reply into i.result, deferring any error.
+func (i *Insert) decode(desc description.SelectedServer, rdr bson.Raw) *Insert {
+	i.err = bson.Unmarshal(rdr, &i.result)
+	return i
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (i *Insert) Result() (result.Insert, error) {
+	if i.err != nil {
+		return result.Insert{}, i.err
+	}
+	return i.result, nil
+}
+
+// Err returns the error set on this command.
+func (i *Insert) Err() error { return i.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+// Batches are encoded lazily on first use; leftover batches from a failed run
+// are stored back on the receiver so the command can be retried.
+func (i *Insert) RoundTrip(
+	ctx context.Context,
+	desc description.SelectedServer,
+	rw wiremessage.ReadWriter,
+) (result.Insert, error) {
+	if i.batches == nil {
+		err := i.encode(desc)
+		if err != nil {
+			return result.Insert{}, err
+		}
+	}
+
+	r, batches, err := roundTripBatches(
+		ctx, desc, rw,
+		i.batches,
+		i.ContinueOnError,
+		i.Session,
+		InsertCommand,
+	)
+
+	// if there are leftover batches, save them for retry
+	if batches != nil {
+		i.batches = batches
+	}
+
+	if err != nil {
+		return result.Insert{}, err
+	}
+
+	res := r.(result.Insert)
+	return res, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/ismaster.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/ismaster.go
new file mode 100644
index 0000000..6bd8d09
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/ismaster.go
@@ -0,0 +1,121 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// IsMaster represents the isMaster command.
+//
+// The isMaster command is used for setting up a connection to MongoDB and
+// for monitoring a MongoDB server.
+//
+// Since IsMaster can only be run on a connection, there is no Dispatch method.
+type IsMaster struct {
+	Client             bsonx.Doc // optional client metadata document (see ClientDoc)
+	Compressors        []string
+	SaslSupportedMechs string
+
+	err error
+	res result.IsMaster
+}
+
+// Encode will encode this command into a wire message for the given server description.
+// The command is sent as a legacy OP_QUERY against admin.$cmd because isMaster
+// must work before the server's wire version is known.
+func (im *IsMaster) Encode() (wiremessage.WireMessage, error) {
+	cmd := bsonx.Doc{{"isMaster", bsonx.Int32(1)}}
+	if im.Client != nil {
+		cmd = append(cmd, bsonx.Elem{"client", bsonx.Document(im.Client)})
+	}
+	if im.SaslSupportedMechs != "" {
+		cmd = append(cmd, bsonx.Elem{"saslSupportedMechs", bsonx.String(im.SaslSupportedMechs)})
+	}
+
+	// always send compressors even if empty slice
+	array := bsonx.Arr{}
+	for _, compressor := range im.Compressors {
+		array = append(array, bsonx.String(compressor))
+	}
+
+	cmd = append(cmd, bsonx.Elem{"compression", bsonx.Array(array)})
+
+	rdr, err := cmd.MarshalBSON()
+	if err != nil {
+		return nil, err
+	}
+	query := wiremessage.Query{
+		MsgHeader:          wiremessage.Header{RequestID: wiremessage.NextRequestID()},
+		FullCollectionName: "admin.$cmd",
+		Flags:              wiremessage.SlaveOK,
+		// NumberToReturn of -1 requests a single reply document, the
+		// OP_QUERY convention for running a command.
+		NumberToReturn: -1,
+		Query:          rdr,
+	}
+	return query, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (im *IsMaster) Decode(wm wiremessage.WireMessage) *IsMaster {
+	reply, ok := wm.(wiremessage.Reply)
+	if !ok {
+		im.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return im
+	}
+	rdr, err := decodeCommandOpReply(reply)
+	if err != nil {
+		im.err = err
+		return im
+	}
+	err = bson.Unmarshal(rdr, &im.res)
+	if err != nil {
+		im.err = err
+		return im
+	}
+
+	// Reconstructs the $clusterTime doc after decode
+	if im.res.ClusterTime != nil {
+		im.res.ClusterTime = bsoncore.BuildDocument(nil, bsoncore.AppendDocumentElement(nil, "$clusterTime", im.res.ClusterTime))
+	}
+	return im
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (im *IsMaster) Result() (result.IsMaster, error) {
+	if im.err != nil {
+		return result.IsMaster{}, im.err
+	}
+
+	return im.res, nil
+}
+
+// Err returns the error set on this command.
+func (im *IsMaster) Err() error { return im.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+// It writes the encoded isMaster, reads a single reply, and decodes it.
+func (im *IsMaster) RoundTrip(ctx context.Context, rw wiremessage.ReadWriter) (result.IsMaster, error) {
+	wm, err := im.Encode()
+	if err != nil {
+		return result.IsMaster{}, err
+	}
+
+	err = rw.WriteWireMessage(ctx, wm)
+	if err != nil {
+		return result.IsMaster{}, err
+	}
+	wm, err = rw.ReadWireMessage(ctx)
+	if err != nil {
+		return result.IsMaster{}, err
+	}
+	return im.Decode(wm).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/kill_cursors.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/kill_cursors.go
new file mode 100644
index 0000000..37b1f89
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/kill_cursors.go
@@ -0,0 +1,103 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// KillCursors represents the killCursors command.
+//
+// The killCursors command kills a set of cursors.
+type KillCursors struct {
+	Clock *session.ClusterClock
+	NS    Namespace
+	IDs   []int64 // cursor IDs to kill
+
+	result result.KillCursors
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (kc *KillCursors) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	encoded, err := kc.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	return encoded.Encode(desc)
+}
+
+// encode builds the killCursors command with the cursor IDs as a BSON array.
+// NOTE(review): unlike the other commands in this package, the Read is built
+// without a Session — confirm whether that is intentional.
+func (kc *KillCursors) encode(desc description.SelectedServer) (*Read, error) {
+	idVals := make([]bsonx.Val, 0, len(kc.IDs))
+	for _, id := range kc.IDs {
+		idVals = append(idVals, bsonx.Int64(id))
+	}
+	cmd := bsonx.Doc{
+		{"killCursors", bsonx.String(kc.NS.Collection)},
+		{"cursors", bsonx.Array(idVals)},
+	}
+
+	return &Read{
+		Clock:   kc.Clock,
+		DB:      kc.NS.DB,
+		Command: cmd,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (kc *KillCursors) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *KillCursors {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		kc.err = err
+		return kc
+	}
+	return kc.decode(desc, rdr)
+}
+
+// decode unmarshals the raw reply into the result struct, deferring any error.
+func (kc *KillCursors) decode(desc description.SelectedServer, rdr bson.Raw) *KillCursors {
+	err := bson.Unmarshal(rdr, &kc.result)
+	if err != nil {
+		kc.err = err
+		return kc
+	}
+	return kc
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (kc *KillCursors) Result() (result.KillCursors, error) {
+	if kc.err != nil {
+		return result.KillCursors{}, kc.err
+	}
+
+	return kc.result, nil
+}
+
+// Err returns the error set on this command.
+func (kc *KillCursors) Err() error { return kc.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (kc *KillCursors) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.KillCursors, error) {
+	cmd, err := kc.encode(desc)
+	if err != nil {
+		return result.KillCursors{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.KillCursors{}, err
+	}
+
+	return kc.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_collections.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_collections.go
new file mode 100644
index 0000000..0c3e76e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_collections.go
@@ -0,0 +1,102 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// ListCollections represents the listCollections command.
+//
+// The listCollections command lists the collections in a database.
+type ListCollections struct {
+	Clock      *session.ClusterClock
+	DB         string
+	Filter     bsonx.Doc    // optional filter on the returned collections
+	CursorOpts []bsonx.Elem
+	Opts       []bsonx.Elem // extra command options, appended verbatim
+	ReadPref   *readpref.ReadPref
+	Session    *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (lc *ListCollections) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	encoded, err := lc.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	return encoded.Encode(desc)
+}
+
+// encode builds the {listCollections: 1} command, attaching the optional
+// filter and any extra options, wrapped in a Read against lc.DB.
+func (lc *ListCollections) encode(desc description.SelectedServer) (*Read, error) {
+	cmd := bsonx.Doc{{"listCollections", bsonx.Int32(1)}}
+
+	if lc.Filter != nil {
+		cmd = append(cmd, bsonx.Elem{"filter", bsonx.Document(lc.Filter)})
+	}
+	cmd = append(cmd, lc.Opts...)
+
+	return &Read{
+		Clock:    lc.Clock,
+		DB:       lc.DB,
+		Command:  cmd,
+		ReadPref: lc.ReadPref,
+		Session:  lc.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (lc *ListCollections) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *ListCollections {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		lc.err = err
+		return lc
+	}
+	return lc.decode(desc, rdr)
+}
+
+// decode stores the raw server reply; split from Decode so RoundTrip can
+// reuse it with an already-decoded response.
+func (lc *ListCollections) decode(desc description.SelectedServer, rdr bson.Raw) *ListCollections {
+	lc.result = rdr
+	return lc
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (lc *ListCollections) Result() (bson.Raw, error) {
+	if lc.err != nil {
+		return nil, lc.err
+	}
+	return lc.result, nil
+}
+
+// Err returns the error set on this command.
+func (lc *ListCollections) Err() error { return lc.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (lc *ListCollections) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := lc.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return nil, err
+	}
+
+	return lc.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_databases.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_databases.go
new file mode 100644
index 0000000..d4fd843
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_databases.go
@@ -0,0 +1,98 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// ListDatabases represents the listDatabases command.
+//
+// The listDatabases command lists the databases in a MongoDB deployment.
+type ListDatabases struct {
+	Clock   *session.ClusterClock
+	Filter  bsonx.Doc    // optional filter on the returned databases
+	Opts    []bsonx.Elem // extra command options, appended verbatim
+	Session *session.Client
+
+	result result.ListDatabases
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (ld *ListDatabases) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	encoded, err := ld.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	return encoded.Encode(desc)
+}
+
+// encode builds the {listDatabases: 1} command as a Read against the admin
+// database, attaching the optional filter and any extra options.
+func (ld *ListDatabases) encode(desc description.SelectedServer) (*Read, error) {
+	cmd := bsonx.Doc{{"listDatabases", bsonx.Int32(1)}}
+
+	if ld.Filter != nil {
+		cmd = append(cmd, bsonx.Elem{"filter", bsonx.Document(ld.Filter)})
+	}
+	cmd = append(cmd, ld.Opts...)
+
+	return &Read{
+		Clock:   ld.Clock,
+		DB:      "admin",
+		Command: cmd,
+		Session: ld.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (ld *ListDatabases) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *ListDatabases {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		ld.err = err
+		return ld
+	}
+	return ld.decode(desc, rdr)
+}
+
+// decode unmarshals the raw reply into the result struct, deferring any error.
+func (ld *ListDatabases) decode(desc description.SelectedServer, rdr bson.Raw) *ListDatabases {
+	ld.err = bson.Unmarshal(rdr, &ld.result)
+	return ld
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (ld *ListDatabases) Result() (result.ListDatabases, error) {
+	if ld.err != nil {
+		return result.ListDatabases{}, ld.err
+	}
+	return ld.result, nil
+}
+
+// Err returns the error set on this command.
+func (ld *ListDatabases) Err() error { return ld.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (ld *ListDatabases) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.ListDatabases, error) {
+	cmd, err := ld.encode(desc)
+	if err != nil {
+		return result.ListDatabases{}, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.ListDatabases{}, err
+	}
+
+	return ld.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_indexes.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_indexes.go
new file mode 100644
index 0000000..48730ed
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/list_indexes.go
@@ -0,0 +1,106 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// ErrEmptyCursor is a signaling error when a cursor for list indexes is empty.
+var ErrEmptyCursor = errors.New("empty cursor")
+
+// ListIndexes represents the listIndexes command.
+//
+// The listIndexes command lists the indexes for a namespace.
+type ListIndexes struct {
+	Clock      *session.ClusterClock
+	NS         Namespace
+	CursorOpts []bsonx.Elem
+	Opts       []bsonx.Elem // extra command options, appended verbatim
+	Session    *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (li *ListIndexes) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	encoded, err := li.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+	return encoded.Encode(desc)
+}
+
+// encode builds the {listIndexes: <collection>} command with any extra
+// options, wrapped in a Read against the namespace's database.
+func (li *ListIndexes) encode(desc description.SelectedServer) (*Read, error) {
+	cmd := bsonx.Doc{{"listIndexes", bsonx.String(li.NS.Collection)}}
+	cmd = append(cmd, li.Opts...)
+
+	return &Read{
+		Clock:   li.Clock,
+		DB:      li.NS.DB,
+		Command: cmd,
+		Session: li.Session,
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+// A server "not found" error (e.g. the namespace has no indexes) is mapped to
+// ErrEmptyCursor so callers can distinguish it from real failures.
+func (li *ListIndexes) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *ListIndexes {
+	rdr, err := (&Read{}).Decode(desc, wm).Result()
+	if err != nil {
+		if IsNotFound(err) {
+			li.err = ErrEmptyCursor
+			return li
+		}
+		li.err = err
+		return li
+	}
+
+	return li.decode(desc, rdr)
+}
+
+// decode stores the raw server reply; split from Decode so RoundTrip can
+// reuse it with an already-decoded response.
+func (li *ListIndexes) decode(desc description.SelectedServer, rdr bson.Raw) *ListIndexes {
+	li.result = rdr
+	return li
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (li *ListIndexes) Result() (bson.Raw, error) {
+	if li.err != nil {
+		return nil, li.err
+	}
+	return li.result, nil
+}
+
+// Err returns the error set on this command.
+func (li *ListIndexes) Err() error { return li.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (li *ListIndexes) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	cmd, err := li.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		// Map server "not found" to the sentinel, mirroring Decode above.
+		if IsNotFound(err) {
+			return nil, ErrEmptyCursor
+		}
+		return nil, err
+	}
+
+	return li.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/namespace.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/namespace.go
new file mode 100644
index 0000000..2a4f413
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/namespace.go
@@ -0,0 +1,79 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"errors"
+	"strings"
+)
+
+// Namespace encapsulates a database and collection name, which together
+// uniquely identifies a collection within a MongoDB cluster.
+type Namespace struct {
+	DB         string
+	Collection string
+}
+
+// NewNamespace returns a new Namespace for the
+// given database and collection.
+func NewNamespace(db, collection string) Namespace { return Namespace{DB: db, Collection: collection} }
+
+// ParseNamespace parses a namespace string into a Namespace.
+//
+// The namespace string must contain at least one ".", the first of which is the separator
+// between the database and collection names.  If not, the default (invalid) Namespace is returned.
+func ParseNamespace(name string) Namespace {
+	index := strings.Index(name, ".")
+	if index == -1 {
+		return Namespace{}
+	}
+
+	return Namespace{
+		DB:         name[:index],
+		Collection: name[index+1:],
+	}
+}
+
+// FullName returns the full namespace string, which is the result of joining the database
+// name and the collection name with a "." character.
+func (ns *Namespace) FullName() string {
+	return strings.Join([]string{ns.DB, ns.Collection}, ".")
+}
+
+// Validate validates the namespace, checking the database name first and
+// then the collection name.
+func (ns *Namespace) Validate() error {
+	if err := ns.validateDB(); err != nil {
+		return err
+	}
+
+	return ns.validateCollection()
+}
+
+// validateDB ensures the database name is not an empty string and does not
+// contain a "." or a " ".
+func (ns *Namespace) validateDB() error {
+	if ns.DB == "" {
+		return errors.New("database name cannot be empty")
+	}
+	if strings.Contains(ns.DB, " ") {
+		return errors.New("database name cannot contain ' '")
+	}
+	if strings.Contains(ns.DB, ".") {
+		return errors.New("database name cannot contain '.'")
+	}
+
+	return nil
+}
+
+// validateCollection ensures the collection name is not an empty string.
+func (ns *Namespace) validateCollection() error {
+	if ns.Collection == "" {
+		return errors.New("collection name cannot be empty")
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/opmsg.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/opmsg.go
new file mode 100644
index 0000000..c2d5952
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/opmsg.go
@@ -0,0 +1,57 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+func decodeCommandOpMsg(msg wiremessage.Msg) (bson.Raw, error) {
+	var mainDoc bsonx.Doc
+
+	for _, section := range msg.Sections {
+		switch converted := section.(type) {
+		case wiremessage.SectionBody:
+			err := mainDoc.UnmarshalBSON(converted.Document)
+			if err != nil {
+				return nil, err
+			}
+		case wiremessage.SectionDocumentSequence:
+			arr := bsonx.Arr{}
+			for _, doc := range converted.Documents {
+				newDoc := bsonx.Doc{}
+				err := newDoc.UnmarshalBSON(doc)
+				if err != nil {
+					return nil, err
+				}
+
+				arr = append(arr, bsonx.Document(newDoc))
+			}
+
+			mainDoc = append(mainDoc, bsonx.Elem{converted.Identifier, bsonx.Array(arr)})
+		}
+	}
+
+	byteArray, err := mainDoc.MarshalBSON()
+	if err != nil {
+		return nil, err
+	}
+
+	rdr := bson.Raw(byteArray)
+	err = rdr.Validate()
+	if err != nil {
+		return nil, NewCommandResponseError("malformed OP_MSG: invalid document", err)
+	}
+
+	err = extractError(rdr)
+	if err != nil {
+		return nil, err
+	}
+	return rdr, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/opreply.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/opreply.go
new file mode 100644
index 0000000..68c15ed
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/opreply.go
@@ -0,0 +1,43 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// decodeCommandOpReply handles decoding the OP_REPLY response to an OP_QUERY
+// command.
+func decodeCommandOpReply(reply wiremessage.Reply) (bson.Raw, error) {
+	if reply.NumberReturned == 0 {
+		return nil, ErrNoDocCommandResponse
+	}
+	if reply.NumberReturned > 1 {
+		return nil, ErrMultiDocCommandResponse
+	}
+	if len(reply.Documents) != 1 {
+		return nil, NewCommandResponseError("malformed OP_REPLY: NumberReturned does not match number of documents returned", nil)
+	}
+	rdr := reply.Documents[0]
+	err := rdr.Validate()
+	if err != nil {
+		return nil, NewCommandResponseError("malformed OP_REPLY: invalid document", err)
+	}
+	if reply.ResponseFlags&wiremessage.QueryFailure == wiremessage.QueryFailure {
+		return nil, QueryFailureError{
+			Message:  "command failure",
+			Response: reply.Documents[0],
+		}
+	}
+
+	err = extractError(rdr)
+	if err != nil {
+		return nil, err
+	}
+	return rdr, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/read.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/read.go
new file mode 100644
index 0000000..d7b6547
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/read.go
@@ -0,0 +1,287 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"fmt"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/readconcern"
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Read represents a generic database read command.
+type Read struct {
+	DB          string
+	Command     bsonx.Doc
+	ReadPref    *readpref.ReadPref
+	ReadConcern *readconcern.ReadConcern
+	Clock       *session.ClusterClock
+	Session     *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+func (r *Read) createReadPref(serverKind description.ServerKind, topologyKind description.TopologyKind, isOpQuery bool) bsonx.Doc {
+	doc := bsonx.Doc{}
+	rp := r.ReadPref
+
+	if rp == nil {
+		if topologyKind == description.Single && serverKind != description.Mongos {
+			return append(doc, bsonx.Elem{"mode", bsonx.String("primaryPreferred")})
+		}
+		return nil
+	}
+
+	switch rp.Mode() {
+	case readpref.PrimaryMode:
+		if serverKind == description.Mongos {
+			return nil
+		}
+		if topologyKind == description.Single {
+			return append(doc, bsonx.Elem{"mode", bsonx.String("primaryPreferred")})
+		}
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("primary")})
+	case readpref.PrimaryPreferredMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("primaryPreferred")})
+	case readpref.SecondaryPreferredMode:
+		_, ok := r.ReadPref.MaxStaleness()
+		if serverKind == description.Mongos && isOpQuery && !ok && len(r.ReadPref.TagSets()) == 0 {
+			return nil
+		}
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("secondaryPreferred")})
+	case readpref.SecondaryMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("secondary")})
+	case readpref.NearestMode:
+		doc = append(doc, bsonx.Elem{"mode", bsonx.String("nearest")})
+	}
+
+	sets := make([]bsonx.Val, 0, len(r.ReadPref.TagSets()))
+	for _, ts := range r.ReadPref.TagSets() {
+		if len(ts) == 0 {
+			continue
+		}
+		set := bsonx.Doc{}
+		for _, t := range ts {
+			set = append(set, bsonx.Elem{t.Name, bsonx.String(t.Value)})
+		}
+		sets = append(sets, bsonx.Document(set))
+	}
+	if len(sets) > 0 {
+		doc = append(doc, bsonx.Elem{"tags", bsonx.Array(sets)})
+	}
+
+	if d, ok := r.ReadPref.MaxStaleness(); ok {
+		doc = append(doc, bsonx.Elem{"maxStalenessSeconds", bsonx.Int32(int32(d.Seconds()))})
+	}
+
+	return doc
+}
+
+// addReadPref will add a read preference to the query document.
+//
+// NOTE: This method must always return either a valid bson.Reader or an error.
+func (r *Read) addReadPref(rp *readpref.ReadPref, serverKind description.ServerKind, topologyKind description.TopologyKind, query bson.Raw) (bson.Raw, error) {
+	doc := r.createReadPref(serverKind, topologyKind, true)
+	if doc == nil {
+		return query, nil
+	}
+
+	qdoc := bsonx.Doc{}
+	err := bson.Unmarshal(query, &qdoc)
+	if err != nil {
+		return query, err
+	}
+	return bsonx.Doc{
+		{"$query", bsonx.Document(qdoc)},
+		{"$readPreference", bsonx.Document(doc)},
+	}.MarshalBSON()
+}
+
+// Encode r as OP_MSG
+func (r *Read) encodeOpMsg(desc description.SelectedServer, cmd bsonx.Doc) (wiremessage.WireMessage, error) {
+	msg := wiremessage.Msg{
+		MsgHeader: wiremessage.Header{RequestID: wiremessage.NextRequestID()},
+		Sections:  make([]wiremessage.Section, 0),
+	}
+
+	readPrefDoc := r.createReadPref(desc.Server.Kind, desc.Kind, false)
+	fullDocRdr, err := opmsgAddGlobals(cmd, r.DB, readPrefDoc)
+	if err != nil {
+		return nil, err
+	}
+
+	// type 0 doc
+	msg.Sections = append(msg.Sections, wiremessage.SectionBody{
+		PayloadType: wiremessage.SingleDocument,
+		Document:    fullDocRdr,
+	})
+
+	// no flags to add
+
+	return msg, nil
+}
+
+func (r *Read) slaveOK(desc description.SelectedServer) wiremessage.QueryFlag {
+	if desc.Kind == description.Single && desc.Server.Kind != description.Mongos {
+		return wiremessage.SlaveOK
+	}
+
+	if r.ReadPref == nil {
+		// assume primary
+		return 0
+	}
+
+	if r.ReadPref.Mode() != readpref.PrimaryMode {
+		return wiremessage.SlaveOK
+	}
+
+	return 0
+}
+
+// Encode c as OP_QUERY
+func (r *Read) encodeOpQuery(desc description.SelectedServer, cmd bsonx.Doc) (wiremessage.WireMessage, error) {
+	rdr, err := marshalCommand(cmd)
+	if err != nil {
+		return nil, err
+	}
+
+	if desc.Server.Kind == description.Mongos {
+		rdr, err = r.addReadPref(r.ReadPref, desc.Server.Kind, desc.Kind, rdr)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	query := wiremessage.Query{
+		MsgHeader:          wiremessage.Header{RequestID: wiremessage.NextRequestID()},
+		FullCollectionName: r.DB + ".$cmd",
+		Flags:              r.slaveOK(desc),
+		NumberToReturn:     -1,
+		Query:              rdr,
+	}
+
+	return query, nil
+}
+
+func (r *Read) decodeOpMsg(wm wiremessage.WireMessage) {
+	msg, ok := wm.(wiremessage.Msg)
+	if !ok {
+		r.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return
+	}
+
+	r.result, r.err = decodeCommandOpMsg(msg)
+}
+
+func (r *Read) decodeOpReply(wm wiremessage.WireMessage) {
+	reply, ok := wm.(wiremessage.Reply)
+	if !ok {
+		r.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return
+	}
+	r.result, r.err = decodeCommandOpReply(reply)
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (r *Read) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd := r.Command.Copy()
+	cmd, err := addReadConcern(cmd, desc, r.ReadConcern, r.Session)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd, err = addSessionFields(cmd, desc, r.Session)
+	if err != nil {
+		return nil, err
+	}
+
+	cmd = addClusterTime(cmd, desc, r.Session, r.Clock)
+
+	if desc.WireVersion == nil || desc.WireVersion.Max < wiremessage.OpmsgWireVersion {
+		return r.encodeOpQuery(desc, cmd)
+	}
+
+	return r.encodeOpMsg(desc, cmd)
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (r *Read) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Read {
+	switch wm.(type) {
+	case wiremessage.Reply:
+		r.decodeOpReply(wm)
+	default:
+		r.decodeOpMsg(wm)
+	}
+
+	if r.err != nil {
+		// decode functions set error if an invalid response document was returned or if the OK flag in the response was 0
+		// if the OK flag was 0, a type Error is returned. otherwise, a special type is returned
+		if _, ok := r.err.(Error); !ok {
+			return r // for missing/invalid response docs, don't update cluster times
+		}
+	}
+
+	_ = updateClusterTimes(r.Session, r.Clock, r.result)
+	_ = updateOperationTime(r.Session, r.result)
+	return r
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (r *Read) Result() (bson.Raw, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+
+	return r.result, nil
+}
+
+// Err returns the error set on this command.
+func (r *Read) Err() error {
+	return r.err
+}
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (r *Read) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	wm, err := r.Encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	err = rw.WriteWireMessage(ctx, wm)
+	if err != nil {
+		if _, ok := err.(Error); ok {
+			return nil, err
+		}
+		// Connection errors are transient
+		return nil, Error{Message: err.Error(), Labels: []string{TransientTransactionError, NetworkError}}
+	}
+	wm, err = rw.ReadWireMessage(ctx)
+	if err != nil {
+		if _, ok := err.(Error); ok {
+			return nil, err
+		}
+		// Connection errors are transient
+		return nil, Error{Message: err.Error(), Labels: []string{TransientTransactionError, NetworkError}}
+	}
+
+	if r.Session != nil {
+		err = r.Session.UpdateUseTime()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return r.Decode(desc, wm).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/start_session.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/start_session.go
new file mode 100644
index 0000000..69758b8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/start_session.go
@@ -0,0 +1,82 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// StartSession represents a startSession command
+type StartSession struct {
+	Clock  *session.ClusterClock
+	result result.StartSession
+	err    error
+}
+
+// Encode will encode this command into a wiremessage for the given server description.
+func (ss *StartSession) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd := ss.encode(desc)
+	return cmd.Encode(desc)
+}
+
+func (ss *StartSession) encode(desc description.SelectedServer) *Write {
+	cmd := bsonx.Doc{{"startSession", bsonx.Int32(1)}}
+	return &Write{
+		Clock:   ss.Clock,
+		DB:      "admin",
+		Command: cmd,
+	}
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding are deferred until
+// either the Result or Err methods are called.
+func (ss *StartSession) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *StartSession {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		ss.err = err
+		return ss
+	}
+
+	return ss.decode(desc, rdr)
+}
+
+func (ss *StartSession) decode(desc description.SelectedServer, rdr bson.Raw) *StartSession {
+	ss.err = bson.Unmarshal(rdr, &ss.result)
+	return ss
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (ss *StartSession) Result() (result.StartSession, error) {
+	if ss.err != nil {
+		return result.StartSession{}, ss.err
+	}
+
+	return ss.result, nil
+}
+
+// Err returns the error set on this command
+func (ss *StartSession) Err() error {
+	return ss.err
+}
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter
+func (ss *StartSession) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (result.StartSession, error) {
+	cmd := ss.encode(desc)
+	rdr, err := cmd.RoundTrip(ctx, desc, rw)
+	if err != nil {
+		return result.StartSession{}, err
+	}
+
+	return ss.decode(desc, rdr).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/update.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/update.go
new file mode 100644
index 0000000..29470ae
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/update.go
@@ -0,0 +1,161 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Update represents the update command.
+//
+// The update command updates a set of documents with the database.
+type Update struct {
+	ContinueOnError bool
+	Clock           *session.ClusterClock
+	NS              Namespace
+	Docs            []bsonx.Doc
+	Opts            []bsonx.Elem
+	WriteConcern    *writeconcern.WriteConcern
+	Session         *session.Client
+
+	batches []*WriteBatch
+	result  result.Update
+	err     error
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (u *Update) Encode(desc description.SelectedServer) ([]wiremessage.WireMessage, error) {
+	err := u.encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	return batchesToWireMessage(u.batches, desc)
+}
+
+func (u *Update) encode(desc description.SelectedServer) error {
+	batches, err := splitBatches(u.Docs, int(desc.MaxBatchCount), int(desc.MaxDocumentSize))
+	if err != nil {
+		return err
+	}
+
+	for _, docs := range batches {
+		cmd, err := u.encodeBatch(docs, desc)
+		if err != nil {
+			return err
+		}
+
+		u.batches = append(u.batches, cmd)
+	}
+
+	return nil
+}
+
+func (u *Update) encodeBatch(docs []bsonx.Doc, desc description.SelectedServer) (*WriteBatch, error) {
+	copyDocs := make([]bsonx.Doc, 0, len(docs)) // copy of all the documents
+	for _, doc := range docs {
+		newDoc := doc.Copy()
+		copyDocs = append(copyDocs, newDoc)
+	}
+
+	var options []bsonx.Elem
+	for _, opt := range u.Opts {
+		switch opt.Key {
+		case "upsert", "collation", "arrayFilters":
+			// options that are encoded on each individual document
+			for idx := range copyDocs {
+				copyDocs[idx] = append(copyDocs[idx], opt)
+			}
+		default:
+			options = append(options, opt)
+		}
+	}
+
+	command, err := encodeBatch(copyDocs, options, UpdateCommand, u.NS.Collection)
+	if err != nil {
+		return nil, err
+	}
+
+	return &WriteBatch{
+		&Write{
+			Clock:        u.Clock,
+			DB:           u.NS.DB,
+			Command:      command,
+			WriteConcern: u.WriteConcern,
+			Session:      u.Session,
+		},
+		len(docs),
+	}, nil
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (u *Update) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Update {
+	rdr, err := (&Write{}).Decode(desc, wm).Result()
+	if err != nil {
+		u.err = err
+		return u
+	}
+	return u.decode(desc, rdr)
+}
+
+func (u *Update) decode(desc description.SelectedServer, rdr bson.Raw) *Update {
+	u.err = bson.Unmarshal(rdr, &u.result)
+	return u
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (u *Update) Result() (result.Update, error) {
+	if u.err != nil {
+		return result.Update{}, u.err
+	}
+	return u.result, nil
+}
+
+// Err returns the error set on this command.
+func (u *Update) Err() error { return u.err }
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriter.
+func (u *Update) RoundTrip(
+	ctx context.Context,
+	desc description.SelectedServer,
+	rw wiremessage.ReadWriter,
+) (result.Update, error) {
+	if u.batches == nil {
+		err := u.encode(desc)
+		if err != nil {
+			return result.Update{}, err
+		}
+	}
+
+	r, batches, err := roundTripBatches(
+		ctx, desc, rw,
+		u.batches,
+		u.ContinueOnError,
+		u.Session,
+		UpdateCommand,
+	)
+
+	// if there are leftover batches, save them for retry
+	if batches != nil {
+		u.batches = batches
+	}
+
+	if err != nil {
+		return result.Update{}, err
+	}
+
+	return r.(result.Update), nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/command/write.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/write.go
new file mode 100644
index 0000000..3787fae
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/command/write.go
@@ -0,0 +1,245 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package command
+
+import (
+	"context"
+	"fmt"
+
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/mongo/driver/session"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Write represents a generic write database command.
+// This can be used to send arbitrary write commands to the database.
+type Write struct {
+	DB           string
+	Command      bsonx.Doc
+	WriteConcern *writeconcern.WriteConcern
+	Clock        *session.ClusterClock
+	Session      *session.Client
+
+	result bson.Raw
+	err    error
+}
+
+// Encode c as OP_MSG
+func (w *Write) encodeOpMsg(desc description.SelectedServer, cmd bsonx.Doc) (wiremessage.WireMessage, error) {
+	var arr bsonx.Arr
+	var identifier string
+
+	cmd, arr, identifier = opmsgRemoveArray(cmd)
+
+	msg := wiremessage.Msg{
+		MsgHeader: wiremessage.Header{RequestID: wiremessage.NextRequestID()},
+		Sections:  make([]wiremessage.Section, 0),
+	}
+
+	fullDocRdr, err := opmsgAddGlobals(cmd, w.DB, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	// type 0 doc
+	msg.Sections = append(msg.Sections, wiremessage.SectionBody{
+		PayloadType: wiremessage.SingleDocument,
+		Document:    fullDocRdr,
+	})
+
+	// type 1 doc
+	if identifier != "" {
+		docSequence, err := opmsgCreateDocSequence(arr, identifier)
+		if err != nil {
+			return nil, err
+		}
+
+		msg.Sections = append(msg.Sections, docSequence)
+	}
+
+	// flags
+	if !writeconcern.AckWrite(w.WriteConcern) {
+		msg.FlagBits |= wiremessage.MoreToCome
+	}
+
+	return msg, nil
+}
+
+// Encode w as OP_QUERY
+func (w *Write) encodeOpQuery(desc description.SelectedServer, cmd bsonx.Doc) (wiremessage.WireMessage, error) {
+	rdr, err := marshalCommand(cmd)
+	if err != nil {
+		return nil, err
+	}
+
+	query := wiremessage.Query{
+		MsgHeader:          wiremessage.Header{RequestID: wiremessage.NextRequestID()},
+		FullCollectionName: w.DB + ".$cmd",
+		Flags:              w.slaveOK(desc),
+		NumberToReturn:     -1,
+		Query:              rdr,
+	}
+
+	return query, nil
+}
+
+func (w *Write) slaveOK(desc description.SelectedServer) wiremessage.QueryFlag {
+	if desc.Kind == description.Single && desc.Server.Kind != description.Mongos {
+		return wiremessage.SlaveOK
+	}
+
+	return 0
+}
+
+func (w *Write) decodeOpReply(wm wiremessage.WireMessage) {
+	reply, ok := wm.(wiremessage.Reply)
+	if !ok {
+		w.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return
+	}
+	w.result, w.err = decodeCommandOpReply(reply)
+}
+
+func (w *Write) decodeOpMsg(wm wiremessage.WireMessage) {
+	msg, ok := wm.(wiremessage.Msg)
+	if !ok {
+		w.err = fmt.Errorf("unsupported response wiremessage type %T", wm)
+		return
+	}
+
+	w.result, w.err = decodeCommandOpMsg(msg)
+}
+
+// Encode will encode this command into a wire message for the given server description.
+func (w *Write) Encode(desc description.SelectedServer) (wiremessage.WireMessage, error) {
+	cmd := w.Command.Copy()
+	var err error
+	if w.Session != nil && w.Session.TransactionStarting() {
+		// Starting transactions have a read concern, even in writes.
+		cmd, err = addReadConcern(cmd, desc, nil, w.Session)
+		if err != nil {
+			return nil, err
+		}
+	}
+	cmd, err = addWriteConcern(cmd, w.WriteConcern)
+	if err != nil {
+		return nil, err
+	}
+
+	if !writeconcern.AckWrite(w.WriteConcern) {
+		// unack write with explicit session --> raise an error
+		// unack write with implicit session --> do not send session ID (implicit session shouldn't have been created
+		// in the first place)
+
+		if w.Session != nil && w.Session.SessionType == session.Explicit {
+			return nil, errors.New("explicit sessions cannot be used with unacknowledged writes")
+		}
+	} else {
+		// only encode session ID for acknowledged writes
+		cmd, err = addSessionFields(cmd, desc, w.Session)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if w.Session != nil && w.Session.RetryWrite {
+		cmd = append(cmd, bsonx.Elem{"txnNumber", bsonx.Int64(w.Session.TxnNumber)})
+	}
+
+	cmd = addClusterTime(cmd, desc, w.Session, w.Clock)
+
+	if desc.WireVersion == nil || desc.WireVersion.Max < wiremessage.OpmsgWireVersion {
+		return w.encodeOpQuery(desc, cmd)
+	}
+
+	return w.encodeOpMsg(desc, cmd)
+}
+
+// Decode will decode the wire message using the provided server description. Errors during decoding
+// are deferred until either the Result or Err methods are called.
+func (w *Write) Decode(desc description.SelectedServer, wm wiremessage.WireMessage) *Write {
+	switch wm.(type) {
+	case wiremessage.Reply:
+		w.decodeOpReply(wm)
+	default:
+		w.decodeOpMsg(wm)
+	}
+
+	if w.err != nil {
+		if _, ok := w.err.(Error); !ok {
+			return w
+		}
+	}
+
+	_ = updateClusterTimes(w.Session, w.Clock, w.result)
+
+	if writeconcern.AckWrite(w.WriteConcern) {
+		// don't update session operation time for unacknowledged write
+		_ = updateOperationTime(w.Session, w.result)
+	}
+	return w
+}
+
+// Result returns the result of a decoded wire message and server description.
+func (w *Write) Result() (bson.Raw, error) {
+	if w.err != nil {
+		return nil, w.err
+	}
+
+	return w.result, nil
+}
+
+// Err returns the error set on this command.
+func (w *Write) Err() error {
+	return w.err
+}
+
+// RoundTrip handles the execution of this command using the provided wiremessage.ReadWriteCloser.
+func (w *Write) RoundTrip(ctx context.Context, desc description.SelectedServer, rw wiremessage.ReadWriter) (bson.Raw, error) {
+	wm, err := w.Encode(desc)
+	if err != nil {
+		return nil, err
+	}
+
+	err = rw.WriteWireMessage(ctx, wm)
+	if err != nil {
+		if _, ok := err.(Error); ok {
+			return nil, err
+		}
+		// Connection errors are transient
+		return nil, Error{Message: err.Error(), Labels: []string{TransientTransactionError, NetworkError}}
+	}
+
+	if msg, ok := wm.(wiremessage.Msg); ok {
+		// don't expect response if using OP_MSG for an unacknowledged write
+		if msg.FlagBits&wiremessage.MoreToCome > 0 {
+			return nil, ErrUnacknowledgedWrite
+		}
+	}
+
+	wm, err = rw.ReadWireMessage(ctx)
+	if err != nil {
+		if _, ok := err.(Error); ok {
+			return nil, err
+		}
+		// Connection errors are transient
+		return nil, Error{Message: err.Error(), Labels: []string{TransientTransactionError, NetworkError}}
+	}
+
+	if w.Session != nil {
+		err = w.Session.UpdateUseTime()
+		if err != nil {
+			return nil, err
+		}
+	}
+	return w.Decode(desc, wm).Result()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/compressor/compression.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/compressor/compression.go
new file mode 100644
index 0000000..5ec2ea0
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/compressor/compression.go
@@ -0,0 +1,157 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package compressor
+
+import (
+	"bytes"
+	"compress/zlib"
+
+	"io"
+
+	"github.com/golang/snappy"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Compressor is the interface implemented by types that can compress and decompress wire messages. This is used
+// when sending and receiving messages to and from the server.
+type Compressor interface {
+	CompressBytes(src, dest []byte) ([]byte, error)
+	UncompressBytes(src, dest []byte) ([]byte, error)
+	CompressorID() wiremessage.CompressorID
+	Name() string
+}
+
+type writer struct {
+	buf []byte
+}
+
+// Write appends bytes to the writer
+func (w *writer) Write(p []byte) (n int, err error) {
+	index := len(w.buf)
+	if len(p) > cap(w.buf)-index {
+		buf := make([]byte, 2*cap(w.buf)+len(p))
+		copy(buf, w.buf)
+		w.buf = buf
+	}
+
+	w.buf = w.buf[:index+len(p)]
+	copy(w.buf[index:], p)
+	return len(p), nil
+}
+
+// SnappyCompressor uses the snappy method to compress data
+type SnappyCompressor struct {
+}
+
+// ZlibCompressor uses the zlib method to compress data
+type ZlibCompressor struct {
+	level      int
+	zlibWriter *zlib.Writer
+}
+
+// CompressBytes uses snappy to compress a slice of bytes.
+func (s *SnappyCompressor) CompressBytes(src, dest []byte) ([]byte, error) {
+	dest = dest[:0]
+	dest = snappy.Encode(dest, src)
+	return dest, nil
+}
+
+// UncompressBytes uses snappy to uncompress a slice of bytes.
+func (s *SnappyCompressor) UncompressBytes(src, dest []byte) ([]byte, error) {
+	var err error
+	dest, err = snappy.Decode(dest, src)
+	if err != nil {
+		return dest, err
+	}
+
+	return dest, nil
+}
+
+// CompressorID returns the ID for the snappy compressor.
+func (s *SnappyCompressor) CompressorID() wiremessage.CompressorID {
+	return wiremessage.CompressorSnappy
+}
+
+// Name returns the string name for the snappy compressor.
+func (s *SnappyCompressor) Name() string {
+	return "snappy"
+}
+
+// CompressBytes uses zlib to compress a slice of bytes.
+func (z *ZlibCompressor) CompressBytes(src, dest []byte) ([]byte, error) {
+	dest = dest[:0]
+	z.zlibWriter.Reset(&writer{
+		buf: dest,
+	})
+
+	_, err := z.zlibWriter.Write(src)
+	if err != nil {
+		_ = z.zlibWriter.Close()
+		return dest, err
+	}
+
+	err = z.zlibWriter.Close()
+	if err != nil {
+		return dest, err
+	}
+	return dest, nil
+}
+
+// UncompressBytes uses zlib to uncompress a slice of bytes. It assumes dest is empty and is the exact size that it
+// needs to be.
+func (z *ZlibCompressor) UncompressBytes(src, dest []byte) ([]byte, error) {
+	reader := bytes.NewReader(src)
+	zlibReader, err := zlib.NewReader(reader)
+
+	if err != nil {
+		return dest, err
+	}
+	defer func() {
+		_ = zlibReader.Close()
+	}()
+
+	_, err = io.ReadFull(zlibReader, dest)
+	if err != nil {
+		return dest, err
+	}
+
+	return dest, nil
+}
+
+// CompressorID returns the ID for the zlib compressor.
+func (z *ZlibCompressor) CompressorID() wiremessage.CompressorID {
+	return wiremessage.CompressorZLib
+}
+
+// Name returns the name for the zlib compressor.
+func (z *ZlibCompressor) Name() string {
+	return "zlib"
+}
+
+// CreateSnappy creates a snappy compressor
+func CreateSnappy() Compressor {
+	return &SnappyCompressor{}
+}
+
+// CreateZlib creates a zlib compressor
+func CreateZlib(level int) (Compressor, error) {
+	if level < 0 {
+		level = wiremessage.DefaultZlibLevel
+	}
+
+	var compressBuf bytes.Buffer
+	zlibWriter, err := zlib.NewWriterLevel(&compressBuf, level)
+
+	if err != nil {
+		return &ZlibCompressor{}, err
+	}
+
+	return &ZlibCompressor{
+		level:      level,
+		zlibWriter: zlibWriter,
+	}, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/addr.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/addr.go
new file mode 100644
index 0000000..4d8976d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/addr.go
@@ -0,0 +1,21 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+const defaultPort = "27017"
+
+// Addr is a network address. It can be either an IP address or a DNS name.
+type Addr string
+
+// Network is the network protcol for this address. In most cases this will be "tcp" or "unix".
+func (Addr) Network() string { return "" }
+
+// String is the canonical version of this address, e.g. localhost:27017, 1.2.3.4:27017, example.com:27017
+func (Addr) String() string { return "" }
+
+// Canonicalize creates a canonicalized address.
+func (Addr) Canonicalize() Addr { return Addr("") }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/command_metadata.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/command_metadata.go
new file mode 100644
index 0000000..222f6f7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/command_metadata.go
@@ -0,0 +1,28 @@
+package connection
+
+import "time"
+
+// commandMetadata contains metadata about a command sent to the server.
+type commandMetadata struct {
+	Name               string
+	Time               time.Time
+	Legacy             bool
+	FullCollectionName string
+}
+
+// createMetadata creates metadata for a command.
+func createMetadata(name string, legacy bool, fullCollName string) *commandMetadata {
+	return &commandMetadata{
+		Name:               name,
+		Time:               time.Now(),
+		Legacy:             legacy,
+		FullCollectionName: fullCollName,
+	}
+}
+
+// TimeDifference returns the difference between now and the time a command was sent in nanoseconds.
+func (cm *commandMetadata) TimeDifference() int64 {
+	t := time.Now()
+	duration := t.Sub(cm.Time)
+	return duration.Nanoseconds()
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/connection.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/connection.go
new file mode 100644
index 0000000..5e64037
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/connection.go
@@ -0,0 +1,851 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package connection contains the types for building and pooling connections that can speak the
+// MongoDB Wire Protocol. Since this low level library is meant to be used in the context of either
+// a driver or a server there are some extra identifiers on a connection so one can keep track of
+// what a connection is. This package purposefully hides the underlying network and abstracts the
+// writing to and reading from a connection to wireops.Op's. This package also provides types for
+// listening for and accepting Connections, as well as some types for handling connections and
+// proxying connections to another server.
+package connection
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/bsontype"
+	"github.com/mongodb/mongo-go-driver/event"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"github.com/mongodb/mongo-go-driver/x/bsonx/bsoncore"
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/compressor"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+var globalClientConnectionID uint64
+var emptyDoc bson.Raw
+
+func nextClientConnectionID() uint64 {
+	return atomic.AddUint64(&globalClientConnectionID, 1)
+}
+
+// Connection is used to read and write wire protocol messages to a network.
+type Connection interface {
+	WriteWireMessage(context.Context, wiremessage.WireMessage) error
+	ReadWireMessage(context.Context) (wiremessage.WireMessage, error)
+	Close() error
+	Expired() bool
+	Alive() bool
+	ID() string
+}
+
+// Dialer is used to make network connections.
+type Dialer interface {
+	DialContext(ctx context.Context, network, address string) (net.Conn, error)
+}
+
+// DialerFunc is a type implemented by functions that can be used as a Dialer.
+type DialerFunc func(ctx context.Context, network, address string) (net.Conn, error)
+
+// DialContext implements the Dialer interface.
+func (df DialerFunc) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
+	return df(ctx, network, address)
+}
+
+// DefaultDialer is the Dialer implementation that is used by this package. Changing this
+// will also change the Dialer used for this package. This should only be changed why all
+// of the connections being made need to use a different Dialer. Most of the time, using a
+// WithDialer option is more appropriate than changing this variable.
+var DefaultDialer Dialer = &net.Dialer{}
+
+// Handshaker is the interface implemented by types that can perform a MongoDB
+// handshake over a provided ReadWriter. This is used during connection
+// initialization.
+type Handshaker interface {
+	Handshake(context.Context, address.Address, wiremessage.ReadWriter) (description.Server, error)
+}
+
+// HandshakerFunc is an adapter to allow the use of ordinary functions as
+// connection handshakers.
+type HandshakerFunc func(context.Context, address.Address, wiremessage.ReadWriter) (description.Server, error)
+
+// Handshake implements the Handshaker interface.
+func (hf HandshakerFunc) Handshake(ctx context.Context, addr address.Address, rw wiremessage.ReadWriter) (description.Server, error) {
+	return hf(ctx, addr, rw)
+}
+
+type connection struct {
+	addr        address.Address
+	id          string
+	conn        net.Conn
+	compressBuf []byte                // buffer to compress messages
+	compressor  compressor.Compressor // use for compressing messages
+	// server can compress response with any compressor supported by driver
+	compressorMap    map[wiremessage.CompressorID]compressor.Compressor
+	commandMap       map[int64]*commandMetadata // map for monitoring commands sent to server
+	dead             bool
+	idleTimeout      time.Duration
+	idleDeadline     time.Time
+	lifetimeDeadline time.Time
+	cmdMonitor       *event.CommandMonitor
+	readTimeout      time.Duration
+	uncompressBuf    []byte // buffer to uncompress messages
+	writeTimeout     time.Duration
+	readBuf          []byte
+	writeBuf         []byte
+	wireMessageBuf   []byte // buffer to store uncompressed wire message before compressing
+}
+
+// New opens a connection to a given Addr
+//
+// The server description returned is nil if there was no handshaker provided.
+func New(ctx context.Context, addr address.Address, opts ...Option) (Connection, *description.Server, error) {
+	cfg, err := newConfig(opts...)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	nc, err := cfg.dialer.DialContext(ctx, addr.Network(), addr.String())
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if cfg.tlsConfig != nil {
+		tlsConfig := cfg.tlsConfig.Clone()
+		nc, err = configureTLS(ctx, nc, addr, tlsConfig)
+		if err != nil {
+			return nil, nil, err
+		}
+	}
+
+	var lifetimeDeadline time.Time
+	if cfg.lifeTimeout > 0 {
+		lifetimeDeadline = time.Now().Add(cfg.lifeTimeout)
+	}
+
+	id := fmt.Sprintf("%s[-%d]", addr, nextClientConnectionID())
+	compressorMap := make(map[wiremessage.CompressorID]compressor.Compressor)
+
+	for _, comp := range cfg.compressors {
+		compressorMap[comp.CompressorID()] = comp
+	}
+
+	c := &connection{
+		id:               id,
+		conn:             nc,
+		compressBuf:      make([]byte, 256),
+		compressorMap:    compressorMap,
+		commandMap:       make(map[int64]*commandMetadata),
+		addr:             addr,
+		idleTimeout:      cfg.idleTimeout,
+		lifetimeDeadline: lifetimeDeadline,
+		readTimeout:      cfg.readTimeout,
+		writeTimeout:     cfg.writeTimeout,
+		readBuf:          make([]byte, 256),
+		uncompressBuf:    make([]byte, 256),
+		writeBuf:         make([]byte, 0, 256),
+		wireMessageBuf:   make([]byte, 256),
+	}
+
+	c.bumpIdleDeadline()
+
+	var desc *description.Server
+	if cfg.handshaker != nil {
+		d, err := cfg.handshaker.Handshake(ctx, c.addr, c)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		if len(d.Compression) > 0 {
+		clientMethodLoop:
+			for _, comp := range cfg.compressors {
+				method := comp.Name()
+
+				for _, serverMethod := range d.Compression {
+					if method != serverMethod {
+						continue
+					}
+
+					c.compressor = comp // found matching compressor
+					break clientMethodLoop
+				}
+			}
+
+		}
+
+		desc = &d
+	}
+
+	c.cmdMonitor = cfg.cmdMonitor // attach the command monitor later to avoid monitoring auth
+	return c, desc, nil
+}
+
+func configureTLS(ctx context.Context, nc net.Conn, addr address.Address, config *TLSConfig) (net.Conn, error) {
+	if !config.InsecureSkipVerify {
+		hostname := addr.String()
+		colonPos := strings.LastIndex(hostname, ":")
+		if colonPos == -1 {
+			colonPos = len(hostname)
+		}
+
+		hostname = hostname[:colonPos]
+		config.ServerName = hostname
+	}
+
+	client := tls.Client(nc, config.Config)
+
+	errChan := make(chan error, 1)
+	go func() {
+		errChan <- client.Handshake()
+	}()
+
+	select {
+	case err := <-errChan:
+		if err != nil {
+			return nil, err
+		}
+	case <-ctx.Done():
+		return nil, errors.New("server connection cancelled/timeout during TLS handshake")
+	}
+	return client, nil
+}
+
+func (c *connection) Alive() bool {
+	return !c.dead
+}
+
+func (c *connection) Expired() bool {
+	now := time.Now()
+	if !c.idleDeadline.IsZero() && now.After(c.idleDeadline) {
+		return true
+	}
+
+	if !c.lifetimeDeadline.IsZero() && now.After(c.lifetimeDeadline) {
+		return true
+	}
+
+	return c.dead
+}
+
+func canCompress(cmd string) bool {
+	if cmd == "isMaster" || cmd == "saslStart" || cmd == "saslContinue" || cmd == "getnonce" || cmd == "authenticate" ||
+		cmd == "createUser" || cmd == "updateUser" || cmd == "copydbSaslStart" || cmd == "copydbgetnonce" || cmd == "copydb" {
+		return false
+	}
+	return true
+}
+
+func (c *connection) compressMessage(wm wiremessage.WireMessage) (wiremessage.WireMessage, error) {
+	var requestID int32
+	var responseTo int32
+	var origOpcode wiremessage.OpCode
+
+	switch converted := wm.(type) {
+	case wiremessage.Query:
+		firstElem, err := converted.Query.IndexErr(0)
+		if err != nil {
+			return wiremessage.Compressed{}, err
+		}
+
+		key := firstElem.Key()
+		if !canCompress(key) {
+			return wm, nil // return original message because this command can't be compressed
+		}
+		requestID = converted.MsgHeader.RequestID
+		origOpcode = wiremessage.OpQuery
+		responseTo = converted.MsgHeader.ResponseTo
+	case wiremessage.Msg:
+		firstElem, err := converted.Sections[0].(wiremessage.SectionBody).Document.IndexErr(0)
+		if err != nil {
+			return wiremessage.Compressed{}, err
+		}
+
+		key := firstElem.Key()
+		if !canCompress(key) {
+			return wm, nil
+		}
+
+		requestID = converted.MsgHeader.RequestID
+		origOpcode = wiremessage.OpMsg
+		responseTo = converted.MsgHeader.ResponseTo
+	}
+
+	// can compress
+	c.wireMessageBuf = c.wireMessageBuf[:0] // truncate
+	var err error
+	c.wireMessageBuf, err = wm.AppendWireMessage(c.wireMessageBuf)
+	if err != nil {
+		return wiremessage.Compressed{}, err
+	}
+
+	c.wireMessageBuf = c.wireMessageBuf[16:] // strip header
+	c.compressBuf = c.compressBuf[:0]
+	compressedBytes, err := c.compressor.CompressBytes(c.wireMessageBuf, c.compressBuf)
+	if err != nil {
+		return wiremessage.Compressed{}, err
+	}
+
+	compressedMessage := wiremessage.Compressed{
+		MsgHeader: wiremessage.Header{
+			// MessageLength and OpCode will be set when marshalling wire message by SetDefaults()
+			RequestID:  requestID,
+			ResponseTo: responseTo,
+		},
+		OriginalOpCode:    origOpcode,
+		UncompressedSize:  int32(len(c.wireMessageBuf)), // length of uncompressed message excluding MsgHeader
+		CompressorID:      wiremessage.CompressorID(c.compressor.CompressorID()),
+		CompressedMessage: compressedBytes,
+	}
+
+	return compressedMessage, nil
+}
+
+// returns []byte of uncompressed message with reconstructed header, original opcode, error
+func (c *connection) uncompressMessage(compressed wiremessage.Compressed) ([]byte, wiremessage.OpCode, error) {
+	// server doesn't guarantee the same compression method will be used each time so the CompressorID field must be
+	// used to find the correct method for uncompressing data
+	uncompressor := c.compressorMap[compressed.CompressorID]
+
+	// reset uncompressBuf
+	c.uncompressBuf = c.uncompressBuf[:0]
+	if int(compressed.UncompressedSize) > cap(c.uncompressBuf) {
+		c.uncompressBuf = make([]byte, 0, compressed.UncompressedSize)
+	}
+
+	uncompressedMessage, err := uncompressor.UncompressBytes(compressed.CompressedMessage, c.uncompressBuf)
+
+	if err != nil {
+		return nil, 0, err
+	}
+
+	origHeader := wiremessage.Header{
+		MessageLength: int32(len(uncompressedMessage)) + 16, // add 16 for original header
+		RequestID:     compressed.MsgHeader.RequestID,
+		ResponseTo:    compressed.MsgHeader.ResponseTo,
+	}
+
+	switch compressed.OriginalOpCode {
+	case wiremessage.OpReply:
+		origHeader.OpCode = wiremessage.OpReply
+	case wiremessage.OpMsg:
+		origHeader.OpCode = wiremessage.OpMsg
+	default:
+		return nil, 0, fmt.Errorf("opcode %s not implemented", compressed.OriginalOpCode)
+	}
+
+	var fullMessage []byte
+	fullMessage = origHeader.AppendHeader(fullMessage)
+	fullMessage = append(fullMessage, uncompressedMessage...)
+	return fullMessage, origHeader.OpCode, nil
+}
+
+func canMonitor(cmd string) bool {
+	if cmd == "authenticate" || cmd == "saslStart" || cmd == "saslContinue" || cmd == "getnonce" || cmd == "createUser" ||
+		cmd == "updateUser" || cmd == "copydbgetnonce" || cmd == "copydbsaslstart" || cmd == "copydb" {
+		return false
+	}
+
+	return true
+}
+
+func (c *connection) commandStartedEvent(ctx context.Context, wm wiremessage.WireMessage) error {
+	if c.cmdMonitor == nil || c.cmdMonitor.Started == nil {
+		return nil
+	}
+
+	startedEvent := &event.CommandStartedEvent{
+		ConnectionID: c.id,
+	}
+
+	var cmd bsonx.Doc
+	var err error
+	var legacy bool
+	var fullCollName string
+
+	var acknowledged bool
+	switch converted := wm.(type) {
+	case wiremessage.Query:
+		cmd, err = converted.CommandDocument()
+		if err != nil {
+			return err
+		}
+
+		acknowledged = converted.AcknowledgedWrite()
+		startedEvent.DatabaseName = converted.DatabaseName()
+		startedEvent.RequestID = int64(converted.MsgHeader.RequestID)
+		legacy = converted.Legacy()
+		fullCollName = converted.FullCollectionName
+	case wiremessage.Msg:
+		cmd, err = converted.GetMainDocument()
+		if err != nil {
+			return err
+		}
+
+		acknowledged = converted.AcknowledgedWrite()
+		arr, identifier, err := converted.GetSequenceArray()
+		if err != nil {
+			return err
+		}
+		if arr != nil {
+			cmd = cmd.Copy() // make copy to avoid changing original command
+			cmd = append(cmd, bsonx.Elem{identifier, bsonx.Array(arr)})
+		}
+
+		dbVal, err := cmd.LookupErr("$db")
+		if err != nil {
+			return err
+		}
+
+		startedEvent.DatabaseName = dbVal.StringValue()
+		startedEvent.RequestID = int64(converted.MsgHeader.RequestID)
+	case wiremessage.GetMore:
+		cmd = converted.CommandDocument()
+		startedEvent.DatabaseName = converted.DatabaseName()
+		startedEvent.RequestID = int64(converted.MsgHeader.RequestID)
+		acknowledged = true
+		legacy = true
+		fullCollName = converted.FullCollectionName
+	case wiremessage.KillCursors:
+		cmd = converted.CommandDocument()
+		startedEvent.DatabaseName = converted.DatabaseName
+		startedEvent.RequestID = int64(converted.MsgHeader.RequestID)
+		legacy = true
+	}
+
+	rawcmd, _ := cmd.MarshalBSON()
+	startedEvent.Command = rawcmd
+	startedEvent.CommandName = cmd[0].Key
+	if !canMonitor(startedEvent.CommandName) {
+		startedEvent.Command = emptyDoc
+	}
+
+	c.cmdMonitor.Started(ctx, startedEvent)
+
+	if !acknowledged {
+		if c.cmdMonitor.Succeeded == nil {
+			return nil
+		}
+
+		// unack writes must provide a CommandSucceededEvent with an { ok: 1 } reply
+		finishedEvent := event.CommandFinishedEvent{
+			DurationNanos: 0,
+			CommandName:   startedEvent.CommandName,
+			RequestID:     startedEvent.RequestID,
+			ConnectionID:  c.id,
+		}
+
+		c.cmdMonitor.Succeeded(ctx, &event.CommandSucceededEvent{
+			CommandFinishedEvent: finishedEvent,
+			Reply:                bsoncore.BuildDocument(nil, bsoncore.AppendInt32Element(nil, "ok", 1)),
+		})
+
+		return nil
+	}
+
+	c.commandMap[startedEvent.RequestID] = createMetadata(startedEvent.CommandName, legacy, fullCollName)
+	return nil
+}
+
+func processReply(reply bsonx.Doc) (bool, string) {
+	var success bool
+	var errmsg string
+	var errCode int32
+
+	for _, elem := range reply {
+		switch elem.Key {
+		case "ok":
+			switch elem.Value.Type() {
+			case bsontype.Int32:
+				if elem.Value.Int32() == 1 {
+					success = true
+				}
+			case bsontype.Int64:
+				if elem.Value.Int64() == 1 {
+					success = true
+				}
+			case bsontype.Double:
+				if elem.Value.Double() == 1 {
+					success = true
+				}
+			}
+		case "errmsg":
+			if str, ok := elem.Value.StringValueOK(); ok {
+				errmsg = str
+			}
+		case "code":
+			if c, ok := elem.Value.Int32OK(); ok {
+				errCode = c
+			}
+		}
+	}
+
+	if success {
+		return true, ""
+	}
+
+	fullErrMsg := fmt.Sprintf("Error code %d: %s", errCode, errmsg)
+	return false, fullErrMsg
+}
+
+func (c *connection) commandFinishedEvent(ctx context.Context, wm wiremessage.WireMessage) error {
+	if c.cmdMonitor == nil {
+		return nil
+	}
+
+	var reply bsonx.Doc
+	var requestID int64
+	var err error
+
+	switch converted := wm.(type) {
+	case wiremessage.Reply:
+		requestID = int64(converted.MsgHeader.ResponseTo)
+	case wiremessage.Msg:
+		requestID = int64(converted.MsgHeader.ResponseTo)
+	}
+	cmdMetadata := c.commandMap[requestID]
+	delete(c.commandMap, requestID)
+
+	switch converted := wm.(type) {
+	case wiremessage.Reply:
+		if cmdMetadata.Legacy {
+			reply, err = converted.GetMainLegacyDocument(cmdMetadata.FullCollectionName)
+		} else {
+			reply, err = converted.GetMainDocument()
+		}
+	case wiremessage.Msg:
+		reply, err = converted.GetMainDocument()
+	}
+	if err != nil {
+		return err
+	}
+
+	success, errmsg := processReply(reply)
+
+	if (success && c.cmdMonitor.Succeeded == nil) || (!success && c.cmdMonitor.Failed == nil) {
+		return nil
+	}
+
+	finishedEvent := event.CommandFinishedEvent{
+		DurationNanos: cmdMetadata.TimeDifference(),
+		CommandName:   cmdMetadata.Name,
+		RequestID:     requestID,
+		ConnectionID:  c.id,
+	}
+
+	if success {
+		if !canMonitor(finishedEvent.CommandName) {
+			successEvent := &event.CommandSucceededEvent{
+				Reply:                emptyDoc,
+				CommandFinishedEvent: finishedEvent,
+			}
+			c.cmdMonitor.Succeeded(ctx, successEvent)
+			return nil
+		}
+
+		// if response has type 1 document sequence, the sequence must be included as a BSON array in the event's reply.
+		if opmsg, ok := wm.(wiremessage.Msg); ok {
+			arr, identifier, err := opmsg.GetSequenceArray()
+			if err != nil {
+				return err
+			}
+			if arr != nil {
+				reply = reply.Copy() // make copy to avoid changing original command
+				reply = append(reply, bsonx.Elem{identifier, bsonx.Array(arr)})
+			}
+		}
+
+		replyraw, _ := reply.MarshalBSON()
+		successEvent := &event.CommandSucceededEvent{
+			Reply:                replyraw,
+			CommandFinishedEvent: finishedEvent,
+		}
+
+		c.cmdMonitor.Succeeded(ctx, successEvent)
+		return nil
+	}
+
+	failureEvent := &event.CommandFailedEvent{
+		Failure:              errmsg,
+		CommandFinishedEvent: finishedEvent,
+	}
+
+	c.cmdMonitor.Failed(ctx, failureEvent)
+	return nil
+}
+
+func (c *connection) WriteWireMessage(ctx context.Context, wm wiremessage.WireMessage) error {
+	var err error
+	if c.dead {
+		return Error{
+			ConnectionID: c.id,
+			message:      "connection is dead",
+		}
+	}
+
+	select {
+	case <-ctx.Done():
+		return Error{
+			ConnectionID: c.id,
+			Wrapped:      ctx.Err(),
+			message:      "failed to write",
+		}
+	default:
+	}
+
+	deadline := time.Time{}
+	if c.writeTimeout != 0 {
+		deadline = time.Now().Add(c.writeTimeout)
+	}
+
+	if dl, ok := ctx.Deadline(); ok && (deadline.IsZero() || dl.Before(deadline)) {
+		deadline = dl
+	}
+
+	if err := c.conn.SetWriteDeadline(deadline); err != nil {
+		return Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "failed to set write deadline",
+		}
+	}
+
+	// Truncate the write buffer
+	c.writeBuf = c.writeBuf[:0]
+
+	messageToWrite := wm
+	// Compress if possible
+	if c.compressor != nil {
+		compressed, err := c.compressMessage(wm)
+		if err != nil {
+			return Error{
+				ConnectionID: c.id,
+				Wrapped:      err,
+				message:      "unable to compress wire message",
+			}
+		}
+		messageToWrite = compressed
+	}
+
+	c.writeBuf, err = messageToWrite.AppendWireMessage(c.writeBuf)
+	if err != nil {
+		return Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "unable to encode wire message",
+		}
+	}
+
+	_, err = c.conn.Write(c.writeBuf)
+	if err != nil {
+		c.Close()
+		return Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "unable to write wire message to network",
+		}
+	}
+
+	c.bumpIdleDeadline()
+	err = c.commandStartedEvent(ctx, wm)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func (c *connection) ReadWireMessage(ctx context.Context) (wiremessage.WireMessage, error) {
+	if c.dead {
+		return nil, Error{
+			ConnectionID: c.id,
+			message:      "connection is dead",
+		}
+	}
+
+	select {
+	case <-ctx.Done():
+		// We close the connection because we don't know if there
+		// is an unread message on the wire.
+		c.Close()
+		return nil, Error{
+			ConnectionID: c.id,
+			Wrapped:      ctx.Err(),
+			message:      "failed to read",
+		}
+	default:
+	}
+
+	deadline := time.Time{}
+	if c.readTimeout != 0 {
+		deadline = time.Now().Add(c.readTimeout)
+	}
+
+	if ctxDL, ok := ctx.Deadline(); ok && (deadline.IsZero() || ctxDL.Before(deadline)) {
+		deadline = ctxDL
+	}
+
+	if err := c.conn.SetReadDeadline(deadline); err != nil {
+		return nil, Error{
+			ConnectionID: c.id,
+			Wrapped:      ctx.Err(),
+			message:      "failed to set read deadline",
+		}
+	}
+
+	var sizeBuf [4]byte
+	_, err := io.ReadFull(c.conn, sizeBuf[:])
+	if err != nil {
+		c.Close()
+		return nil, Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "unable to decode message length",
+		}
+	}
+
+	size := readInt32(sizeBuf[:], 0)
+
+	// Isn't the best reuse, but resizing a []byte to be larger
+	// is difficult.
+	if cap(c.readBuf) > int(size) {
+		c.readBuf = c.readBuf[:size]
+	} else {
+		c.readBuf = make([]byte, size)
+	}
+
+	c.readBuf[0], c.readBuf[1], c.readBuf[2], c.readBuf[3] = sizeBuf[0], sizeBuf[1], sizeBuf[2], sizeBuf[3]
+
+	_, err = io.ReadFull(c.conn, c.readBuf[4:])
+	if err != nil {
+		c.Close()
+		return nil, Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "unable to read full message",
+		}
+	}
+
+	hdr, err := wiremessage.ReadHeader(c.readBuf, 0)
+	if err != nil {
+		c.Close()
+		return nil, Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "unable to decode header",
+		}
+	}
+
+	messageToDecode := c.readBuf
+	opcodeToCheck := hdr.OpCode
+
+	if hdr.OpCode == wiremessage.OpCompressed {
+		var compressed wiremessage.Compressed
+		err := compressed.UnmarshalWireMessage(c.readBuf)
+		if err != nil {
+			defer c.Close()
+			return nil, Error{
+				ConnectionID: c.id,
+				Wrapped:      err,
+				message:      "unable to decode OP_COMPRESSED",
+			}
+		}
+
+		uncompressed, origOpcode, err := c.uncompressMessage(compressed)
+		if err != nil {
+			defer c.Close()
+			return nil, Error{
+				ConnectionID: c.id,
+				Wrapped:      err,
+				message:      "unable to uncompress message",
+			}
+		}
+		messageToDecode = uncompressed
+		opcodeToCheck = origOpcode
+	}
+
+	var wm wiremessage.WireMessage
+	switch opcodeToCheck {
+	case wiremessage.OpReply:
+		var r wiremessage.Reply
+		err := r.UnmarshalWireMessage(messageToDecode)
+		if err != nil {
+			c.Close()
+			return nil, Error{
+				ConnectionID: c.id,
+				Wrapped:      err,
+				message:      "unable to decode OP_REPLY",
+			}
+		}
+		wm = r
+	case wiremessage.OpMsg:
+		var reply wiremessage.Msg
+		err := reply.UnmarshalWireMessage(messageToDecode)
+		if err != nil {
+			c.Close()
+			return nil, Error{
+				ConnectionID: c.id,
+				Wrapped:      err,
+				message:      "unable to decode OP_MSG",
+			}
+		}
+		wm = reply
+	default:
+		c.Close()
+		return nil, Error{
+			ConnectionID: c.id,
+			message:      fmt.Sprintf("opcode %s not implemented", hdr.OpCode),
+		}
+	}
+
+	c.bumpIdleDeadline()
+	err = c.commandFinishedEvent(ctx, wm)
+	if err != nil {
+		return nil, err // TODO: do we care if monitoring fails?
+	}
+
+	return wm, nil
+}
+
+func (c *connection) bumpIdleDeadline() {
+	if c.idleTimeout > 0 {
+		c.idleDeadline = time.Now().Add(c.idleTimeout)
+	}
+}
+
+func (c *connection) Close() error {
+	c.dead = true
+	err := c.conn.Close()
+	if err != nil {
+		return Error{
+			ConnectionID: c.id,
+			Wrapped:      err,
+			message:      "failed to close net.Conn",
+		}
+	}
+
+	return nil
+}
+
+func (c *connection) ID() string {
+	return c.id
+}
+
+func (c *connection) initialize(ctx context.Context, appName string) error {
+	return nil
+}
+
+func readInt32(b []byte, pos int32) int32 {
+	return (int32(b[pos+0])) | (int32(b[pos+1]) << 8) | (int32(b[pos+2]) << 16) | (int32(b[pos+3]) << 24)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/error.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/error.go
new file mode 100644
index 0000000..eebca94
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/error.go
@@ -0,0 +1,41 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import "fmt"
+
+// Error represents a connection error.
+type Error struct {
+	ConnectionID string
+	Wrapped      error
+
+	message string
+}
+
+// Error implements the error interface.
+func (e Error) Error() string {
+	if e.Wrapped != nil {
+		return fmt.Sprintf("connection(%s) %s: %s", e.ConnectionID, e.message, e.Wrapped.Error())
+	}
+	return fmt.Sprintf("connection(%s) %s", e.ConnectionID, e.message)
+}
+
+// NetworkError represents an error that occurred while reading from or writing
+// to a network socket.
+type NetworkError struct {
+	ConnectionID string
+	Wrapped      error
+}
+
+func (ne NetworkError) Error() string {
+	return fmt.Sprintf("connection(%s): %s", ne.ConnectionID, ne.Wrapped.Error())
+}
+
+// PoolError is an error returned from a Pool method.
+type PoolError string
+
+func (pe PoolError) Error() string { return string(pe) }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/keepalive_300.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/keepalive_300.go
new file mode 100644
index 0000000..554e38d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/keepalive_300.go
@@ -0,0 +1,13 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build !go1.12
+
+package connection
+
+import "time"
+
+const tcpKeepalive = 300 * time.Second
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/keepalive_default.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/keepalive_default.go
new file mode 100644
index 0000000..9eeeb69
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/keepalive_default.go
@@ -0,0 +1,11 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+//+build go1.12
+
+package connection
+
+const tcpKeepalive = 0 // will be set by default on Go 1.12 and higher
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/listener.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/listener.go
new file mode 100644
index 0000000..76f4f7d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/listener.go
@@ -0,0 +1,27 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+// Listener is a generic mongodb network protocol listener. It can return connections
+// that speak the mongodb wire protocol.
+//
+// Multiple goroutines may invoke methods on a Listener simultaneously.
+//
+// TODO(GODRIVER-270): Implement this.
+type Listener interface {
+	// Accept waits for and returns the next Connection to the listener.
+	Accept() (Connection, error)
+
+	// Close closes the listener.
+	Close() error
+
+	// Addr returns the listener's network address.
+	Addr() Addr
+}
+
+// Listen creates a new listener on the provided network and address.
+func Listen(network, address string) (Listener, error) { return nil, nil }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/options.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/options.go
new file mode 100644
index 0000000..cd19931
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/options.go
@@ -0,0 +1,148 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import (
+	"net"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/event"
+	"github.com/mongodb/mongo-go-driver/x/network/compressor"
+)
+
+type config struct {
+	appName        string
+	connectTimeout time.Duration
+	dialer         Dialer
+	handshaker     Handshaker
+	idleTimeout    time.Duration
+	lifeTimeout    time.Duration
+	cmdMonitor     *event.CommandMonitor
+	readTimeout    time.Duration
+	writeTimeout   time.Duration
+	tlsConfig      *TLSConfig
+	compressors    []compressor.Compressor
+}
+
+func newConfig(opts ...Option) (*config, error) {
+	cfg := &config{
+		connectTimeout: 30 * time.Second,
+		dialer:         nil,
+		idleTimeout:    10 * time.Minute,
+		lifeTimeout:    30 * time.Minute,
+	}
+
+	for _, opt := range opts {
+		err := opt(cfg)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if cfg.dialer == nil {
+		cfg.dialer = &net.Dialer{
+			KeepAlive: tcpKeepalive,
+			Timeout:   cfg.connectTimeout,
+		}
+	}
+
+	return cfg, nil
+}
+
+// Option is used to configure a connection.
+type Option func(*config) error
+
+// WithAppName sets the application name which gets sent to MongoDB when it
+// first connects.
+func WithAppName(fn func(string) string) Option {
+	return func(c *config) error {
+		c.appName = fn(c.appName)
+		return nil
+	}
+}
+
+// WithCompressors sets the compressors that can be used for communication.
+func WithCompressors(fn func([]compressor.Compressor) []compressor.Compressor) Option {
+	return func(c *config) error {
+		c.compressors = fn(c.compressors)
+		return nil
+	}
+}
+
+// WithConnectTimeout configures the maximum amount of time a dial will wait for a
+// connect to complete. The default is 30 seconds.
+func WithConnectTimeout(fn func(time.Duration) time.Duration) Option {
+	return func(c *config) error {
+		c.connectTimeout = fn(c.connectTimeout)
+		return nil
+	}
+}
+
+// WithDialer configures the Dialer to use when making a new connection to MongoDB.
+func WithDialer(fn func(Dialer) Dialer) Option {
+	return func(c *config) error {
+		c.dialer = fn(c.dialer)
+		return nil
+	}
+}
+
+// WithHandshaker configures the Handshaker that wll be used to initialize newly
+// dialed connections.
+func WithHandshaker(fn func(Handshaker) Handshaker) Option {
+	return func(c *config) error {
+		c.handshaker = fn(c.handshaker)
+		return nil
+	}
+}
+
+// WithIdleTimeout configures the maximum idle time to allow for a connection.
+func WithIdleTimeout(fn func(time.Duration) time.Duration) Option {
+	return func(c *config) error {
+		c.idleTimeout = fn(c.idleTimeout)
+		return nil
+	}
+}
+
+// WithLifeTimeout configures the maximum life of a connection.
+func WithLifeTimeout(fn func(time.Duration) time.Duration) Option {
+	return func(c *config) error {
+		c.lifeTimeout = fn(c.lifeTimeout)
+		return nil
+	}
+}
+
+// WithReadTimeout configures the maximum read time for a connection.
+func WithReadTimeout(fn func(time.Duration) time.Duration) Option {
+	return func(c *config) error {
+		c.readTimeout = fn(c.readTimeout)
+		return nil
+	}
+}
+
+// WithWriteTimeout configures the maximum write time for a connection.
+func WithWriteTimeout(fn func(time.Duration) time.Duration) Option {
+	return func(c *config) error {
+		c.writeTimeout = fn(c.writeTimeout)
+		return nil
+	}
+}
+
+// WithTLSConfig configures the TLS options for a connection.
+func WithTLSConfig(fn func(*TLSConfig) *TLSConfig) Option {
+	return func(c *config) error {
+		c.tlsConfig = fn(c.tlsConfig)
+		return nil
+	}
+}
+
+// WithMonitor configures a event for command monitoring.
+func WithMonitor(fn func(*event.CommandMonitor) *event.CommandMonitor) Option {
+	return func(c *config) error {
+		c.cmdMonitor = fn(c.cmdMonitor)
+		return nil
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/pool.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/pool.go
new file mode 100644
index 0000000..3a25ad3
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/pool.go
@@ -0,0 +1,310 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/description"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+	"golang.org/x/sync/semaphore"
+)
+
// ErrPoolClosed is returned from an attempt to use a closed pool.
var ErrPoolClosed = PoolError("pool is closed")

// ErrSizeLargerThanCapacity is returned from an attempt to create a pool with a size
// larger than the capacity.
var ErrSizeLargerThanCapacity = PoolError("size is larger than capacity")

// ErrPoolConnected is returned from an attempt to connect an already connected pool.
var ErrPoolConnected = PoolError("pool is connected")

// ErrPoolDisconnected is returned from an attempt to disconnect an already disconnected
// or disconnecting pool.
var ErrPoolDisconnected = PoolError("pool is disconnected or disconnecting")

// ErrConnectionClosed is returned from an attempt to use an already closed connection.
var ErrConnectionClosed = Error{ConnectionID: "<closed>", message: "connection is closed"}
+
// These constants represent the connection states of a pool.
const (
	disconnected int32 = iota // initial state; also set after Disconnect completes
	disconnecting             // Disconnect is in progress
	connected                 // Connect succeeded and Disconnect has not been called
)
+
// Pool is used to pool Connections to a server.
type Pool interface {
	// Get must return a nil *description.Server if the returned connection is
	// not a newly dialed connection.
	Get(context.Context) (Connection, *description.Server, error)
	// Connect handles the initialization of a Pool and allow Connections to be
	// retrieved and pooled. Implementations must return an error if Connect is
	// called more than once before calling Disconnect.
	Connect(context.Context) error
	// Disconnect closes connections managed by this Pool. Implementations must
	// either wait until all of the connections in use have been returned and
	// closed or the context expires before returning. If the context expires
	// via cancellation, deadline, timeout, or some other manner, implementations
	// must close the in use connections. If this method returns with no errors,
	// all connections managed by this pool must be closed. Calling Disconnect
	// multiple times after a single Connect call must result in an error.
	Disconnect(context.Context) error
	// Drain lazily invalidates all connections currently managed by the pool.
	Drain() error
}
+
// pool is the default Pool implementation. Idle connections are kept in the
// buffered conns channel; connections handed out to callers are tracked in
// inflight so Disconnect can force-close them.
type pool struct {
	address    address.Address
	opts       []Option
	conns      chan *pooledConnection // idle connections; buffer size == pool "size"
	generation uint64                 // bumped by Connect/Drain to expire older connections
	sem        *semaphore.Weighted    // bounds total (idle + in-use) connections at capacity
	connected  int32                  // one of disconnected/disconnecting/connected; atomic
	nextid     uint64                 // monotonically increasing id source for pooledConnections
	capacity   uint64
	inflight   map[uint64]*pooledConnection // guarded by the embedded Mutex

	sync.Mutex
}
+
+// NewPool creates a new pool that will hold size number of idle connections
+// and will create a max of capacity connections. It will use the provided
+// options.
+func NewPool(addr address.Address, size, capacity uint64, opts ...Option) (Pool, error) {
+	if size > capacity {
+		return nil, ErrSizeLargerThanCapacity
+	}
+	p := &pool{
+		address:    addr,
+		conns:      make(chan *pooledConnection, size),
+		generation: 0,
+		sem:        semaphore.NewWeighted(int64(capacity)),
+		connected:  disconnected,
+		capacity:   capacity,
+		inflight:   make(map[uint64]*pooledConnection),
+		opts:       opts,
+	}
+	return p, nil
+}
+
// Drain lazily invalidates every connection created before this call by
// bumping the pool's generation counter; older connections report themselves
// as expired (see isExpired) and are closed on next use or return.
func (p *pool) Drain() error {
	atomic.AddUint64(&p.generation, 1)
	return nil
}
+
// Connect transitions the pool from disconnected to connected. It returns
// ErrPoolConnected if the pool is not currently disconnected. The generation
// bump invalidates any connections left over from a previous connect cycle.
func (p *pool) Connect(ctx context.Context) error {
	if !atomic.CompareAndSwapInt32(&p.connected, disconnected, connected) {
		return ErrPoolConnected
	}
	atomic.AddUint64(&p.generation, 1)
	return nil
}
+
// Disconnect closes the idle connections, waits (bounded by ctx) for in-use
// connections to be returned, and force-closes any that remain. It returns
// ErrPoolDisconnected unless the pool was in the connected state.
func (p *pool) Disconnect(ctx context.Context) error {
	if !atomic.CompareAndSwapInt32(&p.connected, connected, disconnecting) {
		return ErrPoolDisconnected
	}

	// We first clear out the idle connections, then we attempt to acquire the entire capacity
	// semaphore. If the context is either cancelled, the deadline expires, or there is a timeout
	// the semaphore acquire method will return an error. If that happens, we will aggressively
	// close the remaining open connections. If we were able to successfully acquire the semaphore,
	// then all of the in flight connections have been closed and we release the semaphore.
loop:
	for {
		select {
		case pc := <-p.conns:
			// This error would be overwritten by the semaphore
			_ = p.closeConnection(pc)
		default:
			break loop
		}
	}
	err := p.sem.Acquire(ctx, int64(p.capacity))
	if err != nil {
		p.Lock()
		// We copy the remaining connections to close into a slice, then
		// iterate the slice to do the closing. This allows us to use a single
		// function to actually clean up and close connections at the expense of
		// a double iteration in the worst case.
		toClose := make([]*pooledConnection, 0, len(p.inflight))
		for _, pc := range p.inflight {
			toClose = append(toClose, pc)
		}
		p.Unlock()
		for _, pc := range toClose {
			_ = pc.Close()
		}
	} else {
		p.sem.Release(int64(p.capacity))
	}
	atomic.StoreInt32(&p.connected, disconnected)
	return nil
}
+
+func (p *pool) Get(ctx context.Context) (Connection, *description.Server, error) {
+	if atomic.LoadInt32(&p.connected) != connected {
+		return nil, nil, ErrPoolClosed
+	}
+
+	err := p.sem.Acquire(ctx, 1)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return p.get(ctx)
+}
+
// get hands out an idle connection when one is ready, otherwise dials a new
// one. The caller must already hold one semaphore permit; on every error path
// the permit is released here, and on success it is transferred to the
// returned acquired wrapper (released by acquired.Close).
func (p *pool) get(ctx context.Context) (Connection, *description.Server, error) {
	g := atomic.LoadUint64(&p.generation)
	select {
	case c := <-p.conns:
		// Expired idle connections are discarded (closed off the hot path)
		// and we retry, keeping the permit we already hold.
		if c.Expired() {
			go p.closeConnection(c)
			return p.get(ctx)
		}

		return &acquired{Connection: c, sem: p.sem}, nil, nil
	case <-ctx.Done():
		p.sem.Release(1)
		return nil, nil, ctx.Err()
	default:
		// No idle connection available right now: dial a fresh one.
		c, desc, err := New(ctx, p.address, p.opts...)
		if err != nil {
			p.sem.Release(1)
			return nil, nil, err
		}

		pc := &pooledConnection{
			Connection: c,
			p:          p,
			generation: g,
			id:         atomic.AddUint64(&p.nextid, 1),
		}
		p.Lock()
		// The pool may have been disconnected while we were dialing.
		if atomic.LoadInt32(&p.connected) != connected {
			p.Unlock()
			p.sem.Release(1)
			p.closeConnection(pc)
			return nil, nil, ErrPoolClosed
		}
		defer p.Unlock()
		p.inflight[pc.id] = pc
		return &acquired{Connection: pc, sem: p.sem}, desc, nil
	}
}
+
// closeConnection closes pc's underlying connection exactly once and removes
// it from the inflight map. Concurrent or repeated calls are no-ops thanks to
// the CAS on pc.closed.
func (p *pool) closeConnection(pc *pooledConnection) error {
	if !atomic.CompareAndSwapInt32(&pc.closed, 0, 1) {
		return nil
	}
	p.Lock()
	delete(p.inflight, pc.id)
	p.Unlock()
	return pc.Connection.Close()
}
+
// returnConnection puts pc back on the idle channel. If the pool is no longer
// connected, pc is expired, or the idle channel is full, the connection is
// closed instead (the select's default makes the hand-back non-blocking).
func (p *pool) returnConnection(pc *pooledConnection) error {
	if atomic.LoadInt32(&p.connected) != connected || pc.Expired() {
		return p.closeConnection(pc)
	}

	select {
	case p.conns <- pc:
		return nil
	default:
		return p.closeConnection(pc)
	}
}
+
// isExpired reports whether a connection created at the given generation
// predates the pool's current generation (i.e. Drain or Connect has been
// called since it was dialed).
func (p *pool) isExpired(generation uint64) bool {
	return generation < atomic.LoadUint64(&p.generation)
}
+
// pooledConnection decorates a Connection with the bookkeeping the owning
// pool needs: the generation it was dialed under, a unique id for the
// inflight map, and a closed flag toggled atomically by closeConnection.
type pooledConnection struct {
	Connection
	p          *pool
	generation uint64
	id         uint64
	closed     int32 // 0 = open, 1 = closed; set via CAS in pool.closeConnection
}
+
// Close returns the connection to its pool rather than closing it directly;
// the pool decides whether to keep it idle or really close it.
func (pc *pooledConnection) Close() error {
	return pc.p.returnConnection(pc)
}
+
// Expired reports whether either the underlying connection considers itself
// expired or the pool's generation has moved past this connection's.
func (pc *pooledConnection) Expired() bool {
	return pc.Connection.Expired() || pc.p.isExpired(pc.generation)
}
+
// acquired is the handle returned to Pool.Get callers. It guards the wrapped
// Connection with a mutex so Close can nil it out safely, and holds the
// semaphore permit that Close releases.
type acquired struct {
	Connection

	sem *semaphore.Weighted // permit taken in Pool.Get, released in Close
	sync.Mutex
}
+
// WriteWireMessage forwards to the wrapped connection, returning
// ErrConnectionClosed if this handle has already been closed.
func (a *acquired) WriteWireMessage(ctx context.Context, wm wiremessage.WireMessage) error {
	a.Lock()
	defer a.Unlock()
	if a.Connection == nil {
		return ErrConnectionClosed
	}
	return a.Connection.WriteWireMessage(ctx, wm)
}
+
// ReadWireMessage forwards to the wrapped connection, returning
// ErrConnectionClosed if this handle has already been closed.
func (a *acquired) ReadWireMessage(ctx context.Context) (wiremessage.WireMessage, error) {
	a.Lock()
	defer a.Unlock()
	if a.Connection == nil {
		return nil, ErrConnectionClosed
	}
	return a.Connection.ReadWireMessage(ctx)
}
+
// Close hands the connection back (via the wrapped Close), releases the
// semaphore permit taken in Pool.Get, and disables this handle. Subsequent
// calls are no-ops returning nil.
func (a *acquired) Close() error {
	a.Lock()
	defer a.Unlock()
	if a.Connection == nil {
		return nil
	}
	err := a.Connection.Close()
	a.sem.Release(1)
	a.Connection = nil
	return err
}
+
// Expired reports the wrapped connection's expiry; a closed handle is always
// considered expired.
func (a *acquired) Expired() bool {
	a.Lock()
	defer a.Unlock()
	if a.Connection == nil {
		return true
	}
	return a.Connection.Expired()
}
+
// Alive reports the wrapped connection's liveness; a closed handle is never
// alive.
func (a *acquired) Alive() bool {
	a.Lock()
	defer a.Unlock()
	if a.Connection == nil {
		return false
	}
	return a.Connection.Alive()
}
+
// ID returns the wrapped connection's identifier, or the sentinel "<closed>"
// once the handle has been closed.
func (a *acquired) ID() string {
	a.Lock()
	defer a.Unlock()
	if a.Connection == nil {
		return "<closed>"
	}
	return a.Connection.ID()
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/proxy.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/proxy.go
new file mode 100644
index 0000000..62e18d2
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/proxy.go
@@ -0,0 +1,26 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import "github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+
// Proxy implements a MongoDB proxy. It will use the given pool to connect to a
// MongoDB server and proxy the traffic between connections it is given and the
// server. It will pass each of the wireops it reads from the handled connection
// to a Processor. If an error is returned from the processor, the wireop will
// not be forwarded onto the server. If there is not an error the returned message
// will be passed onto the server. If both the return message and the error are nil,
// the original wiremessage will be passed onto the server.
//
// TODO(GODRIVER-268): Implement this.
type Proxy struct {
	Processor wiremessage.Transformer // unused until the TODO above is implemented
	Pool      Pool                    // unused until the TODO above is implemented
}
+
+// HandleConnection implements the Handler interface.
+func (*Proxy) HandleConnection(Connection) { return }
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/server.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/server.go
new file mode 100644
index 0000000..033d0d7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/server.go
@@ -0,0 +1,40 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import "context"
+
// Server is used to handle incoming Connections. It handles the boilerplate of accepting a
// Connection and cleaning it up after running a Handler. This also makes it easier to build
// higher level processors, like proxies, by handling the life cycle of the underlying
// connection.
//
// TODO(GODRIVER-269): Implement this. All methods below are stubs that
// currently return nil.
type Server struct {
	Addr    Addr
	Handler Handler
}
+
// ListenAndServe listens on the network address srv.Addr and calls Serve to
// handle requests on incoming connections. If srv.Addr is blank, "localhost:27017"
// is used.
//
// NOTE: currently an unimplemented stub that always returns nil (GODRIVER-269).
func (*Server) ListenAndServe() error { return nil }
+
// Serve accepts incoming connections on the Listener l, creating a new service
// goroutine for each. The service goroutines call srv.Handler and do no processing
// beforehand. When srv.Handler returns, the connection is closed.
//
// NOTE: currently an unimplemented stub that always returns nil (GODRIVER-269).
func (*Server) Serve(Listener) error { return nil }
+
// Shutdown gracefully shuts down the server by closing the active listeners. Shutdown
// does not handle or wait for all open connections to close and return before returning.
//
// NOTE: currently an unimplemented stub that always returns nil (GODRIVER-269).
func (*Server) Shutdown(context.Context) error { return nil }
+
// Handler handles an individual Connection. Returning signals that the Connection
// is no longer needed and can be closed.
type Handler interface {
	HandleConnection(Connection)
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/tlsconfig.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/tlsconfig.go
new file mode 100644
index 0000000..0536418
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/tlsconfig.go
@@ -0,0 +1,237 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import (
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/asn1"
+	"encoding/hex"
+	"encoding/pem"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"strings"
+)
+
// TLSConfig contains options for configuring a TLS connection to the server.
// It embeds *tls.Config so the standard options remain directly accessible.
type TLSConfig struct {
	*tls.Config
	// clientCertPass, when set, supplies the password used to decrypt an
	// encrypted client-certificate private key (see AddClientCertFromFile).
	clientCertPass func() string
}
+
+// NewTLSConfig creates a new TLSConfig.
+func NewTLSConfig() *TLSConfig {
+	cfg := &TLSConfig{}
+	cfg.Config = new(tls.Config)
+
+	return cfg
+}
+
// SetClientCertDecryptPassword sets a function to retrieve the decryption password
// necessary to read a certificate. This is a function instead of a string to
// provide greater flexibility when deciding how to retrieve and store the password.
func (c *TLSConfig) SetClientCertDecryptPassword(f func() string) {
	c.clientCertPass = f
}
+
// SetInsecure sets whether the client should verify the server's certificate
// chain and hostnames. Passing true disables verification (maps directly to
// tls.Config.InsecureSkipVerify).
func (c *TLSConfig) SetInsecure(allow bool) {
	c.InsecureSkipVerify = allow
}
+
// AddCACertFromFile adds a root CA certificate to the configuration given a path
// to the containing file. The file must contain exactly one PEM CERTIFICATE
// block (enforced by loadCert); the RootCAs pool is created lazily on first use.
func (c *TLSConfig) AddCACertFromFile(file string) error {
	data, err := ioutil.ReadFile(file)
	if err != nil {
		return err
	}

	certBytes, err := loadCert(data)
	if err != nil {
		return err
	}

	cert, err := x509.ParseCertificate(certBytes)
	if err != nil {
		return err
	}

	if c.RootCAs == nil {
		c.RootCAs = x509.NewCertPool()
	}

	c.RootCAs.AddCert(cert)

	return nil
}
+
+// AddClientCertFromFile adds a client certificate to the configuration given a path to the
+// containing file and returns the certificate's subject name.
+func (c *TLSConfig) AddClientCertFromFile(clientFile string) (string, error) {
+	data, err := ioutil.ReadFile(clientFile)
+	if err != nil {
+		return "", err
+	}
+
+	var currentBlock *pem.Block
+	var certBlock, certDecodedBlock, keyBlock []byte
+
+	remaining := data
+	start := 0
+	for {
+		currentBlock, remaining = pem.Decode(remaining)
+		if currentBlock == nil {
+			break
+		}
+
+		if currentBlock.Type == "CERTIFICATE" {
+			certBlock = data[start : len(data)-len(remaining)]
+			certDecodedBlock = currentBlock.Bytes
+			start += len(certBlock)
+		} else if strings.HasSuffix(currentBlock.Type, "PRIVATE KEY") {
+			if c.clientCertPass != nil && x509.IsEncryptedPEMBlock(currentBlock) {
+				var encoded bytes.Buffer
+				buf, err := x509.DecryptPEMBlock(currentBlock, []byte(c.clientCertPass()))
+				if err != nil {
+					return "", err
+				}
+
+				pem.Encode(&encoded, &pem.Block{Type: currentBlock.Type, Bytes: buf})
+				keyBlock = encoded.Bytes()
+				start = len(data) - len(remaining)
+			} else {
+				keyBlock = data[start : len(data)-len(remaining)]
+				start += len(keyBlock)
+			}
+		}
+	}
+	if len(certBlock) == 0 {
+		return "", fmt.Errorf("failed to find CERTIFICATE")
+	}
+	if len(keyBlock) == 0 {
+		return "", fmt.Errorf("failed to find PRIVATE KEY")
+	}
+
+	cert, err := tls.X509KeyPair(certBlock, keyBlock)
+	if err != nil {
+		return "", err
+	}
+
+	c.Certificates = append(c.Certificates, cert)
+
+	// The documentation for the tls.X509KeyPair indicates that the Leaf certificate is not
+	// retained.
+	crt, err := x509.ParseCertificate(certDecodedBlock)
+	if err != nil {
+		return "", err
+	}
+
+	return x509CertSubject(crt), nil
+}
+
// loadCert extracts the DER bytes of the single CERTIFICATE block contained in
// the given PEM data. It returns an error if the data holds no CERTIFICATE
// block or more than one.
//
// The previous version used the redundant `data == nil || len(data) == 0`
// check (staticcheck S1009) and, because its loop exited as soon as the first
// CERTIFICATE was found, its "multiple CERTIFICATE sections" branch was
// unreachable. This version scans every PEM block so that check can fire.
func loadCert(data []byte) ([]byte, error) {
	var certBlock *pem.Block

	for len(data) > 0 {
		block, rest := pem.Decode(data)
		if block == nil {
			// Trailing non-PEM data after a certificate is tolerated, but a
			// file with no parseable PEM at all is invalid.
			if certBlock == nil {
				return nil, errors.New("invalid .pem file")
			}
			break
		}

		if block.Type == "CERTIFICATE" {
			if certBlock != nil {
				return nil, errors.New("multiple CERTIFICATE sections in .pem file")
			}
			certBlock = block
		}

		data = rest
	}

	if certBlock == nil {
		return nil, errors.New(".pem file must have both a CERTIFICATE and an RSA PRIVATE KEY section")
	}
	return certBlock.Bytes, nil
}
+
// Because the functionality to convert a pkix.Name to a string wasn't added until Go 1.10, we
// need to copy the implementation (along with the attributeTypeNames map below).
//
// x509CertSubject renders cert.Subject as an RFC 2253-style distinguished
// name string (RDNs in reverse order, values escaped). Do not modify this
// logic independently of the upstream Go implementation it mirrors.
func x509CertSubject(cert *x509.Certificate) string {
	r := cert.Subject.ToRDNSequence()

	s := ""
	for i := 0; i < len(r); i++ {
		rdn := r[len(r)-1-i]
		if i > 0 {
			s += ","
		}
		for j, tv := range rdn {
			if j > 0 {
				s += "+"
			}

			oidString := tv.Type.String()
			typeName, ok := attributeTypeNames[oidString]
			if !ok {
				// Unknown OID: emit it with a hex-encoded DER value.
				derBytes, err := asn1.Marshal(tv.Value)
				if err == nil {
					s += oidString + "=#" + hex.EncodeToString(derBytes)
					continue // No value escaping necessary.
				}

				typeName = oidString
			}

			valueString := fmt.Sprint(tv.Value)
			escaped := make([]rune, 0, len(valueString))

			for k, c := range valueString {
				escape := false

				switch c {
				case ',', '+', '"', '\\', '<', '>', ';':
					escape = true

				case ' ':
					// Only leading/trailing spaces need escaping.
					escape = k == 0 || k == len(valueString)-1

				case '#':
					escape = k == 0
				}

				if escape {
					escaped = append(escaped, '\\', c)
				} else {
					escaped = append(escaped, c)
				}
			}

			s += typeName + "=" + string(escaped)
		}
	}

	return s
}
+
// attributeTypeNames maps well-known X.500 attribute OIDs to their short
// names, mirroring the table used by pkix.Name.String in Go 1.10+.
var attributeTypeNames = map[string]string{
	"2.5.4.6":  "C",
	"2.5.4.10": "O",
	"2.5.4.11": "OU",
	"2.5.4.3":  "CN",
	"2.5.4.5":  "SERIALNUMBER",
	"2.5.4.7":  "L",
	"2.5.4.8":  "ST",
	"2.5.4.9":  "STREET",
	"2.5.4.17": "POSTALCODE",
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/tlsconfig_clone_17.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/tlsconfig_clone_17.go
new file mode 100644
index 0000000..a753dfd
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connection/tlsconfig_clone_17.go
@@ -0,0 +1,42 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connection
+
+import "crypto/tls"
+
// Clone returns a shallow clone of c. It is safe to clone a Config that is being
// used concurrently by a TLS client or server. The clientCertPass callback is
// shared, not copied.
func (c *TLSConfig) Clone() *TLSConfig {
	cfg := cloneconfig(c.Config)
	return &TLSConfig{cfg, c.clientCertPass}
}
+
// cloneconfig copies a tls.Config field by field, replicating the field set of
// the Go-1.7-era tls.Config (the file's _17 suffix suggests it targets that
// version — TODO confirm via build tags outside this view). Fields introduced
// in later Go releases are intentionally not copied here.
func cloneconfig(c *tls.Config) *tls.Config {
	return &tls.Config{
		Rand:                        c.Rand,
		Time:                        c.Time,
		Certificates:                c.Certificates,
		NameToCertificate:           c.NameToCertificate,
		GetCertificate:              c.GetCertificate,
		RootCAs:                     c.RootCAs,
		NextProtos:                  c.NextProtos,
		ServerName:                  c.ServerName,
		ClientAuth:                  c.ClientAuth,
		ClientCAs:                   c.ClientCAs,
		InsecureSkipVerify:          c.InsecureSkipVerify,
		CipherSuites:                c.CipherSuites,
		PreferServerCipherSuites:    c.PreferServerCipherSuites,
		SessionTicketsDisabled:      c.SessionTicketsDisabled,
		SessionTicketKey:            c.SessionTicketKey,
		ClientSessionCache:          c.ClientSessionCache,
		MinVersion:                  c.MinVersion,
		MaxVersion:                  c.MaxVersion,
		CurvePreferences:            c.CurvePreferences,
		DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
		Renegotiation:               c.Renegotiation,
	}
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/connstring/connstring.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/connstring/connstring.go
new file mode 100644
index 0000000..263a84c
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/connstring/connstring.go
@@ -0,0 +1,773 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package connstring
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/internal"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/network/wiremessage"
+)
+
+// Parse parses the provided uri and returns a URI object.
+func Parse(s string) (ConnString, error) {
+	var p parser
+	err := p.parse(s)
+	if err != nil {
+		err = internal.WrapErrorf(err, "error parsing uri (%s)", s)
+	}
+	return p.ConnString, err
+}
+
// ConnString represents a connection string to mongodb. Each option field is
// paired with a *Set bool recording whether the option was explicitly present
// in the URI (so callers can distinguish "unset" from a zero value).
type ConnString struct {
	Original                           string
	AppName                            string
	AuthMechanism                      string
	AuthMechanismProperties            map[string]string
	AuthSource                         string
	Compressors                        []string
	Connect                            ConnectMode
	ConnectSet                         bool
	ConnectTimeout                     time.Duration
	ConnectTimeoutSet                  bool
	Database                           string
	HeartbeatInterval                  time.Duration
	HeartbeatIntervalSet               bool
	Hosts                              []string
	J                                  bool
	JSet                               bool
	LocalThreshold                     time.Duration
	LocalThresholdSet                  bool
	MaxConnIdleTime                    time.Duration
	MaxConnIdleTimeSet                 bool
	MaxPoolSize                        uint16
	MaxPoolSizeSet                     bool
	Password                           string
	PasswordSet                        bool
	ReadConcernLevel                   string
	ReadPreference                     string
	ReadPreferenceTagSets              []map[string]string
	RetryWrites                        bool
	RetryWritesSet                     bool
	MaxStaleness                       time.Duration
	MaxStalenessSet                    bool
	ReplicaSet                         string
	ServerSelectionTimeout             time.Duration
	ServerSelectionTimeoutSet          bool
	SocketTimeout                      time.Duration
	SocketTimeoutSet                   bool
	SSL                                bool
	SSLSet                             bool
	SSLClientCertificateKeyFile        string
	SSLClientCertificateKeyFileSet     bool
	SSLClientCertificateKeyPassword    func() string
	SSLClientCertificateKeyPasswordSet bool
	SSLInsecure                        bool
	SSLInsecureSet                     bool
	SSLCaFile                          string
	SSLCaFileSet                       bool
	WString                            string
	WNumber                            int
	WNumberSet                         bool
	Username                           string
	ZlibLevel                          int

	WTimeout              time.Duration
	WTimeoutSet           bool
	WTimeoutSetFromOption bool

	Options        map[string][]string
	UnknownOptions map[string][]string
}
+
// String implements fmt.Stringer, returning the original, unparsed connection
// string.
func (u *ConnString) String() string {
	return u.Original
}
+
// ConnectMode informs the driver on how to connect
// to the server.
type ConnectMode uint8

// ConnectMode constants.
const (
	AutoConnect   ConnectMode = iota // default mode (exact semantics handled by the driver, not visible here)
	SingleConnect                    // set via the "connect=direct" style option — TODO confirm against addOption
)
+
// parser embeds the ConnString that parse populates field by field.
type parser struct {
	ConnString
}
+
+func (p *parser) parse(original string) error {
+	p.Original = original
+	uri := original
+
+	var err error
+	var isSRV bool
+	if strings.HasPrefix(uri, "mongodb+srv://") {
+		isSRV = true
+		// remove the scheme
+		uri = uri[14:]
+	} else if strings.HasPrefix(uri, "mongodb://") {
+		// remove the scheme
+		uri = uri[10:]
+	} else {
+		return fmt.Errorf("scheme must be \"mongodb\" or \"mongodb+srv\"")
+	}
+
+	if idx := strings.Index(uri, "@"); idx != -1 {
+		userInfo := uri[:idx]
+		uri = uri[idx+1:]
+
+		username := userInfo
+		var password string
+
+		if idx := strings.Index(userInfo, ":"); idx != -1 {
+			username = userInfo[:idx]
+			password = userInfo[idx+1:]
+			p.PasswordSet = true
+		}
+
+		if len(username) > 1 {
+			if strings.Contains(username, "/") {
+				return fmt.Errorf("unescaped slash in username")
+			}
+		}
+
+		p.Username, err = url.QueryUnescape(username)
+		if err != nil {
+			return internal.WrapErrorf(err, "invalid username")
+		}
+		if len(password) > 1 {
+			if strings.Contains(password, ":") {
+				return fmt.Errorf("unescaped colon in password")
+			}
+			if strings.Contains(password, "/") {
+				return fmt.Errorf("unescaped slash in password")
+			}
+			p.Password, err = url.QueryUnescape(password)
+			if err != nil {
+				return internal.WrapErrorf(err, "invalid password")
+			}
+		}
+	}
+
+	// fetch the hosts field
+	hosts := uri
+	if idx := strings.IndexAny(uri, "/?@"); idx != -1 {
+		if uri[idx] == '@' {
+			return fmt.Errorf("unescaped @ sign in user info")
+		}
+		if uri[idx] == '?' {
+			return fmt.Errorf("must have a / before the query ?")
+		}
+		hosts = uri[:idx]
+	}
+
+	var connectionArgsFromTXT []string
+	parsedHosts := strings.Split(hosts, ",")
+
+	if isSRV {
+		parsedHosts = strings.Split(hosts, ",")
+		if len(parsedHosts) != 1 {
+			return fmt.Errorf("URI with SRV must include one and only one hostname")
+		}
+		parsedHosts, err = fetchSeedlistFromSRV(parsedHosts[0])
+		if err != nil {
+			return err
+		}
+
+		// error ignored because finding a TXT record should not be
+		// considered an error.
+		recordsFromTXT, _ := net.LookupTXT(hosts)
+
+		// This is a temporary fix to get around bug https://github.com/golang/go/issues/21472.
+		// It will currently incorrectly concatenate multiple TXT records to one
+		// on windows.
+		if runtime.GOOS == "windows" {
+			recordsFromTXT = []string{strings.Join(recordsFromTXT, "")}
+		}
+
+		if len(recordsFromTXT) > 1 {
+			return errors.New("multiple records from TXT not supported")
+		}
+		if len(recordsFromTXT) > 0 {
+			connectionArgsFromTXT = strings.FieldsFunc(recordsFromTXT[0], func(r rune) bool { return r == ';' || r == '&' })
+
+			err := validateTXTResult(connectionArgsFromTXT)
+			if err != nil {
+				return err
+			}
+
+		}
+
+		// SSL is enabled by default for SRV, but can be manually disabled with "ssl=false".
+		p.SSL = true
+		p.SSLSet = true
+	}
+
+	for _, host := range parsedHosts {
+		err = p.addHost(host)
+		if err != nil {
+			return internal.WrapErrorf(err, "invalid host \"%s\"", host)
+		}
+	}
+	if len(p.Hosts) == 0 {
+		return fmt.Errorf("must have at least 1 host")
+	}
+
+	uri = uri[len(hosts):]
+
+	extractedDatabase, err := extractDatabaseFromURI(uri)
+	if err != nil {
+		return err
+	}
+
+	uri = extractedDatabase.uri
+	p.Database = extractedDatabase.db
+
+	connectionArgsFromQueryString, err := extractQueryArgsFromURI(uri)
+	connectionArgPairs := append(connectionArgsFromTXT, connectionArgsFromQueryString...)
+
+	for _, pair := range connectionArgPairs {
+		err = p.addOption(pair)
+		if err != nil {
+			return err
+		}
+	}
+
+	err = p.setDefaultAuthParams(extractedDatabase.db)
+	if err != nil {
+		return err
+	}
+
+	err = p.validateAuth()
+	if err != nil {
+		return err
+	}
+
+	// Check for invalid write concern (i.e. w=0 and j=true)
+	if p.WNumberSet && p.WNumber == 0 && p.JSet && p.J {
+		return writeconcern.ErrInconsistent
+	}
+
+	// If WTimeout was set from manual options passed in, set WTImeoutSet to true.
+	if p.WTimeoutSetFromOption {
+		p.WTimeoutSet = true
+	}
+
+	return nil
+}
+
// setDefaultAuthParams fills in the default AuthSource (and, for GSSAPI, the
// default SERVICE_NAME mechanism property) for the configured auth mechanism.
// dbName is the database extracted from the URI path, used as the first-choice
// auth source for most mechanisms.
func (p *parser) setDefaultAuthParams(dbName string) error {
	switch strings.ToLower(p.AuthMechanism) {
	case "plain":
		if p.AuthSource == "" {
			p.AuthSource = dbName
			if p.AuthSource == "" {
				p.AuthSource = "$external"
			}
		}
	case "gssapi":
		if p.AuthMechanismProperties == nil {
			p.AuthMechanismProperties = map[string]string{
				"SERVICE_NAME": "mongodb",
			}
		} else if v, ok := p.AuthMechanismProperties["SERVICE_NAME"]; !ok || v == "" {
			p.AuthMechanismProperties["SERVICE_NAME"] = "mongodb"
		}
		// GSSAPI deliberately falls through: it shares mongodb-x509's
		// "$external"-only auth source rule.
		fallthrough
	case "mongodb-x509":
		if p.AuthSource == "" {
			p.AuthSource = "$external"
		} else if p.AuthSource != "$external" {
			return fmt.Errorf("auth source must be $external")
		}
	case "mongodb-cr":
		fallthrough
	case "scram-sha-1":
		fallthrough
	case "scram-sha-256":
		if p.AuthSource == "" {
			p.AuthSource = dbName
			if p.AuthSource == "" {
				p.AuthSource = "admin"
			}
		}
	case "":
		// No mechanism specified: same defaulting as the SCRAM family.
		if p.AuthSource == "" {
			p.AuthSource = dbName
			if p.AuthSource == "" {
				p.AuthSource = "admin"
			}
		}
	default:
		return fmt.Errorf("invalid auth mechanism")
	}
	return nil
}
+
+func (p *parser) validateAuth() error {
+	switch strings.ToLower(p.AuthMechanism) {
+	case "mongodb-cr":
+		if p.Username == "" {
+			return fmt.Errorf("username required for MONGO-CR")
+		}
+		if p.Password == "" {
+			return fmt.Errorf("password required for MONGO-CR")
+		}
+		if p.AuthMechanismProperties != nil {
+			return fmt.Errorf("MONGO-CR cannot have mechanism properties")
+		}
+	case "mongodb-x509":
+		if p.Password != "" {
+			return fmt.Errorf("password cannot be specified for MONGO-X509")
+		}
+		if p.AuthMechanismProperties != nil {
+			return fmt.Errorf("MONGO-X509 cannot have mechanism properties")
+		}
+	case "gssapi":
+		if p.Username == "" {
+			return fmt.Errorf("username required for GSSAPI")
+		}
+		for k := range p.AuthMechanismProperties {
+			if k != "SERVICE_NAME" && k != "CANONICALIZE_HOST_NAME" && k != "SERVICE_REALM" {
+				return fmt.Errorf("invalid auth property for GSSAPI")
+			}
+		}
+	case "plain":
+		if p.Username == "" {
+			return fmt.Errorf("username required for PLAIN")
+		}
+		if p.Password == "" {
+			return fmt.Errorf("password required for PLAIN")
+		}
+		if p.AuthMechanismProperties != nil {
+			return fmt.Errorf("PLAIN cannot have mechanism properties")
+		}
+	case "scram-sha-1":
+		if p.Username == "" {
+			return fmt.Errorf("username required for SCRAM-SHA-1")
+		}
+		if p.Password == "" {
+			return fmt.Errorf("password required for SCRAM-SHA-1")
+		}
+		if p.AuthMechanismProperties != nil {
+			return fmt.Errorf("SCRAM-SHA-1 cannot have mechanism properties")
+		}
+	case "scram-sha-256":
+		if p.Username == "" {
+			return fmt.Errorf("username required for SCRAM-SHA-256")
+		}
+		if p.Password == "" {
+			return fmt.Errorf("password required for SCRAM-SHA-256")
+		}
+		if p.AuthMechanismProperties != nil {
+			return fmt.Errorf("SCRAM-SHA-256 cannot have mechanism properties")
+		}
+	case "":
+	default:
+		return fmt.Errorf("invalid auth mechanism")
+	}
+	return nil
+}
+
// fetchSeedlistFromSRV resolves the _mongodb._tcp SRV record for host and
// returns the resulting "host:port" seedlist. The input host must not carry a
// port; each SRV target is validated against the query host before use.
func fetchSeedlistFromSRV(host string) ([]string, error) {
	var err error

	_, _, err = net.SplitHostPort(host)

	if err == nil {
		// we were able to successfully extract a port from the host,
		// but should not be able to when using SRV
		return nil, fmt.Errorf("URI with srv must not include a port number")
	}

	_, addresses, err := net.LookupSRV("mongodb", "tcp", host)
	if err != nil {
		return nil, err
	}
	parsedHosts := make([]string, len(addresses))
	for i, address := range addresses {
		// SRV targets are returned as absolute DNS names; strip the root dot.
		trimmedAddressTarget := strings.TrimSuffix(address.Target, ".")
		err := validateSRVResult(trimmedAddressTarget, host)
		if err != nil {
			return nil, err
		}
		parsedHosts[i] = fmt.Sprintf("%s:%d", trimmedAddressTarget, address.Port)
	}

	return parsedHosts, nil
}
+
+func (p *parser) addHost(host string) error {
+	if host == "" {
+		return nil
+	}
+	host, err := url.QueryUnescape(host)
+	if err != nil {
+		return internal.WrapErrorf(err, "invalid host \"%s\"", host)
+	}
+
+	_, port, err := net.SplitHostPort(host)
+	// this is unfortunate that SplitHostPort actually requires
+	// a port to exist.
+	if err != nil {
+		if addrError, ok := err.(*net.AddrError); !ok || addrError.Err != "missing port in address" {
+			return err
+		}
+	}
+
+	if port != "" {
+		d, err := strconv.Atoi(port)
+		if err != nil {
+			return internal.WrapErrorf(err, "port must be an integer")
+		}
+		if d <= 0 || d >= 65536 {
+			return fmt.Errorf("port must be in the range [1, 65535]")
+		}
+	}
+	p.Hosts = append(p.Hosts, host)
+	return nil
+}
+
// addOption parses a single "key=value" pair from the URI's query string and
// records the decoded value on the parser. Keys are matched
// case-insensitively. Known options update their dedicated field (plus the
// corresponding *Set flag where one exists); unrecognized options are
// collected in p.UnknownOptions. Every option, known or unknown, is also
// appended to p.Options keyed by the lowercased name.
func (p *parser) addOption(pair string) error {
	kv := strings.SplitN(pair, "=", 2)
	if len(kv) != 2 || kv[0] == "" {
		return fmt.Errorf("invalid option")
	}

	// Both key and value may be percent-encoded in the connection string.
	key, err := url.QueryUnescape(kv[0])
	if err != nil {
		return internal.WrapErrorf(err, "invalid option key \"%s\"", kv[0])
	}

	value, err := url.QueryUnescape(kv[1])
	if err != nil {
		return internal.WrapErrorf(err, "invalid option value \"%s\"", kv[1])
	}

	lowerKey := strings.ToLower(key)
	switch lowerKey {
	case "appname":
		p.AppName = value
	case "authmechanism":
		p.AuthMechanism = value
	case "authmechanismproperties":
		// Value is a comma-separated list of "key:value" pairs.
		p.AuthMechanismProperties = make(map[string]string)
		pairs := strings.Split(value, ",")
		for _, pair := range pairs {
			kv := strings.SplitN(pair, ":", 2)
			if len(kv) != 2 || kv[0] == "" {
				return fmt.Errorf("invalid authMechanism property")
			}
			p.AuthMechanismProperties[kv[0]] = kv[1]
		}
	case "authsource":
		p.AuthSource = value
	case "compressors":
		compressors := strings.Split(value, ",")
		if len(compressors) < 1 {
			return fmt.Errorf("must have at least 1 compressor")
		}
		p.Compressors = compressors
	case "connect":
		switch strings.ToLower(value) {
		case "automatic":
			// "automatic" is the default; p.Connect keeps its zero value.
		case "direct":
			p.Connect = SingleConnect
		default:
			return fmt.Errorf("invalid 'connect' value: %s", value)
		}

		p.ConnectSet = true
	case "connecttimeoutms":
		n, err := strconv.Atoi(value)
		if err != nil || n < 0 {
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}
		p.ConnectTimeout = time.Duration(n) * time.Millisecond
		p.ConnectTimeoutSet = true
	case "heartbeatintervalms", "heartbeatfrequencyms":
		// Two spellings are accepted for the heartbeat interval.
		n, err := strconv.Atoi(value)
		if err != nil || n < 0 {
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}
		p.HeartbeatInterval = time.Duration(n) * time.Millisecond
		p.HeartbeatIntervalSet = true
	case "journal":
		switch value {
		case "true":
			p.J = true
		case "false":
			p.J = false
		default:
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}

		p.JSet = true
	case "localthresholdms":
		n, err := strconv.Atoi(value)
		if err != nil || n < 0 {
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}
		p.LocalThreshold = time.Duration(n) * time.Millisecond
		p.LocalThresholdSet = true
	case "maxidletimems":
		n, err := strconv.Atoi(value)
		if err != nil || n < 0 {
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}
		p.MaxConnIdleTime = time.Duration(n) * time.Millisecond
		p.MaxConnIdleTimeSet = true
	case "maxpoolsize":
		n, err := strconv.Atoi(value)
		if err != nil || n < 0 {
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}
		p.MaxPoolSize = uint16(n)
		p.MaxPoolSizeSet = true
	case "readconcernlevel":
		p.ReadConcernLevel = value
	case "readpreference":
		p.ReadPreference = value
	case "readpreferencetags":
		// Each occurrence of this option adds one tag set; the value is a
		// comma-separated list of "key:value" tags.
		tags := make(map[string]string)
		items := strings.Split(value, ",")
		for _, item := range items {
			parts := strings.Split(item, ":")
			if len(parts) != 2 {
				return fmt.Errorf("invalid value for %s: %s", key, value)
			}
			tags[parts[0]] = parts[1]
		}
		p.ReadPreferenceTagSets = append(p.ReadPreferenceTagSets, tags)
	case "maxstaleness":
		// Note: maxStaleness is expressed in seconds, unlike the *MS options.
		n, err := strconv.Atoi(value)
		if err != nil || n < 0 {
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}
		p.MaxStaleness = time.Duration(n) * time.Second
		p.MaxStalenessSet = true
	case "replicaset":
		p.ReplicaSet = value
	case "retrywrites":
		p.RetryWrites = value == "true"
		p.RetryWritesSet = true
	case "serverselectiontimeoutms":
		n, err := strconv.Atoi(value)
		if err != nil || n < 0 {
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}
		p.ServerSelectionTimeout = time.Duration(n) * time.Millisecond
		p.ServerSelectionTimeoutSet = true
	case "sockettimeoutms":
		n, err := strconv.Atoi(value)
		if err != nil || n < 0 {
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}
		p.SocketTimeout = time.Duration(n) * time.Millisecond
		p.SocketTimeoutSet = true
	case "ssl":
		switch value {
		case "true":
			p.SSL = true
		case "false":
			p.SSL = false
		default:
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}

		p.SSLSet = true
	case "sslclientcertificatekeyfile":
		// Supplying a client certificate implicitly enables SSL.
		p.SSL = true
		p.SSLSet = true
		p.SSLClientCertificateKeyFile = value
		p.SSLClientCertificateKeyFileSet = true
	case "sslclientcertificatekeypassword":
		// Stored as a callback so the password is not kept in a plain field.
		p.SSLClientCertificateKeyPassword = func() string { return value }
		p.SSLClientCertificateKeyPasswordSet = true
	case "sslinsecure":
		switch value {
		case "true":
			p.SSLInsecure = true
		case "false":
			p.SSLInsecure = false
		default:
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}

		p.SSLInsecureSet = true
	case "sslcertificateauthorityfile":
		// Supplying a CA file implicitly enables SSL.
		p.SSL = true
		p.SSLSet = true
		p.SSLCaFile = value
		p.SSLCaFileSet = true
	case "w":
		// "w" is either a non-negative integer or a string (e.g. "majority"
		// or a tag name); a successful Atoi selects the numeric form.
		if w, err := strconv.Atoi(value); err == nil {
			if w < 0 {
				return fmt.Errorf("invalid value for %s: %s", key, value)
			}

			p.WNumber = w
			p.WNumberSet = true
			p.WString = ""
			break
		}

		p.WString = value
		p.WNumberSet = false

	case "wtimeoutms":
		n, err := strconv.Atoi(value)
		if err != nil || n < 0 {
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}
		p.WTimeout = time.Duration(n) * time.Millisecond
		p.WTimeoutSet = true
	case "wtimeout":
		// Defer to wtimeoutms, but not to a manually-set option.
		if p.WTimeoutSet {
			break
		}
		n, err := strconv.Atoi(value)
		if err != nil || n < 0 {
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}
		p.WTimeout = time.Duration(n) * time.Millisecond
	case "zlibcompressionlevel":
		level, err := strconv.Atoi(value)
		if err != nil || (level < -1 || level > 9) {
			return fmt.Errorf("invalid value for %s: %s", key, value)
		}

		// -1 means "use the driver's default zlib level".
		if level == -1 {
			level = wiremessage.DefaultZlibLevel
		}
		p.ZlibLevel = level
	default:
		if p.UnknownOptions == nil {
			p.UnknownOptions = make(map[string][]string)
		}
		p.UnknownOptions[lowerKey] = append(p.UnknownOptions[lowerKey], value)
	}

	// Record every option, known or unknown, for later inspection.
	if p.Options == nil {
		p.Options = make(map[string][]string)
	}
	p.Options[lowerKey] = append(p.Options[lowerKey], value)

	return nil
}
+
// validateSRVResult checks that an SRV target returned by DNS belongs to the
// same domain as the host from the connection string: the record must have at
// least two labels and must end with the input host's domain suffix (all of
// the input's labels after the first).
func validateSRVResult(recordFromSRV, inputHostName string) error {
	inputLabels := strings.Split(inputHostName, ".")
	recordLabels := strings.Split(recordFromSRV, ".")

	if len(recordLabels) < 2 {
		return errors.New("DNS name must contain at least 2 labels")
	}
	if len(recordLabels) < len(inputLabels) {
		return errors.New("Domain suffix from SRV record not matched input domain")
	}

	// Compare the input's suffix against the trailing labels of the record.
	suffix := inputLabels[1:]
	offset := len(recordLabels) - len(suffix)
	for i, label := range suffix {
		if recordLabels[offset+i] != label {
			return errors.New("Domain suffix from SRV record not matched input domain")
		}
	}

	return nil
}
+
// allowedTXTOptions is the set of URI options that may legally appear in a
// DNS TXT record for an SRV-based connection string.
var allowedTXTOptions = map[string]struct{}{
	"authsource": {},
	"replicaset": {},
}

// validateTXTResult verifies that every "key=value" entry found in a TXT
// record is well-formed and that its key (case-insensitively) is one of the
// allowed TXT options.
func validateTXTResult(paramsFromTXT []string) error {
	for _, entry := range paramsFromTXT {
		parts := strings.SplitN(entry, "=", 2)
		if len(parts) != 2 {
			return errors.New("Invalid TXT record")
		}
		if _, ok := allowedTXTOptions[strings.ToLower(parts[0])]; !ok {
			return fmt.Errorf("Cannot specify option '%s' in TXT record", parts[0])
		}
	}
	return nil
}
+
// extractQueryArgsFromURI splits the query portion of a URI (beginning with
// '?') into its individual "key=value" strings. Both '&' and ';' are accepted
// as separators. A nil slice is returned when there is nothing to parse.
func extractQueryArgsFromURI(uri string) ([]string, error) {
	if uri == "" {
		return nil, nil
	}
	if uri[0] != '?' {
		return nil, errors.New("must have a ? separator between path and query")
	}

	query := uri[1:]
	if query == "" {
		return nil, nil
	}

	isSeparator := func(r rune) bool { return r == '&' || r == ';' }
	return strings.FieldsFunc(query, isSeparator), nil
}
+
+type extractedDatabase struct {
+	uri string
+	db  string
+}
+
+// extractDatabaseFromURI is a helper function to retrieve information about
+// the database from the passed in URI. It accepts as an argument the currently
+// parsed URI and returns the remainder of the uri, the database it found,
+// and any error it encounters while parsing.
+func extractDatabaseFromURI(uri string) (extractedDatabase, error) {
+	if len(uri) == 0 {
+		return extractedDatabase{}, nil
+	}
+
+	if uri[0] != '/' {
+		return extractedDatabase{}, errors.New("must have a / separator between hosts and path")
+	}
+
+	uri = uri[1:]
+	if len(uri) == 0 {
+		return extractedDatabase{}, nil
+	}
+
+	database := uri
+	if idx := strings.IndexRune(uri, '?'); idx != -1 {
+		database = uri[:idx]
+	}
+
+	escapedDatabase, err := url.QueryUnescape(database)
+	if err != nil {
+		return extractedDatabase{}, internal.WrapErrorf(err, "invalid database \"%s\"", database)
+	}
+
+	uri = uri[len(database):]
+
+	return extractedDatabase{
+		uri: uri,
+		db:  escapedDatabase,
+	}, nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/description.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/description.go
new file mode 100644
index 0000000..758b112
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/description.go
@@ -0,0 +1,10 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
// Unknown is an unknown server or topology kind. It is the (untyped) zero
// value, comparable against both ServerKind and TopologyKind.
const Unknown = 0
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/feature.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/feature.go
new file mode 100644
index 0000000..f0236c0
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/feature.go
@@ -0,0 +1,36 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import (
+	"fmt"
+)
+
+// MaxStalenessSupported returns an error if the given server version
+// does not support max staleness.
+func MaxStalenessSupported(wireVersion *VersionRange) error {
+	if wireVersion != nil && wireVersion.Max < 5 {
+		return fmt.Errorf("max staleness is only supported for servers 3.4 or newer")
+	}
+
+	return nil
+}
+
+// ScramSHA1Supported returns an error if the given server version
+// does not support scram-sha-1.
+func ScramSHA1Supported(wireVersion *VersionRange) error {
+	if wireVersion != nil && wireVersion.Max < 3 {
+		return fmt.Errorf("SCRAM-SHA-1 is only supported for servers 3.0 or newer")
+	}
+
+	return nil
+}
+
+// SessionsSupported returns true of the given server version indicates that it supports sessions.
+func SessionsSupported(wireVersion *VersionRange) bool {
+	return wireVersion != nil && wireVersion.Max >= 6
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server.go
new file mode 100644
index 0000000..d6857ce
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server.go
@@ -0,0 +1,144 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/tag"
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+	"github.com/mongodb/mongo-go-driver/x/network/result"
+)
+
// UnsetRTT is the unset value for a round trip time. A negative duration
// marks the RTT as "not yet measured" (see Server.SetAverageRTT).
const UnsetRTT = -1 * time.Millisecond
+
// SelectedServer represents a selected server that is a member of a topology.
// It pairs the server's description with the kind of the topology it was
// selected from.
type SelectedServer struct {
	Server
	// Kind is the kind of the topology the server belongs to.
	Kind TopologyKind
}
+
// Server represents a description of a server. This is created from an isMaster
// command.
type Server struct {
	Addr address.Address

	// AverageRTTSet distinguishes "zero RTT" from "RTT never measured".
	AverageRTT            time.Duration
	AverageRTTSet         bool
	Compression           []string // compression methods returned by server
	CanonicalAddr         address.Address
	ElectionID            primitive.ObjectID
	HeartbeatInterval     time.Duration
	// LastError records a failure from the most recent handshake, if any.
	LastError             error
	LastUpdateTime        time.Time
	LastWriteTime         time.Time
	MaxBatchCount         uint32
	MaxDocumentSize       uint32
	MaxMessageSize        uint32
	// Members lists the addresses of all known replica set members
	// (hosts, passives, and arbiters).
	Members               []address.Address
	ReadOnly              bool
	SessionTimeoutMinutes uint32
	SetName               string
	SetVersion            uint32
	Tags                  tag.Set
	Kind                  ServerKind
	WireVersion           *VersionRange

	SaslSupportedMechs []string // user-specific from server handshake
}
+
// NewServer creates a new server description from the given parameters.
// The description's Kind is derived from the isMaster response; an OK != 1
// response yields a description carrying only the handshake fields plus a
// LastError.
func NewServer(addr address.Address, isMaster result.IsMaster) Server {
	i := Server{
		Addr: addr,

		CanonicalAddr:         address.Address(isMaster.Me).Canonicalize(),
		Compression:           isMaster.Compression,
		ElectionID:            isMaster.ElectionID,
		LastUpdateTime:        time.Now().UTC(),
		LastWriteTime:         isMaster.LastWriteTimestamp,
		MaxBatchCount:         isMaster.MaxWriteBatchSize,
		MaxDocumentSize:       isMaster.MaxBSONObjectSize,
		MaxMessageSize:        isMaster.MaxMessageSizeBytes,
		SaslSupportedMechs:    isMaster.SaslSupportedMechs,
		SessionTimeoutMinutes: isMaster.LogicalSessionTimeoutMinutes,
		SetName:               isMaster.SetName,
		SetVersion:            isMaster.SetVersion,
		Tags:                  tag.NewTagSetFromMap(isMaster.Tags),
	}

	// Fall back to the connection address when the server did not report
	// its own ("me") address.
	if i.CanonicalAddr == "" {
		i.CanonicalAddr = addr
	}

	if isMaster.OK != 1 {
		i.LastError = fmt.Errorf("not ok")
		return i
	}

	// Collect every known member of the set: hosts, passives, and arbiters.
	for _, host := range isMaster.Hosts {
		i.Members = append(i.Members, address.Address(host).Canonicalize())
	}

	for _, passive := range isMaster.Passives {
		i.Members = append(i.Members, address.Address(passive).Canonicalize())
	}

	for _, arbiter := range isMaster.Arbiters {
		i.Members = append(i.Members, address.Address(arbiter).Canonicalize())
	}

	// Classify the server. IsReplicaSet (ghost) takes precedence, then the
	// replica-set roles when a set name is present, then mongos.
	i.Kind = Standalone

	if isMaster.IsReplicaSet {
		i.Kind = RSGhost
	} else if isMaster.SetName != "" {
		if isMaster.IsMaster {
			i.Kind = RSPrimary
		} else if isMaster.Hidden {
			i.Kind = RSMember
		} else if isMaster.Secondary {
			i.Kind = RSSecondary
		} else if isMaster.ArbiterOnly {
			i.Kind = RSArbiter
		} else {
			i.Kind = RSMember
		}
	} else if isMaster.Msg == "isdbgrid" {
		i.Kind = Mongos
	}

	i.WireVersion = &VersionRange{
		Min: isMaster.MinWireVersion,
		Max: isMaster.MaxWireVersion,
	}

	return i
}
+
+// SetAverageRTT sets the average round trip time for this server description.
+func (s Server) SetAverageRTT(rtt time.Duration) Server {
+	s.AverageRTT = rtt
+	if rtt == UnsetRTT {
+		s.AverageRTTSet = false
+	} else {
+		s.AverageRTTSet = true
+	}
+
+	return s
+}
+
+// DataBearing returns true if the server is a data bearing server.
+func (s Server) DataBearing() bool {
+	return s.Kind == RSPrimary ||
+		s.Kind == RSSecondary ||
+		s.Kind == Mongos ||
+		s.Kind == Standalone
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server_kind.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server_kind.go
new file mode 100644
index 0000000..657791b
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server_kind.go
@@ -0,0 +1,43 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
// ServerKind represents the type of a server.
type ServerKind uint32

// These constants are the possible types of servers. Each replica-set kind
// includes the RSMember bits so set members share a common flag.
const (
	Standalone  ServerKind = 1
	RSMember    ServerKind = 2
	RSPrimary   ServerKind = 4 + RSMember
	RSSecondary ServerKind = 8 + RSMember
	RSArbiter   ServerKind = 16 + RSMember
	RSGhost     ServerKind = 32 + RSMember
	Mongos      ServerKind = 256
)

// serverKindNames maps each known ServerKind to its display name.
var serverKindNames = map[ServerKind]string{
	Standalone:  "Standalone",
	RSMember:    "RSOther",
	RSPrimary:   "RSPrimary",
	RSSecondary: "RSSecondary",
	RSArbiter:   "RSArbiter",
	RSGhost:     "RSGhost",
	Mongos:      "Mongos",
}

// String implements the fmt.Stringer interface. Unrecognized kinds render as
// "Unknown".
func (kind ServerKind) String() string {
	if name, ok := serverKindNames[kind]; ok {
		return name
	}
	return "Unknown"
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server_selector.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server_selector.go
new file mode 100644
index 0000000..9c31b6e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/server_selector.go
@@ -0,0 +1,279 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import (
+	"fmt"
+	"math"
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/mongo/readpref"
+	"github.com/mongodb/mongo-go-driver/tag"
+)
+
// ServerSelector is an interface implemented by types that can select a server given a
// topology description.
type ServerSelector interface {
	// SelectServer returns the subset of the candidate servers that satisfy
	// the selector's criteria for the given topology.
	SelectServer(Topology, []Server) ([]Server, error)
}

// ServerSelectorFunc is a function that can be used as a ServerSelector.
type ServerSelectorFunc func(Topology, []Server) ([]Server, error)

// SelectServer implements the ServerSelector interface by calling the
// function itself.
func (ssf ServerSelectorFunc) SelectServer(t Topology, s []Server) ([]Server, error) {
	return ssf(t, s)
}
+
+type compositeSelector struct {
+	selectors []ServerSelector
+}
+
+// CompositeSelector combines multiple selectors into a single selector.
+func CompositeSelector(selectors []ServerSelector) ServerSelector {
+	return &compositeSelector{selectors: selectors}
+}
+
+func (cs *compositeSelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
+	var err error
+	for _, sel := range cs.selectors {
+		candidates, err = sel.SelectServer(t, candidates)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return candidates, nil
+}
+
+type latencySelector struct {
+	latency time.Duration
+}
+
+// LatencySelector creates a ServerSelector which selects servers based on their latency.
+func LatencySelector(latency time.Duration) ServerSelector {
+	return &latencySelector{latency: latency}
+}
+
+func (ls *latencySelector) SelectServer(t Topology, candidates []Server) ([]Server, error) {
+	if ls.latency < 0 {
+		return candidates, nil
+	}
+
+	switch len(candidates) {
+	case 0, 1:
+		return candidates, nil
+	default:
+		min := time.Duration(math.MaxInt64)
+		for _, candidate := range candidates {
+			if candidate.AverageRTTSet {
+				if candidate.AverageRTT < min {
+					min = candidate.AverageRTT
+				}
+			}
+		}
+
+		if min == math.MaxInt64 {
+			return candidates, nil
+		}
+
+		max := min + ls.latency
+
+		var result []Server
+		for _, candidate := range candidates {
+			if candidate.AverageRTTSet {
+				if candidate.AverageRTT <= max {
+					result = append(result, candidate)
+				}
+			}
+		}
+
+		return result, nil
+	}
+}
+
+// WriteSelector selects all the writable servers.
+func WriteSelector() ServerSelector {
+	return ServerSelectorFunc(func(t Topology, candidates []Server) ([]Server, error) {
+		switch t.Kind {
+		case Single:
+			return candidates, nil
+		default:
+			result := []Server{}
+			for _, candidate := range candidates {
+				switch candidate.Kind {
+				case Mongos, RSPrimary, Standalone:
+					result = append(result, candidate)
+				}
+			}
+			return result, nil
+		}
+	})
+}
+
// ReadPrefSelector selects servers based on the provided read preference.
func ReadPrefSelector(rp *readpref.ReadPref) ServerSelector {
	return ServerSelectorFunc(func(t Topology, candidates []Server) ([]Server, error) {
		// When max staleness is set, every candidate whose kind is known
		// must be on a wire version that supports it.
		if _, set := rp.MaxStaleness(); set {
			for _, s := range candidates {
				if s.Kind != Unknown {
					if err := MaxStalenessSupported(s.WireVersion); err != nil {
						return nil, err
					}
				}
			}
		}

		switch t.Kind {
		case Single:
			// A single-server topology always uses its one server.
			return candidates, nil
		case ReplicaSetNoPrimary, ReplicaSetWithPrimary:
			return selectForReplicaSet(rp, t, candidates)
		case Sharded:
			// For sharded topologies, route to the mongos servers.
			return selectByKind(candidates, Mongos), nil
		}

		// Unknown topology kind: no servers selected, no error.
		return nil, nil
	})
}
+
+func selectForReplicaSet(rp *readpref.ReadPref, t Topology, candidates []Server) ([]Server, error) {
+	if err := verifyMaxStaleness(rp, t); err != nil {
+		return nil, err
+	}
+
+	switch rp.Mode() {
+	case readpref.PrimaryMode:
+		return selectByKind(candidates, RSPrimary), nil
+	case readpref.PrimaryPreferredMode:
+		selected := selectByKind(candidates, RSPrimary)
+
+		if len(selected) == 0 {
+			selected = selectSecondaries(rp, candidates)
+			return selectByTagSet(selected, rp.TagSets()), nil
+		}
+
+		return selected, nil
+	case readpref.SecondaryPreferredMode:
+		selected := selectSecondaries(rp, candidates)
+		selected = selectByTagSet(selected, rp.TagSets())
+		if len(selected) > 0 {
+			return selected, nil
+		}
+		return selectByKind(candidates, RSPrimary), nil
+	case readpref.SecondaryMode:
+		selected := selectSecondaries(rp, candidates)
+		return selectByTagSet(selected, rp.TagSets()), nil
+	case readpref.NearestMode:
+		selected := selectByKind(candidates, RSPrimary)
+		selected = append(selected, selectSecondaries(rp, candidates)...)
+		return selectByTagSet(selected, rp.TagSets()), nil
+	}
+
+	return nil, fmt.Errorf("unsupported mode: %d", rp.Mode())
+}
+
// selectSecondaries returns the secondaries among the candidates; when the
// read preference carries a max staleness, secondaries whose estimated
// staleness exceeds it are filtered out.
func selectSecondaries(rp *readpref.ReadPref, candidates []Server) []Server {
	secondaries := selectByKind(candidates, RSSecondary)
	if len(secondaries) == 0 {
		return secondaries
	}
	if maxStaleness, set := rp.MaxStaleness(); set {
		primaries := selectByKind(candidates, RSPrimary)
		if len(primaries) == 0 {
			// No primary known: estimate staleness relative to the secondary
			// with the most recent last write.
			baseTime := secondaries[0].LastWriteTime
			for i := 1; i < len(secondaries); i++ {
				if secondaries[i].LastWriteTime.After(baseTime) {
					baseTime = secondaries[i].LastWriteTime
				}
			}

			var selected []Server
			for _, secondary := range secondaries {
				estimatedStaleness := baseTime.Sub(secondary.LastWriteTime) + secondary.HeartbeatInterval
				if estimatedStaleness <= maxStaleness {
					selected = append(selected, secondary)
				}
			}

			return selected
		}

		// Primary known: estimate each secondary's staleness from the
		// difference between its update/write gap and the primary's, plus
		// the heartbeat interval.
		primary := primaries[0]

		var selected []Server
		for _, secondary := range secondaries {
			estimatedStaleness := secondary.LastUpdateTime.Sub(secondary.LastWriteTime) - primary.LastUpdateTime.Sub(primary.LastWriteTime) + secondary.HeartbeatInterval
			if estimatedStaleness <= maxStaleness {
				selected = append(selected, secondary)
			}
		}
		return selected
	}

	return secondaries
}
+
+func selectByTagSet(candidates []Server, tagSets []tag.Set) []Server {
+	if len(tagSets) == 0 {
+		return candidates
+	}
+
+	for _, ts := range tagSets {
+		var results []Server
+		for _, s := range candidates {
+			if len(s.Tags) > 0 && s.Tags.ContainsAll(ts) {
+				results = append(results, s)
+			}
+		}
+
+		if len(results) > 0 {
+			return results
+		}
+	}
+
+	return []Server{}
+}
+
+func selectByKind(candidates []Server, kind ServerKind) []Server {
+	var result []Server
+	for _, s := range candidates {
+		if s.Kind == kind {
+			result = append(result, s)
+		}
+	}
+
+	return result
+}
+
// verifyMaxStaleness validates the read preference's max staleness against
// the topology: when set, it must be at least 90 seconds and no smaller than
// the heartbeat interval plus a 10-second idle write period.
func verifyMaxStaleness(rp *readpref.ReadPref, t Topology) error {
	maxStaleness, set := rp.MaxStaleness()
	if !set {
		return nil
	}

	if maxStaleness < 90*time.Second {
		return fmt.Errorf("max staleness (%s) must be greater than or equal to 90s", maxStaleness)
	}

	if len(t.Servers) < 1 {
		// Maybe we should return an error here instead?
		return nil
	}

	// we'll assume all candidates have the same heartbeat interval.
	s := t.Servers[0]
	idleWritePeriod := 10 * time.Second

	if maxStaleness < s.HeartbeatInterval+idleWritePeriod {
		return fmt.Errorf(
			"max staleness (%s) must be greater than or equal to the heartbeat interval (%s) plus idle write period (%s)",
			maxStaleness, s.HeartbeatInterval, idleWritePeriod,
		)
	}

	return nil
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/topology.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/topology.go
new file mode 100644
index 0000000..caf447d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/topology.go
@@ -0,0 +1,89 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import (
+	"sort"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/x/network/address"
+)
+
// Topology represents a description of a mongodb topology
type Topology struct {
	// Servers holds the descriptions of all known servers in the topology.
	Servers               []Server
	Kind                  TopologyKind
	// SessionTimeoutMinutes is the logical session timeout reported by the
	// servers.
	SessionTimeoutMinutes uint32
}
+
+// Server returns the server for the given address. Returns false if the server
+// could not be found.
+func (t Topology) Server(addr address.Address) (Server, bool) {
+	for _, server := range t.Servers {
+		if server.Addr.String() == addr.String() {
+			return server, true
+		}
+	}
+	return Server{}, false
+}
+
// TopologyDiff is the difference between two different topology descriptions.
type TopologyDiff struct {
	// Added lists servers present in the new topology but not the old.
	Added   []Server
	// Removed lists servers present in the old topology but not the new.
	Removed []Server
}
+
// DiffTopology compares the two topology descriptions and returns the
// difference. Servers are matched by address using a sorted two-pointer
// merge.
func DiffTopology(old, new Topology) TopologyDiff {
	var diff TopologyDiff

	// TODO: do this without sorting...
	oldServers := serverSorter(old.Servers)
	newServers := serverSorter(new.Servers)

	sort.Sort(oldServers)
	sort.Sort(newServers)

	i := 0
	j := 0
	for {
		if i < len(oldServers) && j < len(newServers) {
			comp := strings.Compare(oldServers[i].Addr.String(), newServers[j].Addr.String())
			switch comp {
			case 1:
				// old address sorts after new: the new server has no match
				// in the old topology, so it was added.
				diff.Added = append(diff.Added, newServers[j])
				j++
			case -1:
				// old address sorts before new: the old server has no match
				// in the new topology, so it was removed.
				diff.Removed = append(diff.Removed, oldServers[i])
				i++
			case 0:
				// Same address in both topologies: not part of the diff.
				i++
				j++
			}
		} else if i < len(oldServers) {
			// Remaining old servers were all removed.
			diff.Removed = append(diff.Removed, oldServers[i])
			i++
		} else if j < len(newServers) {
			// Remaining new servers were all added.
			diff.Added = append(diff.Added, newServers[j])
			j++
		} else {
			break
		}
	}

	return diff
}
+
+type serverSorter []Server
+
+func (ss serverSorter) Len() int      { return len(ss) }
+func (ss serverSorter) Swap(i, j int) { ss[i], ss[j] = ss[j], ss[i] }
+func (ss serverSorter) Less(i, j int) bool {
+	return strings.Compare(ss[i].Addr.String(), ss[j].Addr.String()) < 0
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/topology_kind.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/topology_kind.go
new file mode 100644
index 0000000..69f8177
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/topology_kind.go
@@ -0,0 +1,37 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
// TopologyKind represents a specific topology configuration.
type TopologyKind uint32

// These constants are the available topology configurations. The replica-set
// variants include the ReplicaSet bits so they share a common flag.
const (
	Single                TopologyKind = 1
	ReplicaSet            TopologyKind = 2
	ReplicaSetNoPrimary   TopologyKind = 4 + ReplicaSet
	ReplicaSetWithPrimary TopologyKind = 8 + ReplicaSet
	Sharded               TopologyKind = 256
)

// topologyKindNames maps each known TopologyKind to its display name.
var topologyKindNames = map[TopologyKind]string{
	Single:                "Single",
	ReplicaSet:            "ReplicaSet",
	ReplicaSetNoPrimary:   "ReplicaSetNoPrimary",
	ReplicaSetWithPrimary: "ReplicaSetWithPrimary",
	Sharded:               "Sharded",
}

// String implements the fmt.Stringer interface. Unrecognized kinds render as
// "Unknown".
func (kind TopologyKind) String() string {
	if name, ok := topologyKindNames[kind]; ok {
		return name
	}
	return "Unknown"
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/version.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/version.go
new file mode 100644
index 0000000..60cda4e
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/version.go
@@ -0,0 +1,44 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import "strconv"
+
// Version represents a software version.
type Version struct {
	// Desc is an optional human-readable form; when non-empty it is returned
	// verbatim by String.
	Desc string
	// Parts holds the numeric components, most significant first.
	Parts []uint8
}

// AtLeast returns true when the version is at least as large as the "other"
// version, comparing components most-significant first. Missing trailing
// components on either side are treated as zero, so 3 is at least 3.0 and
// 4.0 is at least 3.5.
//
// BUG FIX: the previous implementation returned false whenever ANY component
// of v was smaller than the corresponding component of other, even when an
// earlier, more significant component was larger (e.g. {4,0}.AtLeast(3,5)
// incorrectly returned false). It also reported false when v simply had
// fewer parts than other ({3}.AtLeast(3,0)).
func (v Version) AtLeast(other ...uint8) bool {
	for i := range other {
		// Treat absent trailing parts as zero (3 == 3.0.0).
		var part uint8
		if i < len(v.Parts) {
			part = v.Parts[i]
		}
		if part > other[i] {
			// Strictly greater at the most significant differing component;
			// less significant components cannot change the outcome.
			return true
		}
		if part < other[i] {
			return false
		}
	}
	// All compared components are equal, so v >= other.
	return true
}

// String provides the string representation of the Version: Desc when it is
// set, otherwise the numeric parts joined with dots (e.g. "3.4.1").
func (v Version) String() string {
	if v.Desc != "" {
		return v.Desc
	}

	var s string
	for i, p := range v.Parts {
		if i != 0 {
			s += "."
		}
		s += strconv.Itoa(int(p))
	}
	return s
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/description/version_range.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/version_range.go
new file mode 100644
index 0000000..984dff8
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/description/version_range.go
@@ -0,0 +1,31 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package description
+
+import "fmt"
+
+// VersionRange represents an inclusive range of versions.
+type VersionRange struct {
+	Min int32
+	Max int32
+}
+
+// NewVersionRange creates a new VersionRange given a min and a max.
+func NewVersionRange(min, max int32) VersionRange {
+	var vr VersionRange
+	vr.Min, vr.Max = min, max
+	return vr
+}
+
+// Includes returns a bool indicating whether the supplied integer falls
+// inside the range; both endpoints are included.
+func (vr VersionRange) Includes(v int32) bool {
+	if v < vr.Min {
+		return false
+	}
+	return v <= vr.Max
+}
+
+// String implements the fmt.Stringer interface.
+func (vr VersionRange) String() string {
+	return fmt.Sprintf("[%d, %d]", vr.Min, vr.Max)
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/result/result.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/result/result.go
new file mode 100644
index 0000000..d317bb7
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/result/result.go
@@ -0,0 +1,173 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package result contains the results from various operations.
+package result
+
+import (
+	"time"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/bson/primitive"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
+// Upsert contains the information for a single upsert.
+type Upsert struct {
+	// Index is the server-reported index for this upsert.
+	Index int64       `bson:"index"`
+	// ID is the _id value the server assigned or matched for the upsert.
+	ID    interface{} `bson:"_id"`
+}
+
+// Insert is a result from an Insert command.
+// WriteErrors and WriteConcernError mirror the server's error reporting;
+// either may be empty/nil on success.
+type Insert struct {
+	N                 int
+	WriteErrors       []WriteError       `bson:"writeErrors"`
+	WriteConcernError *WriteConcernError `bson:"writeConcernError"`
+}
+
+// StartSession is a result from a StartSession command.
+type StartSession struct {
+	// ID is the server-generated session identifier document.
+	ID bsonx.Doc `bson:"id"`
+}
+
+// EndSessions is a result from an EndSessions command.
+// The server response carries no fields of interest, hence the empty struct.
+type EndSessions struct{}
+
+// Delete is a result from a Delete command.
+type Delete struct {
+	N                 int
+	WriteErrors       []WriteError       `bson:"writeErrors"`
+	WriteConcernError *WriteConcernError `bson:"writeConcernError"`
+}
+
+// Update is a result of an Update command.
+type Update struct {
+	MatchedCount      int64              `bson:"n"`
+	ModifiedCount     int64              `bson:"nModified"`
+	Upserted          []Upsert           `bson:"upserted"`
+	WriteErrors       []WriteError       `bson:"writeErrors"`
+	WriteConcernError *WriteConcernError `bson:"writeConcernError"`
+}
+
+// Distinct is a result from a Distinct command.
+type Distinct struct {
+	Values []interface{}
+}
+
+// FindAndModify is a result from a findAndModify command.
+type FindAndModify struct {
+	// Value is the raw document returned by the server (pre- or post-image).
+	Value           bson.Raw
+	LastErrorObject struct {
+		UpdatedExisting bool
+		Upserted        interface{}
+	}
+}
+
+// WriteError is an error from a write operation that is not a write concern
+// error.
+type WriteError struct {
+	Index  int
+	Code   int
+	ErrMsg string
+}
+
+// WriteConcernError is an error related to a write concern.
+type WriteConcernError struct {
+	Code    int
+	ErrMsg  string
+	ErrInfo bson.Raw
+}
+
+// ListDatabases is the result from a listDatabases command.
+type ListDatabases struct {
+	Databases []struct {
+		Name       string
+		SizeOnDisk int64 `bson:"sizeOnDisk"`
+		Empty      bool
+	}
+	TotalSize int64 `bson:"totalSize"`
+}
+
+// IsMaster is a result of an IsMaster command.
+// It holds the server's handshake/topology response; every field is tagged
+// omitempty (except OK) and is left at its zero value when the server omits
+// it.
+type IsMaster struct {
+	Arbiters                     []string           `bson:"arbiters,omitempty"`
+	ArbiterOnly                  bool               `bson:"arbiterOnly,omitempty"`
+	ClusterTime                  bson.Raw           `bson:"$clusterTime,omitempty"`
+	Compression                  []string           `bson:"compression,omitempty"`
+	ElectionID                   primitive.ObjectID `bson:"electionId,omitempty"`
+	Hidden                       bool               `bson:"hidden,omitempty"`
+	Hosts                        []string           `bson:"hosts,omitempty"`
+	IsMaster                     bool               `bson:"ismaster,omitempty"`
+	IsReplicaSet                 bool               `bson:"isreplicaset,omitempty"`
+	LastWriteTimestamp           time.Time          `bson:"lastWriteDate,omitempty"`
+	LogicalSessionTimeoutMinutes uint32             `bson:"logicalSessionTimeoutMinutes,omitempty"`
+	MaxBSONObjectSize            uint32             `bson:"maxBsonObjectSize,omitempty"`
+	MaxMessageSizeBytes          uint32             `bson:"maxMessageSizeBytes,omitempty"`
+	MaxWriteBatchSize            uint32             `bson:"maxWriteBatchSize,omitempty"`
+	Me                           string             `bson:"me,omitempty"`
+	MaxWireVersion               int32              `bson:"maxWireVersion,omitempty"`
+	MinWireVersion               int32              `bson:"minWireVersion,omitempty"`
+	Msg                          string             `bson:"msg,omitempty"`
+	OK                           int32              `bson:"ok"`
+	Passives                     []string           `bson:"passives,omitempty"`
+	ReadOnly                     bool               `bson:"readOnly,omitempty"`
+	SaslSupportedMechs           []string           `bson:"saslSupportedMechs,omitempty"`
+	Secondary                    bool               `bson:"secondary,omitempty"`
+	SetName                      string             `bson:"setName,omitempty"`
+	SetVersion                   uint32             `bson:"setVersion,omitempty"`
+	Tags                         map[string]string  `bson:"tags,omitempty"`
+}
+
+// BuildInfo is a result of a BuildInfo command.
+type BuildInfo struct {
+	OK           bool    `bson:"ok"`
+	GitVersion   string  `bson:"gitVersion,omitempty"`
+	Version      string  `bson:"version,omitempty"`
+	VersionArray []uint8 `bson:"versionArray,omitempty"`
+}
+
+// IsZero returns true if the BuildInfo is the zero value.
+//
+// A non-nil but empty VersionArray is deliberately not considered zero.
+func (bi BuildInfo) IsZero() bool {
+	return !bi.OK &&
+		bi.GitVersion == "" &&
+		bi.Version == "" &&
+		bi.VersionArray == nil
+}
+
+// GetLastError is a result of a GetLastError command.
+type GetLastError struct {
+	// ConnectionID is the server connection identifier from the response.
+	ConnectionID uint32 `bson:"connectionId"`
+}
+
+// KillCursors is a result of a KillCursors command.
+// The three slices partition the requested cursor IDs by outcome.
+type KillCursors struct {
+	CursorsKilled   []int64 `bson:"cursorsKilled"`
+	CursorsNotFound []int64 `bson:"cursorsNotFound"`
+	CursorsAlive    []int64 `bson:"cursorsAlive"`
+}
+
+// CreateIndexes is a result of a CreateIndexes command.
+type CreateIndexes struct {
+	CreatedCollectionAutomatically bool `bson:"createdCollectionAutomatically"`
+	IndexesBefore                  int  `bson:"numIndexesBefore"`
+	IndexesAfter                   int  `bson:"numIndexesAfter"`
+}
+
+// TransactionResult holds the result of committing or aborting a transaction.
+type TransactionResult struct {
+	WriteConcernError *WriteConcernError `bson:"writeConcernError"`
+}
+
+// BulkWrite holds the result of a bulk write operation.
+// These counters are aggregated client-side (no bson tags).
+type BulkWrite struct {
+	InsertedCount int64
+	MatchedCount  int64
+	ModifiedCount int64
+	DeletedCount  int64
+	UpsertedCount int64
+	// UpsertedIDs maps the operation index to the upserted document's _id.
+	UpsertedIDs   map[int64]interface{}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/appenders.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/appenders.go
new file mode 100644
index 0000000..3d9f784
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/appenders.go
@@ -0,0 +1,20 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+// appendInt32 appends i to b in little-endian byte order and returns the
+// extended slice.
+func appendInt32(b []byte, i int32) []byte {
+	for shift := uint(0); shift < 32; shift += 8 {
+		b = append(b, byte(i>>shift))
+	}
+	return b
+}
+
+// appendCString appends str to b followed by a NUL terminator.
+func appendCString(b []byte, str string) []byte {
+	return append(append(b, str...), 0x00)
+}
+
+// appendInt64 appends i to b in little-endian byte order and returns the
+// extended slice.
+func appendInt64(b []byte, i int64) []byte {
+	for shift := uint(0); shift < 64; shift += 8 {
+		b = append(b, byte(i>>shift))
+	}
+	return b
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/command.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/command.go
new file mode 100644
index 0000000..8c339a5
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/command.go
@@ -0,0 +1,49 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import "github.com/mongodb/mongo-go-driver/bson"
+
+// Command represents the OP_COMMAND message of the MongoDB wire protocol.
+// All of its methods are unimplemented stubs that panic; the type exists so
+// the package's wire-message interfaces have a placeholder for OP_COMMAND.
+type Command struct {
+	MsgHeader   Header
+	Database    string
+	CommandName string
+	Metadata    string
+	CommandArgs string
+	InputDocs   []bson.Raw
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+// It always panics: OP_COMMAND marshaling is not implemented.
+func (c Command) MarshalWireMessage() ([]byte, error) {
+	panic("not implemented")
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+// It always panics: OP_COMMAND validation is not implemented.
+func (c Command) ValidateWireMessage() error {
+	panic("not implemented")
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+// It always panics: OP_COMMAND appending is not implemented.
+func (c Command) AppendWireMessage([]byte) ([]byte, error) {
+	panic("not implemented")
+}
+
+// String implements the fmt.Stringer interface. It always panics.
+func (c Command) String() string {
+	panic("not implemented")
+}
+
+// Len implements the WireMessage interface. It always panics.
+func (c Command) Len() int {
+	panic("not implemented")
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface. It always panics.
+func (c *Command) UnmarshalWireMessage([]byte) error {
+	panic("not implemented")
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/command_reply.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/command_reply.go
new file mode 100644
index 0000000..d5773d6
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/command_reply.go
@@ -0,0 +1,47 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import "github.com/mongodb/mongo-go-driver/bson"
+
+// CommandReply represents the OP_COMMANDREPLY message of the MongoDB wire protocol.
+// All of its methods are unimplemented stubs that panic.
+type CommandReply struct {
+	MsgHeader    Header
+	Metadata     bson.Raw
+	CommandReply bson.Raw
+	OutputDocs   []bson.Raw
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+// It always panics: OP_COMMANDREPLY marshaling is not implemented.
+func (cr CommandReply) MarshalWireMessage() ([]byte, error) {
+	panic("not implemented")
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+// It always panics: OP_COMMANDREPLY validation is not implemented.
+func (cr CommandReply) ValidateWireMessage() error {
+	panic("not implemented")
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+// It always panics: OP_COMMANDREPLY appending is not implemented.
+func (cr CommandReply) AppendWireMessage([]byte) ([]byte, error) {
+	panic("not implemented")
+}
+
+// String implements the fmt.Stringer interface. It always panics.
+func (cr CommandReply) String() string {
+	panic("not implemented")
+}
+
+// Len implements the WireMessage interface. It always panics.
+func (cr CommandReply) Len() int {
+	panic("not implemented")
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface. It always panics.
+func (cr *CommandReply) UnmarshalWireMessage([]byte) error {
+	panic("not implemented")
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/compressed.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/compressed.go
new file mode 100644
index 0000000..f8f3884
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/compressed.go
@@ -0,0 +1,110 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Compressed represents the OP_COMPRESSED message of the MongoDB wire protocol.
+type Compressed struct {
+	MsgHeader         Header
+	// OriginalOpCode is the opcode of the message that was compressed.
+	OriginalOpCode    OpCode
+	// UncompressedSize is the byte length of the payload before compression.
+	UncompressedSize  int32
+	CompressorID      CompressorID
+	// CompressedMessage is the compressed body (everything after the
+	// original message's header).
+	CompressedMessage []byte
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+// It delegates to AppendWireMessage with a buffer pre-sized to Len().
+func (c Compressed) MarshalWireMessage() ([]byte, error) {
+	return c.AppendWireMessage(make([]byte, 0, c.Len()))
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+// It checks that the header's length matches Len() and that the header's
+// opcode is OpCompressed.
+func (c Compressed) ValidateWireMessage() error {
+	if int(c.MsgHeader.MessageLength) != c.Len() {
+		return errors.New("incorrect header: message length is not correct")
+	}
+
+	if c.MsgHeader.OpCode != OpCompressed {
+		return errors.New("incorrect header: opcode is not OpCompressed")
+	}
+
+	// NOTE(review): combined with the check above, this requires
+	// OriginalOpCode to equal OpCompressed, yet a compressed message should
+	// normally wrap a *different* opcode — confirm this condition is intended.
+	if c.OriginalOpCode != c.MsgHeader.OpCode {
+		return errors.New("incorrect header: original opcode does not match opcode in message header")
+	}
+	return nil
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+//
+// AppendWireMessage will set the MessageLength property of MsgHeader if it is 0. It will also set the OpCode to
+// OpCompressed if the OpCode is 0. If either of these properties are non-zero and not correct, this method will return
+// both the []byte with the wire message appended to it and an invalid header error.
+func (c Compressed) AppendWireMessage(b []byte) ([]byte, error) {
+	// The wire layout is fixed: header, original opcode, uncompressed size,
+	// compressor ID byte, then the compressed payload. Even when SetDefaults
+	// reports a header error, the message is still appended (caller decides).
+	err := c.MsgHeader.SetDefaults(c.Len(), OpCompressed)
+
+	b = c.MsgHeader.AppendHeader(b)
+	b = appendInt32(b, int32(c.OriginalOpCode))
+	b = appendInt32(b, c.UncompressedSize)
+	b = append(b, byte(c.CompressorID))
+	b = append(b, c.CompressedMessage...)
+
+	return b, err
+}
+
+// String implements the fmt.Stringer interface.
+// Note that CompressedMessage is formatted with %s, so the (binary)
+// compressed payload is printed as raw bytes.
+func (c Compressed) String() string {
+	return fmt.Sprintf(
+		`OP_COMPRESSED{MsgHeader: %s, Uncompressed Size: %d, CompressorId: %d, Compressed message: %s}`,
+		c.MsgHeader, c.UncompressedSize, c.CompressorID, c.CompressedMessage,
+	)
+}
+
+// Len implements the WireMessage interface.
+func (c Compressed) Len() int {
+	// Header + OpCode + UncompressedSize + CompressorId + CompressedMessage
+	return 16 + 4 + 4 + 1 + len(c.CompressedMessage)
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+// It decodes the header and the fixed OP_COMPRESSED fields from b, then
+// aliases (does not copy) the compressed payload.
+func (c *Compressed) UnmarshalWireMessage(b []byte) error {
+	var err error
+	c.MsgHeader, err = ReadHeader(b, 0)
+	if err != nil {
+		return err
+	}
+
+	// The fixed portion of an OP_COMPRESSED is 25 bytes: 16-byte header,
+	// 4-byte original opcode, 4-byte uncompressed size, 1-byte compressor ID.
+	// Reject anything shorter so the field reads and the payload slice below
+	// cannot panic on malformed input (the original only checked len(b)
+	// against MessageLength, which allowed MessageLength < 25 to crash).
+	const fixedSize = 16 + 4 + 4 + 1
+	if int(c.MsgHeader.MessageLength) < fixedSize || len(b) < int(c.MsgHeader.MessageLength) {
+		return Error{Type: ErrOpCompressed, Message: "[]byte too small"}
+	}
+
+	c.OriginalOpCode = OpCode(readInt32(b, 16)) // skip first 16 for header
+	c.UncompressedSize = readInt32(b, 20)
+	c.CompressorID = CompressorID(b[24])
+
+	// messageLength - Header - OpCode - UncompressedSize - CompressorID
+	msgLen := int(c.MsgHeader.MessageLength) - fixedSize
+	c.CompressedMessage = b[fixedSize : fixedSize+msgLen]
+
+	return nil
+}
+
+// CompressorID is the ID for each type of Compressor.
+type CompressorID uint8
+
+// These constants represent the individual compressor IDs for an OP_COMPRESSED.
+const (
+	CompressorNoOp CompressorID = iota // 0: no compression
+	CompressorSnappy                   // 1: snappy
+	CompressorZLib                     // 2: zlib
+)
+
+// DefaultZlibLevel is the default level for zlib compression
+const DefaultZlibLevel = 6
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/delete.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/delete.go
new file mode 100644
index 0000000..0a502da
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/delete.go
@@ -0,0 +1,55 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import "github.com/mongodb/mongo-go-driver/bson"
+
+// Delete represents the OP_DELETE message of the MongoDB wire protocol.
+// All of its methods are unimplemented stubs that panic.
+type Delete struct {
+	MsgHeader          Header
+	FullCollectionName string
+	Flags              DeleteFlag
+	Selector           bson.Raw
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+// It always panics: OP_DELETE marshaling is not implemented.
+func (d Delete) MarshalWireMessage() ([]byte, error) {
+	panic("not implemented")
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+// It always panics: OP_DELETE validation is not implemented.
+func (d Delete) ValidateWireMessage() error {
+	panic("not implemented")
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+// It always panics: OP_DELETE appending is not implemented.
+func (d Delete) AppendWireMessage([]byte) ([]byte, error) {
+	panic("not implemented")
+}
+
+// String implements the fmt.Stringer interface. It always panics.
+func (d Delete) String() string {
+	panic("not implemented")
+}
+
+// Len implements the WireMessage interface. It always panics.
+func (d Delete) Len() int {
+	panic("not implemented")
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface. It always panics.
+func (d *Delete) UnmarshalWireMessage([]byte) error {
+	panic("not implemented")
+}
+
+// DeleteFlag represents the flags on an OP_DELETE message.
+type DeleteFlag int32
+
+// These constants represent the individual flags on an OP_DELETE message.
+const (
+	SingleRemove DeleteFlag = 1 << iota
+)
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/get_more.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/get_more.go
new file mode 100644
index 0000000..f197113
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/get_more.go
@@ -0,0 +1,103 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"errors"
+	"fmt"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+	"strings"
+)
+
+// GetMore represents the OP_GET_MORE message of the MongoDB wire protocol.
+type GetMore struct {
+	MsgHeader          Header
+	// Zero is a reserved field in the wire format; always written as-is.
+	Zero               int32
+	// FullCollectionName is the "<db>.<collection>" namespace.
+	FullCollectionName string
+	NumberToReturn     int32
+	CursorID           int64
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+// It delegates to AppendWireMessage with a buffer pre-sized to Len().
+func (gm GetMore) MarshalWireMessage() ([]byte, error) {
+	return gm.AppendWireMessage(make([]byte, 0, gm.Len()))
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+// It checks the header length and opcode, and that the namespace is of the
+// "<db>.<collection>" form.
+func (gm GetMore) ValidateWireMessage() error {
+	if int(gm.MsgHeader.MessageLength) != gm.Len() {
+		return errors.New("incorrect header: message length is not correct")
+	}
+	if gm.MsgHeader.OpCode != OpGetMore {
+		return errors.New("incorrect header: op code is not OpGetMore")
+	}
+	// strings.Contains is the idiomatic replacement for Index(...) == -1.
+	if !strings.Contains(gm.FullCollectionName, ".") {
+		return errors.New("incorrect header: collection name does not contain a dot")
+	}
+
+	return nil
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+//
+// AppendWireMessage will set the MessageLength property of the MsgHeader
+// if it is zero. It will also set the OpCode to OpGetMore if the OpCode is
+// zero. If either of these properties are non-zero and not correct, this
+// method will return both the []byte with the wire message appended to it
+// and an invalid header error.
+func (gm GetMore) AppendWireMessage(b []byte) ([]byte, error) {
+	var err error
+	err = gm.MsgHeader.SetDefaults(gm.Len(), OpGetMore)
+
+	b = gm.MsgHeader.AppendHeader(b)
+	b = appendInt32(b, gm.Zero)
+	b = appendCString(b, gm.FullCollectionName)
+	b = appendInt32(b, gm.NumberToReturn)
+	b = appendInt64(b, gm.CursorID)
+	return b, err
+}
+
+// String implements the fmt.Stringer interface, rendering all wire fields
+// for debugging/logging.
+func (gm GetMore) String() string {
+	return fmt.Sprintf(
+		`OP_GET_MORE{MsgHeader: %s, Zero: %d, FullCollectionName: %s, NumberToReturn: %d, CursorID: %d}`,
+		gm.MsgHeader, gm.Zero, gm.FullCollectionName, gm.NumberToReturn, gm.CursorID,
+	)
+}
+
+// Len implements the WireMessage interface.
+func (gm GetMore) Len() int {
+	// Header + Zero + CollectionName + Null Terminator + Return + CursorID
+	return 16 + 4 + len(gm.FullCollectionName) + 1 + 4 + 8
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+// It always panics: OP_GET_MORE decoding is not implemented.
+func (gm *GetMore) UnmarshalWireMessage([]byte) error {
+	panic("not implemented")
+}
+
+// CommandDocument creates a BSON document representing this command, using
+// the portion of FullCollectionName after the last dot as the collection.
+func (gm GetMore) CommandDocument() bsonx.Doc {
+	collName := gm.FullCollectionName
+	if idx := strings.LastIndex(collName, "."); idx != -1 {
+		collName = collName[idx+1:]
+	}
+
+	doc := bsonx.Doc{
+		{"getMore", bsonx.Int64(gm.CursorID)},
+		{"collection", bsonx.String(collName)},
+	}
+	if gm.NumberToReturn != 0 {
+		doc = doc.Append("batchSize", bsonx.Int32(gm.NumberToReturn))
+	}
+
+	return doc
+}
+
+// DatabaseName returns the name of the database for this command: the part
+// of FullCollectionName before the first dot (or the whole string when no
+// dot is present).
+func (gm GetMore) DatabaseName() string {
+	if idx := strings.Index(gm.FullCollectionName, "."); idx != -1 {
+		return gm.FullCollectionName[:idx]
+	}
+	return gm.FullCollectionName
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/header.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/header.go
new file mode 100644
index 0000000..ad1c291
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/header.go
@@ -0,0 +1,87 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"fmt"
+)
+
+// ErrInvalidHeader is returned when methods are called on a malformed Header.
+var ErrInvalidHeader error = Error{Type: ErrHeader, Message: "invalid header"}
+
+// ErrHeaderTooSmall is returned when the size of the header is too small to be valid.
+var ErrHeaderTooSmall error = Error{Type: ErrHeader, Message: "the header is too small to be valid"}
+
+// ErrHeaderTooFewBytes is returned when a call to ReadHeader does not contain enough
+// bytes to be a valid header.
+var ErrHeaderTooFewBytes error = Error{Type: ErrHeader, Message: "invalid header because []byte too small"}
+
+// ErrHeaderInvalidLength is returned when the MessageLength of a header is
+// set but is not set to the correct size.
+// (Fixed typo in the message: "imporperly" -> "improperly".)
+var ErrHeaderInvalidLength error = Error{Type: ErrHeader, Message: "invalid header because MessageLength is improperly set"}
+
+// ErrHeaderIncorrectOpCode is returned when the OpCode on a header is set but
+// is not set to the correct OpCode.
+var ErrHeaderIncorrectOpCode error = Error{Type: ErrHeader, Message: "invalid header because OpCode is improperly set"}
+
+// Header represents the header of a MongoDB wire protocol message.
+// It occupies the first 16 bytes of every message: four little-endian int32s.
+type Header struct {
+	MessageLength int32
+	RequestID     int32
+	ResponseTo    int32
+	OpCode        OpCode
+}
+
+// ReadHeader reads a header from the given slice of bytes starting at offset
+// pos.
+//
+// Fix: the original ignored pos entirely and always decoded from offset 0,
+// contradicting its own documentation; pos is now honored (behavior is
+// unchanged for callers passing 0).
+func ReadHeader(b []byte, pos int32) (Header, error) {
+	if len(b) < int(pos)+16 {
+		return Header{}, ErrHeaderTooFewBytes
+	}
+	return Header{
+		MessageLength: readInt32(b, pos),
+		RequestID:     readInt32(b, pos+4),
+		ResponseTo:    readInt32(b, pos+8),
+		OpCode:        OpCode(readInt32(b, pos+12)),
+	}, nil
+}
+
+// String implements the fmt.Stringer interface for debugging/logging.
+func (h Header) String() string {
+	return fmt.Sprintf(
+		`Header{MessageLength: %d, RequestID: %d, ResponseTo: %d, OpCode: %v}`,
+		h.MessageLength, h.RequestID, h.ResponseTo, h.OpCode,
+	)
+}
+
+// AppendHeader will append this header to the given slice of bytes, writing
+// the four fields as little-endian int32s in wire order.
+func (h Header) AppendHeader(b []byte) []byte {
+	for _, field := range []int32{h.MessageLength, h.RequestID, h.ResponseTo, int32(h.OpCode)} {
+		b = appendInt32(b, field)
+	}
+	return b
+}
+
+// SetDefaults sets the length and opcode of this header. Zero-valued fields
+// are filled in; fields that are already set must match the supplied values
+// or an error is returned (MessageLength is checked before OpCode).
+func (h *Header) SetDefaults(length int, opcode OpCode) error {
+	if h.MessageLength == 0 {
+		h.MessageLength = int32(length)
+	} else if h.MessageLength != int32(length) {
+		return ErrHeaderInvalidLength
+	}
+	if h.OpCode == OpCode(0) {
+		h.OpCode = opcode
+	} else if h.OpCode != opcode {
+		return ErrHeaderIncorrectOpCode
+	}
+	return nil
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/insert.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/insert.go
new file mode 100644
index 0000000..498228d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/insert.go
@@ -0,0 +1,55 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import "github.com/mongodb/mongo-go-driver/bson"
+
+// Insert represents the OP_INSERT message of the MongoDB wire protocol.
+// All of its methods are unimplemented stubs that panic.
+type Insert struct {
+	MsgHeader          Header
+	Flags              InsertFlag
+	FullCollectionName string
+	Documents          []bson.Raw
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+// It always panics: OP_INSERT marshaling is not implemented.
+func (i Insert) MarshalWireMessage() ([]byte, error) {
+	panic("not implemented")
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+// It always panics: OP_INSERT validation is not implemented.
+func (i Insert) ValidateWireMessage() error {
+	panic("not implemented")
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+// It always panics: OP_INSERT appending is not implemented.
+func (i Insert) AppendWireMessage([]byte) ([]byte, error) {
+	panic("not implemented")
+}
+
+// String implements the fmt.Stringer interface. It always panics.
+func (i Insert) String() string {
+	panic("not implemented")
+}
+
+// Len implements the WireMessage interface. It always panics.
+func (i Insert) Len() int {
+	panic("not implemented")
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface. It always panics.
+func (i *Insert) UnmarshalWireMessage([]byte) error {
+	panic("not implemented")
+}
+
+// InsertFlag represents the flags on an OP_INSERT message.
+type InsertFlag int32
+
+// These constants represent the individual flags on an OP_INSERT message.
+const (
+	ContinueOnError InsertFlag = 1 << iota
+)
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/kill_cursors.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/kill_cursors.go
new file mode 100644
index 0000000..228a6ae
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/kill_cursors.go
@@ -0,0 +1,92 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"errors"
+	"fmt"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
+// KillCursors represents the OP_KILL_CURSORS message of the MongoDB wire protocol.
+type KillCursors struct {
+	MsgHeader         Header
+	// Zero is a reserved field in the wire format; always written as-is.
+	Zero              int32
+	// NumberOfCursorIDs must equal len(CursorIDs); ValidateWireMessage
+	// enforces this.
+	NumberOfCursorIDs int32
+	CursorIDs         []int64
+
+	// DatabaseName and CollectionName are not part of the wire format; they
+	// are used by CommandDocument to build the command form.
+	DatabaseName   string
+	CollectionName string
+}
+
+// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
+// It delegates to AppendWireMessage with a buffer pre-sized to Len().
+func (kc KillCursors) MarshalWireMessage() ([]byte, error) {
+	return kc.AppendWireMessage(make([]byte, 0, kc.Len()))
+}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+// It checks the header length and opcode, and that NumberOfCursorIDs agrees
+// with len(CursorIDs).
+func (kc KillCursors) ValidateWireMessage() error {
+	if int(kc.MsgHeader.MessageLength) != kc.Len() {
+		return errors.New("incorrect header: message length is not correct")
+	}
+	// Fix: the error message previously said "OpGetMore" (copy-paste from
+	// get_more.go) even though this validates an OP_KILL_CURSORS message.
+	if kc.MsgHeader.OpCode != OpKillCursors {
+		return errors.New("incorrect header: op code is not OpKillCursors")
+	}
+	if kc.NumberOfCursorIDs != int32(len(kc.CursorIDs)) {
+		return errors.New("incorrect number of cursor IDs")
+	}
+
+	return nil
+}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+func (kc KillCursors) AppendWireMessage(b []byte) ([]byte, error) {
+	var err error
+	err = kc.MsgHeader.SetDefaults(kc.Len(), OpKillCursors)
+
+	b = kc.MsgHeader.AppendHeader(b)
+	b = appendInt32(b, kc.Zero)
+	b = appendInt32(b, kc.NumberOfCursorIDs)
+	for _, id := range kc.CursorIDs {
+		b = appendInt64(b, id)
+	}
+
+	return b, err
+}
+
+// String implements the fmt.Stringer interface, rendering all wire fields
+// for debugging/logging.
+func (kc KillCursors) String() string {
+	return fmt.Sprintf(
+		`OP_KILL_CURSORS{MsgHeader: %s, Zero: %d, Number of Cursor IDS: %d, Cursor IDs: %v}`,
+		kc.MsgHeader, kc.Zero, kc.NumberOfCursorIDs, kc.CursorIDs,
+	)
+}
+
+// Len implements the WireMessage interface.
+//
+// NOTE(review): the size is computed from NumberOfCursorIDs rather than
+// len(CursorIDs); if the two fields disagree the reported length will not
+// match what AppendWireMessage actually writes — confirm callers keep them
+// in sync (ValidateWireMessage does check this).
+func (kc KillCursors) Len() int {
+	// Header + Zero + Number IDs + 8 * Number IDs
+	return 16 + 4 + 4 + int(kc.NumberOfCursorIDs*8)
+}
+
+// UnmarshalWireMessage implements the Unmarshaler interface.
+// It always panics: OP_KILL_CURSORS decoding is not implemented.
+func (kc *KillCursors) UnmarshalWireMessage([]byte) error {
+	panic("not implemented")
+}
+
+// CommandDocument creates a BSON document representing this command in its
+// killCursors command form.
+func (kc KillCursors) CommandDocument() bsonx.Doc {
+	ids := make([]bsonx.Val, 0, len(kc.CursorIDs))
+	for _, id := range kc.CursorIDs {
+		ids = append(ids, bsonx.Int64(id))
+	}
+
+	return bsonx.Doc{
+		{"killCursors", bsonx.String(kc.CollectionName)},
+		{"cursors", bsonx.Array(ids)},
+	}
+}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/msg.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/msg.go
new file mode 100644
index 0000000..07f35ab
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/msg.go
@@ -0,0 +1,298 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
// Msg represents the OP_MSG message of the MongoDB wire protocol.
type Msg struct {
	MsgHeader Header
	FlagBits  MsgFlag
	Sections  []Section
	// Checksum trails the sections and is only present (and only read/counted)
	// when FlagBits&ChecksumPresent is set.
	Checksum uint32
}

// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
//
// The output buffer is pre-sized via Len to avoid growth during append.
func (m Msg) MarshalWireMessage() ([]byte, error) {
	b := make([]byte, 0, m.Len())
	return m.AppendWireMessage(b)
}
+
// ValidateWireMessage implements the Validator and WireMessage interfaces.
//
// It checks that the header's declared length matches the computed length and
// that the opcode identifies an OP_MSG. Section contents are not validated here.
func (m Msg) ValidateWireMessage() error {
	if int(m.MsgHeader.MessageLength) != m.Len() {
		return errors.New("incorrect header: message length is not correct")
	}
	if m.MsgHeader.OpCode != OpMsg {
		return errors.New("incorrect header: opcode is not OpMsg")
	}

	return nil
}
+
+// AppendWireMessage implements the Appender and WireMessage interfaces.
+//
+// AppendWireMesssage will set the MessageLength property of the MsgHeader if it is zero. It will also set the Opcode
+// to OP_MSG if it is zero. If either of these properties are non-zero and not correct, this method will return both the
+// []byte with the wire message appended to it and an invalid header error.
+func (m Msg) AppendWireMessage(b []byte) ([]byte, error) {
+	var err error
+	err = m.MsgHeader.SetDefaults(m.Len(), OpMsg)
+
+	b = m.MsgHeader.AppendHeader(b)
+	b = appendInt32(b, int32(m.FlagBits))
+
+	for _, section := range m.Sections {
+		newB := make([]byte, 0)
+		newB = section.AppendSection(newB)
+
+		b = section.AppendSection(b)
+	}
+
+	return b, err
+}
+
// String implements the fmt.Stringer interface.
func (m Msg) String() string {
	panic("not implemented")
}

// Len implements the WireMessage interface.
//
// The total is the fixed header and flag bytes, each section's encoded length,
// and four trailing bytes when the checksum flag is set.
func (m Msg) Len() int {
	// Header + Flags + len of each section + optional checksum
	totalLen := 16 + 4 // header and flag

	for _, section := range m.Sections {
		totalLen += section.Len()
	}

	if m.FlagBits&ChecksumPresent > 0 {
		totalLen += 4
	}

	return totalLen
}
+
// UnmarshalWireMessage implements the Unmarshaler interface.
//
// It decodes the header, flag bits, every section, and (when the checksum
// flag is set) the trailing checksum. `position` tracks the next byte to read
// and `sectionBytes` the number of section bytes still unconsumed.
//
// NOTE(review): an unrecognized section type byte falls through the switch
// without decrementing sectionBytes, which would loop forever — confirm that
// inputs are limited to the two known section kinds upstream.
func (m *Msg) UnmarshalWireMessage(b []byte) error {
	var err error

	m.MsgHeader, err = ReadHeader(b, 0)
	if err != nil {
		return err
	}
	if len(b) < int(m.MsgHeader.MessageLength) {
		return Error{
			Type:    ErrOpMsg,
			Message: "[]byte too small",
		}
	}

	m.FlagBits = MsgFlag(readInt32(b, 16))

	// read each section
	sectionBytes := m.MsgHeader.MessageLength - 16 - 4 // number of bytes taken up by sections
	hasChecksum := m.FlagBits&ChecksumPresent > 0
	if hasChecksum {
		sectionBytes -= 4 // 4 bytes at end for checksum
	}

	m.Sections = make([]Section, 0)
	position := 20 // position to read from
	for sectionBytes > 0 {
		// First byte of every section is its payload-type discriminator.
		sectionType := SectionType(b[position])
		position++

		switch sectionType {
		case SingleDocument:
			rdr, size, err := readDocument(b, int32(position))
			if err.Message != "" {
				err.Type = ErrOpMsg
				return err
			}

			position += size
			sb := SectionBody{
				Document: rdr,
			}
			sb.PayloadType = sb.Kind()

			sectionBytes -= int32(sb.Len())
			m.Sections = append(m.Sections, sb)
		case DocumentSequence:
			sds := SectionDocumentSequence{}
			sds.Size = readInt32(b, int32(position))
			position += 4

			identifier, err := readCString(b, int32(position))
			if err != nil {
				return err
			}

			sds.Identifier = identifier
			position += len(identifier) + 1 // +1 for \0
			sds.PayloadType = sds.Kind()

			// length of documents to read
			// sequenceLen - 4 bytes for size field - identifierLength (including \0)
			docsLen := int(sds.Size) - 4 - len(identifier) - 1
			for docsLen > 0 {
				rdr, size, err := readDocument(b, int32(position))
				if err.Message != "" {
					err.Type = ErrOpMsg
					return err
				}

				position += size
				sds.Documents = append(sds.Documents, rdr)
				docsLen -= size
			}

			sectionBytes -= int32(sds.Len())
			m.Sections = append(m.Sections, sds)
		}
	}

	if hasChecksum {
		// Checksum occupies the final 4 bytes after all sections.
		m.Checksum = uint32(readInt32(b, int32(position)))
	}

	return nil
}
+
// GetMainDocument returns the document containing the message to send.
//
// NOTE(review): this assumes Sections is non-empty and that the first section
// is a SectionBody; either assumption failing panics — confirm callers
// guarantee this.
func (m *Msg) GetMainDocument() (bsonx.Doc, error) {
	return bsonx.ReadDoc(m.Sections[0].(SectionBody).Document)
}

// GetSequenceArray returns this message's document sequence as a BSON array along with the array identifier.
// If this message has no associated document sequence, a nil array is returned.
//
// The sequence, when present, is assumed to be the second section.
func (m *Msg) GetSequenceArray() (bsonx.Arr, string, error) {
	if len(m.Sections) == 1 {
		return nil, "", nil
	}

	arr := bsonx.Arr{}
	sds := m.Sections[1].(SectionDocumentSequence)

	for _, rdr := range sds.Documents {
		doc, err := bsonx.ReadDoc([]byte(rdr))
		if err != nil {
			return nil, "", err
		}

		arr = append(arr, bsonx.Document(doc))
	}

	return arr, sds.Identifier, nil
}

// AcknowledgedWrite returns true if this msg represents an acknowledged write command.
// A set MoreToCome flag means no reply is expected, i.e. an unacknowledged write.
func (m *Msg) AcknowledgedWrite() bool {
	return m.FlagBits&MoreToCome == 0
}
+
// MsgFlag represents the flags on an OP_MSG message.
type MsgFlag uint32

// These constants represent the individual flags on an OP_MSG message.
// ChecksumPresent and MoreToCome occupy the two lowest bits; ExhaustAllowed
// is defined by the protocol at bit 16, hence the explicit value.
const (
	ChecksumPresent MsgFlag = 1 << iota
	MoreToCome

	ExhaustAllowed MsgFlag = 1 << 16
)

// Section represents a section on an OP_MSG message. Implementations report
// their discriminator byte (Kind), encoded size including that byte (Len),
// and append their encoding to a buffer (AppendSection).
type Section interface {
	Kind() SectionType
	Len() int
	AppendSection([]byte) []byte
}
+
// SectionBody represents the kind body of an OP_MSG message.
type SectionBody struct {
	PayloadType SectionType
	Document    bson.Raw
}

// Kind implements the Section interface.
func (sb SectionBody) Kind() SectionType {
	return SingleDocument
}

// Len implements the Section interface.
// The encoded form is the payload-type byte followed by the raw document.
func (sb SectionBody) Len() int {
	return 1 + len(sb.Document) // 1 for PayloadType
}

// AppendSection implements the Section interface.
func (sb SectionBody) AppendSection(dest []byte) []byte {
	dest = append(dest, byte(SingleDocument))
	dest = append(dest, sb.Document...)
	return dest
}
+
// SectionDocumentSequence represents the kind document sequence of an OP_MSG message.
type SectionDocumentSequence struct {
	PayloadType SectionType
	// Size is the sequence length in bytes: the size field itself, the
	// identifier (with its null terminator), and all documents.
	Size       int32
	Identifier string
	Documents  []bson.Raw
}

// Kind implements the Section interface.
func (sds SectionDocumentSequence) Kind() SectionType {
	return DocumentSequence
}

// Len implements the Section interface.
// Computed from the fields rather than trusting the Size value.
func (sds SectionDocumentSequence) Len() int {
	// PayloadType + Size + Identifier + 1 (null terminator) + totalDocLen
	totalDocLen := 0
	for _, doc := range sds.Documents {
		totalDocLen += len(doc)
	}

	return 1 + 4 + len(sds.Identifier) + 1 + totalDocLen
}

// PayloadLen returns the length of the payload, i.e. Len without the leading
// payload-type byte.
func (sds SectionDocumentSequence) PayloadLen() int {
	// 4 bytes for size field, len identifier (including \0), and total docs len
	return sds.Len() - 1
}

// AppendSection implements the Section interface.
func (sds SectionDocumentSequence) AppendSection(dest []byte) []byte {
	dest = append(dest, byte(DocumentSequence))
	dest = appendInt32(dest, sds.Size)
	dest = appendCString(dest, sds.Identifier)

	for _, doc := range sds.Documents {
		dest = append(dest, doc...)
	}

	return dest
}
+
// SectionType represents the type for 1 section in an OP_MSG.
// It is the single discriminator byte that precedes each section.
type SectionType uint8

// These constants represent the individual section types for a section in an OP_MSG.
const (
	SingleDocument SectionType = iota // kind 0: one BSON document
	DocumentSequence                  // kind 1: size, identifier, then documents
)

// OpmsgWireVersion is the minimum wire version needed to use OP_MSG.
const OpmsgWireVersion = 6
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/query.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/query.go
new file mode 100644
index 0000000..568a3ec
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/query.go
@@ -0,0 +1,336 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/mongo/writeconcern"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
+// Query represents the OP_QUERY message of the MongoDB wire protocol.
+type Query struct {
+	MsgHeader            Header
+	Flags                QueryFlag
+	FullCollectionName   string
+	NumberToSkip         int32
+	NumberToReturn       int32
+	Query                bson.Raw
+	ReturnFieldsSelector bson.Raw
+
+	SkipSet   bool
+	Limit     *int32
+	BatchSize *int32
+}
+
+var optionsMap = map[string]string{
+	"$orderby":     "sort",
+	"$hint":        "hint",
+	"$comment":     "comment",
+	"$maxScan":     "maxScan",
+	"$max":         "max",
+	"$min":         "min",
+	"$returnKey":   "returnKey",
+	"$showDiskLoc": "showRecordId",
+	"$maxTimeMS":   "maxTimeMS",
+	"$snapshot":    "snapshot",
+}
+
// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
//
// See AppendWireMessage for a description of the rules this method follows.
// The output buffer is pre-sized via Len to avoid growth during append.
func (q Query) MarshalWireMessage() ([]byte, error) {
	b := make([]byte, 0, q.Len())
	return q.AppendWireMessage(b)
}
+
+// ValidateWireMessage implements the Validator and WireMessage interfaces.
+func (q Query) ValidateWireMessage() error {
+	if int(q.MsgHeader.MessageLength) != q.Len() {
+		return errors.New("incorrect header: message length is not correct")
+	}
+	if q.MsgHeader.OpCode != OpQuery {
+		return errors.New("incorrect header: op code is not OpQuery")
+	}
+	if strings.Index(q.FullCollectionName, ".") == -1 {
+		return errors.New("incorrect header: collection name does not contain a dot")
+	}
+	if q.Query != nil && len(q.Query) > 0 {
+		err := q.Query.Validate()
+		if err != nil {
+			return err
+		}
+	}
+
+	if q.ReturnFieldsSelector != nil && len(q.ReturnFieldsSelector) > 0 {
+		err := q.ReturnFieldsSelector.Validate()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
// AppendWireMessage implements the Appender and WireMessage interfaces.
//
// AppendWireMessage will set the MessageLength property of the MsgHeader
// if it is zero. It will also set the OpCode to OpQuery if the OpCode is
// zero. If either of these properties are non-zero and not correct, this
// method will return both the []byte with the wire message appended to it
// and an invalid header error.
func (q Query) AppendWireMessage(b []byte) ([]byte, error) {
	var err error
	err = q.MsgHeader.SetDefaults(q.Len(), OpQuery)

	b = q.MsgHeader.AppendHeader(b)
	b = appendInt32(b, int32(q.Flags))
	b = appendCString(b, q.FullCollectionName)
	b = appendInt32(b, q.NumberToSkip)
	b = appendInt32(b, q.NumberToReturn)
	// ReturnFieldsSelector is optional; appending a nil bson.Raw is a no-op.
	b = append(b, q.Query...)
	b = append(b, q.ReturnFieldsSelector...)
	return b, err
}

// String implements the fmt.Stringer interface.
func (q Query) String() string {
	return fmt.Sprintf(
		`OP_QUERY{MsgHeader: %s, Flags: %s, FullCollectionname: %s, NumberToSkip: %d, NumberToReturn: %d, Query: %s, ReturnFieldsSelector: %s}`,
		q.MsgHeader, q.Flags, q.FullCollectionName, q.NumberToSkip, q.NumberToReturn, q.Query, q.ReturnFieldsSelector,
	)
}

// Len implements the WireMessage interface.
func (q Query) Len() int {
	// Header + Flags + CollectionName + Null Byte + Skip + Return + Query + ReturnFieldsSelector
	return 16 + 4 + len(q.FullCollectionName) + 1 + 4 + 4 + len(q.Query) + len(q.ReturnFieldsSelector)
}
+
// UnmarshalWireMessage implements the Unmarshaler interface.
//
// Fields are decoded in wire order: header, flags, full collection name,
// skip, return count, the query document, and — only if bytes remain — the
// optional return-fields selector.
func (q *Query) UnmarshalWireMessage(b []byte) error {
	var err error
	q.MsgHeader, err = ReadHeader(b, 0)
	if err != nil {
		return err
	}
	if len(b) < int(q.MsgHeader.MessageLength) {
		return Error{Type: ErrOpQuery, Message: "[]byte too small"}
	}

	q.Flags = QueryFlag(readInt32(b, 16))
	q.FullCollectionName, err = readCString(b, 20)
	if err != nil {
		return err
	}
	pos := 20 + len(q.FullCollectionName) + 1 // +1 for the cstring terminator
	q.NumberToSkip = readInt32(b, int32(pos))
	pos += 4
	q.NumberToReturn = readInt32(b, int32(pos))
	pos += 4

	var size int
	var wmerr Error
	q.Query, size, wmerr = readDocument(b, int32(pos))
	if wmerr.Message != "" {
		wmerr.Type = ErrOpQuery
		return wmerr
	}
	pos += size
	// Any remaining bytes are the optional ReturnFieldsSelector document.
	if pos < len(b) {
		q.ReturnFieldsSelector, size, wmerr = readDocument(b, int32(pos))
		if wmerr.Message != "" {
			wmerr.Type = ErrOpQuery
			return wmerr
		}
		pos += size
	}

	return nil
}
+
// AcknowledgedWrite returns true if this command represents an acknowledged write.
// A query with no writeConcern element is treated as acknowledged.
func (q *Query) AcknowledgedWrite() bool {
	wcElem, err := q.Query.LookupErr("writeConcern")
	if err != nil {
		// no wc --> ack
		return true
	}

	return writeconcern.AcknowledgedValue(wcElem)
}

// Legacy returns true if the query represents a legacy find operation.
// Commands are addressed to the pseudo-collection "<db>.$cmd"; anything else
// is a legacy find against a real collection.
func (q Query) Legacy() bool {
	return !strings.Contains(q.FullCollectionName, "$cmd")
}

// DatabaseName returns the database name for the query.
func (q Query) DatabaseName() string {
	if q.Legacy() {
		return strings.Split(q.FullCollectionName, ".")[0]
	}

	return q.FullCollectionName[:len(q.FullCollectionName)-5] // remove .$cmd
}

// CollectionName returns the collection name for the query,
// i.e. everything after the final dot in the full name.
func (q Query) CollectionName() string {
	parts := strings.Split(q.FullCollectionName, ".")
	return parts[len(parts)-1]
}
+
// CommandDocument creates a BSON document representing this command.
//
// Legacy finds are translated via legacyCommandDocument; otherwise the query
// document itself is the command. A query wrapped as {$query: {...}, ...} is
// unwrapped to the inner document.
func (q Query) CommandDocument() (bsonx.Doc, error) {
	if q.Legacy() {
		return q.legacyCommandDocument()
	}

	cmd, err := bsonx.ReadDoc([]byte(q.Query))
	if err != nil {
		return nil, err
	}

	cmdElem := cmd[0]
	if cmdElem.Key == "$query" {
		cmd = cmdElem.Value.Document()
	}

	return cmd, nil
}
+
// legacyCommandDocument converts a legacy OP_QUERY find into a modern find
// command document: {find: <coll>, <renamed $-options...>, filter: <filter>}.
func (q Query) legacyCommandDocument() (bsonx.Doc, error) {
	doc, err := bsonx.ReadDoc(q.Query)
	if err != nil {
		return nil, err
	}

	parts := strings.Split(q.FullCollectionName, ".")
	collName := parts[len(parts)-1]
	doc = append(bsonx.Doc{{"find", bsonx.String(collName)}}, doc...)

	var filter bsonx.Doc
	var queryIndex int
	for i, elem := range doc {
		// $-prefixed modifiers are renamed in place (e.g. $orderby -> sort).
		if newKey, ok := optionsMap[elem.Key]; ok {
			doc[i].Key = newKey
			continue
		}

		if elem.Key == "$query" {
			filter = elem.Value.Document()
		} else {
			// the element is the filter
			filter = filter.Append(elem.Key, elem.Value)
		}

		// NOTE(review): queryIndex ends up pointing at the LAST non-option
		// element, not necessarily $query; if no $query element exists the
		// removal below drops a filter element from doc — confirm that legacy
		// callers always send either a wrapped {$query: ...} form or a bare
		// filter whose elements are fully copied into `filter` above.
		queryIndex = i
	}

	doc = append(doc[:queryIndex], doc[queryIndex+1:]...) // remove $query
	if len(filter) != 0 {
		doc = doc.Append("filter", bsonx.Document(filter))
	}

	doc, err = q.convertLegacyParams(doc)
	if err != nil {
		return nil, err
	}

	return doc, nil
}
+
+func (q Query) convertLegacyParams(doc bsonx.Doc) (bsonx.Doc, error) {
+	if q.ReturnFieldsSelector != nil {
+		projDoc, err := bsonx.ReadDoc(q.ReturnFieldsSelector)
+		if err != nil {
+			return nil, err
+		}
+		doc = doc.Append("projection", bsonx.Document(projDoc))
+	}
+	if q.Limit != nil {
+		limit := *q.Limit
+		if limit < 0 {
+			limit *= -1
+			doc = doc.Append("singleBatch", bsonx.Boolean(true))
+		}
+
+		doc = doc.Append("limit", bsonx.Int32(*q.Limit))
+	}
+	if q.BatchSize != nil {
+		doc = doc.Append("batchSize", bsonx.Int32(*q.BatchSize))
+	}
+	if q.SkipSet {
+		doc = doc.Append("skip", bsonx.Int32(q.NumberToSkip))
+	}
+	if q.Flags&TailableCursor > 0 {
+		doc = doc.Append("tailable", bsonx.Boolean(true))
+	}
+	if q.Flags&OplogReplay > 0 {
+		doc = doc.Append("oplogReplay", bsonx.Boolean(true))
+	}
+	if q.Flags&NoCursorTimeout > 0 {
+		doc = doc.Append("noCursorTimeout", bsonx.Boolean(true))
+	}
+	if q.Flags&AwaitData > 0 {
+		doc = doc.Append("awaitData", bsonx.Boolean(true))
+	}
+	if q.Flags&Partial > 0 {
+		doc = doc.Append("allowPartialResults", bsonx.Boolean(true))
+	}
+
+	return doc, nil
+}
+
// QueryFlag represents the flags on an OP_QUERY message.
type QueryFlag int32

// These constants represent the individual flags on an OP_QUERY message.
const (
	_ QueryFlag = 1 << iota
	TailableCursor
	SlaveOK
	OplogReplay
	NoCursorTimeout
	AwaitData
	Exhaust
	Partial
)

// String implements the fmt.Stringer interface. The set flags are listed in
// bit order inside brackets, e.g. "[TailableCursor, Exhaust]".
func (qf QueryFlag) String() string {
	named := []struct {
		bit  QueryFlag
		name string
	}{
		{TailableCursor, "TailableCursor"},
		{SlaveOK, "SlaveOK"},
		{OplogReplay, "OplogReplay"},
		{NoCursorTimeout, "NoCursorTimeout"},
		{AwaitData, "AwaitData"},
		{Exhaust, "Exhaust"},
		{Partial, "Partial"},
	}

	var set []string
	for _, n := range named {
		if qf&n.bit == n.bit {
			set = append(set, n.name)
		}
	}

	return "[" + strings.Join(set, ", ") + "]"
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/readers.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/readers.go
new file mode 100644
index 0000000..bbf394d
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/readers.go
@@ -0,0 +1,51 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"bytes"
+	"errors"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+)
+
// readInt32 decodes the little-endian int32 stored at b[pos:pos+4].
func readInt32(b []byte, pos int32) int32 {
	var v int32
	for i := int32(3); i >= 0; i-- {
		v = v<<8 | int32(b[pos+i])
	}
	return v
}
+
// readCString decodes the null-terminated string starting at b[pos]. The
// returned string excludes the terminator; an error is returned when no
// terminator exists in the remainder of b.
func readCString(b []byte, pos int32) (string, error) {
	terminator := bytes.IndexByte(b[pos:], 0x00)
	if terminator < 0 {
		return "", errors.New("invalid cstring")
	}
	return string(b[pos:][:terminator]), nil
}
+
// readInt64 decodes the little-endian int64 stored at b[pos:pos+8].
func readInt64(b []byte, pos int32) int64 {
	var v int64
	for i := int32(7); i >= 0; i-- {
		v = v<<8 | int64(b[pos+i])
	}
	return v
}
+
// readDocument will attempt to read a bson.Reader from the given slice of bytes
// from the given position.
//
// It validates that the 4-byte length prefix fits, that the declared size does
// not exceed the available bytes, and that the document's final byte is the
// BSON null terminator. The document bytes are copied out of b. On success the
// returned Error has Type ErrNil and an empty Message; callers test Message.
func readDocument(b []byte, pos int32) (bson.Raw, int, Error) {
	if int(pos)+4 > len(b) {
		return nil, 0, Error{Message: "document too small to be valid"}
	}
	size := int(readInt32(b, int32(pos)))
	if int(pos)+size > len(b) {
		return nil, 0, Error{Message: "document size is larger than available bytes"}
	}
	if b[int(pos)+size-1] != 0x00 {
		return nil, 0, Error{Message: "document invalid, last byte is not null"}
	}
	// TODO(GODRIVER-138): When we add 3.0 support, alter this so we either do one larger make or use a pool.
	rdr := make(bson.Raw, size)
	copy(rdr, b[pos:int(pos)+size])
	return rdr, size, Error{Type: ErrNil}
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/reply.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/reply.go
new file mode 100644
index 0000000..dc8e450
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/reply.go
@@ -0,0 +1,201 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import (
+	"errors"
+	"fmt"
+	"strings"
+
+	"github.com/mongodb/mongo-go-driver/bson"
+	"github.com/mongodb/mongo-go-driver/x/bsonx"
+)
+
// Reply represents the OP_REPLY message of the MongoDB wire protocol.
type Reply struct {
	MsgHeader      Header
	ResponseFlags  ReplyFlag
	CursorID       int64
	StartingFrom   int32
	NumberReturned int32
	Documents      []bson.Raw
}

// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
//
// See AppendWireMessage for a description of the rules this method follows.
// The output buffer is pre-sized via Len to avoid growth during append.
func (r Reply) MarshalWireMessage() ([]byte, error) {
	b := make([]byte, 0, r.Len())
	return r.AppendWireMessage(b)
}

// ValidateWireMessage implements the Validator and WireMessage interfaces.
// It checks the header's declared length and opcode only; document contents
// are not validated here.
func (r Reply) ValidateWireMessage() error {
	if int(r.MsgHeader.MessageLength) != r.Len() {
		return errors.New("incorrect header: message length is not correct")
	}
	if r.MsgHeader.OpCode != OpReply {
		return errors.New("incorrect header: op code is not OpReply")
	}

	return nil
}
+
// AppendWireMessage implements the Appender and WireMessage interfaces.
//
// AppendWireMessage will set the MessageLength property of the MsgHeader
// if it is zero. It will also set the OpCode to OpReply if the OpCode is
// zero. If either of these properties are non-zero and not correct, this
// method will return both the []byte with the wire message appended to it
// and an invalid header error.
func (r Reply) AppendWireMessage(b []byte) ([]byte, error) {
	var err error
	err = r.MsgHeader.SetDefaults(r.Len(), OpReply)

	b = r.MsgHeader.AppendHeader(b)
	b = appendInt32(b, int32(r.ResponseFlags))
	b = appendInt64(b, r.CursorID)
	b = appendInt32(b, r.StartingFrom)
	b = appendInt32(b, r.NumberReturned)
	// Documents are appended back-to-back with no separators.
	for _, d := range r.Documents {
		b = append(b, d...)
	}
	return b, err
}
+
// String implements the fmt.Stringer interface.
func (r Reply) String() string {
	return fmt.Sprintf(
		`OP_REPLY{MsgHeader: %s, ResponseFlags: %s, CursorID: %d, StartingFrom: %d, NumberReturned: %d, Documents: %v}`,
		r.MsgHeader, r.ResponseFlags, r.CursorID, r.StartingFrom, r.NumberReturned, r.Documents,
	)
}

// Len implements the WireMessage interface.
func (r Reply) Len() int {
	// Header + Flags + CursorID + StartingFrom + NumberReturned + Length of Length of Documents
	docsLen := 0
	for _, d := range r.Documents {
		docsLen += len(d)
	}
	return 16 + 4 + 8 + 4 + 4 + docsLen
}
+
// UnmarshalWireMessage implements the Unmarshaler interface.
//
// Fixed fields occupy bytes 0-35 (header 16, flags 4, cursor ID 8, starting
// from 4, number returned 4); the remainder of the message is a back-to-back
// run of BSON documents.
func (r *Reply) UnmarshalWireMessage(b []byte) error {
	var err error
	r.MsgHeader, err = ReadHeader(b, 0)
	if err != nil {
		return err
	}
	// 36 is the minimum: all fixed fields with zero documents.
	if r.MsgHeader.MessageLength < 36 {
		return errors.New("invalid OP_REPLY: header length too small")
	}
	if len(b) < int(r.MsgHeader.MessageLength) {
		return errors.New("invalid OP_REPLY: []byte too small")
	}

	r.ResponseFlags = ReplyFlag(readInt32(b, 16))
	r.CursorID = readInt64(b, 20)
	r.StartingFrom = readInt32(b, 28)
	r.NumberReturned = readInt32(b, 32)
	pos := 36
	for pos < len(b) {
		rdr, size, err := readDocument(b, int32(pos))
		if err.Message != "" {
			err.Type = ErrOpReply
			return err
		}
		r.Documents = append(r.Documents, rdr)
		pos += size
	}

	return nil
}
+
+// GetMainLegacyDocument constructs and returns a BSON document for this reply.
+func (r *Reply) GetMainLegacyDocument(fullCollectionName string) (bsonx.Doc, error) {
+	if r.ResponseFlags&CursorNotFound > 0 {
+		fmt.Println("cursor not found err")
+		return bsonx.Doc{
+			{"ok", bsonx.Int32(0)},
+		}, nil
+	}
+	if r.ResponseFlags&QueryFailure > 0 {
+		firstDoc := r.Documents[0]
+		return bsonx.Doc{
+			{"ok", bsonx.Int32(0)},
+			{"errmsg", bsonx.String(firstDoc.Lookup("$err").StringValue())},
+			{"code", bsonx.Int32(firstDoc.Lookup("code").Int32())},
+		}, nil
+	}
+
+	doc := bsonx.Doc{
+		{"ok", bsonx.Int32(1)},
+	}
+
+	batchStr := "firstBatch"
+	if r.StartingFrom != 0 {
+		batchStr = "nextBatch"
+	}
+
+	batchArr := make([]bsonx.Val, len(r.Documents))
+	for i, docRaw := range r.Documents {
+		doc, err := bsonx.ReadDoc(docRaw)
+		if err != nil {
+			return nil, err
+		}
+
+		batchArr[i] = bsonx.Document(doc)
+	}
+
+	cursorDoc := bsonx.Doc{
+		{"id", bsonx.Int64(r.CursorID)},
+		{"ns", bsonx.String(fullCollectionName)},
+		{batchStr, bsonx.Array(batchArr)},
+	}
+
+	doc = doc.Append("cursor", bsonx.Document(cursorDoc))
+	return doc, nil
+}
+
// GetMainDocument returns the main BSON document for this reply.
//
// NOTE(review): this indexes Documents[0] unconditionally and panics on an
// empty reply — confirm callers only invoke it when NumberReturned > 0.
func (r *Reply) GetMainDocument() (bsonx.Doc, error) {
	return bsonx.ReadDoc([]byte(r.Documents[0]))
}
+
// ReplyFlag represents the flags of an OP_REPLY message.
type ReplyFlag int32

// These constants represent the individual flags of an OP_REPLY message.
const (
	CursorNotFound ReplyFlag = 1 << iota
	QueryFailure
	ShardConfigStale
	AwaitCapable
)

// String implements the fmt.Stringer interface. The set flags are listed in
// bit order inside brackets, e.g. "[CursorNotFound, AwaitCapable]".
func (rf ReplyFlag) String() string {
	named := []struct {
		bit  ReplyFlag
		name string
	}{
		{CursorNotFound, "CursorNotFound"},
		{QueryFailure, "QueryFailure"},
		{ShardConfigStale, "ShardConfigStale"},
		{AwaitCapable, "AwaitCapable"},
	}

	var set []string
	for _, n := range named {
		if rf&n.bit == n.bit {
			set = append(set, n.name)
		}
	}

	return "[" + strings.Join(set, ", ") + "]"
}
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/update.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/update.go
new file mode 100644
index 0000000..a987327
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/update.go
@@ -0,0 +1,57 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+package wiremessage
+
+import "github.com/mongodb/mongo-go-driver/bson"
+
// Update represents the OP_UPDATE message of the MongoDB wire protocol.
// All of its WireMessage methods are unimplemented stubs that panic; the type
// exists to round out the protocol's op set.
type Update struct {
	MsgHeader          Header
	FullCollectionName string
	Flags              UpdateFlag
	Selector           bson.Raw
	Update             bson.Raw
}

// MarshalWireMessage implements the Marshaler and WireMessage interfaces.
func (u Update) MarshalWireMessage() ([]byte, error) {
	panic("not implemented")
}

// ValidateWireMessage implements the Validator and WireMessage interfaces.
func (u Update) ValidateWireMessage() error {
	panic("not implemented")
}

// AppendWireMessage implements the Appender and WireMessage interfaces.
func (u Update) AppendWireMessage([]byte) ([]byte, error) {
	panic("not implemented")
}

// String implements the fmt.Stringer interface.
func (u Update) String() string {
	panic("not implemented")
}

// Len implements the WireMessage interface.
func (u Update) Len() int {
	panic("not implemented")
}

// UnmarshalWireMessage implements the Unmarshaler interface.
func (u *Update) UnmarshalWireMessage([]byte) error {
	panic("not implemented")
}

// UpdateFlag represents the flags on an OP_UPDATE message.
type UpdateFlag int32

// These constants represent the individual flags on an OP_UPDATE message.
const (
	Upsert UpdateFlag = 1 << iota
	MultiUpdate
)
diff --git a/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/wiremessage.go b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/wiremessage.go
new file mode 100644
index 0000000..c2cb153
--- /dev/null
+++ b/vendor/github.com/mongodb/mongo-go-driver/x/network/wiremessage/wiremessage.go
@@ -0,0 +1,206 @@
+// Copyright (C) MongoDB, Inc. 2017-present.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+
+// Package wiremessage contains types for speaking the MongoDB Wire Protocol. Since this low
+// level library is meant to be used in the context of a driver and in the context of a server
+// all of the flags and types of the wire protocol are implemented. For each op there are two
+// corresponding implementations. One prefixed with Immutable which can be created by casting a
+// []byte to the type, and another prefixed with Mutable that is a struct with methods to mutate
+// the op.
+package wiremessage
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"sync/atomic"
+)
+
+// ErrInvalidMessageLength is returned when the provided message length is too
+// small to be valid (a wire message must be at least 16 bytes, the size of its
+// header).
+var ErrInvalidMessageLength = errors.New("the message length is too small, it must be at least 16")
+
+// ErrUnknownOpCode is returned when the provided opcode is not one of the
+// OpCode constants defined in this package.
+var ErrUnknownOpCode = errors.New("the opcode is unknown")
+
+// globalRequestID holds the most recently issued request ID. It must only be
+// accessed atomically, via CurrentRequestID and NextRequestID.
+var globalRequestID int32
+
+// CurrentRequestID returns the current request ID without incrementing it.
+// It is safe for concurrent use.
+func CurrentRequestID() int32 { return atomic.LoadInt32(&globalRequestID) }
+
+// NextRequestID atomically increments the global request ID counter and
+// returns the new value. It is safe for concurrent use.
+func NextRequestID() int32 { return atomic.AddInt32(&globalRequestID, 1) }
+
+// Error represents an error related to wire protocol messages.
+type Error struct {
+	Type    ErrorType // which part of the package produced the error; see the Err* constants
+	Message string    // human-readable description, returned verbatim by Error()
+}
+
+// Error implements the error interface, returning the Message field verbatim.
+func (e Error) Error() string {
+	return e.Message
+}
+
+// ErrorType is the type of error, which indicates from which part of the code
+// the error originated.
+type ErrorType uint16
+
+// These constants are the types of errors exposed by this package.
+// ErrNil is the zero value and indicates no specific origin.
+const (
+	ErrNil ErrorType = iota
+	ErrHeader
+	ErrOpQuery
+	ErrOpReply
+	ErrOpCompressed
+	ErrOpMsg
+	ErrRead
+)
+
+// OpCode represents a MongoDB wire protocol opcode.
+type OpCode int32
+
+// These constants are the valid opcodes for the version of the wire protocol
+// supported by this library. The skipped OpCodes (blank identifiers) are
+// historical OpCodes that are no longer used.
+const (
+	OpReply        OpCode = 1
+	_              OpCode = 1001
+	OpUpdate       OpCode = 2001
+	OpInsert       OpCode = 2002
+	_              OpCode = 2003
+	OpQuery        OpCode = 2004
+	OpGetMore      OpCode = 2005
+	OpDelete       OpCode = 2006
+	OpKillCursors  OpCode = 2007
+	OpCommand      OpCode = 2010
+	OpCommandReply OpCode = 2011
+	OpCompressed   OpCode = 2012
+	OpMsg          OpCode = 2013
+)
+
+// String implements the fmt.Stringer interface, mapping each known opcode to
+// its canonical "OP_*" wire protocol name. Any value not covered by the OpCode
+// constants yields "<invalid opcode>".
+func (oc OpCode) String() string {
+	switch oc {
+	case OpReply:
+		return "OP_REPLY"
+	case OpUpdate:
+		return "OP_UPDATE"
+	case OpInsert:
+		return "OP_INSERT"
+	case OpQuery:
+		return "OP_QUERY"
+	case OpGetMore:
+		return "OP_GET_MORE"
+	case OpDelete:
+		return "OP_DELETE"
+	case OpKillCursors:
+		return "OP_KILL_CURSORS"
+	case OpCommand:
+		return "OP_COMMAND"
+	case OpCommandReply:
+		return "OP_COMMANDREPLY"
+	case OpCompressed:
+		return "OP_COMPRESSED"
+	case OpMsg:
+		return "OP_MSG"
+	default:
+		return "<invalid opcode>"
+	}
+}
+
+// WireMessage represents a message in the MongoDB wire protocol. It composes
+// the Marshaler, Validator, and Appender interfaces of this package together
+// with fmt.Stringer, and adds Len.
+type WireMessage interface {
+	Marshaler
+	Validator
+	Appender
+	fmt.Stringer
+
+	// Len returns the length in bytes of this WireMessage.
+	Len() int
+}
+
+// Validator is the interface implemented by types that can validate
+// themselves as a MongoDB wire protocol message. ValidateWireMessage returns
+// a non-nil error when the message is not valid.
+type Validator interface {
+	ValidateWireMessage() error
+}
+
+// Marshaler is the interface implemented by types that can marshal
+// themselves into a valid MongoDB wire protocol message, returned as a
+// freshly allocated byte slice.
+type Marshaler interface {
+	MarshalWireMessage() ([]byte, error)
+}
+
+// Appender is the interface implemented by types that can append themselves, as
+// a MongoDB wire protocol message, to the provided slice of bytes, returning
+// the extended slice.
+type Appender interface {
+	AppendWireMessage([]byte) ([]byte, error)
+}
+
+// Unmarshaler is the interface implemented by types that can unmarshal a
+// MongoDB wire protocol message version of themselves. The input can be
+// assumed to be a valid MongoDB wire protocol message. UnmarshalWireMessage
+// must copy the data if it wishes to retain the data after returning.
+type Unmarshaler interface {
+	UnmarshalWireMessage([]byte) error
+}
+
+// Writer is the interface implemented by types that can have WireMessages
+// written to them.
+//
+// Implementations must obey the cancellation, timeouts, and deadlines of the
+// provided context.Context object.
+type Writer interface {
+	WriteWireMessage(context.Context, WireMessage) error
+}
+
+// Reader is the interface implemented by types that can have WireMessages
+// read from them.
+//
+// Implementations must obey the cancellation, timeouts, and deadlines of the
+// provided context.Context object.
+type Reader interface {
+	ReadWireMessage(context.Context) (WireMessage, error)
+}
+
+// ReadWriter is the interface implemented by types that can both read and write
+// WireMessages. It is the union of this package's Reader and Writer interfaces.
+type ReadWriter interface {
+	Reader
+	Writer
+}
+
+// ReadWriteCloser is the interface implemented by types that can read and write
+// WireMessages and can also be closed (via io.Closer).
+type ReadWriteCloser interface {
+	Reader
+	Writer
+	io.Closer
+}
+
+// Transformer is the interface implemented by types that can alter a WireMessage.
+// Implementations should not directly alter the provided WireMessage and instead
+// make a copy of the message, alter it, and return the new message.
+type Transformer interface {
+	TransformWireMessage(WireMessage) (WireMessage, error)
+}
+
+// ReadFrom will read a single WireMessage from the given io.Reader. This function will
+// validate the WireMessage. If the WireMessage is not valid, this method will
+// return both the error and the invalid WireMessage. If another type of processing
+// error occurs, WireMessage will be nil.
+//
+// This function will return the immutable versions of wire protocol messages. The
+// Convert function can be used to retrieve a mutable version of wire protocol
+// messages.
+//
+// NOTE(review): stub — the body currently returns (nil, nil) unconditionally,
+// so the documented behavior above is not implemented.
+func ReadFrom(io.Reader) (WireMessage, error) { return nil, nil }
+
+// Unmarshal will unmarshal data into a WireMessage.
+//
+// NOTE(review): stub — the body currently returns (nil, nil) unconditionally.
+func Unmarshal([]byte) (WireMessage, error) { return nil, nil }
+
+// Validate will validate that data is a valid MongoDB wire protocol message.
+//
+// NOTE(review): stub — the body currently returns nil unconditionally, i.e. it
+// accepts any input.
+func Validate([]byte) error { return nil }