VOL-3934 - TLS connection support

This is a bit of a breaking change, as the current voltctl config
file defines verify as a string when it should have been a bool
from the start. Existing v1 config files are still read and converted
to the new v2 format on load, as illustrated below.
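
For illustration only (hypothetical values; field names taken from the
v1 and v2 config specs introduced in this change), a v1 config such as

    tls:
      useTls: true
      verify: "true"

no longer unmarshals directly as v2; it is parsed with the v1 spec and
converted, which is equivalent to the v2 form

    apiVersion: v2
    tls:
      useTls: true
      verify: true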

depends on merge of https://gerrit.opencord.org/c/voltha-lib-go/+/23594

Change-Id: Idb1f90a6bc827a599f2290bd276604997aab44e8
diff --git a/Makefile b/Makefile
index 93e34ae..f58cfe5 100644
--- a/Makefile
+++ b/Makefile
@@ -67,6 +67,7 @@
 ## Local Development Helpers
 local-lib-go:
 ifdef LOCAL_LIB_GO
+	rm -rf vendor/github.com/opencord/voltha-lib-go/v4/pkg
 	mkdir -p vendor/github.com/opencord/voltha-lib-go/v4/pkg
 	cp -r ${LOCAL_LIB_GO}/pkg/* vendor/github.com/opencord/voltha-lib-go/v4/pkg/
 endif
diff --git a/VERSION b/VERSION
index 59b9db0..bc80560 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.5.0-dev
+1.5.0
diff --git a/go.mod b/go.mod
index faf0bb0..594bef5 100644
--- a/go.mod
+++ b/go.mod
@@ -9,9 +9,10 @@
 	github.com/imdario/mergo v0.3.7 // indirect
 	github.com/jessevdk/go-flags v1.4.0
 	github.com/jhump/protoreflect v1.5.0
-	github.com/opencord/voltha-lib-go/v4 v4.0.6
-	github.com/opencord/voltha-protos/v4 v4.0.11
+	github.com/opencord/voltha-lib-go/v4 v4.2.1
+	github.com/opencord/voltha-protos/v4 v4.0.16
 	github.com/stretchr/testify v1.4.0
+	go.etcd.io/etcd v0.0.0-20190930204107-236ac2a90522
 	golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 // indirect
 	golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 // indirect
 	google.golang.org/appengine v1.6.1 // indirect
diff --git a/go.sum b/go.sum
index a2ccf29..6a071d5 100644
--- a/go.sum
+++ b/go.sum
@@ -1,37 +1,40 @@
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+github.com/Azure/go-autorest v11.1.2+incompatible h1:viZ3tV5l4gE2Sw0xrasFHytCGtzYCrT+um/rrSQ1BfA=
 github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/DataDog/zstd v1.3.6-0.20190409195224-796139022798/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM=
 github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo=
+github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/Shopify/sarama v1.23.1/go.mod h1:XLH1GYJnLVE0XCr6KdJGVJRTwY30moWNJ4sERjXX6fs=
 github.com/Shopify/sarama v1.25.0 h1:ch1ywjRLjfJtU+EaiJ+l0rWffQ6TRpyYmW4DX7Cb2SU=
 github.com/Shopify/sarama v1.25.0/go.mod h1:y/CFFTO9eaMTNriwu/Q+W4eioLqiDMGkA1W+gmdfj8w=
 github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
+github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=
-github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
 github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0=
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
+github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
+github.com/boljen/go-bitmap v0.0.0-20151001105940-23cd2fb0ce7d h1:zsO4lp+bjv5XvPTF58Vq+qgmZEYZttJK+CWtSZhKenI=
 github.com/boljen/go-bitmap v0.0.0-20151001105940-23cd2fb0ce7d/go.mod h1:f1iKL6ZhUWvbk7PdWVmOaak10o86cqMUYEmn1CZNGEI=
 github.com/bsm/sarama-cluster v2.1.15+incompatible h1:RkV6WiNRnqEEbp81druK8zYhmnIgdOjqSVi0+9Cnl2A=
 github.com/bsm/sarama-cluster v2.1.15+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM=
+github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72 h1:fUmDBbSvv1uOzo/t8WaxZMVb7BxJ8JECo5lGoR9c5bA=
 github.com/buraksezer/consistent v0.0.0-20191006190839-693edf70fd72/go.mod h1:OEE5igu/CDjGegM1Jn6ZMo7R6LlV/JChAkjfQQIRLpg=
+github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cevaris/ordered_map v0.0.0-20190319150403-3adeae072e73 h1:q1g9lSyo/nOIC3W5E3FK3Unrz8b9LdLXCyuC+ZcpPC0=
 github.com/cevaris/ordered_map v0.0.0-20190319150403-3adeae072e73/go.mod h1:507vXsotcZop7NZfBWdhPmVeOse4ko2R7AagJYrpoEg=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
+github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa h1:OaNxuTZr7kxeODyLWsRMC+OD03aFUH+mW6r2d+MWa5Y=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
@@ -45,6 +48,7 @@
 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw=
 github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/creack/pty v1.1.7 h1:6pwm8kMQKCmgUg0ZHTm5+/YvRK0s3THD/28+T6/kk4A=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -52,10 +56,10 @@
 github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4 h1:qk/FSDDxo05wdJH28W+p5yivv7LuLYLRXPPD8KQCtZs=
 github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
-github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU=
 github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
 github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q=
 github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
@@ -63,24 +67,29 @@
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
 github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M=
 github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550 h1:mV9jbLoSW/8m4VK16ZkHTozJa8sesK5u5kTMFysTYac=
 github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
 github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
-github.com/frankban/quicktest v1.4.1 h1:Wv2VwvNn73pAdFIVUQRXYDFp31lXKbqblIXo/Q5GPSg=
 github.com/frankban/quicktest v1.4.1/go.mod h1:36zfPVQyHxymz4cH7wlDmVwDrJuljRB60qkgn7rorfQ=
 github.com/frankban/quicktest v1.5.0 h1:Tb4jWdSpdjKzTUicPnY61PZxKbDoGa7ABbrReT3gQVY=
 github.com/frankban/quicktest v1.5.0/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
 github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-kit/kit v0.8.0 h1:Wz+5lgoB0kkuqLEc6NVmwRknTKP6dTGbSqvhZtBI/j0=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
+github.com/go-logfmt/logfmt v0.3.0 h1:8HUsc87TaSWLKwrnumgC8/YconD2fJQsRJAsWaPg2ic=
 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
+github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
-github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE=
 github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@@ -88,20 +97,18 @@
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1 h1:G5FRp8JnTd7RQH5kemVNlMeyXQAztQ3mOWV95KxsXH8=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
 github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
-github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -114,9 +121,11 @@
 github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
 github.com/googleapis/gnostic v0.3.0 h1:CcQijm0XKekKjP/YCz28LXVSpgguuB+nCxaSjCe09y0=
 github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8 h1:L9JPKrtsHMQ4VCRQfHvbbHBfB2Urn8xf6QZeXZ+OrN4=
 github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c h1:Lh2aW+HnU2Nbe1gqD9SOJLJxW1jBMmQOktN2acDyJk8=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7 h1:6TSoaYExHper8PYsJu23GWVNOyYRCSnIFyxKgLSZ54w=
 github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4 h1:z53tR0945TRRQO/fLEVPI6SMv7ZflF0TEaTAoU7tOzg=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@@ -124,52 +133,17 @@
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/hashicorp/consul/api v1.2.0 h1:oPsuzLp2uk7I7rojPKuncWbZ+m5TMoD4Ivs+2Rkeh4Y=
-github.com/hashicorp/consul/api v1.2.0/go.mod h1:1SIkFYi2ZTXUE5Kgt179+4hH33djo11+0Eo2XgTAtkw=
-github.com/hashicorp/consul/sdk v0.2.0 h1:GWFYFmry/k4b1hEoy7kSkmU8e30GAyI4VZHk0fRxeL4=
-github.com/hashicorp/consul/sdk v0.2.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-immutable-radix v1.1.0 h1:vN9wG1D6KG6YHRTWr8512cxGOVgTMEfgEdSj/hr8MPc=
-github.com/hashicorp/go-immutable-radix v1.1.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI=
-github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8=
-github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
-github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
 github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
 github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
 github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
-github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8xxpqM=
-github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/hashicorp/serf v0.8.4 h1:nfikPYzgKvrThySCqSN6ap+LqILhPej+ubRWRNQmzgk=
-github.com/hashicorp/serf v0.8.4/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k=
 github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.7 h1:Y+UAYTZ7gDEuOfhxKWy+dvb5dRQ6rJjFSdX2HZY1/gI=
 github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
-github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
 github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
 github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8=
 github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
@@ -180,114 +154,104 @@
 github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/julienschmidt/httprouter v1.2.0 h1:TDTW5Yz1mjftljbcKqRcrYhd4XeOoI98t+9HbQbYf7g=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0 h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.9.7 h1:hYW1gP94JUmAhBtJ+LNz5My+gBobDxPR1iVuKug26aA=
 github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mattn/go-colorable v0.0.9 h1:UVL0vNpWh04HeJXV0KLcaT7r06gOH2l4OW6ddYRUIY4=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-isatty v0.0.4 h1:bnP0vzxcAdeI1zdubAl5PjU6zsERjGZb7raWodagDYs=
 github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
+github.com/mattn/go-runewidth v0.0.2 h1:UnlwIPBGaTZfPQ6T1IGzPI0EkYAQmT9fAEJ/poFC63o=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=
 github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5 h1:58+kh9C6jJVXYjt8IE48G2eWl6BjwU5Gj0gqY84fy78=
 github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
 github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.4.2 h1:3mYCb7aPxS/RU7TI1y4rkEn1oKmPRjNJLNEXgw7MH2I=
 github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
-github.com/opencord/voltha-lib-go/v4 v4.0.6 h1:Mc/bkYDR3YcpWPeW35ju9h3sO2cGByz8XM0ik5ikJkc=
-github.com/opencord/voltha-lib-go/v4 v4.0.6/go.mod h1:8NFUZz/mp4OvRmilBRhkLOUrw4G01ruSAVdzQu2ivPc=
-github.com/opencord/voltha-protos/v4 v4.0.8/go.mod h1:W/OIFIyvFh/C0vchRUuarIsMylEhzCRM9pNxLvkPtKc=
-github.com/opencord/voltha-protos/v4 v4.0.11 h1:pJMNvjVxmCPGEUVPSLgqvT9P2ei2J+8Z9KsHJru2MRI=
-github.com/opencord/voltha-protos/v4 v4.0.11/go.mod h1:W/OIFIyvFh/C0vchRUuarIsMylEhzCRM9pNxLvkPtKc=
+github.com/opencord/voltha-lib-go/v4 v4.2.1 h1:lMTWf8uudoeFNWia9MEc9qGHRkoHHbGMRL4QrEPS+5Q=
+github.com/opencord/voltha-lib-go/v4 v4.2.1/go.mod h1:K7lDkSkJ97EyfvX8fQtBmBvpj7n6MmwnAtD8Jz79HcQ=
+github.com/opencord/voltha-protos/v4 v4.0.12/go.mod h1:W/OIFIyvFh/C0vchRUuarIsMylEhzCRM9pNxLvkPtKc=
+github.com/opencord/voltha-protos/v4 v4.0.16 h1:VQNhkc3FLvol9xPph860w97WoIcW+IjrEqNAZVOuQ+0=
+github.com/opencord/voltha-protos/v4 v4.0.16/go.mod h1:W/OIFIyvFh/C0vchRUuarIsMylEhzCRM9pNxLvkPtKc=
 github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc=
 github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
 github.com/pierrec/lz4 v0.0.0-20190327172049-315a67e90e41/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
-github.com/pierrec/lz4 v2.2.6+incompatible h1:6aCX4/YZ9v8q69hTyiR7dNLnTA3fgtKHVVW5BCd5Znw=
 github.com/pierrec/lz4 v2.2.6+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pierrec/lz4 v2.3.0+incompatible h1:CZzRn4Ut9GbUkHlQ7jqBXeZQV41ZSKWFc302ZU6lUTk=
 github.com/pierrec/lz4 v2.3.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE=
 github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
-github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
 github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
-github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ=
 github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af h1:gu+uRPtBe88sKxUCEXRoeCvVG90TJmwhiqRpvdhQFng=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
+github.com/spf13/cobra v0.0.3 h1:ZlrZ4XsMRm04Fr5pSFxBgfND2EBVa1nLpiy1stUsX/8=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -295,13 +259,15 @@
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8 h1:ndzgwNDnKIqyCvHTXaCqh9KlOWKvBry6nuXMJmonVsE=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
 github.com/uber/jaeger-client-go v2.23.1+incompatible h1:uArBYHQR0HqLFFAypI7RsWTzPSj/bDpmZZuQjMLSg1A=
 github.com/uber/jaeger-client-go v2.23.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
 github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
 github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
+github.com/urfave/cli v1.20.0 h1:fDqGv3UG/4jbVl/QkFwEdddtEDjh/5Ov6X+0B/3bPaw=
 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
+github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk=
 github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I=
+github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0=
 github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
@@ -319,31 +285,28 @@
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20190404164418-38d8ce5564a5/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
 golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
 golang.org/x/crypto v0.0.0-20191001170739-f9e2070545dc h1:KyTYo8xkh/2WdbFLUyQwBS0Jfn3qfZ9QmuPbok2oENE=
 golang.org/x/crypto v0.0.0-20191001170739-f9e2070545dc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4 h1:c2HOrn5iMezYjSlGPncknSEr/8x5LELb/ilJbXi9DEA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
 golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0=
 golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
 golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190603091049-60506f45cf65 h1:+rhAzEzT3f4JtomfC371qB+0Ola2caSKcY69NUBZrRQ=
 golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
-golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68=
 golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3 h1:6KET3Sqa7fkVfD63QnAM81ZeYg5n4HwApOJkufONnHA=
@@ -357,18 +320,15 @@
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190606165138-5da285871e9c h1:+EXw7AwNOKzPFXMZ1yNjO40aWCh3PIquJB2fYlv9wcs=
 golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24 h1:R8bzl0244nw47n1xKs1MUMAaTNgjavKcN/aX2Ss3+Fo=
@@ -389,6 +349,7 @@
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
 golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b h1:mSUCVIwDx4hfXJfWsOPfdzEHxzb2Xjl6BQ8YgPnazQA=
 golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -396,7 +357,6 @@
 google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I=
 google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
 google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc=
 google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
 google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c h1:hrpEMCZ2O7DR5gC1n2AJGVhrwiEjOi35+jxtIuZpTMo=
 google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
@@ -406,11 +366,12 @@
 google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
 google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s=
 google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
+gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/cheggaaa/pb.v1 v1.0.25 h1:Ev7yu1/f6+d+b3pi5vPdRPc6nNtP1umSfcWiEfRqv6I=
 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
 gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -423,23 +384,23 @@
 gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
 gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI=
 gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4=
-gopkg.in/jcmturner/gokrb5.v7 v7.2.3 h1:hHMV/yKPwMnJhPuPx7pH2Uw/3Qyf+thJYlisUc44010=
 gopkg.in/jcmturner/gokrb5.v7 v7.2.3/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
 gopkg.in/jcmturner/gokrb5.v7 v7.3.0 h1:0709Jtq/6QXEuWRfAm260XqlpcwL1vxtO1tUE2qK8Z4=
 gopkg.in/jcmturner/gokrb5.v7 v7.3.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM=
 gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU=
 gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8=
+gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI=
 gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
 gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.2.3 h1:fvjTMHxHEw/mxHbtzPi3JCcKXQRAnQTBRo6YCJSVHKI=
 gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 k8s.io/api v0.0.0-20190819141258-3544db3b9e44 h1:7Gz7/nQ7X2qmPXMyN0bNq7Zm9Uip+UnFuMZTd2l3vms=
 k8s.io/api v0.0.0-20190819141258-3544db3b9e44/go.mod h1:AOxZTnaXR/xiarlQL0JUfwQPxjmKDvVYoRp58cA7lUo=
@@ -449,6 +410,7 @@
 k8s.io/client-go v0.0.0-20190819141724-e14f31a72a77/go.mod h1:DmkJD5UDP87MVqUQ5VJ6Tj9Oen8WzXPhk3la4qpyG4g=
 k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68=
 k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI=
 k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
 k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4=
 k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
diff --git a/internal/pkg/apis/config/v1/defaults.go b/internal/pkg/apis/config/v1/defaults.go
new file mode 100644
index 0000000..c0fe427
--- /dev/null
+++ b/internal/pkg/apis/config/v1/defaults.go
@@ -0,0 +1,43 @@
+/*
+ * Copyright 2021-present Ciena Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package config
+
+import (
+	"time"
+)
+
+func NewDefaultConfig() *GlobalConfigSpec {
+	return &GlobalConfigSpec{
+		Server:  "localhost:55555",
+		Kafka:   "localhost:9092",
+		KvStore: "localhost:2379",
+		Tls: TlsConfigSpec{
+			UseTls: false,
+			CACert: "",
+			Cert:   "",
+			Key:    "",
+			Verify: "",
+		},
+		Grpc: GrpcConfigSpec{
+			Timeout:            5 * time.Minute,
+			MaxCallRecvMsgSize: "4MB",
+		},
+		KvStoreConfig: KvStoreConfigSpec{
+			Timeout: 5 * time.Second,
+		},
+		K8sConfig: "",
+	}
+}
diff --git a/internal/pkg/apis/config/v1/types.go b/internal/pkg/apis/config/v1/types.go
new file mode 100644
index 0000000..a7ea78e
--- /dev/null
+++ b/internal/pkg/apis/config/v1/types.go
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2021-present Ciena Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package config
+
+import (
+	"time"
+)
+
+type GrpcConfigSpec struct {
+	Timeout            time.Duration `yaml:"timeout"`
+	MaxCallRecvMsgSize string        `yaml:"maxCallRecvMsgSize"`
+}
+
+type KvStoreConfigSpec struct {
+	Timeout time.Duration `yaml:"timeout"`
+}
+
+type TlsConfigSpec struct {
+	UseTls bool   `yaml:"useTls"`
+	CACert string `yaml:"caCert"`
+	Cert   string `yaml:"cert"`
+	Key    string `yaml:"key"`
+	Verify string `yaml:"verify"`
+}
+
+type GlobalConfigSpec struct {
+	Server        string            `yaml:"server"`
+	Kafka         string            `yaml:"kafka"`
+	KvStore       string            `yaml:"kvstore"`
+	Tls           TlsConfigSpec     `yaml:"tls"`
+	Grpc          GrpcConfigSpec    `yaml:"grpc"`
+	KvStoreConfig KvStoreConfigSpec `yaml:"kvstoreconfig"`
+	K8sConfig     string            `yaml:"-"`
+}
diff --git a/internal/pkg/apis/config/v2/convert.go b/internal/pkg/apis/config/v2/convert.go
new file mode 100644
index 0000000..6e68983
--- /dev/null
+++ b/internal/pkg/apis/config/v2/convert.go
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2021-present Ciena Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package config
+
+import (
+	"strconv"
+
+	configv1 "github.com/opencord/voltctl/internal/pkg/apis/config/v1"
+)
+
+func FromConfigV1(v1 *configv1.GlobalConfigSpec) *GlobalConfigSpec {
+	v2 := NewDefaultConfig()
+
+	v2.Server = v1.Server
+	v2.Kafka = v1.Kafka
+	v2.KvStore = v1.KvStore
+	v2.Tls.UseTls = v1.Tls.UseTls
+	v2.Tls.CACert = v1.Tls.CACert
+	v2.Tls.Cert = v1.Tls.Cert
+	v2.Tls.Key = v1.Tls.Key
+	if v1.Tls.Verify != "" {
+		if b, err := strconv.ParseBool(v1.Tls.Verify); err == nil {
+			v2.Tls.Verify = b
+		}
+	}
+	v2.Grpc.Timeout = v1.Grpc.Timeout
+	v2.Grpc.MaxCallRecvMsgSize = v1.Grpc.MaxCallRecvMsgSize
+	v2.KvStoreConfig.Timeout = v1.KvStoreConfig.Timeout
+	v2.K8sConfig = v1.K8sConfig
+	return v2
+}
diff --git a/internal/pkg/apis/config/v2/defaults.go b/internal/pkg/apis/config/v2/defaults.go
new file mode 100644
index 0000000..c872325
--- /dev/null
+++ b/internal/pkg/apis/config/v2/defaults.go
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2021-present Ciena Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package config
+
+import (
+	"time"
+)
+
+func NewDefaultConfig() *GlobalConfigSpec {
+	return &GlobalConfigSpec{
+		ApiVersion: "v2",
+		Server:     "localhost:55555",
+		Kafka:      "localhost:9092",
+		KvStore:    "localhost:2379",
+		Tls: TlsConfigSpec{
+			UseTls: false,
+			CACert: "",
+			Cert:   "",
+			Key:    "",
+			Verify: false,
+		},
+		Grpc: GrpcConfigSpec{
+			ConnectTimeout:     5 * time.Second,
+			Timeout:            5 * time.Minute,
+			MaxCallRecvMsgSize: "4MB",
+		},
+		KvStoreConfig: KvStoreConfigSpec{
+			Timeout: 5 * time.Second,
+		},
+		K8sConfig: "",
+	}
+}
diff --git a/internal/pkg/apis/config/v2/types.go b/internal/pkg/apis/config/v2/types.go
new file mode 100644
index 0000000..1c5a0d4
--- /dev/null
+++ b/internal/pkg/apis/config/v2/types.go
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2021-present Ciena Corporation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package config
+
+import (
+	"time"
+)
+
+type GrpcConfigSpec struct {
+	ConnectTimeout     time.Duration `yaml:"connectTimeout"`
+	Timeout            time.Duration `yaml:"timeout"`
+	MaxCallRecvMsgSize string        `yaml:"maxCallRecvMsgSize"`
+}
+
+type KvStoreConfigSpec struct {
+	Timeout time.Duration `yaml:"timeout"`
+}
+
+type TlsConfigSpec struct {
+	UseTls bool   `yaml:"useTls"`
+	CACert string `yaml:"caCert"`
+	Cert   string `yaml:"cert"`
+	Key    string `yaml:"key"`
+	Verify bool   `yaml:"verify"`
+}
+
+type GlobalConfigSpec struct {
+	ApiVersion    string            `yaml:"apiVersion"`
+	Server        string            `yaml:"server"`
+	Kafka         string            `yaml:"kafka"`
+	KvStore       string            `yaml:"kvstore"`
+	Tls           TlsConfigSpec     `yaml:"tls"`
+	Grpc          GrpcConfigSpec    `yaml:"grpc"`
+	KvStoreConfig KvStoreConfigSpec `yaml:"kvstoreconfig"`
+	K8sConfig     string            `yaml:"-"`
+}
diff --git a/internal/pkg/commands/command.go b/internal/pkg/commands/command.go
index d2951a3..3e1beb4 100644
--- a/internal/pkg/commands/command.go
+++ b/internal/pkg/commands/command.go
@@ -16,12 +16,13 @@
 package commands
 
 import (
+	"context"
+	"crypto/tls"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"io/ioutil"
 	"log"
-	"net"
 	"os"
 	"path/filepath"
 	"reflect"
@@ -32,11 +33,14 @@
 
 	"github.com/golang/protobuf/jsonpb"
 	"github.com/golang/protobuf/proto"
+	configv1 "github.com/opencord/voltctl/internal/pkg/apis/config/v1"
+	configv2 "github.com/opencord/voltctl/internal/pkg/apis/config/v2"
 	"github.com/opencord/voltctl/pkg/filter"
 	"github.com/opencord/voltctl/pkg/format"
 	"github.com/opencord/voltctl/pkg/order"
 	"google.golang.org/grpc"
-	"gopkg.in/yaml.v2"
+	"google.golang.org/grpc/credentials"
+	yaml "gopkg.in/yaml.v2"
 )
 
 type OutputType uint8
@@ -46,48 +50,9 @@
 	OUTPUT_JSON
 	OUTPUT_YAML
 
-	defaultApiHost = "localhost"
-	defaultApiPort = 55555
-
-	defaultKafkaHost = "localhost"
-	defaultKafkaPort = 9092
-
 	supportedKvStoreType = "etcd"
-	defaultKvHost        = "localhost"
-	defaultKvPort        = 2379
-	defaultKvTimeout     = time.Second * 5
-
-	defaultGrpcTimeout            = time.Minute * 5
-	defaultGrpcMaxCallRecvMsgSize = "4MB"
 )
 
-type GrpcConfigSpec struct {
-	Timeout            time.Duration `yaml:"timeout"`
-	MaxCallRecvMsgSize string        `yaml:"maxCallRecvMsgSize"`
-}
-
-type KvStoreConfigSpec struct {
-	Timeout time.Duration `yaml:"timeout"`
-}
-
-type TlsConfigSpec struct {
-	UseTls bool   `yaml:"useTls"`
-	CACert string `yaml:"caCert"`
-	Cert   string `yaml:"cert"`
-	Key    string `yaml:"key"`
-	Verify string `yaml:"verify"`
-}
-
-type GlobalConfigSpec struct {
-	Server        string            `yaml:"server"`
-	Kafka         string            `yaml:"kafka"`
-	KvStore       string            `yaml:"kvstore"`
-	Tls           TlsConfigSpec     `yaml:"tls"`
-	Grpc          GrpcConfigSpec    `yaml:"grpc"`
-	KvStoreConfig KvStoreConfigSpec `yaml:"kvstoreconfig"`
-	K8sConfig     string            `yaml:"-"`
-}
-
 var (
 	ParamNames = map[string]map[string]string{
 		"v1": {
@@ -105,21 +70,7 @@
 
 	CharReplacer = strings.NewReplacer("\\t", "\t", "\\n", "\n")
 
-	GlobalConfig = GlobalConfigSpec{
-		Server:  "localhost:55555",
-		Kafka:   "",
-		KvStore: "localhost:2379",
-		Tls: TlsConfigSpec{
-			UseTls: false,
-		},
-		Grpc: GrpcConfigSpec{
-			Timeout:            defaultGrpcTimeout,
-			MaxCallRecvMsgSize: defaultGrpcMaxCallRecvMsgSize,
-		},
-		KvStoreConfig: KvStoreConfigSpec{
-			Timeout: defaultKvTimeout,
-		},
-	}
+	GlobalConfig = configv2.NewDefaultConfig()
 
 	GlobalCommandOptions = make(map[string]map[string]string)
 
@@ -194,31 +145,6 @@
 	}
 }
 
-func splitEndpoint(ep, defaultHost string, defaultPort int) (string, int, error) {
-	port := defaultPort
-	host, sPort, err := net.SplitHostPort(ep)
-	if err != nil {
-		if addrErr, ok := err.(*net.AddrError); ok {
-			if addrErr.Err != "missing port in address" {
-				return "", 0, err
-			}
-			host = ep
-		} else {
-			return "", 0, err
-		}
-	} else if len(strings.TrimSpace(sPort)) > 0 {
-		val, err := strconv.Atoi(sPort)
-		if err != nil {
-			return "", 0, err
-		}
-		port = val
-	}
-	if len(strings.TrimSpace(host)) == 0 {
-		host = defaultHost
-	}
-	return strings.Trim(host, "]["), port, nil
-}
-
 type CommandResult struct {
 	Format    format.Format
 	Filter    string
@@ -283,9 +209,15 @@
 			Error.Fatalf("Unable to read the configuration file '%s': %s",
 				GlobalOptions.Config, err.Error())
 		}
+		// First try the latest version of the config API, then work
+		// backwards
 		if err = yaml.Unmarshal(configFile, &GlobalConfig); err != nil {
-			Error.Fatalf("Unable to parse the configuration file '%s': %s",
-				GlobalOptions.Config, err.Error())
+			GlobalConfigV1 := configv1.NewDefaultConfig()
+			if err = yaml.Unmarshal(configFile, &GlobalConfigV1); err != nil {
+				Error.Fatalf("Unable to parse the configuration file '%s': %s",
+					GlobalOptions.Config, err.Error())
+			}
+			GlobalConfig = configv2.FromConfigV1(GlobalConfigV1)
 		}
 	}
 
@@ -293,32 +225,22 @@
 	if GlobalOptions.Server != "" {
 		GlobalConfig.Server = GlobalOptions.Server
 	}
-	host, port, err := splitEndpoint(GlobalConfig.Server, defaultApiHost, defaultApiPort)
-	if err != nil {
-		Error.Fatalf("voltha API endport incorrectly specified '%s':%s",
-			GlobalConfig.Server, err)
+
+	if GlobalOptions.UseTLS {
+		GlobalConfig.Tls.UseTls = true
 	}
-	GlobalConfig.Server = net.JoinHostPort(host, strconv.Itoa(port))
+
+	if GlobalOptions.Verify {
+		GlobalConfig.Tls.Verify = true
+	}
 
 	if GlobalOptions.Kafka != "" {
 		GlobalConfig.Kafka = GlobalOptions.Kafka
 	}
-	host, port, err = splitEndpoint(GlobalConfig.Kafka, defaultKafkaHost, defaultKafkaPort)
-	if err != nil {
-		Error.Fatalf("Kafka endport incorrectly specified '%s':%s",
-			GlobalConfig.Kafka, err)
-	}
-	GlobalConfig.Kafka = net.JoinHostPort(host, strconv.Itoa(port))
 
 	if GlobalOptions.KvStore != "" {
 		GlobalConfig.KvStore = GlobalOptions.KvStore
 	}
-	host, port, err = splitEndpoint(GlobalConfig.KvStore, defaultKvHost, defaultKvPort)
-	if err != nil {
-		Error.Fatalf("KV store endport incorrectly specified '%s':%s",
-			GlobalConfig.KvStore, err)
-	}
-	GlobalConfig.KvStore = net.JoinHostPort(host, strconv.Itoa(port))
 
 	if GlobalOptions.KvStoreTimeout != "" {
 		timeout, err := time.ParseDuration(GlobalOptions.KvStoreTimeout)
@@ -384,7 +306,24 @@
 		Error.Fatalf("Cannot convert msgSize %s to bytes", GlobalConfig.Grpc.MaxCallRecvMsgSize)
 	}
 
-	return grpc.Dial(GlobalConfig.Server, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(n))))
+	var opts []grpc.DialOption
+
+	opts = append(opts,
+		grpc.WithDisableRetry(),
+		grpc.WithBlock(),
+		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(int(n))))
+
+	if GlobalConfig.Tls.UseTls {
+		creds := credentials.NewTLS(&tls.Config{
+			InsecureSkipVerify: !GlobalConfig.Tls.Verify})
+		opts = append(opts, grpc.WithTransportCredentials(creds))
+	} else {
+		opts = append(opts, grpc.WithInsecure())
+	}
+	ctx, cancel := context.WithTimeout(context.TODO(),
+		GlobalConfig.Grpc.ConnectTimeout)
+	defer cancel()
+	return grpc.DialContext(ctx, GlobalConfig.Server, opts...)
 }
 
 func ConvertJsonProtobufArray(data_in interface{}) (string, error) {
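
Note on the dial path added to command.go above: it can be read as a small, self-contained pattern — build the call options once, then attach either TLS transport credentials (skipping certificate verification unless verify is set) or an insecure transport, and dial with a bounded context. The sketch below is a minimal standalone illustration of that pattern, not the voltctl code itself; the function name, endpoint, timeout and message-size parameters are placeholders.

package example

import (
	"context"
	"crypto/tls"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dialServer mirrors the connection logic added above: retries are disabled,
// the dial blocks until the connection is up or the timeout expires, and TLS
// credentials are attached only when useTLS is set. Certificate verification
// is skipped unless verify is also set.
func dialServer(endpoint string, useTLS, verify bool, connectTimeout time.Duration, maxRecvBytes int) (*grpc.ClientConn, error) {
	opts := []grpc.DialOption{
		grpc.WithDisableRetry(),
		grpc.WithBlock(),
		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxRecvBytes)),
	}

	if useTLS {
		creds := credentials.NewTLS(&tls.Config{InsecureSkipVerify: !verify})
		opts = append(opts, grpc.WithTransportCredentials(creds))
	} else {
		opts = append(opts, grpc.WithInsecure())
	}

	ctx, cancel := context.WithTimeout(context.Background(), connectTimeout)
	defer cancel()
	return grpc.DialContext(ctx, endpoint, opts...)
}

A hypothetical call such as dialServer("localhost:55555", true, false, 5*time.Second, 4*1024*1024) would connect with TLS but without verifying the server certificate, matching the UseTls=true / Verify=false combination handled above.
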
diff --git a/internal/pkg/commands/command_test.go b/internal/pkg/commands/command_test.go
index 68dd0a4..e0bd4bd 100644
--- a/internal/pkg/commands/command_test.go
+++ b/internal/pkg/commands/command_test.go
@@ -16,11 +16,12 @@
 package commands
 
 import (
-	flags "github.com/jessevdk/go-flags"
-	"github.com/stretchr/testify/assert"
 	"os"
 	"path"
 	"testing"
+
+	flags "github.com/jessevdk/go-flags"
+	"github.com/stretchr/testify/assert"
 )
 
 // Test that ProcessGlobalOptions does not interfere with GlobalConfig
@@ -39,58 +40,6 @@
 	assert.Equal(t, "localhost:55555", GlobalConfig.Server, "wrong default hostname for server")
 }
 
-func TestSplitHostPort(t *testing.T) {
-	data := []struct {
-		name        string
-		endpoint    string
-		defaultHost string
-		defaultPort int
-		host        string
-		port        int
-		err         bool
-	}{
-		{"Host and port specified", "host:1234", "default", 4321, "host", 1234, false},
-		{"Host only specified", "host", "default", 4321, "host", 4321, false},
-		{"Host: only specified", "host:", "default", 4321, "host", 4321, false},
-		{"Port only specified", ":1234", "default", 4321, "default", 1234, false},
-		{"Colon only", ":", "default", 4321, "default", 4321, false},
-		{"Empty endpoint", "", "default", 4321, "default", 4321, false},
-		{"IPv4 and port specified", "1.2.3.4:1234", "4.3.2.1", 4321, "1.2.3.4", 1234, false},
-		{"IPv4 only specified", "1.2.3.4", "4.3.2.1", 4321, "1.2.3.4", 4321, false},
-		{"IPv4: only specified", "1.2.3.4:", "4.3.2.1", 4321, "1.2.3.4", 4321, false},
-		{"IPv4 Port only specified", ":1234", "4.3.2.1", 4321, "4.3.2.1", 1234, false},
-		{"IPv4 Colon only", ":", "4.3.2.1", 4321, "4.3.2.1", 4321, false},
-		{"IPv4 Empty endpoint", "", "4.3.2.1", 4321, "4.3.2.1", 4321, false},
-		{"IPv6 and port specified", "[0001:c0ff:eec0::::ffff]:1234", "0001:c0ff:eec0::::aaaa", 4321, "0001:c0ff:eec0::::ffff", 1234, false},
-		{"IPv6 only specified", "[0001:c0ff:eec0::::ffff]", "0001:c0ff:eec0::::aaaa", 4321, "0001:c0ff:eec0::::ffff", 4321, false},
-		{"IPv6: only specified", "[0001:c0ff:eec0::::ffff]:", "0001:c0ff:eec0::::aaaa", 4321, "0001:c0ff:eec0::::ffff", 4321, false},
-		{"IPv6 Port only specified", ":1234", "0001:c0ff:eec0::::aaaa", 4321, "0001:c0ff:eec0::::aaaa", 1234, false},
-		{"IPv6 Colon only", ":", "0001:c0ff:eec0::::aaaa", 4321, "0001:c0ff:eec0::::aaaa", 4321, false},
-		{"IPv6 Empty endpoint", "", "0001:c0ff:eec0::::aaaa", 4321, "0001:c0ff:eec0::::aaaa", 4321, false},
-		{"Invalid port", "host:1b", "default", 4321, "", 0, true},
-		{"Too many colons", "ho:st:1b", "default", 4321, "", 0, true},
-		{"IPv4 Invalid port", "1.2.3.4:1b", "4.3.2.1", 4321, "", 0, true},
-		{"IPv4 Too many colons", "1.2.3.4::1234", "4.3.2.1", 4321, "", 0, true},
-		{"IPv6 Invalid port", "[0001:c0ff:eec0::::ffff]:1b", "0001:c0ff:eec0::::aaaa", 4321, "", 0, true},
-		{"IPv6 Too many colons", "0001:c0ff:eec0::::ffff:1234", "0001:c0ff:eec0::::aaaa", 4321, "", 0, true},
-	}
-
-	for _, args := range data {
-		t.Run(args.name, func(t *testing.T) {
-			h, p, err := splitEndpoint(args.endpoint, args.defaultHost, args.defaultPort)
-			if args.err {
-				assert.NotNil(t, err, "unexpected non-error result")
-			} else {
-				assert.Nil(t, err, "unexpected error result")
-			}
-			if !args.err && err == nil {
-				assert.Equal(t, args.host, h, "unexpected host value")
-				assert.Equal(t, args.port, p, "unexpected port value")
-			}
-		})
-	}
-}
-
 func TestParseSize(t *testing.T) {
 	var res uint64
 	var err error
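
Stepping back to the configuration handling changed in command.go above: the read path now tries the newest config schema first and only falls back to the v1 schema, converting the result forward. The snippet below sketches that fallback in isolation; ConfigV1, ConfigV2 and fromV1 are assumed stand-ins for the real internal/pkg/apis/config/v1 and .../v2 packages, and only enough fields are included to show the shape (the removed v1-era struct above carried Verify as a string, while the new code treats it as a bool).

package example

import (
	yaml "gopkg.in/yaml.v2"
)

// ConfigV1 and ConfigV2 are illustrative stand-ins, not the real specs.
type ConfigV1 struct {
	Server string `yaml:"server"`
	Verify string `yaml:"verify"` // v1-era shape kept verify as a string
}

type ConfigV2 struct {
	Server string `yaml:"server"`
	Verify bool   `yaml:"verify"`
}

// fromV1 converts the old shape into the new one (placeholder for FromConfigV1).
func fromV1(old ConfigV1) ConfigV2 {
	return ConfigV2{Server: old.Server, Verify: old.Verify == "true"}
}

// loadConfig tries the latest schema first and works backwards, mirroring the
// fallback added to ProcessGlobalOptions.
func loadConfig(data []byte) (ConfigV2, error) {
	var v2 ConfigV2
	if err := yaml.Unmarshal(data, &v2); err == nil {
		return v2, nil
	}
	var v1 ConfigV1
	if err := yaml.Unmarshal(data, &v1); err != nil {
		return ConfigV2{}, err
	}
	return fromV1(v1), nil
}

The same approach extends naturally if a later schema is added: try the newest first, then each older version in turn, converting forward at each step.
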
diff --git a/internal/pkg/commands/config.go b/internal/pkg/commands/config.go
index 3ba4118..59a1df1 100644
--- a/internal/pkg/commands/config.go
+++ b/internal/pkg/commands/config.go
@@ -17,12 +17,12 @@
 
 import (
 	"fmt"
+
 	flags "github.com/jessevdk/go-flags"
-	"gopkg.in/yaml.v2"
+	yaml "gopkg.in/yaml.v2"
 )
 
-const copyrightNotice = `
-# Copyright 2019-present Ciena Corporation
+const copyrightNotice = `# Copyright 2021-present Ciena Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -34,9 +34,7 @@
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
-# limitations under the License.
-#
-`
+# limitations under the License.`
 
 type CommandOptionsDump struct{}
 
diff --git a/internal/pkg/commands/log.go b/internal/pkg/commands/log.go
index 77d97e9..a1110b4 100644
--- a/internal/pkg/commands/log.go
+++ b/internal/pkg/commands/log.go
@@ -17,11 +17,10 @@
 
 import (
 	"context"
+	"crypto/tls"
 	"encoding/json"
 	"errors"
 	"fmt"
-	"net"
-	"strconv"
 	"strings"
 
 	flags "github.com/jessevdk/go-flags"
@@ -30,6 +29,7 @@
 	"github.com/opencord/voltha-lib-go/v4/pkg/config"
 	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
 	"github.com/opencord/voltha-lib-go/v4/pkg/log"
+	v3Client "go.etcd.io/etcd/clientv3"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
@@ -250,14 +250,24 @@
 }
 
 func constructConfigManager(ctx context.Context) (*config.ConfigManager, func(), error) {
-	client, err := kvstore.NewEtcdClient(ctx, GlobalConfig.KvStore, GlobalConfig.KvStoreConfig.Timeout, log.FatalLevel)
+	var tlsConfig *tls.Config
+	if GlobalConfig.Tls.UseTls {
+		tlsConfig = &tls.Config{InsecureSkipVerify: !GlobalConfig.Tls.Verify}
+	}
+	logconfig := log.ConstructZapConfig(log.JSON, log.FatalLevel, log.Fields{})
+	client, err := kvstore.NewEtcdCustomClient(
+		ctx,
+		&v3Client.Config{
+			Endpoints:   []string{GlobalConfig.KvStore},
+			DialTimeout: GlobalConfig.KvStoreConfig.Timeout,
+			LogConfig:   &logconfig,
+			TLS:         tlsConfig,
+		})
 	if err != nil {
 		return nil, nil, fmt.Errorf("Unable to create kvstore client %s", err)
 	}
 
-	// Already error checked during option processing
-	host, port, _ := splitEndpoint(GlobalConfig.KvStore, defaultKvHost, defaultKvPort)
-	cm := config.NewConfigManager(ctx, client, supportedKvStoreType, net.JoinHostPort(host, strconv.Itoa(port)), GlobalConfig.KvStoreConfig.Timeout)
+	cm := config.NewConfigManager(ctx, client, supportedKvStoreType, GlobalConfig.KvStore, GlobalConfig.KvStoreConfig.Timeout)
 	return cm, func() { client.Close(ctx) }, nil
 }
 
@@ -685,8 +695,7 @@
 	if len(options.Args.Component) == 0 {
 		componentList, err = cm.RetrieveComponentList(ctx, config.ConfigTypeLogLevel)
 		if err != nil {
-			host, port, _ := splitEndpoint(GlobalConfig.KvStore, defaultKvHost, defaultKvPort)
-			return fmt.Errorf("Unable to retrieve list of voltha components : %s \nIs ETCD available at %s:%d?", err, host, port)
+			return fmt.Errorf("Unable to retrieve list of voltha components : %s \nIs ETCD available at %s?", err, GlobalConfig.KvStore)
 		}
 	} else {
 		componentList = toStringArray(options.Args.Component)
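
The kvstore client construction in log.go above now passes a *tls.Config through to the etcd v3 client. As a rough standalone equivalent, assuming the plain go.etcd.io/etcd/clientv3 API rather than the voltha-lib-go kvstore wrapper used above, the connection could be built as follows; the endpoint and timeout values are placeholders.

package example

import (
	"crypto/tls"
	"time"

	clientv3 "go.etcd.io/etcd/clientv3"
)

// newEtcdClient mirrors the TLS handling above: a *tls.Config is supplied only
// when TLS is enabled, and certificate verification is skipped unless verify
// is also set.
func newEtcdClient(endpoint string, useTLS, verify bool, dialTimeout time.Duration) (*clientv3.Client, error) {
	var tlsConfig *tls.Config
	if useTLS {
		tlsConfig = &tls.Config{InsecureSkipVerify: !verify}
	}
	return clientv3.New(clientv3.Config{
		Endpoints:   []string{endpoint},
		DialTimeout: dialTimeout,
		TLS:         tlsConfig,
	})
}

For example, newEtcdClient("localhost:2379", true, false, 5*time.Second) would dial etcd over TLS without verifying the server certificate; the caller remains responsible for calling Close() on the returned client.
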
diff --git a/internal/pkg/commands/version.go b/internal/pkg/commands/version.go
index 8856ae1..db47ed8 100644
--- a/internal/pkg/commands/version.go
+++ b/internal/pkg/commands/version.go
@@ -18,12 +18,13 @@
 import (
 	"context"
 	"encoding/json"
+	"strings"
+
 	"github.com/golang/protobuf/ptypes/empty"
 	flags "github.com/jessevdk/go-flags"
 	"github.com/opencord/voltctl/internal/pkg/cli/version"
 	"github.com/opencord/voltctl/pkg/format"
 	"github.com/opencord/voltha-protos/v4/go/voltha"
-	"strings"
 )
 
 type VersionDetails struct {
@@ -140,7 +141,7 @@
 
 	voltha, err := client.GetVoltha(ctx, &empty.Empty{})
 	if err != nil {
-		return nil
+		return err
 	}
 
 	info := make(map[string]interface{})
diff --git a/vendor/github.com/armon/go-metrics/.gitignore b/vendor/github.com/armon/go-metrics/.gitignore
deleted file mode 100644
index 8c03ec1..0000000
--- a/vendor/github.com/armon/go-metrics/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-
-/metrics.out
diff --git a/vendor/github.com/armon/go-metrics/.travis.yml b/vendor/github.com/armon/go-metrics/.travis.yml
deleted file mode 100644
index 87d230c..0000000
--- a/vendor/github.com/armon/go-metrics/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-
-go:
-  - "1.x"
-
-env:
-  - GO111MODULE=on
-
-install:
-  - go get ./...
-
-script:
-  - go test ./...
diff --git a/vendor/github.com/armon/go-metrics/LICENSE b/vendor/github.com/armon/go-metrics/LICENSE
deleted file mode 100644
index 106569e..0000000
--- a/vendor/github.com/armon/go-metrics/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Armon Dadgar
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/armon/go-metrics/README.md b/vendor/github.com/armon/go-metrics/README.md
deleted file mode 100644
index aa73348..0000000
--- a/vendor/github.com/armon/go-metrics/README.md
+++ /dev/null
@@ -1,91 +0,0 @@
-go-metrics
-==========
-
-This library provides a `metrics` package which can be used to instrument code,
-expose application metrics, and profile runtime performance in a flexible manner.
-
-Current API: [![GoDoc](https://godoc.org/github.com/armon/go-metrics?status.svg)](https://godoc.org/github.com/armon/go-metrics)
-
-Sinks
------
-
-The `metrics` package makes use of a `MetricSink` interface to support delivery
-to any type of backend. Currently the following sinks are provided:
-
-* StatsiteSink : Sinks to a [statsite](https://github.com/armon/statsite/) instance (TCP)
-* StatsdSink: Sinks to a [StatsD](https://github.com/etsy/statsd/) / statsite instance (UDP)
-* PrometheusSink: Sinks to a [Prometheus](http://prometheus.io/) metrics endpoint (exposed via HTTP for scrapes)
-* InmemSink : Provides in-memory aggregation, can be used to export stats
-* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example.
-* BlackholeSink : Sinks to nowhere
-
-In addition to the sinks, the `InmemSignal` can be used to catch a signal,
-and dump a formatted output of recent metrics. For example, when a process gets
-a SIGUSR1, it can dump to stderr recent performance metrics for debugging.
-
-Labels
-------
-
-Most metrics do have an equivalent ending with `WithLabels`, such methods
-allow to push metrics with labels and use some features of underlying Sinks
-(ex: translated into Prometheus labels).
-
-Since some of these labels may increase greatly cardinality of metrics, the
-library allow to filter labels using a blacklist/whitelist filtering system
-which is global to all metrics.
-
-* If `Config.AllowedLabels` is not nil, then only labels specified in this value will be sent to underlying Sink, otherwise, all labels are sent by default.
-* If `Config.BlockedLabels` is not nil, any label specified in this value will not be sent to underlying Sinks.
-
-By default, both `Config.AllowedLabels` and `Config.BlockedLabels` are nil, meaning that
-no tags are filetered at all, but it allow to a user to globally block some tags with high
-cardinality at application level.
-
-Examples
---------
-
-Here is an example of using the package:
-
-```go
-func SlowMethod() {
-    // Profiling the runtime of a method
-    defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now())
-}
-
-// Configure a statsite sink as the global metrics sink
-sink, _ := metrics.NewStatsiteSink("statsite:8125")
-metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink)
-
-// Emit a Key/Value pair
-metrics.EmitKey([]string{"questions", "meaning of life"}, 42)
-```
-
-Here is an example of setting up a signal handler:
-
-```go
-// Setup the inmem sink and signal handler
-inm := metrics.NewInmemSink(10*time.Second, time.Minute)
-sig := metrics.DefaultInmemSignal(inm)
-metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm)
-
-// Run some code
-inm.SetGauge([]string{"foo"}, 42)
-inm.EmitKey([]string{"bar"}, 30)
-
-inm.IncrCounter([]string{"baz"}, 42)
-inm.IncrCounter([]string{"baz"}, 1)
-inm.IncrCounter([]string{"baz"}, 80)
-
-inm.AddSample([]string{"method", "wow"}, 42)
-inm.AddSample([]string{"method", "wow"}, 100)
-inm.AddSample([]string{"method", "wow"}, 22)
-
-....
-```
-
-When a signal comes in, output like the following will be dumped to stderr:
-
-    [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000
-    [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000
-    [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509
-    [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513
\ No newline at end of file
diff --git a/vendor/github.com/armon/go-metrics/const_unix.go b/vendor/github.com/armon/go-metrics/const_unix.go
deleted file mode 100644
index 31098dd..0000000
--- a/vendor/github.com/armon/go-metrics/const_unix.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !windows
-
-package metrics
-
-import (
-	"syscall"
-)
-
-const (
-	// DefaultSignal is used with DefaultInmemSignal
-	DefaultSignal = syscall.SIGUSR1
-)
diff --git a/vendor/github.com/armon/go-metrics/const_windows.go b/vendor/github.com/armon/go-metrics/const_windows.go
deleted file mode 100644
index 38136af..0000000
--- a/vendor/github.com/armon/go-metrics/const_windows.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build windows
-
-package metrics
-
-import (
-	"syscall"
-)
-
-const (
-	// DefaultSignal is used with DefaultInmemSignal
-	// Windows has no SIGUSR1, use SIGBREAK
-	DefaultSignal = syscall.Signal(21)
-)
diff --git a/vendor/github.com/armon/go-metrics/go.mod b/vendor/github.com/armon/go-metrics/go.mod
deleted file mode 100644
index 88e1e98..0000000
--- a/vendor/github.com/armon/go-metrics/go.mod
+++ /dev/null
@@ -1,16 +0,0 @@
-module github.com/armon/go-metrics
-
-go 1.12
-
-require (
-	github.com/DataDog/datadog-go v2.2.0+incompatible
-	github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible
-	github.com/circonus-labs/circonusllhist v0.1.3 // indirect
-	github.com/hashicorp/go-immutable-radix v1.0.0
-	github.com/hashicorp/go-retryablehttp v0.5.3 // indirect
-	github.com/pascaldekloe/goe v0.1.0
-	github.com/pkg/errors v0.8.1 // indirect
-	github.com/prometheus/client_golang v0.9.2
-	github.com/stretchr/testify v1.3.0 // indirect
-	github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 // indirect
-)
diff --git a/vendor/github.com/armon/go-metrics/go.sum b/vendor/github.com/armon/go-metrics/go.sum
deleted file mode 100644
index 5ffd832..0000000
--- a/vendor/github.com/armon/go-metrics/go.sum
+++ /dev/null
@@ -1,46 +0,0 @@
-github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4=
-github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
-github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible h1:C29Ae4G5GtYyYMm1aztcyj/J5ckgJm2zwdDajFbx1NY=
-github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
-github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj7ug5D7I/orNUA=
-github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
-github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/hashicorp/go-cleanhttp v0.5.0 h1:wvCrVc9TjDls6+YGAF2hAifE1E5U1+b4tH6KdvN3Gig=
-github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s=
-github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
-github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
-github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
-github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
-github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
-github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
-github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
-github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926 h1:G3dpKMzFDjgEh2q1Z7zUUtKa8ViPtH+ocF0bE0g00O8=
-github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ=
-golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
diff --git a/vendor/github.com/armon/go-metrics/inmem.go b/vendor/github.com/armon/go-metrics/inmem.go
deleted file mode 100644
index 93b0e0a..0000000
--- a/vendor/github.com/armon/go-metrics/inmem.go
+++ /dev/null
@@ -1,348 +0,0 @@
-package metrics
-
-import (
-	"bytes"
-	"fmt"
-	"math"
-	"net/url"
-	"strings"
-	"sync"
-	"time"
-)
-
-// InmemSink provides a MetricSink that does in-memory aggregation
-// without sending metrics over a network. It can be embedded within
-// an application to provide profiling information.
-type InmemSink struct {
-	// How long is each aggregation interval
-	interval time.Duration
-
-	// Retain controls how many metrics interval we keep
-	retain time.Duration
-
-	// maxIntervals is the maximum length of intervals.
-	// It is retain / interval.
-	maxIntervals int
-
-	// intervals is a slice of the retained intervals
-	intervals    []*IntervalMetrics
-	intervalLock sync.RWMutex
-
-	rateDenom float64
-}
-
-// IntervalMetrics stores the aggregated metrics
-// for a specific interval
-type IntervalMetrics struct {
-	sync.RWMutex
-
-	// The start time of the interval
-	Interval time.Time
-
-	// Gauges maps the key to the last set value
-	Gauges map[string]GaugeValue
-
-	// Points maps the string to the list of emitted values
-	// from EmitKey
-	Points map[string][]float32
-
-	// Counters maps the string key to a sum of the counter
-	// values
-	Counters map[string]SampledValue
-
-	// Samples maps the key to an AggregateSample,
-	// which has the rolled up view of a sample
-	Samples map[string]SampledValue
-}
-
-// NewIntervalMetrics creates a new IntervalMetrics for a given interval
-func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
-	return &IntervalMetrics{
-		Interval: intv,
-		Gauges:   make(map[string]GaugeValue),
-		Points:   make(map[string][]float32),
-		Counters: make(map[string]SampledValue),
-		Samples:  make(map[string]SampledValue),
-	}
-}
-
-// AggregateSample is used to hold aggregate metrics
-// about a sample
-type AggregateSample struct {
-	Count       int       // The count of emitted pairs
-	Rate        float64   // The values rate per time unit (usually 1 second)
-	Sum         float64   // The sum of values
-	SumSq       float64   `json:"-"` // The sum of squared values
-	Min         float64   // Minimum value
-	Max         float64   // Maximum value
-	LastUpdated time.Time `json:"-"` // When value was last updated
-}
-
-// Computes a Stddev of the values
-func (a *AggregateSample) Stddev() float64 {
-	num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2)
-	div := float64(a.Count * (a.Count - 1))
-	if div == 0 {
-		return 0
-	}
-	return math.Sqrt(num / div)
-}
-
-// Computes a mean of the values
-func (a *AggregateSample) Mean() float64 {
-	if a.Count == 0 {
-		return 0
-	}
-	return a.Sum / float64(a.Count)
-}
-
-// Ingest is used to update a sample
-func (a *AggregateSample) Ingest(v float64, rateDenom float64) {
-	a.Count++
-	a.Sum += v
-	a.SumSq += (v * v)
-	if v < a.Min || a.Count == 1 {
-		a.Min = v
-	}
-	if v > a.Max || a.Count == 1 {
-		a.Max = v
-	}
-	a.Rate = float64(a.Sum) / rateDenom
-	a.LastUpdated = time.Now()
-}
-
-func (a *AggregateSample) String() string {
-	if a.Count == 0 {
-		return "Count: 0"
-	} else if a.Stddev() == 0 {
-		return fmt.Sprintf("Count: %d Sum: %0.3f LastUpdated: %s", a.Count, a.Sum, a.LastUpdated)
-	} else {
-		return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f LastUpdated: %s",
-			a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum, a.LastUpdated)
-	}
-}
-
-// NewInmemSinkFromURL creates an InmemSink from a URL. It is used
-// (and tested) from NewMetricSinkFromURL.
-func NewInmemSinkFromURL(u *url.URL) (MetricSink, error) {
-	params := u.Query()
-
-	interval, err := time.ParseDuration(params.Get("interval"))
-	if err != nil {
-		return nil, fmt.Errorf("Bad 'interval' param: %s", err)
-	}
-
-	retain, err := time.ParseDuration(params.Get("retain"))
-	if err != nil {
-		return nil, fmt.Errorf("Bad 'retain' param: %s", err)
-	}
-
-	return NewInmemSink(interval, retain), nil
-}
-
-// NewInmemSink is used to construct a new in-memory sink.
-// Uses an aggregation interval and maximum retention period.
-func NewInmemSink(interval, retain time.Duration) *InmemSink {
-	rateTimeUnit := time.Second
-	i := &InmemSink{
-		interval:     interval,
-		retain:       retain,
-		maxIntervals: int(retain / interval),
-		rateDenom:    float64(interval.Nanoseconds()) / float64(rateTimeUnit.Nanoseconds()),
-	}
-	i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals)
-	return i
-}
-
-func (i *InmemSink) SetGauge(key []string, val float32) {
-	i.SetGaugeWithLabels(key, val, nil)
-}
-
-func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
-	k, name := i.flattenKeyLabels(key, labels)
-	intv := i.getInterval()
-
-	intv.Lock()
-	defer intv.Unlock()
-	intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels}
-}
-
-func (i *InmemSink) EmitKey(key []string, val float32) {
-	k := i.flattenKey(key)
-	intv := i.getInterval()
-
-	intv.Lock()
-	defer intv.Unlock()
-	vals := intv.Points[k]
-	intv.Points[k] = append(vals, val)
-}
-
-func (i *InmemSink) IncrCounter(key []string, val float32) {
-	i.IncrCounterWithLabels(key, val, nil)
-}
-
-func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
-	k, name := i.flattenKeyLabels(key, labels)
-	intv := i.getInterval()
-
-	intv.Lock()
-	defer intv.Unlock()
-
-	agg, ok := intv.Counters[k]
-	if !ok {
-		agg = SampledValue{
-			Name:            name,
-			AggregateSample: &AggregateSample{},
-			Labels:          labels,
-		}
-		intv.Counters[k] = agg
-	}
-	agg.Ingest(float64(val), i.rateDenom)
-}
-
-func (i *InmemSink) AddSample(key []string, val float32) {
-	i.AddSampleWithLabels(key, val, nil)
-}
-
-func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
-	k, name := i.flattenKeyLabels(key, labels)
-	intv := i.getInterval()
-
-	intv.Lock()
-	defer intv.Unlock()
-
-	agg, ok := intv.Samples[k]
-	if !ok {
-		agg = SampledValue{
-			Name:            name,
-			AggregateSample: &AggregateSample{},
-			Labels:          labels,
-		}
-		intv.Samples[k] = agg
-	}
-	agg.Ingest(float64(val), i.rateDenom)
-}
-
-// Data is used to retrieve all the aggregated metrics
-// Intervals may be in use, and a read lock should be acquired
-func (i *InmemSink) Data() []*IntervalMetrics {
-	// Get the current interval, forces creation
-	i.getInterval()
-
-	i.intervalLock.RLock()
-	defer i.intervalLock.RUnlock()
-
-	n := len(i.intervals)
-	intervals := make([]*IntervalMetrics, n)
-
-	copy(intervals[:n-1], i.intervals[:n-1])
-	current := i.intervals[n-1]
-
-	// make its own copy for current interval
-	intervals[n-1] = &IntervalMetrics{}
-	copyCurrent := intervals[n-1]
-	current.RLock()
-	*copyCurrent = *current
-
-	copyCurrent.Gauges = make(map[string]GaugeValue, len(current.Gauges))
-	for k, v := range current.Gauges {
-		copyCurrent.Gauges[k] = v
-	}
-	// saved values will be not change, just copy its link
-	copyCurrent.Points = make(map[string][]float32, len(current.Points))
-	for k, v := range current.Points {
-		copyCurrent.Points[k] = v
-	}
-	copyCurrent.Counters = make(map[string]SampledValue, len(current.Counters))
-	for k, v := range current.Counters {
-		copyCurrent.Counters[k] = v.deepCopy()
-	}
-	copyCurrent.Samples = make(map[string]SampledValue, len(current.Samples))
-	for k, v := range current.Samples {
-		copyCurrent.Samples[k] = v.deepCopy()
-	}
-	current.RUnlock()
-
-	return intervals
-}
-
-func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics {
-	i.intervalLock.RLock()
-	defer i.intervalLock.RUnlock()
-
-	n := len(i.intervals)
-	if n > 0 && i.intervals[n-1].Interval == intv {
-		return i.intervals[n-1]
-	}
-	return nil
-}
-
-func (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics {
-	i.intervalLock.Lock()
-	defer i.intervalLock.Unlock()
-
-	// Check for an existing interval
-	n := len(i.intervals)
-	if n > 0 && i.intervals[n-1].Interval == intv {
-		return i.intervals[n-1]
-	}
-
-	// Add the current interval
-	current := NewIntervalMetrics(intv)
-	i.intervals = append(i.intervals, current)
-	n++
-
-	// Truncate the intervals if they are too long
-	if n >= i.maxIntervals {
-		copy(i.intervals[0:], i.intervals[n-i.maxIntervals:])
-		i.intervals = i.intervals[:i.maxIntervals]
-	}
-	return current
-}
-
-// getInterval returns the current interval to write to
-func (i *InmemSink) getInterval() *IntervalMetrics {
-	intv := time.Now().Truncate(i.interval)
-	if m := i.getExistingInterval(intv); m != nil {
-		return m
-	}
-	return i.createInterval(intv)
-}
-
-// Flattens the key for formatting, removes spaces
-func (i *InmemSink) flattenKey(parts []string) string {
-	buf := &bytes.Buffer{}
-	replacer := strings.NewReplacer(" ", "_")
-
-	if len(parts) > 0 {
-		replacer.WriteString(buf, parts[0])
-	}
-	for _, part := range parts[1:] {
-		replacer.WriteString(buf, ".")
-		replacer.WriteString(buf, part)
-	}
-
-	return buf.String()
-}
-
-// Flattens the key for formatting along with its labels, removes spaces
-func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {
-	buf := &bytes.Buffer{}
-	replacer := strings.NewReplacer(" ", "_")
-
-	if len(parts) > 0 {
-		replacer.WriteString(buf, parts[0])
-	}
-	for _, part := range parts[1:] {
-		replacer.WriteString(buf, ".")
-		replacer.WriteString(buf, part)
-	}
-
-	key := buf.String()
-
-	for _, label := range labels {
-		replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
-	}
-
-	return buf.String(), key
-}
diff --git a/vendor/github.com/armon/go-metrics/inmem_endpoint.go b/vendor/github.com/armon/go-metrics/inmem_endpoint.go
deleted file mode 100644
index 5fac958..0000000
--- a/vendor/github.com/armon/go-metrics/inmem_endpoint.go
+++ /dev/null
@@ -1,131 +0,0 @@
-package metrics
-
-import (
-	"fmt"
-	"net/http"
-	"sort"
-	"time"
-)
-
-// MetricsSummary holds a roll-up of metrics info for a given interval
-type MetricsSummary struct {
-	Timestamp string
-	Gauges    []GaugeValue
-	Points    []PointValue
-	Counters  []SampledValue
-	Samples   []SampledValue
-}
-
-type GaugeValue struct {
-	Name  string
-	Hash  string `json:"-"`
-	Value float32
-
-	Labels        []Label           `json:"-"`
-	DisplayLabels map[string]string `json:"Labels"`
-}
-
-type PointValue struct {
-	Name   string
-	Points []float32
-}
-
-type SampledValue struct {
-	Name string
-	Hash string `json:"-"`
-	*AggregateSample
-	Mean   float64
-	Stddev float64
-
-	Labels        []Label           `json:"-"`
-	DisplayLabels map[string]string `json:"Labels"`
-}
-
-// deepCopy allocates a new instance of AggregateSample
-func (source *SampledValue) deepCopy() SampledValue {
-	dest := *source
-	if source.AggregateSample != nil {
-		dest.AggregateSample = &AggregateSample{}
-		*dest.AggregateSample = *source.AggregateSample
-	}
-	return dest
-}
-
-// DisplayMetrics returns a summary of the metrics from the most recent finished interval.
-func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
-	data := i.Data()
-
-	var interval *IntervalMetrics
-	n := len(data)
-	switch {
-	case n == 0:
-		return nil, fmt.Errorf("no metric intervals have been initialized yet")
-	case n == 1:
-		// Show the current interval if it's all we have
-		interval = data[0]
-	default:
-		// Show the most recent finished interval if we have one
-		interval = data[n-2]
-	}
-
-	interval.RLock()
-	defer interval.RUnlock()
-
-	summary := MetricsSummary{
-		Timestamp: interval.Interval.Round(time.Second).UTC().String(),
-		Gauges:    make([]GaugeValue, 0, len(interval.Gauges)),
-		Points:    make([]PointValue, 0, len(interval.Points)),
-	}
-
-	// Format and sort the output of each metric type, so it gets displayed in a
-	// deterministic order.
-	for name, points := range interval.Points {
-		summary.Points = append(summary.Points, PointValue{name, points})
-	}
-	sort.Slice(summary.Points, func(i, j int) bool {
-		return summary.Points[i].Name < summary.Points[j].Name
-	})
-
-	for hash, value := range interval.Gauges {
-		value.Hash = hash
-		value.DisplayLabels = make(map[string]string)
-		for _, label := range value.Labels {
-			value.DisplayLabels[label.Name] = label.Value
-		}
-		value.Labels = nil
-
-		summary.Gauges = append(summary.Gauges, value)
-	}
-	sort.Slice(summary.Gauges, func(i, j int) bool {
-		return summary.Gauges[i].Hash < summary.Gauges[j].Hash
-	})
-
-	summary.Counters = formatSamples(interval.Counters)
-	summary.Samples = formatSamples(interval.Samples)
-
-	return summary, nil
-}
-
-func formatSamples(source map[string]SampledValue) []SampledValue {
-	output := make([]SampledValue, 0, len(source))
-	for hash, sample := range source {
-		displayLabels := make(map[string]string)
-		for _, label := range sample.Labels {
-			displayLabels[label.Name] = label.Value
-		}
-
-		output = append(output, SampledValue{
-			Name:            sample.Name,
-			Hash:            hash,
-			AggregateSample: sample.AggregateSample,
-			Mean:            sample.AggregateSample.Mean(),
-			Stddev:          sample.AggregateSample.Stddev(),
-			DisplayLabels:   displayLabels,
-		})
-	}
-	sort.Slice(output, func(i, j int) bool {
-		return output[i].Hash < output[j].Hash
-	})
-
-	return output
-}
diff --git a/vendor/github.com/armon/go-metrics/inmem_signal.go b/vendor/github.com/armon/go-metrics/inmem_signal.go
deleted file mode 100644
index 0937f4a..0000000
--- a/vendor/github.com/armon/go-metrics/inmem_signal.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package metrics
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"os"
-	"os/signal"
-	"strings"
-	"sync"
-	"syscall"
-)
-
-// InmemSignal is used to listen for a given signal, and when received,
-// to dump the current metrics from the InmemSink to an io.Writer
-type InmemSignal struct {
-	signal syscall.Signal
-	inm    *InmemSink
-	w      io.Writer
-	sigCh  chan os.Signal
-
-	stop     bool
-	stopCh   chan struct{}
-	stopLock sync.Mutex
-}
-
-// NewInmemSignal creates a new InmemSignal which listens for a given signal,
-// and dumps the current metrics out to a writer
-func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal {
-	i := &InmemSignal{
-		signal: sig,
-		inm:    inmem,
-		w:      w,
-		sigCh:  make(chan os.Signal, 1),
-		stopCh: make(chan struct{}),
-	}
-	signal.Notify(i.sigCh, sig)
-	go i.run()
-	return i
-}
-
-// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1
-// and writes output to stderr. Windows uses SIGBREAK
-func DefaultInmemSignal(inmem *InmemSink) *InmemSignal {
-	return NewInmemSignal(inmem, DefaultSignal, os.Stderr)
-}
-
-// Stop is used to stop the InmemSignal from listening
-func (i *InmemSignal) Stop() {
-	i.stopLock.Lock()
-	defer i.stopLock.Unlock()
-
-	if i.stop {
-		return
-	}
-	i.stop = true
-	close(i.stopCh)
-	signal.Stop(i.sigCh)
-}
-
-// run is a long running routine that handles signals
-func (i *InmemSignal) run() {
-	for {
-		select {
-		case <-i.sigCh:
-			i.dumpStats()
-		case <-i.stopCh:
-			return
-		}
-	}
-}
-
-// dumpStats is used to dump the data to output writer
-func (i *InmemSignal) dumpStats() {
-	buf := bytes.NewBuffer(nil)
-
-	data := i.inm.Data()
-	// Skip the last period which is still being aggregated
-	for j := 0; j < len(data)-1; j++ {
-		intv := data[j]
-		intv.RLock()
-		for _, val := range intv.Gauges {
-			name := i.flattenLabels(val.Name, val.Labels)
-			fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value)
-		}
-		for name, vals := range intv.Points {
-			for _, val := range vals {
-				fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val)
-			}
-		}
-		for _, agg := range intv.Counters {
-			name := i.flattenLabels(agg.Name, agg.Labels)
-			fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
-		}
-		for _, agg := range intv.Samples {
-			name := i.flattenLabels(agg.Name, agg.Labels)
-			fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
-		}
-		intv.RUnlock()
-	}
-
-	// Write out the bytes
-	i.w.Write(buf.Bytes())
-}
-
-// Flattens the key for formatting along with its labels, removes spaces
-func (i *InmemSignal) flattenLabels(name string, labels []Label) string {
-	buf := bytes.NewBufferString(name)
-	replacer := strings.NewReplacer(" ", "_", ":", "_")
-
-	for _, label := range labels {
-		replacer.WriteString(buf, ".")
-		replacer.WriteString(buf, label.Value)
-	}
-
-	return buf.String()
-}
diff --git a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go
deleted file mode 100644
index 4920d68..0000000
--- a/vendor/github.com/armon/go-metrics/metrics.go
+++ /dev/null
@@ -1,278 +0,0 @@
-package metrics
-
-import (
-	"runtime"
-	"strings"
-	"time"
-
-	"github.com/hashicorp/go-immutable-radix"
-)
-
-type Label struct {
-	Name  string
-	Value string
-}
-
-func (m *Metrics) SetGauge(key []string, val float32) {
-	m.SetGaugeWithLabels(key, val, nil)
-}
-
-func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) {
-	if m.HostName != "" {
-		if m.EnableHostnameLabel {
-			labels = append(labels, Label{"host", m.HostName})
-		} else if m.EnableHostname {
-			key = insert(0, m.HostName, key)
-		}
-	}
-	if m.EnableTypePrefix {
-		key = insert(0, "gauge", key)
-	}
-	if m.ServiceName != "" {
-		if m.EnableServiceLabel {
-			labels = append(labels, Label{"service", m.ServiceName})
-		} else {
-			key = insert(0, m.ServiceName, key)
-		}
-	}
-	allowed, labelsFiltered := m.allowMetric(key, labels)
-	if !allowed {
-		return
-	}
-	m.sink.SetGaugeWithLabels(key, val, labelsFiltered)
-}
-
-func (m *Metrics) EmitKey(key []string, val float32) {
-	if m.EnableTypePrefix {
-		key = insert(0, "kv", key)
-	}
-	if m.ServiceName != "" {
-		key = insert(0, m.ServiceName, key)
-	}
-	allowed, _ := m.allowMetric(key, nil)
-	if !allowed {
-		return
-	}
-	m.sink.EmitKey(key, val)
-}
-
-func (m *Metrics) IncrCounter(key []string, val float32) {
-	m.IncrCounterWithLabels(key, val, nil)
-}
-
-func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) {
-	if m.HostName != "" && m.EnableHostnameLabel {
-		labels = append(labels, Label{"host", m.HostName})
-	}
-	if m.EnableTypePrefix {
-		key = insert(0, "counter", key)
-	}
-	if m.ServiceName != "" {
-		if m.EnableServiceLabel {
-			labels = append(labels, Label{"service", m.ServiceName})
-		} else {
-			key = insert(0, m.ServiceName, key)
-		}
-	}
-	allowed, labelsFiltered := m.allowMetric(key, labels)
-	if !allowed {
-		return
-	}
-	m.sink.IncrCounterWithLabels(key, val, labelsFiltered)
-}
-
-func (m *Metrics) AddSample(key []string, val float32) {
-	m.AddSampleWithLabels(key, val, nil)
-}
-
-func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) {
-	if m.HostName != "" && m.EnableHostnameLabel {
-		labels = append(labels, Label{"host", m.HostName})
-	}
-	if m.EnableTypePrefix {
-		key = insert(0, "sample", key)
-	}
-	if m.ServiceName != "" {
-		if m.EnableServiceLabel {
-			labels = append(labels, Label{"service", m.ServiceName})
-		} else {
-			key = insert(0, m.ServiceName, key)
-		}
-	}
-	allowed, labelsFiltered := m.allowMetric(key, labels)
-	if !allowed {
-		return
-	}
-	m.sink.AddSampleWithLabels(key, val, labelsFiltered)
-}
-
-func (m *Metrics) MeasureSince(key []string, start time.Time) {
-	m.MeasureSinceWithLabels(key, start, nil)
-}
-
-func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
-	if m.HostName != "" && m.EnableHostnameLabel {
-		labels = append(labels, Label{"host", m.HostName})
-	}
-	if m.EnableTypePrefix {
-		key = insert(0, "timer", key)
-	}
-	if m.ServiceName != "" {
-		if m.EnableServiceLabel {
-			labels = append(labels, Label{"service", m.ServiceName})
-		} else {
-			key = insert(0, m.ServiceName, key)
-		}
-	}
-	allowed, labelsFiltered := m.allowMetric(key, labels)
-	if !allowed {
-		return
-	}
-	now := time.Now()
-	elapsed := now.Sub(start)
-	msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity)
-	m.sink.AddSampleWithLabels(key, msec, labelsFiltered)
-}
-
-// UpdateFilter overwrites the existing filter with the given rules.
-func (m *Metrics) UpdateFilter(allow, block []string) {
-	m.UpdateFilterAndLabels(allow, block, m.AllowedLabels, m.BlockedLabels)
-}
-
-// UpdateFilterAndLabels overwrites the existing filter with the given rules.
-func (m *Metrics) UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
-	m.filterLock.Lock()
-	defer m.filterLock.Unlock()
-
-	m.AllowedPrefixes = allow
-	m.BlockedPrefixes = block
-
-	if allowedLabels == nil {
-		// Having a white list means we take only elements from it
-		m.allowedLabels = nil
-	} else {
-		m.allowedLabels = make(map[string]bool)
-		for _, v := range allowedLabels {
-			m.allowedLabels[v] = true
-		}
-	}
-	m.blockedLabels = make(map[string]bool)
-	for _, v := range blockedLabels {
-		m.blockedLabels[v] = true
-	}
-	m.AllowedLabels = allowedLabels
-	m.BlockedLabels = blockedLabels
-
-	m.filter = iradix.New()
-	for _, prefix := range m.AllowedPrefixes {
-		m.filter, _, _ = m.filter.Insert([]byte(prefix), true)
-	}
-	for _, prefix := range m.BlockedPrefixes {
-		m.filter, _, _ = m.filter.Insert([]byte(prefix), false)
-	}
-}
-
-// labelIsAllowed return true if a should be included in metric
-// the caller should lock m.filterLock while calling this method
-func (m *Metrics) labelIsAllowed(label *Label) bool {
-	labelName := (*label).Name
-	if m.blockedLabels != nil {
-		_, ok := m.blockedLabels[labelName]
-		if ok {
-			// If present, let's remove this label
-			return false
-		}
-	}
-	if m.allowedLabels != nil {
-		_, ok := m.allowedLabels[labelName]
-		return ok
-	}
-	// Allow by default
-	return true
-}
-
-// filterLabels return only allowed labels
-// the caller should lock m.filterLock while calling this method
-func (m *Metrics) filterLabels(labels []Label) []Label {
-	if labels == nil {
-		return nil
-	}
-	toReturn := []Label{}
-	for _, label := range labels {
-		if m.labelIsAllowed(&label) {
-			toReturn = append(toReturn, label)
-		}
-	}
-	return toReturn
-}
-
-// Returns whether the metric should be allowed based on configured prefix filters
-// Also return the applicable labels
-func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) {
-	m.filterLock.RLock()
-	defer m.filterLock.RUnlock()
-
-	if m.filter == nil || m.filter.Len() == 0 {
-		return m.Config.FilterDefault, m.filterLabels(labels)
-	}
-
-	_, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, ".")))
-	if !ok {
-		return m.Config.FilterDefault, m.filterLabels(labels)
-	}
-
-	return allowed.(bool), m.filterLabels(labels)
-}
-
-// Periodically collects runtime stats to publish
-func (m *Metrics) collectStats() {
-	for {
-		time.Sleep(m.ProfileInterval)
-		m.emitRuntimeStats()
-	}
-}
-
-// Emits various runtime statsitics
-func (m *Metrics) emitRuntimeStats() {
-	// Export number of Goroutines
-	numRoutines := runtime.NumGoroutine()
-	m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines))
-
-	// Export memory stats
-	var stats runtime.MemStats
-	runtime.ReadMemStats(&stats)
-	m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc))
-	m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys))
-	m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs))
-	m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees))
-	m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects))
-	m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs))
-	m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC))
-
-	// Export info about the last few GC runs
-	num := stats.NumGC
-
-	// Handle wrap around
-	if num < m.lastNumGC {
-		m.lastNumGC = 0
-	}
-
-	// Ensure we don't scan more than 256
-	if num-m.lastNumGC >= 256 {
-		m.lastNumGC = num - 255
-	}
-
-	for i := m.lastNumGC; i < num; i++ {
-		pause := stats.PauseNs[i%256]
-		m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause))
-	}
-	m.lastNumGC = num
-}
-
-// Inserts a string value at an index into the slice
-func insert(i int, v string, s []string) []string {
-	s = append(s, "")
-	copy(s[i+1:], s[i:])
-	s[i] = v
-	return s
-}
diff --git a/vendor/github.com/armon/go-metrics/sink.go b/vendor/github.com/armon/go-metrics/sink.go
deleted file mode 100644
index 0b7d6e4..0000000
--- a/vendor/github.com/armon/go-metrics/sink.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package metrics
-
-import (
-	"fmt"
-	"net/url"
-)
-
-// The MetricSink interface is used to transmit metrics information
-// to an external system
-type MetricSink interface {
-	// A Gauge should retain the last value it is set to
-	SetGauge(key []string, val float32)
-	SetGaugeWithLabels(key []string, val float32, labels []Label)
-
-	// Should emit a Key/Value pair for each call
-	EmitKey(key []string, val float32)
-
-	// Counters should accumulate values
-	IncrCounter(key []string, val float32)
-	IncrCounterWithLabels(key []string, val float32, labels []Label)
-
-	// Samples are for timing information, where quantiles are used
-	AddSample(key []string, val float32)
-	AddSampleWithLabels(key []string, val float32, labels []Label)
-}
-
-// BlackholeSink is used to just blackhole messages
-type BlackholeSink struct{}
-
-func (*BlackholeSink) SetGauge(key []string, val float32)                              {}
-func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label)    {}
-func (*BlackholeSink) EmitKey(key []string, val float32)                               {}
-func (*BlackholeSink) IncrCounter(key []string, val float32)                           {}
-func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {}
-func (*BlackholeSink) AddSample(key []string, val float32)                             {}
-func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label)   {}
-
-// FanoutSink is used to sink to fanout values to multiple sinks
-type FanoutSink []MetricSink
-
-func (fh FanoutSink) SetGauge(key []string, val float32) {
-	fh.SetGaugeWithLabels(key, val, nil)
-}
-
-func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
-	for _, s := range fh {
-		s.SetGaugeWithLabels(key, val, labels)
-	}
-}
-
-func (fh FanoutSink) EmitKey(key []string, val float32) {
-	for _, s := range fh {
-		s.EmitKey(key, val)
-	}
-}
-
-func (fh FanoutSink) IncrCounter(key []string, val float32) {
-	fh.IncrCounterWithLabels(key, val, nil)
-}
-
-func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
-	for _, s := range fh {
-		s.IncrCounterWithLabels(key, val, labels)
-	}
-}
-
-func (fh FanoutSink) AddSample(key []string, val float32) {
-	fh.AddSampleWithLabels(key, val, nil)
-}
-
-func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
-	for _, s := range fh {
-		s.AddSampleWithLabels(key, val, labels)
-	}
-}
-
-// sinkURLFactoryFunc is an generic interface around the *SinkFromURL() function provided
-// by each sink type
-type sinkURLFactoryFunc func(*url.URL) (MetricSink, error)
-
-// sinkRegistry supports the generic NewMetricSink function by mapping URL
-// schemes to metric sink factory functions
-var sinkRegistry = map[string]sinkURLFactoryFunc{
-	"statsd":   NewStatsdSinkFromURL,
-	"statsite": NewStatsiteSinkFromURL,
-	"inmem":    NewInmemSinkFromURL,
-}
-
-// NewMetricSinkFromURL allows a generic URL input to configure any of the
-// supported sinks. The scheme of the URL identifies the type of the sink, the
-// and query parameters are used to set options.
-//
-// "statsd://" - Initializes a StatsdSink. The host and port are passed through
-// as the "addr" of the sink
-//
-// "statsite://" - Initializes a StatsiteSink. The host and port become the
-// "addr" of the sink
-//
-// "inmem://" - Initializes an InmemSink. The host and port are ignored. The
-// "interval" and "duration" query parameters must be specified with valid
-// durations, see NewInmemSink for details.
-func NewMetricSinkFromURL(urlStr string) (MetricSink, error) {
-	u, err := url.Parse(urlStr)
-	if err != nil {
-		return nil, err
-	}
-
-	sinkURLFactoryFunc := sinkRegistry[u.Scheme]
-	if sinkURLFactoryFunc == nil {
-		return nil, fmt.Errorf(
-			"cannot create metric sink, unrecognized sink name: %q", u.Scheme)
-	}
-
-	return sinkURLFactoryFunc(u)
-}
diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go
deleted file mode 100644
index 32a28c4..0000000
--- a/vendor/github.com/armon/go-metrics/start.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package metrics
-
-import (
-	"os"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/hashicorp/go-immutable-radix"
-)
-
-// Config is used to configure metrics settings
-type Config struct {
-	ServiceName          string        // Prefixed with keys to separate services
-	HostName             string        // Hostname to use. If not provided and EnableHostname, it will be os.Hostname
-	EnableHostname       bool          // Enable prefixing gauge values with hostname
-	EnableHostnameLabel  bool          // Enable adding hostname to labels
-	EnableServiceLabel   bool          // Enable adding service to labels
-	EnableRuntimeMetrics bool          // Enables profiling of runtime metrics (GC, Goroutines, Memory)
-	EnableTypePrefix     bool          // Prefixes key with a type ("counter", "gauge", "timer")
-	TimerGranularity     time.Duration // Granularity of timers.
-	ProfileInterval      time.Duration // Interval to profile runtime metrics
-
-	AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator
-	BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator
-	AllowedLabels   []string // A list of metric labels to allow, with '.' as the separator
-	BlockedLabels   []string // A list of metric labels to block, with '.' as the separator
-	FilterDefault   bool     // Whether to allow metrics by default
-}
-
-// Metrics represents an instance of a metrics sink that can
-// be used to emit
-type Metrics struct {
-	Config
-	lastNumGC     uint32
-	sink          MetricSink
-	filter        *iradix.Tree
-	allowedLabels map[string]bool
-	blockedLabels map[string]bool
-	filterLock    sync.RWMutex // Lock filters and allowedLabels/blockedLabels access
-}
-
-// Shared global metrics instance
-var globalMetrics atomic.Value // *Metrics
-
-func init() {
-	// Initialize to a blackhole sink to avoid errors
-	globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
-}
-
-// DefaultConfig provides a sane default configuration
-func DefaultConfig(serviceName string) *Config {
-	c := &Config{
-		ServiceName:          serviceName, // Use client provided service
-		HostName:             "",
-		EnableHostname:       true,             // Enable hostname prefix
-		EnableRuntimeMetrics: true,             // Enable runtime profiling
-		EnableTypePrefix:     false,            // Disable type prefix
-		TimerGranularity:     time.Millisecond, // Timers are in milliseconds
-		ProfileInterval:      time.Second,      // Poll runtime every second
-		FilterDefault:        true,             // Don't filter metrics by default
-	}
-
-	// Try to get the hostname
-	name, _ := os.Hostname()
-	c.HostName = name
-	return c
-}
-
-// New is used to create a new instance of Metrics
-func New(conf *Config, sink MetricSink) (*Metrics, error) {
-	met := &Metrics{}
-	met.Config = *conf
-	met.sink = sink
-	met.UpdateFilterAndLabels(conf.AllowedPrefixes, conf.BlockedPrefixes, conf.AllowedLabels, conf.BlockedLabels)
-
-	// Start the runtime collector
-	if conf.EnableRuntimeMetrics {
-		go met.collectStats()
-	}
-	return met, nil
-}
-
-// NewGlobal is the same as New, but it assigns the metrics object to be
-// used globally as well as returning it.
-func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) {
-	metrics, err := New(conf, sink)
-	if err == nil {
-		globalMetrics.Store(metrics)
-	}
-	return metrics, err
-}
-
-// Proxy all the methods to the globalMetrics instance
-func SetGauge(key []string, val float32) {
-	globalMetrics.Load().(*Metrics).SetGauge(key, val)
-}
-
-func SetGaugeWithLabels(key []string, val float32, labels []Label) {
-	globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels)
-}
-
-func EmitKey(key []string, val float32) {
-	globalMetrics.Load().(*Metrics).EmitKey(key, val)
-}
-
-func IncrCounter(key []string, val float32) {
-	globalMetrics.Load().(*Metrics).IncrCounter(key, val)
-}
-
-func IncrCounterWithLabels(key []string, val float32, labels []Label) {
-	globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels)
-}
-
-func AddSample(key []string, val float32) {
-	globalMetrics.Load().(*Metrics).AddSample(key, val)
-}
-
-func AddSampleWithLabels(key []string, val float32, labels []Label) {
-	globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels)
-}
-
-func MeasureSince(key []string, start time.Time) {
-	globalMetrics.Load().(*Metrics).MeasureSince(key, start)
-}
-
-func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
-	globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels)
-}
-
-func UpdateFilter(allow, block []string) {
-	globalMetrics.Load().(*Metrics).UpdateFilter(allow, block)
-}
-
-// UpdateFilterAndLabels set allow/block prefixes of metrics while allowedLabels
-// and blockedLabels - when not nil - allow filtering of labels in order to
-// block/allow globally labels (especially useful when having large number of
-// values for a given label). See README.md for more information about usage.
-func UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels []string) {
-	globalMetrics.Load().(*Metrics).UpdateFilterAndLabels(allow, block, allowedLabels, blockedLabels)
-}
diff --git a/vendor/github.com/armon/go-metrics/statsd.go b/vendor/github.com/armon/go-metrics/statsd.go
deleted file mode 100644
index 1bfffce..0000000
--- a/vendor/github.com/armon/go-metrics/statsd.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package metrics
-
-import (
-	"bytes"
-	"fmt"
-	"log"
-	"net"
-	"net/url"
-	"strings"
-	"time"
-)
-
-const (
-	// statsdMaxLen is the maximum size of a packet
-	// to send to statsd
-	statsdMaxLen = 1400
-)
-
-// StatsdSink provides a MetricSink that can be used
-// with a statsite or statsd metrics server. It uses
-// only UDP packets, while StatsiteSink uses TCP.
-type StatsdSink struct {
-	addr        string
-	metricQueue chan string
-}
-
-// NewStatsdSinkFromURL creates an StatsdSink from a URL. It is used
-// (and tested) from NewMetricSinkFromURL.
-func NewStatsdSinkFromURL(u *url.URL) (MetricSink, error) {
-	return NewStatsdSink(u.Host)
-}
-
-// NewStatsdSink is used to create a new StatsdSink
-func NewStatsdSink(addr string) (*StatsdSink, error) {
-	s := &StatsdSink{
-		addr:        addr,
-		metricQueue: make(chan string, 4096),
-	}
-	go s.flushMetrics()
-	return s, nil
-}
-
-// Close is used to stop flushing to statsd
-func (s *StatsdSink) Shutdown() {
-	close(s.metricQueue)
-}
-
-func (s *StatsdSink) SetGauge(key []string, val float32) {
-	flatKey := s.flattenKey(key)
-	s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
-}
-
-func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
-	flatKey := s.flattenKeyLabels(key, labels)
-	s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
-}
-
-func (s *StatsdSink) EmitKey(key []string, val float32) {
-	flatKey := s.flattenKey(key)
-	s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
-}
-
-func (s *StatsdSink) IncrCounter(key []string, val float32) {
-	flatKey := s.flattenKey(key)
-	s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
-}
-
-func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
-	flatKey := s.flattenKeyLabels(key, labels)
-	s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
-}
-
-func (s *StatsdSink) AddSample(key []string, val float32) {
-	flatKey := s.flattenKey(key)
-	s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
-}
-
-func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
-	flatKey := s.flattenKeyLabels(key, labels)
-	s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
-}
-
-// Flattens the key for formatting, removes spaces
-func (s *StatsdSink) flattenKey(parts []string) string {
-	joined := strings.Join(parts, ".")
-	return strings.Map(func(r rune) rune {
-		switch r {
-		case ':':
-			fallthrough
-		case ' ':
-			return '_'
-		default:
-			return r
-		}
-	}, joined)
-}
-
-// Flattens the key along with labels for formatting, removes spaces
-func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string {
-	for _, label := range labels {
-		parts = append(parts, label.Value)
-	}
-	return s.flattenKey(parts)
-}
-
-// Does a non-blocking push to the metrics queue
-func (s *StatsdSink) pushMetric(m string) {
-	select {
-	case s.metricQueue <- m:
-	default:
-	}
-}
-
-// Flushes metrics
-func (s *StatsdSink) flushMetrics() {
-	var sock net.Conn
-	var err error
-	var wait <-chan time.Time
-	ticker := time.NewTicker(flushInterval)
-	defer ticker.Stop()
-
-CONNECT:
-	// Create a buffer
-	buf := bytes.NewBuffer(nil)
-
-	// Attempt to connect
-	sock, err = net.Dial("udp", s.addr)
-	if err != nil {
-		log.Printf("[ERR] Error connecting to statsd! Err: %s", err)
-		goto WAIT
-	}
-
-	for {
-		select {
-		case metric, ok := <-s.metricQueue:
-			// Get a metric from the queue
-			if !ok {
-				goto QUIT
-			}
-
-			// Check if this would overflow the packet size
-			if len(metric)+buf.Len() > statsdMaxLen {
-				_, err := sock.Write(buf.Bytes())
-				buf.Reset()
-				if err != nil {
-					log.Printf("[ERR] Error writing to statsd! Err: %s", err)
-					goto WAIT
-				}
-			}
-
-			// Append to the buffer
-			buf.WriteString(metric)
-
-		case <-ticker.C:
-			if buf.Len() == 0 {
-				continue
-			}
-
-			_, err := sock.Write(buf.Bytes())
-			buf.Reset()
-			if err != nil {
-				log.Printf("[ERR] Error flushing to statsd! Err: %s", err)
-				goto WAIT
-			}
-		}
-	}
-
-WAIT:
-	// Wait for a while
-	wait = time.After(time.Duration(5) * time.Second)
-	for {
-		select {
-		// Dequeue the messages to avoid backlog
-		case _, ok := <-s.metricQueue:
-			if !ok {
-				goto QUIT
-			}
-		case <-wait:
-			goto CONNECT
-		}
-	}
-QUIT:
-	s.metricQueue = nil
-}
diff --git a/vendor/github.com/armon/go-metrics/statsite.go b/vendor/github.com/armon/go-metrics/statsite.go
deleted file mode 100644
index 6c0d284..0000000
--- a/vendor/github.com/armon/go-metrics/statsite.go
+++ /dev/null
@@ -1,172 +0,0 @@
-package metrics
-
-import (
-	"bufio"
-	"fmt"
-	"log"
-	"net"
-	"net/url"
-	"strings"
-	"time"
-)
-
-const (
-	// We force flush the statsite metrics after this period of
-	// inactivity. Prevents stats from getting stuck in a buffer
-	// forever.
-	flushInterval = 100 * time.Millisecond
-)
-
-// NewStatsiteSinkFromURL creates an StatsiteSink from a URL. It is used
-// (and tested) from NewMetricSinkFromURL.
-func NewStatsiteSinkFromURL(u *url.URL) (MetricSink, error) {
-	return NewStatsiteSink(u.Host)
-}
-
-// StatsiteSink provides a MetricSink that can be used with a
-// statsite metrics server
-type StatsiteSink struct {
-	addr        string
-	metricQueue chan string
-}
-
-// NewStatsiteSink is used to create a new StatsiteSink
-func NewStatsiteSink(addr string) (*StatsiteSink, error) {
-	s := &StatsiteSink{
-		addr:        addr,
-		metricQueue: make(chan string, 4096),
-	}
-	go s.flushMetrics()
-	return s, nil
-}
-
-// Close is used to stop flushing to statsite
-func (s *StatsiteSink) Shutdown() {
-	close(s.metricQueue)
-}
-
-func (s *StatsiteSink) SetGauge(key []string, val float32) {
-	flatKey := s.flattenKey(key)
-	s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
-}
-
-func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
-	flatKey := s.flattenKeyLabels(key, labels)
-	s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
-}
-
-func (s *StatsiteSink) EmitKey(key []string, val float32) {
-	flatKey := s.flattenKey(key)
-	s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
-}
-
-func (s *StatsiteSink) IncrCounter(key []string, val float32) {
-	flatKey := s.flattenKey(key)
-	s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
-}
-
-func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
-	flatKey := s.flattenKeyLabels(key, labels)
-	s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
-}
-
-func (s *StatsiteSink) AddSample(key []string, val float32) {
-	flatKey := s.flattenKey(key)
-	s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
-}
-
-func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
-	flatKey := s.flattenKeyLabels(key, labels)
-	s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
-}
-
-// Flattens the key for formatting, removes spaces
-func (s *StatsiteSink) flattenKey(parts []string) string {
-	joined := strings.Join(parts, ".")
-	return strings.Map(func(r rune) rune {
-		switch r {
-		case ':':
-			fallthrough
-		case ' ':
-			return '_'
-		default:
-			return r
-		}
-	}, joined)
-}
-
-// Flattens the key along with labels for formatting, removes spaces
-func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string {
-	for _, label := range labels {
-		parts = append(parts, label.Value)
-	}
-	return s.flattenKey(parts)
-}
-
-// Does a non-blocking push to the metrics queue
-func (s *StatsiteSink) pushMetric(m string) {
-	select {
-	case s.metricQueue <- m:
-	default:
-	}
-}
-
-// Flushes metrics
-func (s *StatsiteSink) flushMetrics() {
-	var sock net.Conn
-	var err error
-	var wait <-chan time.Time
-	var buffered *bufio.Writer
-	ticker := time.NewTicker(flushInterval)
-	defer ticker.Stop()
-
-CONNECT:
-	// Attempt to connect
-	sock, err = net.Dial("tcp", s.addr)
-	if err != nil {
-		log.Printf("[ERR] Error connecting to statsite! Err: %s", err)
-		goto WAIT
-	}
-
-	// Create a buffered writer
-	buffered = bufio.NewWriter(sock)
-
-	for {
-		select {
-		case metric, ok := <-s.metricQueue:
-			// Get a metric from the queue
-			if !ok {
-				goto QUIT
-			}
-
-			// Try to send to statsite
-			_, err := buffered.Write([]byte(metric))
-			if err != nil {
-				log.Printf("[ERR] Error writing to statsite! Err: %s", err)
-				goto WAIT
-			}
-		case <-ticker.C:
-			if err := buffered.Flush(); err != nil {
-				log.Printf("[ERR] Error flushing to statsite! Err: %s", err)
-				goto WAIT
-			}
-		}
-	}
-
-WAIT:
-	// Wait for a while
-	wait = time.After(time.Duration(5) * time.Second)
-	for {
-		select {
-		// Dequeue the messages to avoid backlog
-		case _, ok := <-s.metricQueue:
-			if !ok {
-				goto QUIT
-			}
-		case <-wait:
-			goto CONNECT
-		}
-	}
-QUIT:
-	s.metricQueue = nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/LICENSE b/vendor/github.com/hashicorp/consul/api/LICENSE
deleted file mode 100644
index c33dcc7..0000000
--- a/vendor/github.com/hashicorp/consul/api/LICENSE
+++ /dev/null
@@ -1,354 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. “Contributor”
-
-     means each individual or legal entity that creates, contributes to the
-     creation of, or owns Covered Software.
-
-1.2. “Contributor Version”
-
-     means the combination of the Contributions of others (if any) used by a
-     Contributor and that particular Contributor’s Contribution.
-
-1.3. “Contribution”
-
-     means Covered Software of a particular Contributor.
-
-1.4. “Covered Software”
-
-     means Source Code Form to which the initial Contributor has attached the
-     notice in Exhibit A, the Executable Form of such Source Code Form, and
-     Modifications of such Source Code Form, in each case including portions
-     thereof.
-
-1.5. “Incompatible With Secondary Licenses”
-     means
-
-     a. that the initial Contributor has attached the notice described in
-        Exhibit B to the Covered Software; or
-
-     b. that the Covered Software was made available under the terms of version
-        1.1 or earlier of the License, but not also under the terms of a
-        Secondary License.
-
-1.6. “Executable Form”
-
-     means any form of the work other than Source Code Form.
-
-1.7. “Larger Work”
-
-     means a work that combines Covered Software with other material, in a separate
-     file or files, that is not Covered Software.
-
-1.8. “License”
-
-     means this document.
-
-1.9. “Licensable”
-
-     means having the right to grant, to the maximum extent possible, whether at the
-     time of the initial grant or subsequently, any and all of the rights conveyed by
-     this License.
-
-1.10. “Modifications”
-
-     means any of the following:
-
-     a. any file in Source Code Form that results from an addition to, deletion
-        from, or modification of the contents of Covered Software; or
-
-     b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. “Patent Claims” of a Contributor
-
-      means any patent claim(s), including without limitation, method, process,
-      and apparatus claims, in any patent Licensable by such Contributor that
-      would be infringed, but for the grant of the License, by the making,
-      using, selling, offering for sale, having made, import, or transfer of
-      either its Contributions or its Contributor Version.
-
-1.12. “Secondary License”
-
-      means either the GNU General Public License, Version 2.0, the GNU Lesser
-      General Public License, Version 2.1, the GNU Affero General Public
-      License, Version 3.0, or any later versions of those licenses.
-
-1.13. “Source Code Form”
-
-      means the form of the work preferred for making modifications.
-
-1.14. “You” (or “Your”)
-
-      means an individual or a legal entity exercising rights under this
-      License. For legal entities, “You” includes any entity that controls, is
-      controlled by, or is under common control with You. For purposes of this
-      definition, “control” means (a) the power, direct or indirect, to cause
-      the direction or management of such entity, whether by contract or
-      otherwise, or (b) ownership of more than fifty percent (50%) of the
-      outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
-     Each Contributor hereby grants You a world-wide, royalty-free,
-     non-exclusive license:
-
-     a. under intellectual property rights (other than patent or trademark)
-        Licensable by such Contributor to use, reproduce, make available,
-        modify, display, perform, distribute, and otherwise exploit its
-        Contributions, either on an unmodified basis, with Modifications, or as
-        part of a Larger Work; and
-
-     b. under Patent Claims of such Contributor to make, use, sell, offer for
-        sale, have made, import, and otherwise transfer either its Contributions
-        or its Contributor Version.
-
-2.2. Effective Date
-
-     The licenses granted in Section 2.1 with respect to any Contribution become
-     effective for each Contribution on the date the Contributor first distributes
-     such Contribution.
-
-2.3. Limitations on Grant Scope
-
-     The licenses granted in this Section 2 are the only rights granted under this
-     License. No additional rights or licenses will be implied from the distribution
-     or licensing of Covered Software under this License. Notwithstanding Section
-     2.1(b) above, no patent license is granted by a Contributor:
-
-     a. for any code that a Contributor has removed from Covered Software; or
-
-     b. for infringements caused by: (i) Your and any other third party’s
-        modifications of Covered Software, or (ii) the combination of its
-        Contributions with other software (except as part of its Contributor
-        Version); or
-
-     c. under Patent Claims infringed by Covered Software in the absence of its
-        Contributions.
-
-     This License does not grant any rights in the trademarks, service marks, or
-     logos of any Contributor (except as may be necessary to comply with the
-     notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-     No Contributor makes additional grants as a result of Your choice to
-     distribute the Covered Software under a subsequent version of this License
-     (see Section 10.2) or under the terms of a Secondary License (if permitted
-     under the terms of Section 3.3).
-
-2.5. Representation
-
-     Each Contributor represents that the Contributor believes its Contributions
-     are its original creation(s) or it has sufficient rights to grant the
-     rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-     This License is not intended to limit any rights You have under applicable
-     copyright doctrines of fair use, fair dealing, or other equivalents.
-
-2.7. Conditions
-
-     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
-     Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
-     All distribution of Covered Software in Source Code Form, including any
-     Modifications that You create or to which You contribute, must be under the
-     terms of this License. You must inform recipients that the Source Code Form
-     of the Covered Software is governed by the terms of this License, and how
-     they can obtain a copy of this License. You may not attempt to alter or
-     restrict the recipients’ rights in the Source Code Form.
-
-3.2. Distribution of Executable Form
-
-     If You distribute Covered Software in Executable Form then:
-
-     a. such Covered Software must also be made available in Source Code Form,
-        as described in Section 3.1, and You must inform recipients of the
-        Executable Form how they can obtain a copy of such Source Code Form by
-        reasonable means in a timely manner, at a charge no more than the cost
-        of distribution to the recipient; and
-
-     b. You may distribute such Executable Form under the terms of this License,
-        or sublicense it under different terms, provided that the license for
-        the Executable Form does not attempt to limit or alter the recipients’
-        rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-     You may create and distribute a Larger Work under terms of Your choice,
-     provided that You also comply with the requirements of this License for the
-     Covered Software. If the Larger Work is a combination of Covered Software
-     with a work governed by one or more Secondary Licenses, and the Covered
-     Software is not Incompatible With Secondary Licenses, this License permits
-     You to additionally distribute such Covered Software under the terms of
-     such Secondary License(s), so that the recipient of the Larger Work may, at
-     their option, further distribute the Covered Software under the terms of
-     either this License or such Secondary License(s).
-
-3.4. Notices
-
-     You may not remove or alter the substance of any license notices (including
-     copyright notices, patent notices, disclaimers of warranty, or limitations
-     of liability) contained within the Source Code Form of the Covered
-     Software, except that You may alter any license notices to the extent
-     required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-     You may choose to offer, and to charge a fee for, warranty, support,
-     indemnity or liability obligations to one or more recipients of Covered
-     Software. However, You may do so only on Your own behalf, and not on behalf
-     of any Contributor. You must make it absolutely clear that any such
-     warranty, support, indemnity, or liability obligation is offered by You
-     alone, and You hereby agree to indemnify every Contributor for any
-     liability incurred by such Contributor as a result of warranty, support,
-     indemnity or liability terms You offer. You may include additional
-     disclaimers of warranty and limitations of liability specific to any
-     jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
-   If it is impossible for You to comply with any of the terms of this License
-   with respect to some or all of the Covered Software due to statute, judicial
-   order, or regulation then You must: (a) comply with the terms of this License
-   to the maximum extent possible; and (b) describe the limitations and the code
-   they affect. Such description must be placed in a text file included with all
-   distributions of the Covered Software under this License. Except to the
-   extent prohibited by statute or regulation, such description must be
-   sufficiently detailed for a recipient of ordinary skill to be able to
-   understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
-     fail to comply with any of its terms. However, if You become compliant,
-     then the rights granted under this License from a particular Contributor
-     are reinstated (a) provisionally, unless and until such Contributor
-     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
-     if such Contributor fails to notify You of the non-compliance by some
-     reasonable means prior to 60 days after You have come back into compliance.
-     Moreover, Your grants from a particular Contributor are reinstated on an
-     ongoing basis if such Contributor notifies You of the non-compliance by
-     some reasonable means, this is the first time You have received notice of
-     non-compliance with this License from such Contributor, and You become
-     compliant prior to 30 days after Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-     infringement claim (excluding declaratory judgment actions, counter-claims,
-     and cross-claims) alleging that a Contributor Version directly or
-     indirectly infringes any patent, then the rights granted to You by any and
-     all Contributors for the Covered Software under Section 2.1 of this License
-     shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
-     license agreements (excluding distributors and resellers) which have been
-     validly granted by You or Your distributors under this License prior to
-     termination shall survive termination.
-
-6. Disclaimer of Warranty
-
-   Covered Software is provided under this License on an “as is” basis, without
-   warranty of any kind, either expressed, implied, or statutory, including,
-   without limitation, warranties that the Covered Software is free of defects,
-   merchantable, fit for a particular purpose or non-infringing. The entire
-   risk as to the quality and performance of the Covered Software is with You.
-   Should any Covered Software prove defective in any respect, You (not any
-   Contributor) assume the cost of any necessary servicing, repair, or
-   correction. This disclaimer of warranty constitutes an essential part of this
-   License. No use of  any Covered Software is authorized under this License
-   except under this disclaimer.
-
-7. Limitation of Liability
-
-   Under no circumstances and under no legal theory, whether tort (including
-   negligence), contract, or otherwise, shall any Contributor, or anyone who
-   distributes Covered Software as permitted above, be liable to You for any
-   direct, indirect, special, incidental, or consequential damages of any
-   character including, without limitation, damages for lost profits, loss of
-   goodwill, work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses, even if such party shall have been
-   informed of the possibility of such damages. This limitation of liability
-   shall not apply to liability for death or personal injury resulting from such
-   party’s negligence to the extent applicable law prohibits such limitation.
-   Some jurisdictions do not allow the exclusion or limitation of incidental or
-   consequential damages, so this exclusion and limitation may not apply to You.
-
-8. Litigation
-
-   Any litigation relating to this License may be brought only in the courts of
-   a jurisdiction where the defendant maintains its principal place of business
-   and such litigation shall be governed by laws of that jurisdiction, without
-   reference to its conflict-of-law provisions. Nothing in this Section shall
-   prevent a party’s ability to bring cross-claims or counter-claims.
-
-9. Miscellaneous
-
-   This License represents the complete agreement concerning the subject matter
-   hereof. If any provision of this License is held to be unenforceable, such
-   provision shall be reformed only to the extent necessary to make it
-   enforceable. Any law or regulation which provides that the language of a
-   contract shall be construed against the drafter shall not be used to construe
-   this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
-      Mozilla Foundation is the license steward. Except as provided in Section
-      10.3, no one other than the license steward has the right to modify or
-      publish new versions of this License. Each version will be given a
-      distinguishing version number.
-
-10.2. Effect of New Versions
-
-      You may distribute the Covered Software under the terms of the version of
-      the License under which You originally received the Covered Software, or
-      under the terms of any subsequent version published by the license
-      steward.
-
-10.3. Modified Versions
-
-      If you create software not governed by this License, and you want to
-      create a new license for such software, you may create and use a modified
-      version of this License if you rename the license and remove any
-      references to the name of the license steward (except to note that such
-      modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
-      If You choose to distribute Source Code Form that is Incompatible With
-      Secondary Licenses under the terms of this version of the License, the
-      notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
-
-      This Source Code Form is subject to the
-      terms of the Mozilla Public License, v.
-      2.0. If a copy of the MPL was not
-      distributed with this file, You can
-      obtain one at
-      http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
-      This Source Code Form is “Incompatible
-      With Secondary Licenses”, as defined by
-      the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md
deleted file mode 100644
index 3255cbb..0000000
--- a/vendor/github.com/hashicorp/consul/api/README.md
+++ /dev/null
@@ -1,67 +0,0 @@
-Consul API client
-=================
-
-This package provides the `api` package which attempts to
-provide programmatic access to the full Consul API.
-
-Currently, all of the Consul APIs included in version 0.6.0 are supported.
-
-Documentation
-=============
-
-The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api)
-
-Usage
-=====
-
-Below is an example of using the Consul client:
-
-```go
-package main
-
-import "github.com/hashicorp/consul/api"
-import "fmt"
-
-func main() {
-	// Get a new client
-	client, err := api.NewClient(api.DefaultConfig())
-	if err != nil {
-		panic(err)
-	}
-
-	// Get a handle to the KV API
-	kv := client.KV()
-
-	// PUT a new KV pair
-	p := &api.KVPair{Key: "REDIS_MAXCLIENTS", Value: []byte("1000")}
-	_, err = kv.Put(p, nil)
-	if err != nil {
-		panic(err)
-	}
-
-	// Lookup the pair
-	pair, _, err := kv.Get("REDIS_MAXCLIENTS", nil)
-	if err != nil {
-		panic(err)
-	}
-	fmt.Printf("KV: %v %s\n", pair.Key, pair.Value)
-}
-```
-
-To run this example, start a Consul server:
-
-```bash
-consul agent -dev
-```
-
-Copy the code above into a file such as `main.go`.
-
-Install and run. You'll see a key (`REDIS_MAXCLIENTS`) and value (`1000`) printed.
-
-```bash
-$ go get
-$ go run main.go
-KV: REDIS_MAXCLIENTS 1000
-```
-
-After running the code, you can also view the values in the Consul UI on your local machine at http://localhost:8500/ui/dc1/kv
diff --git a/vendor/github.com/hashicorp/consul/api/acl.go b/vendor/github.com/hashicorp/consul/api/acl.go
deleted file mode 100644
index 124409f..0000000
--- a/vendor/github.com/hashicorp/consul/api/acl.go
+++ /dev/null
@@ -1,1116 +0,0 @@
-package api
-
-import (
-	"fmt"
-	"io"
-	"io/ioutil"
-	"net/url"
-	"time"
-
-	"github.com/mitchellh/mapstructure"
-)
-
-const (
-	// ACLClientType is the client type token
-	ACLClientType = "client"
-
-	// ACLManagementType is the management type token
-	ACLManagementType = "management"
-)
-
-type ACLTokenPolicyLink struct {
-	ID   string
-	Name string
-}
-type ACLTokenRoleLink struct {
-	ID   string
-	Name string
-}
-
-// ACLToken represents an ACL Token
-type ACLToken struct {
-	CreateIndex       uint64
-	ModifyIndex       uint64
-	AccessorID        string
-	SecretID          string
-	Description       string
-	Policies          []*ACLTokenPolicyLink `json:",omitempty"`
-	Roles             []*ACLTokenRoleLink   `json:",omitempty"`
-	ServiceIdentities []*ACLServiceIdentity `json:",omitempty"`
-	Local             bool
-	ExpirationTTL     time.Duration `json:",omitempty"`
-	ExpirationTime    *time.Time    `json:",omitempty"`
-	CreateTime        time.Time     `json:",omitempty"`
-	Hash              []byte        `json:",omitempty"`
-
-	// DEPRECATED (ACL-Legacy-Compat)
-	// Rules will only be present for legacy tokens returned via the new APIs
-	Rules string `json:",omitempty"`
-}
-
-type ACLTokenListEntry struct {
-	CreateIndex       uint64
-	ModifyIndex       uint64
-	AccessorID        string
-	Description       string
-	Policies          []*ACLTokenPolicyLink `json:",omitempty"`
-	Roles             []*ACLTokenRoleLink   `json:",omitempty"`
-	ServiceIdentities []*ACLServiceIdentity `json:",omitempty"`
-	Local             bool
-	ExpirationTime    *time.Time `json:",omitempty"`
-	CreateTime        time.Time
-	Hash              []byte
-	Legacy            bool
-}
-
-// ACLEntry is used to represent a legacy ACL token
-// The legacy tokens are deprecated.
-type ACLEntry struct {
-	CreateIndex uint64
-	ModifyIndex uint64
-	ID          string
-	Name        string
-	Type        string
-	Rules       string
-}
-
-// ACLReplicationStatus is used to represent the status of ACL replication.
-type ACLReplicationStatus struct {
-	Enabled              bool
-	Running              bool
-	SourceDatacenter     string
-	ReplicationType      string
-	ReplicatedIndex      uint64
-	ReplicatedRoleIndex  uint64
-	ReplicatedTokenIndex uint64
-	LastSuccess          time.Time
-	LastError            time.Time
-}
-
-// ACLServiceIdentity represents a high-level grant of all necessary privileges
-// to assume the identity of the named Service in the Catalog and within
-// Connect.
-type ACLServiceIdentity struct {
-	ServiceName string
-	Datacenters []string `json:",omitempty"`
-}
-
-// ACLPolicy represents an ACL Policy.
-type ACLPolicy struct {
-	ID          string
-	Name        string
-	Description string
-	Rules       string
-	Datacenters []string
-	Hash        []byte
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-type ACLPolicyListEntry struct {
-	ID          string
-	Name        string
-	Description string
-	Datacenters []string
-	Hash        []byte
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-type ACLRolePolicyLink struct {
-	ID   string
-	Name string
-}
-
-// ACLRole represents an ACL Role.
-type ACLRole struct {
-	ID                string
-	Name              string
-	Description       string
-	Policies          []*ACLRolePolicyLink  `json:",omitempty"`
-	ServiceIdentities []*ACLServiceIdentity `json:",omitempty"`
-	Hash              []byte
-	CreateIndex       uint64
-	ModifyIndex       uint64
-}
-
-// BindingRuleBindType is the type of binding rule mechanism used.
-type BindingRuleBindType string
-
-const (
-	// BindingRuleBindTypeService binds to a service identity with the given name.
-	BindingRuleBindTypeService BindingRuleBindType = "service"
-
-	// BindingRuleBindTypeRole binds to pre-existing roles with the given name.
-	BindingRuleBindTypeRole BindingRuleBindType = "role"
-)
-
-type ACLBindingRule struct {
-	ID          string
-	Description string
-	AuthMethod  string
-	Selector    string
-	BindType    BindingRuleBindType
-	BindName    string
-
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-type ACLAuthMethod struct {
-	Name        string
-	Type        string
-	Description string
-
-	// Configuration is arbitrary configuration for the auth method. This
-	// should only contain primitive values and containers (such as lists and
-	// maps).
-	Config map[string]interface{}
-
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-type ACLAuthMethodListEntry struct {
-	Name        string
-	Type        string
-	Description string
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-// ParseKubernetesAuthMethodConfig takes a raw config map and returns a parsed
-// KubernetesAuthMethodConfig.
-func ParseKubernetesAuthMethodConfig(raw map[string]interface{}) (*KubernetesAuthMethodConfig, error) {
-	var config KubernetesAuthMethodConfig
-	decodeConf := &mapstructure.DecoderConfig{
-		Result:           &config,
-		WeaklyTypedInput: true,
-	}
-
-	decoder, err := mapstructure.NewDecoder(decodeConf)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := decoder.Decode(raw); err != nil {
-		return nil, fmt.Errorf("error decoding config: %s", err)
-	}
-
-	return &config, nil
-}
-
-// KubernetesAuthMethodConfig is the config for the built-in Consul auth method
-// for Kubernetes.
-type KubernetesAuthMethodConfig struct {
-	Host              string `json:",omitempty"`
-	CACert            string `json:",omitempty"`
-	ServiceAccountJWT string `json:",omitempty"`
-}
-
-// RenderToConfig converts this into a map[string]interface{} suitable for use
-// in the ACLAuthMethod.Config field.
-func (c *KubernetesAuthMethodConfig) RenderToConfig() map[string]interface{} {
-	return map[string]interface{}{
-		"Host":              c.Host,
-		"CACert":            c.CACert,
-		"ServiceAccountJWT": c.ServiceAccountJWT,
-	}
-}
-
-type ACLLoginParams struct {
-	AuthMethod  string
-	BearerToken string
-	Meta        map[string]string `json:",omitempty"`
-}
-
-// ACL can be used to query the ACL endpoints
-type ACL struct {
-	c *Client
-}
-
-// ACL returns a handle to the ACL endpoints
-func (c *Client) ACL() *ACL {
-	return &ACL{c}
-}
-
-// Bootstrap is used to perform a one-time ACL bootstrap operation on a cluster
-// to get the first management token.
-func (a *ACL) Bootstrap() (*ACLToken, *WriteMeta, error) {
-	r := a.c.newRequest("PUT", "/v1/acl/bootstrap")
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out ACLToken
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return &out, wm, nil
-}
-
-// Create is used to generate a new token with the given parameters
-//
-// Deprecated: Use TokenCreate instead.
-func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
-	r := a.c.newRequest("PUT", "/v1/acl/create")
-	r.setWriteOptions(q)
-	r.obj = acl
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return "", nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out struct{ ID string }
-	if err := decodeBody(resp, &out); err != nil {
-		return "", nil, err
-	}
-	return out.ID, wm, nil
-}
-
-// Update is used to update the rules of an existing token
-//
-// Deprecated: Use TokenUpdate instead.
-func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
-	r := a.c.newRequest("PUT", "/v1/acl/update")
-	r.setWriteOptions(q)
-	r.obj = acl
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	return wm, nil
-}
-
-// Destroy is used to destroy a given ACL token ID
-//
-// Deprecated: Use TokenDelete instead.
-func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
-	r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
-	r.setWriteOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	return wm, nil
-}
-
-// Clone is used to return a new token cloned from an existing one
-//
-// Deprecated: Use TokenClone instead.
-func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
-	r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
-	r.setWriteOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return "", nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out struct{ ID string }
-	if err := decodeBody(resp, &out); err != nil {
-		return "", nil, err
-	}
-	return out.ID, wm, nil
-}
-
-// Info is used to query for information about an ACL token
-//
-// Deprecated: Use TokenRead instead.
-func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/info/"+id)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var entries []*ACLEntry
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	if len(entries) > 0 {
-		return entries[0], qm, nil
-	}
-	return nil, qm, nil
-}
-
-// List is used to get all the ACL tokens
-//
-// Deprecated: Use TokenList instead.
-func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/list")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var entries []*ACLEntry
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-// Replication returns the status of the ACL replication process in the datacenter
-func (a *ACL) Replication(q *QueryOptions) (*ACLReplicationStatus, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/replication")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var entries *ACLReplicationStatus
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-// TokenCreate creates a new ACL token. If either the AccessorID or SecretID fields
-// of the ACLToken structure are empty they will be filled in by Consul.
-func (a *ACL) TokenCreate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
-	r := a.c.newRequest("PUT", "/v1/acl/token")
-	r.setWriteOptions(q)
-	r.obj = token
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out ACLToken
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, wm, nil
-}
-
-// TokenUpdate updates a token in place without modifying its AccessorID or SecretID. A valid
-// AccessorID must be set in the ACLToken structure passed to this function but the SecretID may
-// be omitted and will be filled in by Consul with its existing value.
-func (a *ACL) TokenUpdate(token *ACLToken, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
-	if token.AccessorID == "" {
-		return nil, nil, fmt.Errorf("Must specify an AccessorID for Token Updating")
-	}
-	r := a.c.newRequest("PUT", "/v1/acl/token/"+token.AccessorID)
-	r.setWriteOptions(q)
-	r.obj = token
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out ACLToken
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, wm, nil
-}
-
-// TokenClone will create a new token with the same policies and locality as the original
-// token but will have its own auto-generated AccessorID and SecretID as well having the
-// description passed to this function. The tokenID parameter must be a valid Accessor ID
-// of an existing token.
-func (a *ACL) TokenClone(tokenID string, description string, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
-	if tokenID == "" {
-		return nil, nil, fmt.Errorf("Must specify a tokenID for Token Cloning")
-	}
-
-	r := a.c.newRequest("PUT", "/v1/acl/token/"+tokenID+"/clone")
-	r.setWriteOptions(q)
-	r.obj = struct{ Description string }{description}
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out ACLToken
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, wm, nil
-}
-
-// TokenDelete removes a single ACL token. The tokenID parameter must be a valid
-// Accessor ID of an existing token.
-func (a *ACL) TokenDelete(tokenID string, q *WriteOptions) (*WriteMeta, error) {
-	r := a.c.newRequest("DELETE", "/v1/acl/token/"+tokenID)
-	r.setWriteOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	return wm, nil
-}
-
-// TokenRead retrieves the full token details. The tokenID parameter must be a valid
-// Accessor ID of an existing token.
-func (a *ACL) TokenRead(tokenID string, q *QueryOptions) (*ACLToken, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/token/"+tokenID)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out ACLToken
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, qm, nil
-}
-
-// TokenReadSelf retrieves the full token details of the token currently
-// assigned to the API Client. In this manner its possible to read a token
-// by its Secret ID.
-func (a *ACL) TokenReadSelf(q *QueryOptions) (*ACLToken, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/token/self")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out ACLToken
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, qm, nil
-}
-
-// TokenList lists all tokens. The listing does not contain any SecretIDs as those
-// may only be retrieved by a call to TokenRead.
-func (a *ACL) TokenList(q *QueryOptions) ([]*ACLTokenListEntry, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/tokens")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var entries []*ACLTokenListEntry
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-// PolicyCreate will create a new policy. It is not allowed for the policy parameters
-// ID field to be set as this will be generated by Consul while processing the request.
-func (a *ACL) PolicyCreate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) {
-	if policy.ID != "" {
-		return nil, nil, fmt.Errorf("Cannot specify an ID in Policy Creation")
-	}
-	r := a.c.newRequest("PUT", "/v1/acl/policy")
-	r.setWriteOptions(q)
-	r.obj = policy
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out ACLPolicy
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, wm, nil
-}
-
-// PolicyUpdate updates a policy. The ID field of the policy parameter must be set to an
-// existing policy ID
-func (a *ACL) PolicyUpdate(policy *ACLPolicy, q *WriteOptions) (*ACLPolicy, *WriteMeta, error) {
-	if policy.ID == "" {
-		return nil, nil, fmt.Errorf("Must specify an ID in Policy Update")
-	}
-
-	r := a.c.newRequest("PUT", "/v1/acl/policy/"+policy.ID)
-	r.setWriteOptions(q)
-	r.obj = policy
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out ACLPolicy
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, wm, nil
-}
-
-// PolicyDelete deletes a policy given its ID.
-func (a *ACL) PolicyDelete(policyID string, q *WriteOptions) (*WriteMeta, error) {
-	r := a.c.newRequest("DELETE", "/v1/acl/policy/"+policyID)
-	r.setWriteOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	return wm, nil
-}
-
-// PolicyRead retrieves the policy details including the rule set.
-func (a *ACL) PolicyRead(policyID string, q *QueryOptions) (*ACLPolicy, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/policy/"+policyID)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out ACLPolicy
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, qm, nil
-}
-
-// PolicyList retrieves a listing of all policies. The listing does not include the
-// rules for any policy as those should be retrieved by subsequent calls to PolicyRead.
-func (a *ACL) PolicyList(q *QueryOptions) ([]*ACLPolicyListEntry, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/policies")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var entries []*ACLPolicyListEntry
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-// RulesTranslate translates the legacy rule syntax into the current syntax.
-//
-// Deprecated: Support for the legacy syntax translation will be removed
-// when legacy ACL support is removed.
-func (a *ACL) RulesTranslate(rules io.Reader) (string, error) {
-	r := a.c.newRequest("POST", "/v1/acl/rules/translate")
-	r.body = rules
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return "", err
-	}
-	defer resp.Body.Close()
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	ruleBytes, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return "", fmt.Errorf("Failed to read translated rule body: %v", err)
-	}
-
-	return string(ruleBytes), nil
-}
-
-// RulesTranslateToken translates the rules associated with the legacy syntax
-// into the current syntax and returns the results.
-//
-// Deprecated: Support for the legacy syntax translation will be removed
-// when legacy ACL support is removed.
-func (a *ACL) RulesTranslateToken(tokenID string) (string, error) {
-	r := a.c.newRequest("GET", "/v1/acl/rules/translate/"+tokenID)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return "", err
-	}
-	defer resp.Body.Close()
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	ruleBytes, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return "", fmt.Errorf("Failed to read translated rule body: %v", err)
-	}
-
-	return string(ruleBytes), nil
-}
-
-// RoleCreate will create a new role. It is not allowed for the role parameters
-// ID field to be set as this will be generated by Consul while processing the request.
-func (a *ACL) RoleCreate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) {
-	if role.ID != "" {
-		return nil, nil, fmt.Errorf("Cannot specify an ID in Role Creation")
-	}
-
-	r := a.c.newRequest("PUT", "/v1/acl/role")
-	r.setWriteOptions(q)
-	r.obj = role
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out ACLRole
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, wm, nil
-}
-
-// RoleUpdate updates a role. The ID field of the role parameter must be set to an
-// existing role ID
-func (a *ACL) RoleUpdate(role *ACLRole, q *WriteOptions) (*ACLRole, *WriteMeta, error) {
-	if role.ID == "" {
-		return nil, nil, fmt.Errorf("Must specify an ID in Role Update")
-	}
-
-	r := a.c.newRequest("PUT", "/v1/acl/role/"+role.ID)
-	r.setWriteOptions(q)
-	r.obj = role
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out ACLRole
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, wm, nil
-}
-
-// RoleDelete deletes a role given its ID.
-func (a *ACL) RoleDelete(roleID string, q *WriteOptions) (*WriteMeta, error) {
-	r := a.c.newRequest("DELETE", "/v1/acl/role/"+roleID)
-	r.setWriteOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	return wm, nil
-}
-
-// RoleRead retrieves the role details (by ID). Returns nil if not found.
-func (a *ACL) RoleRead(roleID string, q *QueryOptions) (*ACLRole, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/role/"+roleID)
-	r.setQueryOptions(q)
-	found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	if !found {
-		return nil, qm, nil
-	}
-
-	var out ACLRole
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, qm, nil
-}
-
-// RoleReadByName retrieves the role details (by name). Returns nil if not found.
-func (a *ACL) RoleReadByName(roleName string, q *QueryOptions) (*ACLRole, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/role/name/"+url.QueryEscape(roleName))
-	r.setQueryOptions(q)
-	found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	if !found {
-		return nil, qm, nil
-	}
-
-	var out ACLRole
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, qm, nil
-}
-
-// RoleList retrieves a listing of all roles. The listing does not include some
-// metadata for the role as those should be retrieved by subsequent calls to
-// RoleRead.
-func (a *ACL) RoleList(q *QueryOptions) ([]*ACLRole, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/roles")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var entries []*ACLRole
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-// AuthMethodCreate will create a new auth method.
-func (a *ACL) AuthMethodCreate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) {
-	if method.Name == "" {
-		return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Creation")
-	}
-
-	r := a.c.newRequest("PUT", "/v1/acl/auth-method")
-	r.setWriteOptions(q)
-	r.obj = method
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out ACLAuthMethod
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, wm, nil
-}
-
-// AuthMethodUpdate updates an auth method.
-func (a *ACL) AuthMethodUpdate(method *ACLAuthMethod, q *WriteOptions) (*ACLAuthMethod, *WriteMeta, error) {
-	if method.Name == "" {
-		return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Update")
-	}
-
-	r := a.c.newRequest("PUT", "/v1/acl/auth-method/"+url.QueryEscape(method.Name))
-	r.setWriteOptions(q)
-	r.obj = method
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out ACLAuthMethod
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, wm, nil
-}
-
-// AuthMethodDelete deletes an auth method given its Name.
-func (a *ACL) AuthMethodDelete(methodName string, q *WriteOptions) (*WriteMeta, error) {
-	if methodName == "" {
-		return nil, fmt.Errorf("Must specify a Name in Auth Method Delete")
-	}
-
-	r := a.c.newRequest("DELETE", "/v1/acl/auth-method/"+url.QueryEscape(methodName))
-	r.setWriteOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	return wm, nil
-}
-
-// AuthMethodRead retrieves the auth method. Returns nil if not found.
-func (a *ACL) AuthMethodRead(methodName string, q *QueryOptions) (*ACLAuthMethod, *QueryMeta, error) {
-	if methodName == "" {
-		return nil, nil, fmt.Errorf("Must specify a Name in Auth Method Read")
-	}
-
-	r := a.c.newRequest("GET", "/v1/acl/auth-method/"+url.QueryEscape(methodName))
-	r.setQueryOptions(q)
-	found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	if !found {
-		return nil, qm, nil
-	}
-
-	var out ACLAuthMethod
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, qm, nil
-}
-
-// AuthMethodList retrieves a listing of all auth methods. The listing does not
-// include some metadata for the auth method as those should be retrieved by
-// subsequent calls to AuthMethodRead.
-func (a *ACL) AuthMethodList(q *QueryOptions) ([]*ACLAuthMethodListEntry, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/auth-methods")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var entries []*ACLAuthMethodListEntry
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-// BindingRuleCreate will create a new binding rule. It is not allowed for the
-// binding rule parameter's ID field to be set as this will be generated by
-// Consul while processing the request.
-func (a *ACL) BindingRuleCreate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) {
-	if rule.ID != "" {
-		return nil, nil, fmt.Errorf("Cannot specify an ID in Binding Rule Creation")
-	}
-
-	r := a.c.newRequest("PUT", "/v1/acl/binding-rule")
-	r.setWriteOptions(q)
-	r.obj = rule
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out ACLBindingRule
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, wm, nil
-}
-
-// BindingRuleUpdate updates a binding rule. The ID field of the role binding
-// rule parameter must be set to an existing binding rule ID.
-func (a *ACL) BindingRuleUpdate(rule *ACLBindingRule, q *WriteOptions) (*ACLBindingRule, *WriteMeta, error) {
-	if rule.ID == "" {
-		return nil, nil, fmt.Errorf("Must specify an ID in Binding Rule Update")
-	}
-
-	r := a.c.newRequest("PUT", "/v1/acl/binding-rule/"+rule.ID)
-	r.setWriteOptions(q)
-	r.obj = rule
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out ACLBindingRule
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, wm, nil
-}
-
-// BindingRuleDelete deletes a binding rule given its ID.
-func (a *ACL) BindingRuleDelete(bindingRuleID string, q *WriteOptions) (*WriteMeta, error) {
-	r := a.c.newRequest("DELETE", "/v1/acl/binding-rule/"+bindingRuleID)
-	r.setWriteOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	return wm, nil
-}
-
-// BindingRuleRead retrieves the binding rule details. Returns nil if not found.
-func (a *ACL) BindingRuleRead(bindingRuleID string, q *QueryOptions) (*ACLBindingRule, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/binding-rule/"+bindingRuleID)
-	r.setQueryOptions(q)
-	found, rtt, resp, err := requireNotFoundOrOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	if !found {
-		return nil, qm, nil
-	}
-
-	var out ACLBindingRule
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, qm, nil
-}
-
-// BindingRuleList retrieves a listing of all binding rules.
-func (a *ACL) BindingRuleList(methodName string, q *QueryOptions) ([]*ACLBindingRule, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/acl/binding-rules")
-	if methodName != "" {
-		r.params.Set("authmethod", methodName)
-	}
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var entries []*ACLBindingRule
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-// Login is used to exchange auth method credentials for a newly-minted Consul Token.
-func (a *ACL) Login(auth *ACLLoginParams, q *WriteOptions) (*ACLToken, *WriteMeta, error) {
-	r := a.c.newRequest("POST", "/v1/acl/login")
-	r.setWriteOptions(q)
-	r.obj = auth
-
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out ACLToken
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return &out, wm, nil
-}
-
-// Logout is used to destroy a Consul Token created via Login().
-func (a *ACL) Logout(q *WriteOptions) (*WriteMeta, error) {
-	r := a.c.newRequest("POST", "/v1/acl/logout")
-	r.setWriteOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	return wm, nil
-}
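
For orientation, a minimal sketch of how the Login/Logout pair above might be driven. It assumes a reachable agent with ACLs enabled and the ACL() accessor on Client from the same file; the auth method name and bearer token are hypothetical placeholders, and the ACLLoginParams field names (AuthMethod, BearerToken) are assumed from elsewhere in this package.

	package main

	import (
		"fmt"
		"log"

		"github.com/hashicorp/consul/api"
	)

	func main() {
		client, err := api.NewClient(api.DefaultConfig())
		if err != nil {
			log.Fatal(err)
		}
		acl := client.ACL()

		// Exchange auth-method credentials for a newly minted token.
		token, _, err := acl.Login(&api.ACLLoginParams{
			AuthMethod:  "my-auth-method", // hypothetical auth method name
			BearerToken: "redacted-jwt",   // hypothetical credential
		}, nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("minted token accessor:", token.AccessorID)

		// Logout destroys the token used to make the request, so pass the
		// minted secret as the request token.
		if _, err := acl.Logout(&api.WriteOptions{Token: token.SecretID}); err != nil {
			log.Fatal(err)
		}
	}
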
diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go
deleted file mode 100644
index 1ef3312..0000000
--- a/vendor/github.com/hashicorp/consul/api/agent.go
+++ /dev/null
@@ -1,984 +0,0 @@
-package api
-
-import (
-	"bufio"
-	"bytes"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-)
-
-// ServiceKind is the kind of service being registered.
-type ServiceKind string
-
-const (
-	// ServiceKindTypical is a typical, classic Consul service. This is
-	// represented by the absence of a value. This was chosen for ease of
-	// backwards compatibility: existing services in the catalog would
-	// default to the typical service.
-	ServiceKindTypical ServiceKind = ""
-
-	// ServiceKindConnectProxy is a proxy for the Connect feature. This
-	// service proxies another service within Consul and speaks the connect
-	// protocol.
-	ServiceKindConnectProxy ServiceKind = "connect-proxy"
-
-	// ServiceKindMeshGateway is a Mesh Gateway for the Connect feature. This
-	// service will proxy connections based off the SNI header set by other
-	// connect proxies
-	ServiceKindMeshGateway ServiceKind = "mesh-gateway"
-)
-
-// UpstreamDestType is the type of upstream discovery mechanism.
-type UpstreamDestType string
-
-const (
-	// UpstreamDestTypeService discovers instances via healthy service lookup.
-	UpstreamDestTypeService UpstreamDestType = "service"
-
-	// UpstreamDestTypePreparedQuery discovers instances via prepared query
-	// execution.
-	UpstreamDestTypePreparedQuery UpstreamDestType = "prepared_query"
-)
-
-// AgentCheck represents a check known to the agent
-type AgentCheck struct {
-	Node        string
-	CheckID     string
-	Name        string
-	Status      string
-	Notes       string
-	Output      string
-	ServiceID   string
-	ServiceName string
-	Definition  HealthCheckDefinition
-}
-
-// AgentWeights represent optional weights for a service
-type AgentWeights struct {
-	Passing int
-	Warning int
-}
-
-// AgentService represents a service known to the agent
-type AgentService struct {
-	Kind              ServiceKind `json:",omitempty"`
-	ID                string
-	Service           string
-	Tags              []string
-	Meta              map[string]string
-	Port              int
-	Address           string
-	TaggedAddresses   map[string]ServiceAddress `json:",omitempty"`
-	Weights           AgentWeights
-	EnableTagOverride bool
-	CreateIndex       uint64                          `json:",omitempty" bexpr:"-"`
-	ModifyIndex       uint64                          `json:",omitempty" bexpr:"-"`
-	ContentHash       string                          `json:",omitempty" bexpr:"-"`
-	Proxy             *AgentServiceConnectProxyConfig `json:",omitempty"`
-	Connect           *AgentServiceConnect            `json:",omitempty"`
-}
-
-// AgentServiceChecksInfo returns information about a Service and its checks
-type AgentServiceChecksInfo struct {
-	AggregatedStatus string
-	Service          *AgentService
-	Checks           HealthChecks
-}
-
-// AgentServiceConnect represents the Connect configuration of a service.
-type AgentServiceConnect struct {
-	Native         bool                      `json:",omitempty"`
-	SidecarService *AgentServiceRegistration `json:",omitempty" bexpr:"-"`
-}
-
-// AgentServiceConnectProxyConfig is the proxy configuration in a connect-proxy
-// ServiceDefinition or response.
-type AgentServiceConnectProxyConfig struct {
-	DestinationServiceName string                 `json:",omitempty"`
-	DestinationServiceID   string                 `json:",omitempty"`
-	LocalServiceAddress    string                 `json:",omitempty"`
-	LocalServicePort       int                    `json:",omitempty"`
-	Config                 map[string]interface{} `json:",omitempty" bexpr:"-"`
-	Upstreams              []Upstream             `json:",omitempty"`
-	MeshGateway            MeshGatewayConfig      `json:",omitempty"`
-}
-
-// AgentMember represents a cluster member known to the agent
-type AgentMember struct {
-	Name        string
-	Addr        string
-	Port        uint16
-	Tags        map[string]string
-	Status      int
-	ProtocolMin uint8
-	ProtocolMax uint8
-	ProtocolCur uint8
-	DelegateMin uint8
-	DelegateMax uint8
-	DelegateCur uint8
-}
-
-// AllSegments is used to select for all segments in MembersOpts.
-const AllSegments = "_all"
-
-// MembersOpts is used for querying member information.
-type MembersOpts struct {
-	// WAN is whether to show members from the WAN.
-	WAN bool
-
-	// Segment is the LAN segment to show members for. Setting this to the
-	// AllSegments value above will show members in all segments.
-	Segment string
-}
-
-// AgentServiceRegistration is used to register a new service
-type AgentServiceRegistration struct {
-	Kind              ServiceKind               `json:",omitempty"`
-	ID                string                    `json:",omitempty"`
-	Name              string                    `json:",omitempty"`
-	Tags              []string                  `json:",omitempty"`
-	Port              int                       `json:",omitempty"`
-	Address           string                    `json:",omitempty"`
-	TaggedAddresses   map[string]ServiceAddress `json:",omitempty"`
-	EnableTagOverride bool                      `json:",omitempty"`
-	Meta              map[string]string         `json:",omitempty"`
-	Weights           *AgentWeights             `json:",omitempty"`
-	Check             *AgentServiceCheck
-	Checks            AgentServiceChecks
-	Proxy             *AgentServiceConnectProxyConfig `json:",omitempty"`
-	Connect           *AgentServiceConnect            `json:",omitempty"`
-}
-
-// AgentCheckRegistration is used to register a new check
-type AgentCheckRegistration struct {
-	ID        string `json:",omitempty"`
-	Name      string `json:",omitempty"`
-	Notes     string `json:",omitempty"`
-	ServiceID string `json:",omitempty"`
-	AgentServiceCheck
-}
-
-// AgentServiceCheck is used to define a node or service level check
-type AgentServiceCheck struct {
-	CheckID           string              `json:",omitempty"`
-	Name              string              `json:",omitempty"`
-	Args              []string            `json:"ScriptArgs,omitempty"`
-	DockerContainerID string              `json:",omitempty"`
-	Shell             string              `json:",omitempty"` // Only supported for Docker.
-	Interval          string              `json:",omitempty"`
-	Timeout           string              `json:",omitempty"`
-	TTL               string              `json:",omitempty"`
-	HTTP              string              `json:",omitempty"`
-	Header            map[string][]string `json:",omitempty"`
-	Method            string              `json:",omitempty"`
-	TCP               string              `json:",omitempty"`
-	Status            string              `json:",omitempty"`
-	Notes             string              `json:",omitempty"`
-	TLSSkipVerify     bool                `json:",omitempty"`
-	GRPC              string              `json:",omitempty"`
-	GRPCUseTLS        bool                `json:",omitempty"`
-	AliasNode         string              `json:",omitempty"`
-	AliasService      string              `json:",omitempty"`
-
-	// In Consul 0.7 and later, checks that are associated with a service
-	// may also contain this optional DeregisterCriticalServiceAfter field,
-	// which is a timeout in the same Go time format as Interval and TTL. If
-	// a check is in the critical state for more than this configured value,
-	// then its associated service (and all of its associated checks) will
-	// automatically be deregistered.
-	DeregisterCriticalServiceAfter string `json:",omitempty"`
-}
-type AgentServiceChecks []*AgentServiceCheck
-
-// AgentToken is used when updating ACL tokens for an agent.
-type AgentToken struct {
-	Token string
-}
-
-// MetricsInfo is used to store different types of metric values from the agent.
-type MetricsInfo struct {
-	Timestamp string
-	Gauges    []GaugeValue
-	Points    []PointValue
-	Counters  []SampledValue
-	Samples   []SampledValue
-}
-
-// GaugeValue stores one value that is updated as time goes on, such as
-// the amount of memory allocated.
-type GaugeValue struct {
-	Name   string
-	Value  float32
-	Labels map[string]string
-}
-
-// PointValue holds a series of points for a metric.
-type PointValue struct {
-	Name   string
-	Points []float32
-}
-
-// SampledValue stores info about a metric that is incremented over time,
-// such as the number of requests to an HTTP endpoint.
-type SampledValue struct {
-	Name   string
-	Count  int
-	Sum    float64
-	Min    float64
-	Max    float64
-	Mean   float64
-	Stddev float64
-	Labels map[string]string
-}
-
-// AgentAuthorizeParams are the request parameters for authorizing a request.
-type AgentAuthorizeParams struct {
-	Target           string
-	ClientCertURI    string
-	ClientCertSerial string
-}
-
-// AgentAuthorize is the response structure for Connect authorization.
-type AgentAuthorize struct {
-	Authorized bool
-	Reason     string
-}
-
-// ConnectProxyConfig is the response structure for agent-local proxy
-// configuration.
-type ConnectProxyConfig struct {
-	ProxyServiceID    string
-	TargetServiceID   string
-	TargetServiceName string
-	ContentHash       string
-	Config            map[string]interface{} `bexpr:"-"`
-	Upstreams         []Upstream
-}
-
-// Upstream is the response structure for a proxy upstream configuration.
-type Upstream struct {
-	DestinationType      UpstreamDestType `json:",omitempty"`
-	DestinationNamespace string           `json:",omitempty"`
-	DestinationName      string
-	Datacenter           string                 `json:",omitempty"`
-	LocalBindAddress     string                 `json:",omitempty"`
-	LocalBindPort        int                    `json:",omitempty"`
-	Config               map[string]interface{} `json:",omitempty" bexpr:"-"`
-	MeshGateway          MeshGatewayConfig      `json:",omitempty"`
-}
-
-// Agent can be used to query the Agent endpoints
-type Agent struct {
-	c *Client
-
-	// cache the node name
-	nodeName string
-}
-
-// Agent returns a handle to the agent endpoints
-func (c *Client) Agent() *Agent {
-	return &Agent{c: c}
-}
-
-// Self is used to query the agent we are speaking to for
-// information about itself
-func (a *Agent) Self() (map[string]map[string]interface{}, error) {
-	r := a.c.newRequest("GET", "/v1/agent/self")
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out map[string]map[string]interface{}
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// Host is used to retrieve information about the host the
-// agent is running on, such as CPU, memory, and disk. Requires
-// an operator:read ACL token.
-func (a *Agent) Host() (map[string]interface{}, error) {
-	r := a.c.newRequest("GET", "/v1/agent/host")
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out map[string]interface{}
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// Metrics is used to query the agent we are speaking to for
-// its current internal metric data
-func (a *Agent) Metrics() (*MetricsInfo, error) {
-	r := a.c.newRequest("GET", "/v1/agent/metrics")
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out *MetricsInfo
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// Reload triggers a configuration reload for the agent we are connected to.
-func (a *Agent) Reload() error {
-	r := a.c.newRequest("PUT", "/v1/agent/reload")
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// NodeName is used to get the node name of the agent
-func (a *Agent) NodeName() (string, error) {
-	if a.nodeName != "" {
-		return a.nodeName, nil
-	}
-	info, err := a.Self()
-	if err != nil {
-		return "", err
-	}
-	name := info["Config"]["NodeName"].(string)
-	a.nodeName = name
-	return name, nil
-}
-
-// Checks returns the locally registered checks
-func (a *Agent) Checks() (map[string]*AgentCheck, error) {
-	return a.ChecksWithFilter("")
-}
-
-// ChecksWithFilter returns a subset of the locally registered checks that match
-// the given filter expression
-func (a *Agent) ChecksWithFilter(filter string) (map[string]*AgentCheck, error) {
-	r := a.c.newRequest("GET", "/v1/agent/checks")
-	r.filterQuery(filter)
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out map[string]*AgentCheck
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// Services returns the locally registered services
-func (a *Agent) Services() (map[string]*AgentService, error) {
-	return a.ServicesWithFilter("")
-}
-
-// ServicesWithFilter returns a subset of the locally registered services that match
-// the given filter expression
-func (a *Agent) ServicesWithFilter(filter string) (map[string]*AgentService, error) {
-	r := a.c.newRequest("GET", "/v1/agent/services")
-	r.filterQuery(filter)
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out map[string]*AgentService
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-
-	return out, nil
-}
-
-// AgentHealthServiceByID returns, for a given serviceID, the aggregated health status and the service definition, or an error if any:
-// - If the service is not found, it returns (critical, nil, nil)
-// - If the service is found, it returns (critical|passing|warning, *AgentServiceChecksInfo, nil)
-// - In all other cases, it returns an error
-func (a *Agent) AgentHealthServiceByID(serviceID string) (string, *AgentServiceChecksInfo, error) {
-	path := fmt.Sprintf("/v1/agent/health/service/id/%v", url.PathEscape(serviceID))
-	r := a.c.newRequest("GET", path)
-	r.params.Add("format", "json")
-	r.header.Set("Accept", "application/json")
-	_, resp, err := a.c.doRequest(r)
-	if err != nil {
-		return "", nil, err
-	}
-	defer resp.Body.Close()
-	// Service not Found
-	if resp.StatusCode == http.StatusNotFound {
-		return HealthCritical, nil, nil
-	}
-	var out *AgentServiceChecksInfo
-	if err := decodeBody(resp, &out); err != nil {
-		return HealthCritical, out, err
-	}
-	switch resp.StatusCode {
-	case http.StatusOK:
-		return HealthPassing, out, nil
-	case http.StatusTooManyRequests:
-		return HealthWarning, out, nil
-	case http.StatusServiceUnavailable:
-		return HealthCritical, out, nil
-	}
-	return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path)
-}
-
-// AgentHealthServiceByName returns, for a given service name, the aggregated health status of all services
-// having the specified name.
-// - If no service is found, it returns (critical, [], nil)
-// - If services are found, it returns (critical|passing|warning, []AgentServiceChecksInfo, nil)
-// - In all other cases, it returns an error
-func (a *Agent) AgentHealthServiceByName(service string) (string, []AgentServiceChecksInfo, error) {
-	path := fmt.Sprintf("/v1/agent/health/service/name/%v", url.PathEscape(service))
-	r := a.c.newRequest("GET", path)
-	r.params.Add("format", "json")
-	r.header.Set("Accept", "application/json")
-	_, resp, err := a.c.doRequest(r)
-	if err != nil {
-		return "", nil, err
-	}
-	defer resp.Body.Close()
-	// Service not Found
-	if resp.StatusCode == http.StatusNotFound {
-		return HealthCritical, nil, nil
-	}
-	var out []AgentServiceChecksInfo
-	if err := decodeBody(resp, &out); err != nil {
-		return HealthCritical, out, err
-	}
-	switch resp.StatusCode {
-	case http.StatusOK:
-		return HealthPassing, out, nil
-	case http.StatusTooManyRequests:
-		return HealthWarning, out, nil
-	case http.StatusServiceUnavailable:
-		return HealthCritical, out, nil
-	}
-	return HealthCritical, out, fmt.Errorf("Unexpected Error Code %v for %s", resp.StatusCode, path)
-}
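
A short usage sketch for the two aggregated-health helpers above; the service name "web" is a placeholder, and the client is built with DefaultConfig/NewClient from api.go further down in this diff.

	package main

	import (
		"fmt"
		"log"

		"github.com/hashicorp/consul/api"
	)

	func main() {
		client, err := api.NewClient(api.DefaultConfig())
		if err != nil {
			log.Fatal(err)
		}

		// Aggregated status across every local instance of the service.
		status, instances, err := client.Agent().AgentHealthServiceByName("web")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("service web is %s across %d instance(s)\n", status, len(instances))
		for _, info := range instances {
			fmt.Println(info.Service.ID, "=>", info.AggregatedStatus)
		}
	}
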
-
-// Service returns a locally registered service instance and allows for
-// hash-based blocking.
-//
-// Note that this uses an unconventional blocking mechanism since it's
-// agent-local state. That means there is no persistent raft index so we block
-// based on object hash instead.
-func (a *Agent) Service(serviceID string, q *QueryOptions) (*AgentService, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/agent/service/"+serviceID)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out *AgentService
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return out, qm, nil
-}
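
The hash-based blocking described above pairs QueryOptions.WaitHash with QueryMeta.LastContentHash (both defined in api.go below). A rough sketch of a watch loop, with a hypothetical service ID:

	package main

	import (
		"fmt"
		"log"
		"time"

		"github.com/hashicorp/consul/api"
	)

	func main() {
		client, err := api.NewClient(api.DefaultConfig())
		if err != nil {
			log.Fatal(err)
		}
		agent := client.Agent()

		var hash string
		for {
			// Block until the local service definition's hash changes, or
			// the wait time elapses, then report the current state.
			svc, qm, err := agent.Service("web-1", &api.QueryOptions{
				WaitHash: hash,
				WaitTime: 30 * time.Second,
			})
			if err != nil {
				log.Fatal(err)
			}
			hash = qm.LastContentHash
			fmt.Printf("service %s on port %d (hash %s)\n", svc.Service, svc.Port, hash)
		}
	}
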
-
-// Members returns the known gossip members. The WAN
-// flag can be used to query a server for WAN members.
-func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
-	r := a.c.newRequest("GET", "/v1/agent/members")
-	if wan {
-		r.params.Set("wan", "1")
-	}
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out []*AgentMember
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// MembersOpts returns the known gossip members and can be passed
-// additional options for WAN/segment filtering.
-func (a *Agent) MembersOpts(opts MembersOpts) ([]*AgentMember, error) {
-	r := a.c.newRequest("GET", "/v1/agent/members")
-	r.params.Set("segment", opts.Segment)
-	if opts.WAN {
-		r.params.Set("wan", "1")
-	}
-
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out []*AgentMember
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// ServiceRegister is used to register a new service with
-// the local agent
-func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
-	r := a.c.newRequest("PUT", "/v1/agent/service/register")
-	r.obj = service
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// ServiceDeregister is used to deregister a service with
-// the local agent
-func (a *Agent) ServiceDeregister(serviceID string) error {
-	r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
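
A compact sketch of ServiceRegister/ServiceDeregister above, using the AgentServiceRegistration and AgentServiceCheck fields defined earlier; the service name, port, and health URL are placeholders.

	package main

	import (
		"log"

		"github.com/hashicorp/consul/api"
	)

	func main() {
		client, err := api.NewClient(api.DefaultConfig())
		if err != nil {
			log.Fatal(err)
		}
		agent := client.Agent()

		reg := &api.AgentServiceRegistration{
			ID:   "web-1",
			Name: "web",
			Port: 8080,
			Check: &api.AgentServiceCheck{
				HTTP:                           "http://127.0.0.1:8080/health",
				Interval:                       "10s",
				Timeout:                        "2s",
				DeregisterCriticalServiceAfter: "10m",
			},
		}
		if err := agent.ServiceRegister(reg); err != nil {
			log.Fatal(err)
		}
		// ... serve traffic, then clean up on shutdown ...
		if err := agent.ServiceDeregister("web-1"); err != nil {
			log.Fatal(err)
		}
	}
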
-
-// PassTTL is used to set a TTL check to the passing state.
-//
-// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
-// The client interface will be removed in 0.8 or changed to use
-// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
-func (a *Agent) PassTTL(checkID, note string) error {
-	return a.updateTTL(checkID, note, "pass")
-}
-
-// WarnTTL is used to set a TTL check to the warning state.
-//
-// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
-// The client interface will be removed in 0.8 or changed to use
-// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
-func (a *Agent) WarnTTL(checkID, note string) error {
-	return a.updateTTL(checkID, note, "warn")
-}
-
-// FailTTL is used to set a TTL check to the failing state.
-//
-// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
-// The client interface will be removed in 0.8 or changed to use
-// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
-func (a *Agent) FailTTL(checkID, note string) error {
-	return a.updateTTL(checkID, note, "fail")
-}
-
-// updateTTL is used to update the TTL of a check. This is the internal
-// method that uses the old API that's present in Consul versions prior to
-// 0.6.4. Since Consul didn't have an analogous "update" API before, it seemed
-// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below,
-// but keep the old Pass/Warn/Fail methods using the old API under the hood.
-//
-// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
-// The client interface will be removed in 0.8 and the server endpoints will
-// be removed in 0.9.
-func (a *Agent) updateTTL(checkID, note, status string) error {
-	switch status {
-	case "pass":
-	case "warn":
-	case "fail":
-	default:
-		return fmt.Errorf("Invalid status: %s", status)
-	}
-	endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
-	r := a.c.newRequest("PUT", endpoint)
-	r.params.Set("note", note)
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// checkUpdate is the payload for a PUT for a check update.
-type checkUpdate struct {
-	// Status is one of the api.Health* states: HealthPassing
-	// ("passing"), HealthWarning ("warning"), or HealthCritical
-	// ("critical").
-	Status string
-
-	// Output is the information to post to the UI for operators as the
-	// output of the process that decided to hit the TTL check. This is
-	// different from the note field that's associated with the check
-	// itself.
-	Output string
-}
-
-// UpdateTTL is used to update the TTL of a check. This uses the newer API
-// that was introduced in Consul 0.6.4 and later. We translate the old status
-// strings for compatibility (though a newer version of Consul will still be
-// required to use this API).
-func (a *Agent) UpdateTTL(checkID, output, status string) error {
-	switch status {
-	case "pass", HealthPassing:
-		status = HealthPassing
-	case "warn", HealthWarning:
-		status = HealthWarning
-	case "fail", HealthCritical:
-		status = HealthCritical
-	default:
-		return fmt.Errorf("Invalid status: %s", status)
-	}
-
-	endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID)
-	r := a.c.newRequest("PUT", endpoint)
-	r.obj = &checkUpdate{
-		Status: status,
-		Output: output,
-	}
-
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// CheckRegister is used to register a new check with
-// the local agent
-func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
-	r := a.c.newRequest("PUT", "/v1/agent/check/register")
-	r.obj = check
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
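
Putting CheckRegister and UpdateTTL together, a TTL check might be kept alive roughly as below; the check ID and refresh cadence are placeholders, and the HealthPassing constant is assumed from the package's health definitions.

	package main

	import (
		"log"
		"time"

		"github.com/hashicorp/consul/api"
	)

	func main() {
		client, err := api.NewClient(api.DefaultConfig())
		if err != nil {
			log.Fatal(err)
		}
		agent := client.Agent()

		// Register a TTL check; it turns critical if it is not refreshed in time.
		err = agent.CheckRegister(&api.AgentCheckRegistration{
			ID:   "worker-ttl",
			Name: "worker heartbeat",
			AgentServiceCheck: api.AgentServiceCheck{
				TTL: "15s",
			},
		})
		if err != nil {
			log.Fatal(err)
		}

		// Refresh the check well inside the TTL window.
		for range time.Tick(5 * time.Second) {
			if err := agent.UpdateTTL("worker-ttl", "still working", api.HealthPassing); err != nil {
				log.Println("ttl update failed:", err)
			}
		}
	}
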
-
-// CheckDeregister is used to deregister a check with
-// the local agent
-func (a *Agent) CheckDeregister(checkID string) error {
-	r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// Join is used to instruct the agent to attempt a join to
-// another cluster member
-func (a *Agent) Join(addr string, wan bool) error {
-	r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
-	if wan {
-		r.params.Set("wan", "1")
-	}
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// Leave is used to have the agent gracefully leave the cluster and shutdown
-func (a *Agent) Leave() error {
-	r := a.c.newRequest("PUT", "/v1/agent/leave")
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// ForceLeave is used to have the agent eject a failed node
-func (a *Agent) ForceLeave(node string) error {
-	r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// ConnectAuthorize is used to authorize an incoming connection
-// to a natively integrated Connect service.
-func (a *Agent) ConnectAuthorize(auth *AgentAuthorizeParams) (*AgentAuthorize, error) {
-	r := a.c.newRequest("POST", "/v1/agent/connect/authorize")
-	r.obj = auth
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out AgentAuthorize
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return &out, nil
-}
-
-// ConnectCARoots returns the list of roots.
-func (a *Agent) ConnectCARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/agent/connect/ca/roots")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out CARootList
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return &out, qm, nil
-}
-
-// ConnectCALeaf gets the leaf certificate for the given service ID.
-func (a *Agent) ConnectCALeaf(serviceID string, q *QueryOptions) (*LeafCert, *QueryMeta, error) {
-	r := a.c.newRequest("GET", "/v1/agent/connect/ca/leaf/"+serviceID)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out LeafCert
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return &out, qm, nil
-}
-
-// EnableServiceMaintenance toggles service maintenance mode on
-// for the given service ID.
-func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
-	r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
-	r.params.Set("enable", "true")
-	r.params.Set("reason", reason)
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// DisableServiceMaintenance toggles service maintenance mode off
-// for the given service ID.
-func (a *Agent) DisableServiceMaintenance(serviceID string) error {
-	r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
-	r.params.Set("enable", "false")
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// EnableNodeMaintenance toggles node maintenance mode on for the
-// agent we are connected to.
-func (a *Agent) EnableNodeMaintenance(reason string) error {
-	r := a.c.newRequest("PUT", "/v1/agent/maintenance")
-	r.params.Set("enable", "true")
-	r.params.Set("reason", reason)
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// DisableNodeMaintenance toggles node maintenance mode off for the
-// agent we are connected to.
-func (a *Agent) DisableNodeMaintenance() error {
-	r := a.c.newRequest("PUT", "/v1/agent/maintenance")
-	r.params.Set("enable", "false")
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// Monitor returns a channel which will receive streaming logs from the agent.
-// Providing a non-nil stopCh can be used to close the connection and stop the
-// log stream. An empty string will be sent down the given channel when there's
-// nothing left to stream, after which the caller should close the stopCh.
-func (a *Agent) Monitor(loglevel string, stopCh <-chan struct{}, q *QueryOptions) (chan string, error) {
-	r := a.c.newRequest("GET", "/v1/agent/monitor")
-	r.setQueryOptions(q)
-	if loglevel != "" {
-		r.params.Add("loglevel", loglevel)
-	}
-	_, resp, err := requireOK(a.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-
-	logCh := make(chan string, 64)
-	go func() {
-		defer resp.Body.Close()
-
-		scanner := bufio.NewScanner(resp.Body)
-		for {
-			select {
-			case <-stopCh:
-				close(logCh)
-				return
-			default:
-			}
-			if scanner.Scan() {
-				// An empty string signals to the caller that
-				// the scan is done, so make sure we only emit
-				// that when the scanner says it's done, not if
-				// we happen to ingest an empty line.
-				if text := scanner.Text(); text != "" {
-					logCh <- text
-				} else {
-					logCh <- " "
-				}
-			} else {
-				logCh <- ""
-			}
-		}
-	}()
-
-	return logCh, nil
-}
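
A consumer of Monitor above typically looks like the following sketch; per the comment, an empty string marks the end of the stream, after which the caller closes stopCh.

	package main

	import (
		"fmt"
		"log"

		"github.com/hashicorp/consul/api"
	)

	func main() {
		client, err := api.NewClient(api.DefaultConfig())
		if err != nil {
			log.Fatal(err)
		}

		stopCh := make(chan struct{})
		logCh, err := client.Agent().Monitor("info", stopCh, nil)
		if err != nil {
			log.Fatal(err)
		}

		for line := range logCh {
			if line == "" {
				// Nothing left to stream; tell the streaming goroutine to stop.
				close(stopCh)
				break
			}
			fmt.Println(line)
		}
	}
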
-
-// UpdateACLToken updates the agent's "acl_token". See updateToken for more
-// details.
-//
-// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateDefaultACLToken for v1.4.3 and above
-func (a *Agent) UpdateACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
-	return a.updateToken("acl_token", token, q)
-}
-
-// UpdateACLAgentToken updates the agent's "acl_agent_token". See updateToken
-// for more details.
-//
-// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentACLToken for v1.4.3 and above
-func (a *Agent) UpdateACLAgentToken(token string, q *WriteOptions) (*WriteMeta, error) {
-	return a.updateToken("acl_agent_token", token, q)
-}
-
-// UpdateACLAgentMasterToken updates the agent's "acl_agent_master_token". See
-// updateToken for more details.
-//
-// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateAgentMasterACLToken for v1.4.3 and above
-func (a *Agent) UpdateACLAgentMasterToken(token string, q *WriteOptions) (*WriteMeta, error) {
-	return a.updateToken("acl_agent_master_token", token, q)
-}
-
-// UpdateACLReplicationToken updates the agent's "acl_replication_token". See
-// updateToken for more details.
-//
-// DEPRECATED (ACL-Legacy-Compat) - Prefer UpdateReplicationACLToken for v1.4.3 and above
-func (a *Agent) UpdateACLReplicationToken(token string, q *WriteOptions) (*WriteMeta, error) {
-	return a.updateToken("acl_replication_token", token, q)
-}
-
-// UpdateDefaultACLToken updates the agent's "default" token. See updateToken
-// for more details
-func (a *Agent) UpdateDefaultACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
-	return a.updateTokenFallback("default", "acl_token", token, q)
-}
-
-// UpdateAgentACLToken updates the agent's "agent" token. See updateToken
-// for more details
-func (a *Agent) UpdateAgentACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
-	return a.updateTokenFallback("agent", "acl_agent_token", token, q)
-}
-
-// UpdateAgentMasterACLToken updates the agent's "agent_master" token. See updateToken
-// for more details
-func (a *Agent) UpdateAgentMasterACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
-	return a.updateTokenFallback("agent_master", "acl_agent_master_token", token, q)
-}
-
-// UpdateReplicationACLToken updates the agent's "replication" token. See updateToken
-// for more details
-func (a *Agent) UpdateReplicationACLToken(token string, q *WriteOptions) (*WriteMeta, error) {
-	return a.updateTokenFallback("replication", "acl_replication_token", token, q)
-}
-
-// updateToken can be used to update one of an agent's ACL tokens after the agent has
-// started. The tokens may not be persisted, so they will need to be updated again if
-// the agent is restarted, unless the agent is configured to persist them.
-func (a *Agent) updateToken(target, token string, q *WriteOptions) (*WriteMeta, error) {
-	meta, _, err := a.updateTokenOnce(target, token, q)
-	return meta, err
-}
-
-func (a *Agent) updateTokenFallback(target, fallback, token string, q *WriteOptions) (*WriteMeta, error) {
-	meta, status, err := a.updateTokenOnce(target, token, q)
-	if err != nil && status == 404 {
-		meta, _, err = a.updateTokenOnce(fallback, token, q)
-	}
-	return meta, err
-}
-
-func (a *Agent) updateTokenOnce(target, token string, q *WriteOptions) (*WriteMeta, int, error) {
-	r := a.c.newRequest("PUT", fmt.Sprintf("/v1/agent/token/%s", target))
-	r.setWriteOptions(q)
-	r.obj = &AgentToken{Token: token}
-
-	rtt, resp, err := a.c.doRequest(r)
-	if err != nil {
-		return nil, 0, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-
-	if resp.StatusCode != 200 {
-		var buf bytes.Buffer
-		io.Copy(&buf, resp.Body)
-		return wm, resp.StatusCode, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
-	}
-
-	return wm, resp.StatusCode, nil
-}
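
The token-update helpers above reduce to a single call in practice. A sketch with a hypothetical token value; as noted in the comment above, the update may not survive an agent restart unless the agent is configured to persist tokens.

	package main

	import (
		"log"

		"github.com/hashicorp/consul/api"
	)

	func main() {
		client, err := api.NewClient(api.DefaultConfig())
		if err != nil {
			log.Fatal(err)
		}

		// Set the agent's "default" token, falling back to the legacy
		// acl_token name on agents older than 1.4.3.
		_, err = client.Agent().UpdateDefaultACLToken(
			"00000000-0000-0000-0000-000000000000", // hypothetical token
			nil,
		)
		if err != nil {
			log.Fatal(err)
		}
	}
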
diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go
deleted file mode 100644
index b624b3c..0000000
--- a/vendor/github.com/hashicorp/consul/api/api.go
+++ /dev/null
@@ -1,973 +0,0 @@
-package api
-
-import (
-	"bytes"
-	"context"
-	"crypto/tls"
-	"encoding/json"
-	"fmt"
-	"io"
-	"io/ioutil"
-	"log"
-	"net"
-	"net/http"
-	"net/url"
-	"os"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/hashicorp/go-cleanhttp"
-	"github.com/hashicorp/go-rootcerts"
-)
-
-const (
-	// HTTPAddrEnvName defines an environment variable name which sets
-	// the HTTP address if there is no -http-addr specified.
-	HTTPAddrEnvName = "CONSUL_HTTP_ADDR"
-
-	// HTTPTokenEnvName defines an environment variable name which sets
-	// the HTTP token.
-	HTTPTokenEnvName = "CONSUL_HTTP_TOKEN"
-
-	// HTTPTokenFileEnvName defines an environment variable name which sets
-	// the HTTP token file.
-	HTTPTokenFileEnvName = "CONSUL_HTTP_TOKEN_FILE"
-
-	// HTTPAuthEnvName defines an environment variable name which sets
-	// the HTTP authentication header.
-	HTTPAuthEnvName = "CONSUL_HTTP_AUTH"
-
-	// HTTPSSLEnvName defines an environment variable name which sets
-	// whether or not to use HTTPS.
-	HTTPSSLEnvName = "CONSUL_HTTP_SSL"
-
-	// HTTPCAFile defines an environment variable name which sets the
-	// CA file to use for talking to Consul over TLS.
-	HTTPCAFile = "CONSUL_CACERT"
-
-	// HTTPCAPath defines an environment variable name which sets the
-	// path to a directory of CA certs to use for talking to Consul over TLS.
-	HTTPCAPath = "CONSUL_CAPATH"
-
-	// HTTPClientCert defines an environment variable name which sets the
-	// client cert file to use for talking to Consul over TLS.
-	HTTPClientCert = "CONSUL_CLIENT_CERT"
-
-	// HTTPClientKey defines an environment variable name which sets the
-	// client key file to use for talking to Consul over TLS.
-	HTTPClientKey = "CONSUL_CLIENT_KEY"
-
-	// HTTPTLSServerName defines an environment variable name which sets the
-	// server name to use as the SNI host when connecting via TLS
-	HTTPTLSServerName = "CONSUL_TLS_SERVER_NAME"
-
-	// HTTPSSLVerifyEnvName defines an environment variable name which sets
-	// whether or not to disable certificate checking.
-	HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY"
-
-	// GRPCAddrEnvName defines an environment variable name which sets the gRPC
-	// address for consul connect envoy. Note this isn't actually used by the api
-	// client in this package but is defined here for consistency with all the
-	// other ENV names we use.
-	GRPCAddrEnvName = "CONSUL_GRPC_ADDR"
-)
-
-// QueryOptions are used to parameterize a query
-type QueryOptions struct {
-	// Providing a datacenter overwrites the DC provided
-	// by the Config
-	Datacenter string
-
-	// AllowStale allows any Consul server (non-leader) to service
-	// a read. This allows for lower latency and higher throughput
-	AllowStale bool
-
-	// RequireConsistent forces the read to be fully consistent.
-	// This is more expensive but prevents ever performing a stale
-	// read.
-	RequireConsistent bool
-
-	// UseCache requests that the agent cache results locally. See
-	// https://www.consul.io/api/features/caching.html for more details on the
-	// semantics.
-	UseCache bool
-
-	// MaxAge limits how old a cached value will be returned if UseCache is true.
-	// If there is a cached response that is older than the MaxAge, it is treated
-	// as a cache miss and a new fetch invoked. If the fetch fails, the error is
-	// returned. Clients that wish to allow for stale results on error can set
-	// StaleIfError to a longer duration to change this behavior. It is ignored
-	// if the endpoint supports background refresh caching. See
-	// https://www.consul.io/api/features/caching.html for more details.
-	MaxAge time.Duration
-
-	// StaleIfError specifies how stale the client will accept a cached response
-	// if the servers are unavailable to fetch a fresh one. Only makes sense when
-	// UseCache is true and MaxAge is set to a lower, non-zero value. It is
-	// ignored if the endpoint supports background refresh caching. See
-	// https://www.consul.io/api/features/caching.html for more details.
-	StaleIfError time.Duration
-
-	// WaitIndex is used to enable a blocking query. Waits
-	// until the timeout or the next index is reached
-	WaitIndex uint64
-
-	// WaitHash is used by some endpoints instead of WaitIndex to perform blocking
-	// on state based on a hash of the response rather than a monotonic index.
-	// This is required when the state being blocked on is not stored in Raft, for
-	// example agent-local proxy configuration.
-	WaitHash string
-
-	// WaitTime is used to bound the duration of a wait.
-	// Defaults to that of the Config, but can be overridden.
-	WaitTime time.Duration
-
-	// Token is used to provide a per-request ACL token
-	// which overrides the agent's default token.
-	Token string
-
-	// Near is used to provide a node name that will sort the results
-	// in ascending order based on the estimated round trip time from
-	// that node. Setting this to "_agent" will use the agent's node
-	// for the sort.
-	Near string
-
-	// NodeMeta is used to filter results by nodes with the given
-	// metadata key/value pairs. Currently, only one key/value pair can
-	// be provided for filtering.
-	NodeMeta map[string]string
-
-	// RelayFactor is used in keyring operations to cause responses to be
-	// relayed back to the sender through N other random nodes. Must be
-	// a value from 0 to 5 (inclusive).
-	RelayFactor uint8
-
-	// LocalOnly is used in keyring list operation to force the keyring
-	// query to only hit local servers (no WAN traffic).
-	LocalOnly bool
-
-	// Connect filters prepared query execution to only include Connect-capable
-	// services. This currently affects prepared query execution.
-	Connect bool
-
-	// ctx is an optional context pass through to the underlying HTTP
-	// request layer. Use Context() and WithContext() to manage this.
-	ctx context.Context
-
-	// Filter requests filtering data prior to it being returned. The string
-	// is a go-bexpr compatible expression.
-	Filter string
-}
-
-func (o *QueryOptions) Context() context.Context {
-	if o != nil && o.ctx != nil {
-		return o.ctx
-	}
-	return context.Background()
-}
-
-func (o *QueryOptions) WithContext(ctx context.Context) *QueryOptions {
-	o2 := new(QueryOptions)
-	if o != nil {
-		*o2 = *o
-	}
-	o2.ctx = ctx
-	return o2
-}
-
-// WriteOptions are used to parameterize a write
-type WriteOptions struct {
-	// Providing a datacenter overwrites the DC provided
-	// by the Config
-	Datacenter string
-
-	// Token is used to provide a per-request ACL token
-	// which overrides the agent's default token.
-	Token string
-
-	// RelayFactor is used in keyring operations to cause responses to be
-	// relayed back to the sender through N other random nodes. Must be
-	// a value from 0 to 5 (inclusive).
-	RelayFactor uint8
-
-	// ctx is an optional context pass through to the underlying HTTP
-	// request layer. Use Context() and WithContext() to manage this.
-	ctx context.Context
-}
-
-func (o *WriteOptions) Context() context.Context {
-	if o != nil && o.ctx != nil {
-		return o.ctx
-	}
-	return context.Background()
-}
-
-func (o *WriteOptions) WithContext(ctx context.Context) *WriteOptions {
-	o2 := new(WriteOptions)
-	if o != nil {
-		*o2 = *o
-	}
-	o2.ctx = ctx
-	return o2
-}
-
-// QueryMeta is used to return meta data about a query
-type QueryMeta struct {
-	// LastIndex. This can be used as a WaitIndex to perform
-	// a blocking query
-	LastIndex uint64
-
-	// LastContentHash. This can be used as a WaitHash to perform a blocking query
-	// for endpoints that support hash-based blocking. Endpoints that do not
-	// support it will return an empty hash.
-	LastContentHash string
-
-	// Time of last contact from the leader for the
-	// server servicing the request
-	LastContact time.Duration
-
-	// Is there a known leader
-	KnownLeader bool
-
-	// How long did the request take
-	RequestTime time.Duration
-
-	// Is address translation enabled for HTTP responses on this agent
-	AddressTranslationEnabled bool
-
-	// CacheHit is true if the result was served from agent-local cache.
-	CacheHit bool
-
-	// CacheAge is set if request was ?cached and indicates how stale the cached
-	// response is.
-	CacheAge time.Duration
-}
-
-// WriteMeta is used to return meta data about a write
-type WriteMeta struct {
-	// How long did the request take
-	RequestTime time.Duration
-}
-
-// HttpBasicAuth is used to authenticate the http client with HTTP Basic Authentication
-type HttpBasicAuth struct {
-	// Username to use for HTTP Basic Authentication
-	Username string
-
-	// Password to use for HTTP Basic Authentication
-	Password string
-}
-
-// Config is used to configure the creation of a client
-type Config struct {
-	// Address is the address of the Consul server
-	Address string
-
-	// Scheme is the URI scheme for the Consul server
-	Scheme string
-
-	// Datacenter to use. If not provided, the default agent datacenter is used.
-	Datacenter string
-
-	// Transport is the Transport to use for the http client.
-	Transport *http.Transport
-
-	// HttpClient is the client to use. Default will be
-	// used if not provided.
-	HttpClient *http.Client
-
-	// HttpAuth is the auth info to use for http access.
-	HttpAuth *HttpBasicAuth
-
-	// WaitTime limits how long a Watch will block. If not provided,
-	// the agent default values will be used.
-	WaitTime time.Duration
-
-	// Token is used to provide a per-request ACL token
-	// which overrides the agent's default token.
-	Token string
-
-	// TokenFile is a file containing the current token to use for this client.
-	// If provided it is read once at startup and never again.
-	TokenFile string
-
-	TLSConfig TLSConfig
-}
-
-// TLSConfig is used to generate a TLSClientConfig that's useful for talking to
-// Consul using TLS.
-type TLSConfig struct {
-	// Address is the optional address of the Consul server. The port, if any
-	// will be removed from here and this will be set to the ServerName of the
-	// resulting config.
-	Address string
-
-	// CAFile is the optional path to the CA certificate used for Consul
-	// communication, defaults to the system bundle if not specified.
-	CAFile string
-
-	// CAPath is the optional path to a directory of CA certificates to use for
-	// Consul communication, defaults to the system bundle if not specified.
-	CAPath string
-
-	// CertFile is the optional path to the certificate for Consul
-	// communication. If this is set then you need to also set KeyFile.
-	CertFile string
-
-	// KeyFile is the optional path to the private key for Consul communication.
-	// If this is set then you need to also set CertFile.
-	KeyFile string
-
-	// InsecureSkipVerify if set to true will disable TLS host verification.
-	InsecureSkipVerify bool
-}
-
-// DefaultConfig returns a default configuration for the client. By default this
-// will pool and reuse idle connections to Consul. If you have a long-lived
-// client object, this is the desired behavior and should make the most efficient
-// use of the connections to Consul. If you don't reuse a client object, which
-// is not recommended, then you may notice idle connections building up over
-// time. To avoid this, use DefaultNonPooledConfig() instead.
-func DefaultConfig() *Config {
-	return defaultConfig(cleanhttp.DefaultPooledTransport)
-}
-
-// DefaultNonPooledConfig returns a default configuration for the client which
-// does not pool connections. This isn't a recommended configuration because it
-// will reconnect to Consul on every request, but this is useful to avoid the
-// accumulation of idle connections if you make many client objects during the
-// lifetime of your application.
-func DefaultNonPooledConfig() *Config {
-	return defaultConfig(cleanhttp.DefaultTransport)
-}
-
-// defaultConfig returns the default configuration for the client, using the
-// given function to make the transport.
-func defaultConfig(transportFn func() *http.Transport) *Config {
-	config := &Config{
-		Address:   "127.0.0.1:8500",
-		Scheme:    "http",
-		Transport: transportFn(),
-	}
-
-	if addr := os.Getenv(HTTPAddrEnvName); addr != "" {
-		config.Address = addr
-	}
-
-	if tokenFile := os.Getenv(HTTPTokenFileEnvName); tokenFile != "" {
-		config.TokenFile = tokenFile
-	}
-
-	if token := os.Getenv(HTTPTokenEnvName); token != "" {
-		config.Token = token
-	}
-
-	if auth := os.Getenv(HTTPAuthEnvName); auth != "" {
-		var username, password string
-		if strings.Contains(auth, ":") {
-			split := strings.SplitN(auth, ":", 2)
-			username = split[0]
-			password = split[1]
-		} else {
-			username = auth
-		}
-
-		config.HttpAuth = &HttpBasicAuth{
-			Username: username,
-			Password: password,
-		}
-	}
-
-	if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" {
-		enabled, err := strconv.ParseBool(ssl)
-		if err != nil {
-			log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err)
-		}
-
-		if enabled {
-			config.Scheme = "https"
-		}
-	}
-
-	if v := os.Getenv(HTTPTLSServerName); v != "" {
-		config.TLSConfig.Address = v
-	}
-	if v := os.Getenv(HTTPCAFile); v != "" {
-		config.TLSConfig.CAFile = v
-	}
-	if v := os.Getenv(HTTPCAPath); v != "" {
-		config.TLSConfig.CAPath = v
-	}
-	if v := os.Getenv(HTTPClientCert); v != "" {
-		config.TLSConfig.CertFile = v
-	}
-	if v := os.Getenv(HTTPClientKey); v != "" {
-		config.TLSConfig.KeyFile = v
-	}
-	if v := os.Getenv(HTTPSSLVerifyEnvName); v != "" {
-		doVerify, err := strconv.ParseBool(v)
-		if err != nil {
-			log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err)
-		}
-		if !doVerify {
-			config.TLSConfig.InsecureSkipVerify = true
-		}
-	}
-
-	return config
-}
-
-// SetupTLSConfig generates a tls.Config from the given TLSConfig that's useful
-// for talking to Consul using TLS.
-func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) {
-	tlsClientConfig := &tls.Config{
-		InsecureSkipVerify: tlsConfig.InsecureSkipVerify,
-	}
-
-	if tlsConfig.Address != "" {
-		server := tlsConfig.Address
-		hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]")
-		if hasPort {
-			var err error
-			server, _, err = net.SplitHostPort(server)
-			if err != nil {
-				return nil, err
-			}
-		}
-		tlsClientConfig.ServerName = server
-	}
-
-	if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" {
-		tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile)
-		if err != nil {
-			return nil, err
-		}
-		tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
-	}
-
-	if tlsConfig.CAFile != "" || tlsConfig.CAPath != "" {
-		rootConfig := &rootcerts.Config{
-			CAFile: tlsConfig.CAFile,
-			CAPath: tlsConfig.CAPath,
-		}
-		if err := rootcerts.ConfigureTLS(tlsClientConfig, rootConfig); err != nil {
-			return nil, err
-		}
-	}
-
-	return tlsClientConfig, nil
-}
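
TLSConfig and SetupTLSConfig above are normally consumed indirectly: the fields are set on the client Config and NewClient/NewHttpClient wire them into the transport. A sketch with placeholder addresses and file paths:

	package main

	import (
		"log"

		"github.com/hashicorp/consul/api"
	)

	func main() {
		cfg := api.DefaultConfig()
		cfg.Address = "consul.example.com:8501" // placeholder HTTPS endpoint
		cfg.Scheme = "https"
		cfg.TLSConfig = api.TLSConfig{
			CAFile:   "/etc/consul/ca.pem", // placeholder paths
			CertFile: "/etc/consul/client.pem",
			KeyFile:  "/etc/consul/client-key.pem",
		}

		client, err := api.NewClient(cfg)
		if err != nil {
			log.Fatal(err)
		}

		// Requests now go over TLS with client certificates presented.
		if _, err := client.Agent().Self(); err != nil {
			log.Fatal(err)
		}
	}
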
-
-func (c *Config) GenerateEnv() []string {
-	env := make([]string, 0, 10)
-
-	env = append(env,
-		fmt.Sprintf("%s=%s", HTTPAddrEnvName, c.Address),
-		fmt.Sprintf("%s=%s", HTTPTokenEnvName, c.Token),
-		fmt.Sprintf("%s=%s", HTTPTokenFileEnvName, c.TokenFile),
-		fmt.Sprintf("%s=%t", HTTPSSLEnvName, c.Scheme == "https"),
-		fmt.Sprintf("%s=%s", HTTPCAFile, c.TLSConfig.CAFile),
-		fmt.Sprintf("%s=%s", HTTPCAPath, c.TLSConfig.CAPath),
-		fmt.Sprintf("%s=%s", HTTPClientCert, c.TLSConfig.CertFile),
-		fmt.Sprintf("%s=%s", HTTPClientKey, c.TLSConfig.KeyFile),
-		fmt.Sprintf("%s=%s", HTTPTLSServerName, c.TLSConfig.Address),
-		fmt.Sprintf("%s=%t", HTTPSSLVerifyEnvName, !c.TLSConfig.InsecureSkipVerify))
-
-	if c.HttpAuth != nil {
-		env = append(env, fmt.Sprintf("%s=%s:%s", HTTPAuthEnvName, c.HttpAuth.Username, c.HttpAuth.Password))
-	} else {
-		env = append(env, fmt.Sprintf("%s=", HTTPAuthEnvName))
-	}
-
-	return env
-}
-
-// Client provides a client to the Consul API
-type Client struct {
-	config Config
-}
-
-// NewClient returns a new client
-func NewClient(config *Config) (*Client, error) {
-	// bootstrap the config
-	defConfig := DefaultConfig()
-
-	if len(config.Address) == 0 {
-		config.Address = defConfig.Address
-	}
-
-	if len(config.Scheme) == 0 {
-		config.Scheme = defConfig.Scheme
-	}
-
-	if config.Transport == nil {
-		config.Transport = defConfig.Transport
-	}
-
-	if config.TLSConfig.Address == "" {
-		config.TLSConfig.Address = defConfig.TLSConfig.Address
-	}
-
-	if config.TLSConfig.CAFile == "" {
-		config.TLSConfig.CAFile = defConfig.TLSConfig.CAFile
-	}
-
-	if config.TLSConfig.CAPath == "" {
-		config.TLSConfig.CAPath = defConfig.TLSConfig.CAPath
-	}
-
-	if config.TLSConfig.CertFile == "" {
-		config.TLSConfig.CertFile = defConfig.TLSConfig.CertFile
-	}
-
-	if config.TLSConfig.KeyFile == "" {
-		config.TLSConfig.KeyFile = defConfig.TLSConfig.KeyFile
-	}
-
-	if !config.TLSConfig.InsecureSkipVerify {
-		config.TLSConfig.InsecureSkipVerify = defConfig.TLSConfig.InsecureSkipVerify
-	}
-
-	if config.HttpClient == nil {
-		var err error
-		config.HttpClient, err = NewHttpClient(config.Transport, config.TLSConfig)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	parts := strings.SplitN(config.Address, "://", 2)
-	if len(parts) == 2 {
-		switch parts[0] {
-		case "http":
-			config.Scheme = "http"
-		case "https":
-			config.Scheme = "https"
-		case "unix":
-			trans := cleanhttp.DefaultTransport()
-			trans.DialContext = func(_ context.Context, _, _ string) (net.Conn, error) {
-				return net.Dial("unix", parts[1])
-			}
-			config.HttpClient = &http.Client{
-				Transport: trans,
-			}
-		default:
-			return nil, fmt.Errorf("Unknown protocol scheme: %s", parts[0])
-		}
-		config.Address = parts[1]
-	}
-
-	// If the TokenFile is set, always use that, even if a Token is configured.
-	// This is because when TokenFile is set it is read into the Token field.
-	// We want any derived clients to have to re-read the token file.
-	if config.TokenFile != "" {
-		data, err := ioutil.ReadFile(config.TokenFile)
-		if err != nil {
-			return nil, fmt.Errorf("Error loading token file: %s", err)
-		}
-
-		if token := strings.TrimSpace(string(data)); token != "" {
-			config.Token = token
-		}
-	}
-	if config.Token == "" {
-		config.Token = defConfig.Token
-	}
-
-	return &Client{config: *config}, nil
-}
-
-// NewHttpClient returns an http client configured with the given Transport and TLS
-// config.
-func NewHttpClient(transport *http.Transport, tlsConf TLSConfig) (*http.Client, error) {
-	client := &http.Client{
-		Transport: transport,
-	}
-
-	// TODO (slackpad) - Once we get some run time on the HTTP/2 support we
-	// should turn it on by default if TLS is enabled. We would basically
-	// just need to call http2.ConfigureTransport(transport) here. We also
-	// don't want to introduce another external dependency on
-	// golang.org/x/net/http2 at this time. For a complete recipe for how
-	// to enable HTTP/2 support on a transport suitable for the API client
-	// library see agent/http_test.go:TestHTTPServer_H2.
-
-	if transport.TLSClientConfig == nil {
-		tlsClientConfig, err := SetupTLSConfig(&tlsConf)
-
-		if err != nil {
-			return nil, err
-		}
-
-		transport.TLSClientConfig = tlsClientConfig
-	}
-
-	return client, nil
-}
-
-// request is used to help build up a request
-type request struct {
-	config *Config
-	method string
-	url    *url.URL
-	params url.Values
-	body   io.Reader
-	header http.Header
-	obj    interface{}
-	ctx    context.Context
-}
-
-// setQueryOptions is used to annotate the request with
-// additional query options
-func (r *request) setQueryOptions(q *QueryOptions) {
-	if q == nil {
-		return
-	}
-	if q.Datacenter != "" {
-		r.params.Set("dc", q.Datacenter)
-	}
-	if q.AllowStale {
-		r.params.Set("stale", "")
-	}
-	if q.RequireConsistent {
-		r.params.Set("consistent", "")
-	}
-	if q.WaitIndex != 0 {
-		r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
-	}
-	if q.WaitTime != 0 {
-		r.params.Set("wait", durToMsec(q.WaitTime))
-	}
-	if q.WaitHash != "" {
-		r.params.Set("hash", q.WaitHash)
-	}
-	if q.Token != "" {
-		r.header.Set("X-Consul-Token", q.Token)
-	}
-	if q.Near != "" {
-		r.params.Set("near", q.Near)
-	}
-	if q.Filter != "" {
-		r.params.Set("filter", q.Filter)
-	}
-	if len(q.NodeMeta) > 0 {
-		for key, value := range q.NodeMeta {
-			r.params.Add("node-meta", key+":"+value)
-		}
-	}
-	if q.RelayFactor != 0 {
-		r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
-	}
-	if q.LocalOnly {
-		r.params.Set("local-only", fmt.Sprintf("%t", q.LocalOnly))
-	}
-	if q.Connect {
-		r.params.Set("connect", "true")
-	}
-	if q.UseCache && !q.RequireConsistent {
-		r.params.Set("cached", "")
-
-		cc := []string{}
-		if q.MaxAge > 0 {
-			cc = append(cc, fmt.Sprintf("max-age=%.0f", q.MaxAge.Seconds()))
-		}
-		if q.StaleIfError > 0 {
-			cc = append(cc, fmt.Sprintf("stale-if-error=%.0f", q.StaleIfError.Seconds()))
-		}
-		if len(cc) > 0 {
-			r.header.Set("Cache-Control", strings.Join(cc, ", "))
-		}
-	}
-	r.ctx = q.ctx
-}
-
-// durToMsec converts a duration to a string expressed in milliseconds. If the
-// user selected a positive value that rounds to 0 ms, then we will use 1 ms
-// so they get a short delay; otherwise Consul would translate the 0 ms into
-// a huge default delay.
-func durToMsec(dur time.Duration) string {
-	ms := dur / time.Millisecond
-	if dur > 0 && ms == 0 {
-		ms = 1
-	}
-	return fmt.Sprintf("%dms", ms)
-}
-
-// serverError is a string we look for to detect 500 errors.
-const serverError = "Unexpected response code: 500"
-
-// IsRetryableError returns true for 500 errors from the Consul servers, and
-// network connection errors. These are usually retryable at a later time.
-// This applies to reads but NOT to writes. This may return true for errors
-// on writes that may have still gone through, so do not use this to retry
-// any write operations.
-func IsRetryableError(err error) bool {
-	if err == nil {
-		return false
-	}
-
-	if _, ok := err.(net.Error); ok {
-		return true
-	}
-
-	// TODO (slackpad) - Make a real error type here instead of using
-	// a string check.
-	return strings.Contains(err.Error(), serverError)
-}
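
IsRetryableError above applies to reads only; a sketch of a bounded retry loop around a read, with an arbitrary endpoint and backoff:

	package main

	import (
		"fmt"
		"log"
		"time"

		"github.com/hashicorp/consul/api"
	)

	func main() {
		client, err := api.NewClient(api.DefaultConfig())
		if err != nil {
			log.Fatal(err)
		}

		var members []*api.AgentMember
		for attempt := 0; attempt < 5; attempt++ {
			members, err = client.Agent().Members(false)
			if err == nil || !api.IsRetryableError(err) {
				break
			}
			time.Sleep(time.Duration(attempt+1) * time.Second) // crude linear backoff
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("cluster members:", len(members))
	}
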
-
-// setWriteOptions is used to annotate the request with
-// additional write options
-func (r *request) setWriteOptions(q *WriteOptions) {
-	if q == nil {
-		return
-	}
-	if q.Datacenter != "" {
-		r.params.Set("dc", q.Datacenter)
-	}
-	if q.Token != "" {
-		r.header.Set("X-Consul-Token", q.Token)
-	}
-	if q.RelayFactor != 0 {
-		r.params.Set("relay-factor", strconv.Itoa(int(q.RelayFactor)))
-	}
-	r.ctx = q.ctx
-}
-
-// toHTTP converts the request to an HTTP request
-func (r *request) toHTTP() (*http.Request, error) {
-	// Encode the query parameters
-	r.url.RawQuery = r.params.Encode()
-
-	// Check if we should encode the body
-	if r.body == nil && r.obj != nil {
-		b, err := encodeBody(r.obj)
-		if err != nil {
-			return nil, err
-		}
-		r.body = b
-	}
-
-	// Create the HTTP request
-	req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
-	if err != nil {
-		return nil, err
-	}
-
-	req.URL.Host = r.url.Host
-	req.URL.Scheme = r.url.Scheme
-	req.Host = r.url.Host
-	req.Header = r.header
-
-	// Setup auth
-	if r.config.HttpAuth != nil {
-		req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
-	}
-	if r.ctx != nil {
-		return req.WithContext(r.ctx), nil
-	}
-
-	return req, nil
-}
-
-// newRequest is used to create a new request
-func (c *Client) newRequest(method, path string) *request {
-	r := &request{
-		config: &c.config,
-		method: method,
-		url: &url.URL{
-			Scheme: c.config.Scheme,
-			Host:   c.config.Address,
-			Path:   path,
-		},
-		params: make(map[string][]string),
-		header: make(http.Header),
-	}
-	if c.config.Datacenter != "" {
-		r.params.Set("dc", c.config.Datacenter)
-	}
-	if c.config.WaitTime != 0 {
-		r.params.Set("wait", durToMsec(r.config.WaitTime))
-	}
-	if c.config.Token != "" {
-		r.header.Set("X-Consul-Token", r.config.Token)
-	}
-	return r
-}
-
-// doRequest runs a request with our client
-func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
-	req, err := r.toHTTP()
-	if err != nil {
-		return 0, nil, err
-	}
-	start := time.Now()
-	resp, err := c.config.HttpClient.Do(req)
-	diff := time.Since(start)
-	return diff, resp, err
-}
-
-// Query is used to do a GET request against an endpoint
-// and deserialize the response into an interface using
-// standard Consul conventions.
-func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
-	r := c.newRequest("GET", endpoint)
-	r.setQueryOptions(q)
-	rtt, resp, err := c.doRequest(r)
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	if err := decodeBody(resp, out); err != nil {
-		return nil, err
-	}
-	return qm, nil
-}
-
-// write is used to do a PUT request against an endpoint
-// and serialize/deserialize using the standard Consul conventions.
-func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
-	r := c.newRequest("PUT", endpoint)
-	r.setWriteOptions(q)
-	r.obj = in
-	rtt, resp, err := requireOK(c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	if out != nil {
-		if err := decodeBody(resp, &out); err != nil {
-			return nil, err
-		}
-	} else if _, err := ioutil.ReadAll(resp.Body); err != nil {
-		return nil, err
-	}
-	return wm, nil
-}
-
-// parseQueryMeta is used to help parse query meta-data
-//
-// TODO(rb): bug? the error from this function is never handled
-func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
-	header := resp.Header
-
-	// Parse the X-Consul-Index (if it's set - hash based blocking queries don't
-	// set this)
-	if indexStr := header.Get("X-Consul-Index"); indexStr != "" {
-		index, err := strconv.ParseUint(indexStr, 10, 64)
-		if err != nil {
-			return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
-		}
-		q.LastIndex = index
-	}
-	q.LastContentHash = header.Get("X-Consul-ContentHash")
-
-	// Parse the X-Consul-LastContact
-	last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
-	if err != nil {
-		return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
-	}
-	q.LastContact = time.Duration(last) * time.Millisecond
-
-	// Parse the X-Consul-KnownLeader
-	switch header.Get("X-Consul-KnownLeader") {
-	case "true":
-		q.KnownLeader = true
-	default:
-		q.KnownLeader = false
-	}
-
-	// Parse X-Consul-Translate-Addresses
-	switch header.Get("X-Consul-Translate-Addresses") {
-	case "true":
-		q.AddressTranslationEnabled = true
-	default:
-		q.AddressTranslationEnabled = false
-	}
-
-	// Parse Cache info
-	if cacheStr := header.Get("X-Cache"); cacheStr != "" {
-		q.CacheHit = strings.EqualFold(cacheStr, "HIT")
-	}
-	if ageStr := header.Get("Age"); ageStr != "" {
-		age, err := strconv.ParseUint(ageStr, 10, 64)
-		if err != nil {
-			return fmt.Errorf("Failed to parse Age Header: %v", err)
-		}
-		q.CacheAge = time.Duration(age) * time.Second
-	}
-
-	return nil
-}
-
-// decodeBody is used to JSON decode a body
-func decodeBody(resp *http.Response, out interface{}) error {
-	dec := json.NewDecoder(resp.Body)
-	return dec.Decode(out)
-}
-
-// encodeBody is used to encode a request body
-func encodeBody(obj interface{}) (io.Reader, error) {
-	buf := bytes.NewBuffer(nil)
-	enc := json.NewEncoder(buf)
-	if err := enc.Encode(obj); err != nil {
-		return nil, err
-	}
-	return buf, nil
-}
-
-// requireOK is used to wrap doRequest and check for a 200
-func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
-	if e != nil {
-		if resp != nil {
-			resp.Body.Close()
-		}
-		return d, nil, e
-	}
-	if resp.StatusCode != 200 {
-		return d, nil, generateUnexpectedResponseCodeError(resp)
-	}
-	return d, resp, nil
-}
-
-func (req *request) filterQuery(filter string) {
-	if filter == "" {
-		return
-	}
-
-	req.params.Set("filter", filter)
-}
-
-// generateUnexpectedResponseCodeError consumes the rest of the body, closes
-// the body stream and generates an error indicating the status code was
-// unexpected.
-func generateUnexpectedResponseCodeError(resp *http.Response) error {
-	var buf bytes.Buffer
-	io.Copy(&buf, resp.Body)
-	resp.Body.Close()
-	return fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
-}
-
-func requireNotFoundOrOK(d time.Duration, resp *http.Response, e error) (bool, time.Duration, *http.Response, error) {
-	if e != nil {
-		if resp != nil {
-			resp.Body.Close()
-		}
-		return false, d, nil, e
-	}
-	switch resp.StatusCode {
-	case 200:
-		return true, d, resp, nil
-	case 404:
-		return false, d, resp, nil
-	default:
-		return false, d, nil, generateUnexpectedResponseCodeError(resp)
-	}
-}
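
A minimal sketch of how the request plumbing removed above (setQueryOptions, blocking indexes, IsRetryableError) is typically exercised through the public client. This assumes the upstream github.com/hashicorp/consul/api import path and a reachable local agent; it is illustrative only, not part of this change:

    package main

    import (
        "fmt"
        "log"
        "time"

        consulapi "github.com/hashicorp/consul/api"
    )

    func main() {
        client, err := consulapi.NewClient(consulapi.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        var lastIndex uint64
        for {
            // WaitIndex/WaitTime become the "index" and "wait" query parameters
            // set by setQueryOptions, turning this into a blocking (long-poll) read.
            nodes, meta, err := client.Catalog().Nodes(&consulapi.QueryOptions{
                WaitIndex: lastIndex,
                WaitTime:  30 * time.Second,
            })
            if err != nil {
                // Per the IsRetryableError comment, only reads are retried; never writes.
                if consulapi.IsRetryableError(err) {
                    time.Sleep(time.Second)
                    continue
                }
                log.Fatal(err)
            }
            lastIndex = meta.LastIndex
            fmt.Println("known nodes:", len(nodes))
        }
    }
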
diff --git a/vendor/github.com/hashicorp/consul/api/catalog.go b/vendor/github.com/hashicorp/consul/api/catalog.go
deleted file mode 100644
index 3fb0553..0000000
--- a/vendor/github.com/hashicorp/consul/api/catalog.go
+++ /dev/null
@@ -1,262 +0,0 @@
-package api
-
-import (
-	"net"
-	"strconv"
-)
-
-type Weights struct {
-	Passing int
-	Warning int
-}
-
-type Node struct {
-	ID              string
-	Node            string
-	Address         string
-	Datacenter      string
-	TaggedAddresses map[string]string
-	Meta            map[string]string
-	CreateIndex     uint64
-	ModifyIndex     uint64
-}
-
-type ServiceAddress struct {
-	Address string
-	Port    int
-}
-
-type CatalogService struct {
-	ID                       string
-	Node                     string
-	Address                  string
-	Datacenter               string
-	TaggedAddresses          map[string]string
-	NodeMeta                 map[string]string
-	ServiceID                string
-	ServiceName              string
-	ServiceAddress           string
-	ServiceTaggedAddresses   map[string]ServiceAddress
-	ServiceTags              []string
-	ServiceMeta              map[string]string
-	ServicePort              int
-	ServiceWeights           Weights
-	ServiceEnableTagOverride bool
-	ServiceProxy             *AgentServiceConnectProxyConfig
-	CreateIndex              uint64
-	Checks                   HealthChecks
-	ModifyIndex              uint64
-}
-
-type CatalogNode struct {
-	Node     *Node
-	Services map[string]*AgentService
-}
-
-type CatalogRegistration struct {
-	ID              string
-	Node            string
-	Address         string
-	TaggedAddresses map[string]string
-	NodeMeta        map[string]string
-	Datacenter      string
-	Service         *AgentService
-	Check           *AgentCheck
-	Checks          HealthChecks
-	SkipNodeUpdate  bool
-}
-
-type CatalogDeregistration struct {
-	Node       string
-	Address    string // Obsolete.
-	Datacenter string
-	ServiceID  string
-	CheckID    string
-}
-
-// Catalog can be used to query the Catalog endpoints
-type Catalog struct {
-	c *Client
-}
-
-// Catalog returns a handle to the catalog endpoints
-func (c *Client) Catalog() *Catalog {
-	return &Catalog{c}
-}
-
-func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
-	r := c.c.newRequest("PUT", "/v1/catalog/register")
-	r.setWriteOptions(q)
-	r.obj = reg
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	resp.Body.Close()
-
-	wm := &WriteMeta{}
-	wm.RequestTime = rtt
-
-	return wm, nil
-}
-
-func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
-	r := c.c.newRequest("PUT", "/v1/catalog/deregister")
-	r.setWriteOptions(q)
-	r.obj = dereg
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	resp.Body.Close()
-
-	wm := &WriteMeta{}
-	wm.RequestTime = rtt
-
-	return wm, nil
-}
-
-// Datacenters is used to query for all the known datacenters
-func (c *Catalog) Datacenters() ([]string, error) {
-	r := c.c.newRequest("GET", "/v1/catalog/datacenters")
-	_, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out []string
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// Nodes is used to query all the known nodes
-func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
-	r := c.c.newRequest("GET", "/v1/catalog/nodes")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out []*Node
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// Services is used to query for all known services
-func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
-	r := c.c.newRequest("GET", "/v1/catalog/services")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out map[string][]string
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// Service is used to query catalog entries for a given service
-func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
-	var tags []string
-	if tag != "" {
-		tags = []string{tag}
-	}
-	return c.service(service, tags, q, false)
-}
-
-// Supports multiple tags for filtering
-func (c *Catalog) ServiceMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
-	return c.service(service, tags, q, false)
-}
-
-// Connect is used to query catalog entries for a given Connect-enabled service
-func (c *Catalog) Connect(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
-	var tags []string
-	if tag != "" {
-		tags = []string{tag}
-	}
-	return c.service(service, tags, q, true)
-}
-
-// Supports multiple tags for filtering
-func (c *Catalog) ConnectMultipleTags(service string, tags []string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
-	return c.service(service, tags, q, true)
-}
-
-func (c *Catalog) service(service string, tags []string, q *QueryOptions, connect bool) ([]*CatalogService, *QueryMeta, error) {
-	path := "/v1/catalog/service/" + service
-	if connect {
-		path = "/v1/catalog/connect/" + service
-	}
-	r := c.c.newRequest("GET", path)
-	r.setQueryOptions(q)
-	if len(tags) > 0 {
-		for _, tag := range tags {
-			r.params.Add("tag", tag)
-		}
-	}
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out []*CatalogService
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// Node is used to query for service information about a single node
-func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
-	r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out *CatalogNode
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-func ParseServiceAddr(addrPort string) (ServiceAddress, error) {
-	port := 0
-	host, portStr, err := net.SplitHostPort(addrPort)
-	if err == nil {
-		port, err = strconv.Atoi(portStr)
-	}
-	return ServiceAddress{Address: host, Port: port}, err
-}
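
For reference, a short sketch of querying the catalog endpoints deleted above. The service name "voltha-api", tag "grpc", and datacenter "dc1" are illustrative assumptions, not values used by voltctl:

    package main

    import (
        "fmt"
        "log"

        consulapi "github.com/hashicorp/consul/api"
    )

    func main() {
        client, err := consulapi.NewClient(consulapi.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        // List catalog entries for one service, filtered by tag and restricted
        // to a datacenter via QueryOptions.
        services, _, err := client.Catalog().Service("voltha-api", "grpc",
            &consulapi.QueryOptions{Datacenter: "dc1"})
        if err != nil {
            log.Fatal(err)
        }
        for _, s := range services {
            fmt.Printf("%s -> %s:%d\n", s.ServiceName, s.ServiceAddress, s.ServicePort)
        }
    }
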
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go
deleted file mode 100644
index 1588f2e..0000000
--- a/vendor/github.com/hashicorp/consul/api/config_entry.go
+++ /dev/null
@@ -1,319 +0,0 @@
-package api
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"io"
-	"strconv"
-	"strings"
-
-	"github.com/mitchellh/mapstructure"
-)
-
-const (
-	ServiceDefaults string = "service-defaults"
-	ProxyDefaults   string = "proxy-defaults"
-	ServiceRouter   string = "service-router"
-	ServiceSplitter string = "service-splitter"
-	ServiceResolver string = "service-resolver"
-
-	ProxyConfigGlobal string = "global"
-)
-
-type ConfigEntry interface {
-	GetKind() string
-	GetName() string
-	GetCreateIndex() uint64
-	GetModifyIndex() uint64
-}
-
-type MeshGatewayMode string
-
-const (
-	// MeshGatewayModeDefault represents no specific mode and should
-	// be used to indicate that a different layer of the configuration
-	// chain should take precedence
-	MeshGatewayModeDefault MeshGatewayMode = ""
-
-	// MeshGatewayModeNone represents that the Upstream Connect connections
-	// should be direct and not flow through a mesh gateway.
-	MeshGatewayModeNone MeshGatewayMode = "none"
-
-	// MeshGatewayModeLocal represents that the Upstream Connect connections
-	// should be made to a mesh gateway in the local datacenter.
-	MeshGatewayModeLocal MeshGatewayMode = "local"
-
-	// MeshGatewayModeRemote represents that the Upstream Connect connections
-	// should be made to a mesh gateway in a remote datacenter.
-	MeshGatewayModeRemote MeshGatewayMode = "remote"
-)
-
-// MeshGatewayConfig controls how Mesh Gateways are used for upstream Connect
-// services
-type MeshGatewayConfig struct {
-	// Mode is the mode that should be used for the upstream connection.
-	Mode MeshGatewayMode `json:",omitempty"`
-}
-
-type ServiceConfigEntry struct {
-	Kind        string
-	Name        string
-	Protocol    string            `json:",omitempty"`
-	MeshGateway MeshGatewayConfig `json:",omitempty"`
-	ExternalSNI string            `json:",omitempty"`
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-func (s *ServiceConfigEntry) GetKind() string {
-	return s.Kind
-}
-
-func (s *ServiceConfigEntry) GetName() string {
-	return s.Name
-}
-
-func (s *ServiceConfigEntry) GetCreateIndex() uint64 {
-	return s.CreateIndex
-}
-
-func (s *ServiceConfigEntry) GetModifyIndex() uint64 {
-	return s.ModifyIndex
-}
-
-type ProxyConfigEntry struct {
-	Kind        string
-	Name        string
-	Config      map[string]interface{} `json:",omitempty"`
-	MeshGateway MeshGatewayConfig      `json:",omitempty"`
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-func (p *ProxyConfigEntry) GetKind() string {
-	return p.Kind
-}
-
-func (p *ProxyConfigEntry) GetName() string {
-	return p.Name
-}
-
-func (p *ProxyConfigEntry) GetCreateIndex() uint64 {
-	return p.CreateIndex
-}
-
-func (p *ProxyConfigEntry) GetModifyIndex() uint64 {
-	return p.ModifyIndex
-}
-
-type rawEntryListResponse struct {
-	kind    string
-	Entries []map[string]interface{}
-}
-
-func makeConfigEntry(kind, name string) (ConfigEntry, error) {
-	switch kind {
-	case ServiceDefaults:
-		return &ServiceConfigEntry{Kind: kind, Name: name}, nil
-	case ProxyDefaults:
-		return &ProxyConfigEntry{Kind: kind, Name: name}, nil
-	case ServiceRouter:
-		return &ServiceRouterConfigEntry{Kind: kind, Name: name}, nil
-	case ServiceSplitter:
-		return &ServiceSplitterConfigEntry{Kind: kind, Name: name}, nil
-	case ServiceResolver:
-		return &ServiceResolverConfigEntry{Kind: kind, Name: name}, nil
-	default:
-		return nil, fmt.Errorf("invalid config entry kind: %s", kind)
-	}
-}
-
-func MakeConfigEntry(kind, name string) (ConfigEntry, error) {
-	return makeConfigEntry(kind, name)
-}
-
-// DecodeConfigEntry will decode the result of using json.Unmarshal of a config
-// entry into a map[string]interface{}.
-//
-// Important caveats:
-//
-// - This will NOT work if the map[string]interface{} was produced using HCL
-// decoding as that requires more extensive parsing to work around the issues
-// with map[string][]interface{} that arise.
-//
-// - This will only decode fields using their camel case json field
-// representations.
-func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) {
-	var entry ConfigEntry
-
-	kindVal, ok := raw["Kind"]
-	if !ok {
-		kindVal, ok = raw["kind"]
-	}
-	if !ok {
-		return nil, fmt.Errorf("Payload does not contain a kind/Kind key at the top level")
-	}
-
-	if kindStr, ok := kindVal.(string); ok {
-		newEntry, err := makeConfigEntry(kindStr, "")
-		if err != nil {
-			return nil, err
-		}
-		entry = newEntry
-	} else {
-		return nil, fmt.Errorf("Kind value in payload is not a string")
-	}
-
-	decodeConf := &mapstructure.DecoderConfig{
-		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
-		Result:           &entry,
-		WeaklyTypedInput: true,
-	}
-
-	decoder, err := mapstructure.NewDecoder(decodeConf)
-	if err != nil {
-		return nil, err
-	}
-
-	return entry, decoder.Decode(raw)
-}
-
-func DecodeConfigEntryFromJSON(data []byte) (ConfigEntry, error) {
-	var raw map[string]interface{}
-	if err := json.Unmarshal(data, &raw); err != nil {
-		return nil, err
-	}
-
-	return DecodeConfigEntry(raw)
-}
-
-func decodeConfigEntrySlice(raw []map[string]interface{}) ([]ConfigEntry, error) {
-	var entries []ConfigEntry
-	for _, rawEntry := range raw {
-		entry, err := DecodeConfigEntry(rawEntry)
-		if err != nil {
-			return nil, err
-		}
-		entries = append(entries, entry)
-	}
-	return entries, nil
-}
-
-// ConfigEntries can be used to query the Config endpoints
-type ConfigEntries struct {
-	c *Client
-}
-
-// Config returns a handle to the Config endpoints
-func (c *Client) ConfigEntries() *ConfigEntries {
-	return &ConfigEntries{c}
-}
-
-func (conf *ConfigEntries) Get(kind string, name string, q *QueryOptions) (ConfigEntry, *QueryMeta, error) {
-	if kind == "" || name == "" {
-		return nil, nil, fmt.Errorf("Both kind and name parameters must not be empty")
-	}
-
-	entry, err := makeConfigEntry(kind, name)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s/%s", kind, name))
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(conf.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	if err := decodeBody(resp, entry); err != nil {
-		return nil, nil, err
-	}
-
-	return entry, qm, nil
-}
-
-func (conf *ConfigEntries) List(kind string, q *QueryOptions) ([]ConfigEntry, *QueryMeta, error) {
-	if kind == "" {
-		return nil, nil, fmt.Errorf("The kind parameter must not be empty")
-	}
-
-	r := conf.c.newRequest("GET", fmt.Sprintf("/v1/config/%s", kind))
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(conf.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var raw []map[string]interface{}
-	if err := decodeBody(resp, &raw); err != nil {
-		return nil, nil, err
-	}
-
-	entries, err := decodeConfigEntrySlice(raw)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	return entries, qm, nil
-}
-
-func (conf *ConfigEntries) Set(entry ConfigEntry, w *WriteOptions) (bool, *WriteMeta, error) {
-	return conf.set(entry, nil, w)
-}
-
-func (conf *ConfigEntries) CAS(entry ConfigEntry, index uint64, w *WriteOptions) (bool, *WriteMeta, error) {
-	return conf.set(entry, map[string]string{"cas": strconv.FormatUint(index, 10)}, w)
-}
-
-func (conf *ConfigEntries) set(entry ConfigEntry, params map[string]string, w *WriteOptions) (bool, *WriteMeta, error) {
-	r := conf.c.newRequest("PUT", "/v1/config")
-	r.setWriteOptions(w)
-	for param, value := range params {
-		r.params.Set(param, value)
-	}
-	r.obj = entry
-	rtt, resp, err := requireOK(conf.c.doRequest(r))
-	if err != nil {
-		return false, nil, err
-	}
-	defer resp.Body.Close()
-
-	var buf bytes.Buffer
-	if _, err := io.Copy(&buf, resp.Body); err != nil {
-		return false, nil, fmt.Errorf("Failed to read response: %v", err)
-	}
-	res := strings.Contains(buf.String(), "true")
-
-	wm := &WriteMeta{RequestTime: rtt}
-	return res, wm, nil
-}
-
-func (conf *ConfigEntries) Delete(kind string, name string, w *WriteOptions) (*WriteMeta, error) {
-	if kind == "" || name == "" {
-		return nil, fmt.Errorf("Both kind and name parameters must not be empty")
-	}
-
-	r := conf.c.newRequest("DELETE", fmt.Sprintf("/v1/config/%s/%s", kind, name))
-	r.setWriteOptions(w)
-	rtt, resp, err := requireOK(conf.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	resp.Body.Close()
-	wm := &WriteMeta{RequestTime: rtt}
-	return wm, nil
-}
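
A brief sketch of the config-entry workflow removed above: write a service-defaults entry with Set, then read it back by kind and name. The entry name "voltha-api" and protocol "grpc" are illustrative:

    package main

    import (
        "fmt"
        "log"

        consulapi "github.com/hashicorp/consul/api"
    )

    func main() {
        client, err := consulapi.NewClient(consulapi.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }
        entries := client.ConfigEntries()

        // Upsert a service-defaults entry; Set reports success as a bool.
        ok, _, err := entries.Set(&consulapi.ServiceConfigEntry{
            Kind:     consulapi.ServiceDefaults,
            Name:     "voltha-api",
            Protocol: "grpc",
        }, nil)
        if err != nil || !ok {
            log.Fatalf("set failed: ok=%v err=%v", ok, err)
        }

        // Read the same entry back; Get decodes into the matching typed struct.
        entry, _, err := entries.Get(consulapi.ServiceDefaults, "voltha-api", nil)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("got entry:", entry.GetKind(), entry.GetName())
    }
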
diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go
deleted file mode 100644
index 77acfbd..0000000
--- a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go
+++ /dev/null
@@ -1,200 +0,0 @@
-package api
-
-import (
-	"encoding/json"
-	"time"
-)
-
-type ServiceRouterConfigEntry struct {
-	Kind string
-	Name string
-
-	Routes []ServiceRoute `json:",omitempty"`
-
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-func (e *ServiceRouterConfigEntry) GetKind() string        { return e.Kind }
-func (e *ServiceRouterConfigEntry) GetName() string        { return e.Name }
-func (e *ServiceRouterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
-func (e *ServiceRouterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
-
-type ServiceRoute struct {
-	Match       *ServiceRouteMatch       `json:",omitempty"`
-	Destination *ServiceRouteDestination `json:",omitempty"`
-}
-
-type ServiceRouteMatch struct {
-	HTTP *ServiceRouteHTTPMatch `json:",omitempty"`
-}
-
-type ServiceRouteHTTPMatch struct {
-	PathExact  string `json:",omitempty"`
-	PathPrefix string `json:",omitempty"`
-	PathRegex  string `json:",omitempty"`
-
-	Header     []ServiceRouteHTTPMatchHeader     `json:",omitempty"`
-	QueryParam []ServiceRouteHTTPMatchQueryParam `json:",omitempty"`
-	Methods    []string                          `json:",omitempty"`
-}
-
-type ServiceRouteHTTPMatchHeader struct {
-	Name    string
-	Present bool   `json:",omitempty"`
-	Exact   string `json:",omitempty"`
-	Prefix  string `json:",omitempty"`
-	Suffix  string `json:",omitempty"`
-	Regex   string `json:",omitempty"`
-	Invert  bool   `json:",omitempty"`
-}
-
-type ServiceRouteHTTPMatchQueryParam struct {
-	Name    string
-	Present bool   `json:",omitempty"`
-	Exact   string `json:",omitempty"`
-	Regex   string `json:",omitempty"`
-}
-
-type ServiceRouteDestination struct {
-	Service               string        `json:",omitempty"`
-	ServiceSubset         string        `json:",omitempty"`
-	Namespace             string        `json:",omitempty"`
-	PrefixRewrite         string        `json:",omitempty"`
-	RequestTimeout        time.Duration `json:",omitempty"`
-	NumRetries            uint32        `json:",omitempty"`
-	RetryOnConnectFailure bool          `json:",omitempty"`
-	RetryOnStatusCodes    []uint32      `json:",omitempty"`
-}
-
-func (e *ServiceRouteDestination) MarshalJSON() ([]byte, error) {
-	type Alias ServiceRouteDestination
-	exported := &struct {
-		RequestTimeout string `json:",omitempty"`
-		*Alias
-	}{
-		RequestTimeout: e.RequestTimeout.String(),
-		Alias:          (*Alias)(e),
-	}
-	if e.RequestTimeout == 0 {
-		exported.RequestTimeout = ""
-	}
-
-	return json.Marshal(exported)
-}
-
-func (e *ServiceRouteDestination) UnmarshalJSON(data []byte) error {
-	type Alias ServiceRouteDestination
-	aux := &struct {
-		RequestTimeout string
-		*Alias
-	}{
-		Alias: (*Alias)(e),
-	}
-	if err := json.Unmarshal(data, &aux); err != nil {
-		return err
-	}
-	var err error
-	if aux.RequestTimeout != "" {
-		if e.RequestTimeout, err = time.ParseDuration(aux.RequestTimeout); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-type ServiceSplitterConfigEntry struct {
-	Kind string
-	Name string
-
-	Splits []ServiceSplit `json:",omitempty"`
-
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-func (e *ServiceSplitterConfigEntry) GetKind() string        { return e.Kind }
-func (e *ServiceSplitterConfigEntry) GetName() string        { return e.Name }
-func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
-func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
-
-type ServiceSplit struct {
-	Weight        float32
-	Service       string `json:",omitempty"`
-	ServiceSubset string `json:",omitempty"`
-	Namespace     string `json:",omitempty"`
-}
-
-type ServiceResolverConfigEntry struct {
-	Kind string
-	Name string
-
-	DefaultSubset  string                             `json:",omitempty"`
-	Subsets        map[string]ServiceResolverSubset   `json:",omitempty"`
-	Redirect       *ServiceResolverRedirect           `json:",omitempty"`
-	Failover       map[string]ServiceResolverFailover `json:",omitempty"`
-	ConnectTimeout time.Duration                      `json:",omitempty"`
-
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-func (e *ServiceResolverConfigEntry) MarshalJSON() ([]byte, error) {
-	type Alias ServiceResolverConfigEntry
-	exported := &struct {
-		ConnectTimeout string `json:",omitempty"`
-		*Alias
-	}{
-		ConnectTimeout: e.ConnectTimeout.String(),
-		Alias:          (*Alias)(e),
-	}
-	if e.ConnectTimeout == 0 {
-		exported.ConnectTimeout = ""
-	}
-
-	return json.Marshal(exported)
-}
-
-func (e *ServiceResolverConfigEntry) UnmarshalJSON(data []byte) error {
-	type Alias ServiceResolverConfigEntry
-	aux := &struct {
-		ConnectTimeout string
-		*Alias
-	}{
-		Alias: (*Alias)(e),
-	}
-	if err := json.Unmarshal(data, &aux); err != nil {
-		return err
-	}
-	var err error
-	if aux.ConnectTimeout != "" {
-		if e.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (e *ServiceResolverConfigEntry) GetKind() string        { return e.Kind }
-func (e *ServiceResolverConfigEntry) GetName() string        { return e.Name }
-func (e *ServiceResolverConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex }
-func (e *ServiceResolverConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex }
-
-type ServiceResolverSubset struct {
-	Filter      string `json:",omitempty"`
-	OnlyPassing bool   `json:",omitempty"`
-}
-
-type ServiceResolverRedirect struct {
-	Service       string `json:",omitempty"`
-	ServiceSubset string `json:",omitempty"`
-	Namespace     string `json:",omitempty"`
-	Datacenter    string `json:",omitempty"`
-}
-
-type ServiceResolverFailover struct {
-	Service       string   `json:",omitempty"`
-	ServiceSubset string   `json:",omitempty"`
-	Namespace     string   `json:",omitempty"`
-	Datacenters   []string `json:",omitempty"`
-}
diff --git a/vendor/github.com/hashicorp/consul/api/connect.go b/vendor/github.com/hashicorp/consul/api/connect.go
deleted file mode 100644
index a40d1e2..0000000
--- a/vendor/github.com/hashicorp/consul/api/connect.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package api
-
-// Connect can be used to work with endpoints related to Connect, the
-// feature for securely connecting services within Consul.
-type Connect struct {
-	c *Client
-}
-
-// Connect returns a handle to the connect-related endpoints
-func (c *Client) Connect() *Connect {
-	return &Connect{c}
-}
diff --git a/vendor/github.com/hashicorp/consul/api/connect_ca.go b/vendor/github.com/hashicorp/consul/api/connect_ca.go
deleted file mode 100644
index 600a3e0..0000000
--- a/vendor/github.com/hashicorp/consul/api/connect_ca.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package api
-
-import (
-	"fmt"
-	"time"
-
-	"github.com/mitchellh/mapstructure"
-)
-
-// CAConfig is the structure for the Connect CA configuration.
-type CAConfig struct {
-	// Provider is the CA provider implementation to use.
-	Provider string
-
-	// Configuration is arbitrary configuration for the provider. This
-	// should only contain primitive values and containers (such as lists
-	// and maps).
-	Config map[string]interface{}
-
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-// CommonCAProviderConfig is the common options available to all CA providers.
-type CommonCAProviderConfig struct {
-	LeafCertTTL      time.Duration
-	SkipValidate     bool
-	CSRMaxPerSecond  float32
-	CSRMaxConcurrent int
-}
-
-// ConsulCAProviderConfig is the config for the built-in Consul CA provider.
-type ConsulCAProviderConfig struct {
-	CommonCAProviderConfig `mapstructure:",squash"`
-
-	PrivateKey     string
-	RootCert       string
-	RotationPeriod time.Duration
-}
-
-// ParseConsulCAConfig takes a raw config map and returns a parsed
-// ConsulCAProviderConfig.
-func ParseConsulCAConfig(raw map[string]interface{}) (*ConsulCAProviderConfig, error) {
-	var config ConsulCAProviderConfig
-	decodeConf := &mapstructure.DecoderConfig{
-		DecodeHook:       mapstructure.StringToTimeDurationHookFunc(),
-		Result:           &config,
-		WeaklyTypedInput: true,
-	}
-
-	decoder, err := mapstructure.NewDecoder(decodeConf)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := decoder.Decode(raw); err != nil {
-		return nil, fmt.Errorf("error decoding config: %s", err)
-	}
-
-	return &config, nil
-}
-
-// CARootList is the structure for the results of listing roots.
-type CARootList struct {
-	ActiveRootID string
-	TrustDomain  string
-	Roots        []*CARoot
-}
-
-// CARoot represents a root CA certificate that is trusted.
-type CARoot struct {
-	// ID is a globally unique ID (UUID) representing this CA root.
-	ID string
-
-	// Name is a human-friendly name for this CA root. This value is
-	// opaque to Consul and is not used for anything internally.
-	Name string
-
-	// RootCertPEM is the PEM-encoded public certificate.
-	RootCertPEM string `json:"RootCert"`
-
-	// Active is true if this is the current active CA. This must only
-	// be true for exactly one CA. For any method that modifies roots in the
-	// state store, tests should be written to verify that multiple roots
-	// cannot be active.
-	Active bool
-
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-// LeafCert is a certificate that has been issued by a Connect CA.
-type LeafCert struct {
-	// SerialNumber is the unique serial number for this certificate.
-	// This is encoded in standard hex separated by :.
-	SerialNumber string
-
-	// CertPEM and PrivateKeyPEM are the PEM-encoded certificate and private
-	// key for that cert, respectively. This should not be stored in the
-	// state store, but is present in the sign API response.
-	CertPEM       string `json:",omitempty"`
-	PrivateKeyPEM string `json:",omitempty"`
-
-	// Service is the name of the service for which the cert was issued.
-	// ServiceURI is the cert URI value.
-	Service    string
-	ServiceURI string
-
-	// ValidAfter and ValidBefore are the validity periods for the
-	// certificate.
-	ValidAfter  time.Time
-	ValidBefore time.Time
-
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-// CARoots queries the list of available roots.
-func (h *Connect) CARoots(q *QueryOptions) (*CARootList, *QueryMeta, error) {
-	r := h.c.newRequest("GET", "/v1/connect/ca/roots")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out CARootList
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return &out, qm, nil
-}
-
-// CAGetConfig returns the current CA configuration.
-func (h *Connect) CAGetConfig(q *QueryOptions) (*CAConfig, *QueryMeta, error) {
-	r := h.c.newRequest("GET", "/v1/connect/ca/configuration")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out CAConfig
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return &out, qm, nil
-}
-
-// CASetConfig sets the current CA configuration.
-func (h *Connect) CASetConfig(conf *CAConfig, q *WriteOptions) (*WriteMeta, error) {
-	r := h.c.newRequest("PUT", "/v1/connect/ca/configuration")
-	r.setWriteOptions(q)
-	r.obj = conf
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{}
-	wm.RequestTime = rtt
-	return wm, nil
-}
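
A sketch of reading the Connect CA configuration removed above and decoding the raw Config map with ParseConsulCAConfig. The check against the provider name "consul" (the built-in provider) is an assumption made for illustration:

    package main

    import (
        "fmt"
        "log"

        consulapi "github.com/hashicorp/consul/api"
    )

    func main() {
        client, err := consulapi.NewClient(consulapi.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        // Fetch the active CA configuration; Config is an untyped map that can
        // be decoded into the provider-specific struct.
        conf, _, err := client.Connect().CAGetConfig(nil)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("provider:", conf.Provider)

        if conf.Provider == "consul" {
            parsed, err := consulapi.ParseConsulCAConfig(conf.Config)
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println("leaf cert TTL:", parsed.LeafCertTTL)
        }
    }
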
diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go
deleted file mode 100644
index d25cb84..0000000
--- a/vendor/github.com/hashicorp/consul/api/connect_intention.go
+++ /dev/null
@@ -1,309 +0,0 @@
-package api
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"time"
-)
-
-// Intention defines an intention for the Connect Service Graph. This defines
-// the allowed or denied behavior of a connection between two services using
-// Connect.
-type Intention struct {
-	// ID is the UUID-based ID for the intention, always generated by Consul.
-	ID string
-
-	// Description is a human-friendly description of this intention.
-	// It is opaque to Consul and is only stored and transferred in API
-	// requests.
-	Description string
-
-	// SourceNS, SourceName are the namespace and name, respectively, of
-	// the source service. Either of these may be the wildcard "*", but only
-	// the full value can be a wildcard. Partial wildcards are not allowed.
-	// The source may also be a non-Consul service, as specified by SourceType.
-	//
-	// DestinationNS, DestinationName is the same, but for the destination
-	// service. The same rules apply. The destination is always a Consul
-	// service.
-	SourceNS, SourceName           string
-	DestinationNS, DestinationName string
-
-	// SourceType is the type of the value for the source.
-	SourceType IntentionSourceType
-
-	// Action is whether this is a whitelist or blacklist intention.
-	Action IntentionAction
-
-	// DefaultAddr, DefaultPort of the local listening proxy (if any) to
-	// make this connection.
-	DefaultAddr string
-	DefaultPort int
-
-	// Meta is arbitrary metadata associated with the intention. This is
-	// opaque to Consul but is served in API responses.
-	Meta map[string]string
-
-	// Precedence is the order in which the intention will be applied, with
-	// larger numbers being applied first. This is a read-only field; it is
-	// recomputed on any intention update.
-	Precedence int
-
-	// CreatedAt and UpdatedAt keep track of when this record was created
-	// or modified.
-	CreatedAt, UpdatedAt time.Time
-
-	// Hash of the contents of the intention
-	//
-	// This is needed mainly for replication purposes. When replicating from
-	// one DC to another, keeping the content Hash allows us to detect
-	// content changes more efficiently than checking every single field.
-	Hash []byte
-
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-// String returns human-friendly output describing this intention.
-func (i *Intention) String() string {
-	return fmt.Sprintf("%s => %s (%s)",
-		i.SourceString(),
-		i.DestinationString(),
-		i.Action)
-}
-
-// SourceString returns the namespace/name format for the source, or
-// just "name" if the namespace is the default namespace.
-func (i *Intention) SourceString() string {
-	return i.partString(i.SourceNS, i.SourceName)
-}
-
-// DestinationString returns the namespace/name format for the destination, or
-// just "name" if the namespace is the default namespace.
-func (i *Intention) DestinationString() string {
-	return i.partString(i.DestinationNS, i.DestinationName)
-}
-
-func (i *Intention) partString(ns, n string) string {
-	// For now we omit the default namespace from the output. In the future
-	// we might want to look at this and show this in a multi-namespace world.
-	if ns != "" && ns != IntentionDefaultNamespace {
-		n = ns + "/" + n
-	}
-
-	return n
-}
-
-// IntentionDefaultNamespace is the default namespace value.
-const IntentionDefaultNamespace = "default"
-
-// IntentionAction is the action that the intention represents. This
-// can be "allow" or "deny" to whitelist or blacklist intentions.
-type IntentionAction string
-
-const (
-	IntentionActionAllow IntentionAction = "allow"
-	IntentionActionDeny  IntentionAction = "deny"
-)
-
-// IntentionSourceType is the type of the source within an intention.
-type IntentionSourceType string
-
-const (
-	// IntentionSourceConsul is a service within the Consul catalog.
-	IntentionSourceConsul IntentionSourceType = "consul"
-)
-
-// IntentionMatch are the arguments for the intention match API.
-type IntentionMatch struct {
-	By    IntentionMatchType
-	Names []string
-}
-
-// IntentionMatchType is the target for a match request. For example,
-// matching by source will look for all intentions that match the given
-// source value.
-type IntentionMatchType string
-
-const (
-	IntentionMatchSource      IntentionMatchType = "source"
-	IntentionMatchDestination IntentionMatchType = "destination"
-)
-
-// IntentionCheck are the arguments for the intention check API. For
-// more documentation see the IntentionCheck function.
-type IntentionCheck struct {
-	// Source and Destination are the source and destination values to
-	// check. The destination is always a Consul service, but the source
-	// may be other values as defined by the SourceType.
-	Source, Destination string
-
-	// SourceType is the type of the value for the source.
-	SourceType IntentionSourceType
-}
-
-// Intentions returns the list of intentions.
-func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) {
-	r := h.c.newRequest("GET", "/v1/connect/intentions")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out []*Intention
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// IntentionGet retrieves a single intention.
-func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMeta, error) {
-	r := h.c.newRequest("GET", "/v1/connect/intentions/"+id)
-	r.setQueryOptions(q)
-	rtt, resp, err := h.c.doRequest(r)
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	if resp.StatusCode == 404 {
-		return nil, qm, nil
-	} else if resp.StatusCode != 200 {
-		var buf bytes.Buffer
-		io.Copy(&buf, resp.Body)
-		return nil, nil, fmt.Errorf(
-			"Unexpected response %d: %s", resp.StatusCode, buf.String())
-	}
-
-	var out Intention
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return &out, qm, nil
-}
-
-// IntentionDelete deletes a single intention.
-func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error) {
-	r := h.c.newRequest("DELETE", "/v1/connect/intentions/"+id)
-	r.setWriteOptions(q)
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &WriteMeta{}
-	qm.RequestTime = rtt
-
-	return qm, nil
-}
-
-// IntentionMatch returns the list of intentions that match a given source
-// or destination. The returned intentions are ordered by precedence where
-// result[0] is the highest precedence (if that matches, then that rule overrides
-// all other rules).
-//
-// Matching can be done for multiple names at the same time. The resulting
-// map is keyed by the given names. Casing is preserved.
-func (h *Connect) IntentionMatch(args *IntentionMatch, q *QueryOptions) (map[string][]*Intention, *QueryMeta, error) {
-	r := h.c.newRequest("GET", "/v1/connect/intentions/match")
-	r.setQueryOptions(q)
-	r.params.Set("by", string(args.By))
-	for _, name := range args.Names {
-		r.params.Add("name", name)
-	}
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out map[string][]*Intention
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// IntentionCheck returns whether a given source/destination would be allowed
-// or not given the current set of intentions and the configuration of Consul.
-func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, *QueryMeta, error) {
-	r := h.c.newRequest("GET", "/v1/connect/intentions/check")
-	r.setQueryOptions(q)
-	r.params.Set("source", args.Source)
-	r.params.Set("destination", args.Destination)
-	if args.SourceType != "" {
-		r.params.Set("source-type", string(args.SourceType))
-	}
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return false, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out struct{ Allowed bool }
-	if err := decodeBody(resp, &out); err != nil {
-		return false, nil, err
-	}
-	return out.Allowed, qm, nil
-}
-
-// IntentionCreate will create a new intention. The ID in the given
-// structure must be empty and a generated ID will be returned on
-// success.
-func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *WriteMeta, error) {
-	r := c.c.newRequest("POST", "/v1/connect/intentions")
-	r.setWriteOptions(q)
-	r.obj = ixn
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return "", nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{}
-	wm.RequestTime = rtt
-
-	var out struct{ ID string }
-	if err := decodeBody(resp, &out); err != nil {
-		return "", nil, err
-	}
-	return out.ID, wm, nil
-}
-
-// IntentionUpdate will update an existing intention. The ID in the given
-// structure must be non-empty.
-func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta, error) {
-	r := c.c.newRequest("PUT", "/v1/connect/intentions/"+ixn.ID)
-	r.setWriteOptions(q)
-	r.obj = ixn
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{}
-	wm.RequestTime = rtt
-	return wm, nil
-}
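
A sketch of the intention endpoints removed above: create an allow intention and then check whether a source/destination pair would be permitted. The service names "voltctl" and "voltha-api" are illustrative assumptions:

    package main

    import (
        "fmt"
        "log"

        consulapi "github.com/hashicorp/consul/api"
    )

    func main() {
        client, err := consulapi.NewClient(consulapi.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }
        connect := client.Connect()

        // Create an allow intention; the ID must be left empty and is generated
        // by Consul.
        id, _, err := connect.IntentionCreate(&consulapi.Intention{
            SourceName:      "voltctl",
            DestinationName: "voltha-api",
            Action:          consulapi.IntentionActionAllow,
        }, nil)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("created intention", id)

        // Ask whether a connection from source to destination would be allowed
        // under the current set of intentions.
        allowed, _, err := connect.IntentionCheck(&consulapi.IntentionCheck{
            Source:      "voltctl",
            Destination: "voltha-api",
        }, nil)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("allowed:", allowed)
    }
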
diff --git a/vendor/github.com/hashicorp/consul/api/coordinate.go b/vendor/github.com/hashicorp/consul/api/coordinate.go
deleted file mode 100644
index 53318f1..0000000
--- a/vendor/github.com/hashicorp/consul/api/coordinate.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package api
-
-import (
-	"github.com/hashicorp/serf/coordinate"
-)
-
-// CoordinateEntry represents a node and its associated network coordinate.
-type CoordinateEntry struct {
-	Node    string
-	Segment string
-	Coord   *coordinate.Coordinate
-}
-
-// CoordinateDatacenterMap has the coordinates for servers in a given datacenter
-// and area. Network coordinates are only compatible within the same area.
-type CoordinateDatacenterMap struct {
-	Datacenter  string
-	AreaID      string
-	Coordinates []CoordinateEntry
-}
-
-// Coordinate can be used to query the coordinate endpoints
-type Coordinate struct {
-	c *Client
-}
-
-// Coordinate returns a handle to the coordinate endpoints
-func (c *Client) Coordinate() *Coordinate {
-	return &Coordinate{c}
-}
-
-// Datacenters is used to return the coordinates of all the servers in the WAN
-// pool.
-func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
-	r := c.c.newRequest("GET", "/v1/coordinate/datacenters")
-	_, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out []*CoordinateDatacenterMap
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// Nodes is used to return the coordinates of all the nodes in the LAN pool.
-func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
-	r := c.c.newRequest("GET", "/v1/coordinate/nodes")
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out []*CoordinateEntry
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// Update inserts or updates the LAN coordinate of a node.
-func (c *Coordinate) Update(coord *CoordinateEntry, q *WriteOptions) (*WriteMeta, error) {
-	r := c.c.newRequest("PUT", "/v1/coordinate/update")
-	r.setWriteOptions(q)
-	r.obj = coord
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{}
-	wm.RequestTime = rtt
-
-	return wm, nil
-}
-
-// Node is used to return the coordinates of a single node in the LAN pool.
-func (c *Coordinate) Node(node string, q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
-	r := c.c.newRequest("GET", "/v1/coordinate/node/"+node)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out []*CoordinateEntry
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/debug.go b/vendor/github.com/hashicorp/consul/api/debug.go
deleted file mode 100644
index 2380468..0000000
--- a/vendor/github.com/hashicorp/consul/api/debug.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package api
-
-import (
-	"fmt"
-	"io/ioutil"
-	"strconv"
-)
-
-// Debug can be used to query the /debug/pprof endpoints to gather
-// profiling information about the target agent.
-//
-// The agent must have enable_debug set to true for profiling to be enabled
-// and for these endpoints to function.
-type Debug struct {
-	c *Client
-}
-
-// Debug returns a handle that exposes the internal debug endpoints.
-func (c *Client) Debug() *Debug {
-	return &Debug{c}
-}
-
-// Heap returns a pprof heap dump
-func (d *Debug) Heap() ([]byte, error) {
-	r := d.c.newRequest("GET", "/debug/pprof/heap")
-	_, resp, err := d.c.doRequest(r)
-	if err != nil {
-		return nil, fmt.Errorf("error making request: %s", err)
-	}
-	defer resp.Body.Close()
-
-	// We return a raw response because we're just passing through a response
-	// from the pprof handlers
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return nil, fmt.Errorf("error decoding body: %s", err)
-	}
-
-	return body, nil
-}
-
-// Profile returns a pprof CPU profile for the specified number of seconds
-func (d *Debug) Profile(seconds int) ([]byte, error) {
-	r := d.c.newRequest("GET", "/debug/pprof/profile")
-
-	// Capture a profile for the specified number of seconds
-	r.params.Set("seconds", strconv.Itoa(seconds))
-
-	_, resp, err := d.c.doRequest(r)
-	if err != nil {
-		return nil, fmt.Errorf("error making request: %s", err)
-	}
-	defer resp.Body.Close()
-
-	// We return a raw response because we're just passing through a response
-	// from the pprof handlers
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return nil, fmt.Errorf("error decoding body: %s", err)
-	}
-
-	return body, nil
-}
-
-// Trace returns an execution trace
-func (d *Debug) Trace(seconds int) ([]byte, error) {
-	r := d.c.newRequest("GET", "/debug/pprof/trace")
-
-	// Capture a trace for the specified number of seconds
-	r.params.Set("seconds", strconv.Itoa(seconds))
-
-	_, resp, err := d.c.doRequest(r)
-	if err != nil {
-		return nil, fmt.Errorf("error making request: %s", err)
-	}
-	defer resp.Body.Close()
-
-	// We return a raw response because we're just passing through a response
-	// from the pprof handlers
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return nil, fmt.Errorf("error decoding body: %s", err)
-	}
-
-	return body, nil
-}
-
-// Goroutine returns a pprof goroutine profile
-func (d *Debug) Goroutine() ([]byte, error) {
-	r := d.c.newRequest("GET", "/debug/pprof/goroutine")
-
-	_, resp, err := d.c.doRequest(r)
-	if err != nil {
-		return nil, fmt.Errorf("error making request: %s", err)
-	}
-	defer resp.Body.Close()
-
-	// We return a raw response because we're just passing through a response
-	// from the pprof handlers
-	body, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return nil, fmt.Errorf("error decoding body: %s", err)
-	}
-
-	return body, nil
-}
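
A small sketch of the debug endpoints removed above: capture a CPU profile and save the raw pprof payload. This assumes enable_debug is set to true on the target agent; the output file name is illustrative:

    package main

    import (
        "io/ioutil"
        "log"

        consulapi "github.com/hashicorp/consul/api"
    )

    func main() {
        client, err := consulapi.NewClient(consulapi.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        // Capture a 10 second CPU profile; the body is the raw pprof response
        // passed through from the agent.
        profile, err := client.Debug().Profile(10)
        if err != nil {
            log.Fatal(err)
        }
        if err := ioutil.WriteFile("consul-cpu.pprof", profile, 0644); err != nil {
            log.Fatal(err)
        }
    }
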
diff --git a/vendor/github.com/hashicorp/consul/api/discovery_chain.go b/vendor/github.com/hashicorp/consul/api/discovery_chain.go
deleted file mode 100644
index 407a3b0..0000000
--- a/vendor/github.com/hashicorp/consul/api/discovery_chain.go
+++ /dev/null
@@ -1,230 +0,0 @@
-package api
-
-import (
-	"encoding/json"
-	"fmt"
-	"time"
-)
-
-// DiscoveryChain can be used to query the discovery-chain endpoints
-type DiscoveryChain struct {
-	c *Client
-}
-
-// DiscoveryChain returns a handle to the discovery-chain endpoints
-func (c *Client) DiscoveryChain() *DiscoveryChain {
-	return &DiscoveryChain{c}
-}
-
-func (d *DiscoveryChain) Get(name string, opts *DiscoveryChainOptions, q *QueryOptions) (*DiscoveryChainResponse, *QueryMeta, error) {
-	if name == "" {
-		return nil, nil, fmt.Errorf("Name parameter must not be empty")
-	}
-
-	method := "GET"
-	if opts != nil && opts.requiresPOST() {
-		method = "POST"
-	}
-
-	r := d.c.newRequest(method, fmt.Sprintf("/v1/discovery-chain/%s", name))
-	r.setQueryOptions(q)
-
-	if opts != nil {
-		if opts.EvaluateInDatacenter != "" {
-			r.params.Set("compile-dc", opts.EvaluateInDatacenter)
-		}
-		// TODO(namespaces): handle possible EvaluateInNamespace here
-	}
-
-	if method == "POST" {
-		r.obj = opts
-	}
-
-	rtt, resp, err := requireOK(d.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out DiscoveryChainResponse
-
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-
-	return &out, qm, nil
-}
-
-type DiscoveryChainOptions struct {
-	EvaluateInDatacenter string `json:"-"`
-
-	// OverrideMeshGateway allows for the mesh gateway setting to be overridden
-	// for any resolver in the compiled chain.
-	OverrideMeshGateway MeshGatewayConfig `json:",omitempty"`
-
-	// OverrideProtocol allows for the final protocol for the chain to be
-	// altered.
-	//
-	// - If the chain ordinarily would be TCP and an L7 protocol is passed here
-	// the chain will not include Routers or Splitters.
-	//
-	// - If the chain ordinarily would be L7 and TCP is passed here the chain
-	// will not include Routers or Splitters.
-	OverrideProtocol string `json:",omitempty"`
-
-	// OverrideConnectTimeout allows for the ConnectTimeout setting to be
-	// overridden for any resolver in the compiled chain.
-	OverrideConnectTimeout time.Duration `json:",omitempty"`
-}
-
-func (o *DiscoveryChainOptions) requiresPOST() bool {
-	if o == nil {
-		return false
-	}
-	return o.OverrideMeshGateway.Mode != "" ||
-		o.OverrideProtocol != "" ||
-		o.OverrideConnectTimeout != 0
-}
-
-type DiscoveryChainResponse struct {
-	Chain *CompiledDiscoveryChain
-}
-
-type CompiledDiscoveryChain struct {
-	ServiceName string
-	Namespace   string
-	Datacenter  string
-
-	// CustomizationHash is a unique hash of any data that affects the
-	// compilation of the discovery chain other than config entries or the
-	// name/namespace/datacenter evaluation criteria.
-	//
-	// If set, this value should be used to prefix/suffix any generated load
-	// balancer data plane objects to avoid sharing customized and
-	// non-customized versions.
-	CustomizationHash string
-
-	// Protocol is the overall protocol shared by everything in the chain.
-	Protocol string
-
-	// StartNode is the first key into the Nodes map that should be followed
-	// when walking the discovery chain.
-	StartNode string
-
-	// Nodes contains all nodes available for traversal in the chain keyed by a
-	// unique name.  You can walk this by starting with StartNode.
-	//
-	// NOTE: The names should be treated as opaque values and are only
-	// guaranteed to be consistent within a single compilation.
-	Nodes map[string]*DiscoveryGraphNode
-
-	// Targets is a list of all targets used in this chain.
-	//
-	// NOTE: The names should be treated as opaque values and are only
-	// guaranteed to be consistent within a single compilation.
-	Targets map[string]*DiscoveryTarget
-}
-
-const (
-	DiscoveryGraphNodeTypeRouter   = "router"
-	DiscoveryGraphNodeTypeSplitter = "splitter"
-	DiscoveryGraphNodeTypeResolver = "resolver"
-)
-
-// DiscoveryGraphNode is a single node in the compiled discovery chain.
-type DiscoveryGraphNode struct {
-	Type string
-	Name string // this is NOT necessarily a service
-
-	// fields for Type==router
-	Routes []*DiscoveryRoute
-
-	// fields for Type==splitter
-	Splits []*DiscoverySplit
-
-	// fields for Type==resolver
-	Resolver *DiscoveryResolver
-}
-
-// compiled form of ServiceRoute
-type DiscoveryRoute struct {
-	Definition *ServiceRoute
-	NextNode   string
-}
-
-// compiled form of ServiceSplit
-type DiscoverySplit struct {
-	Weight   float32
-	NextNode string
-}
-
-// compiled form of ServiceResolverConfigEntry
-type DiscoveryResolver struct {
-	Default        bool
-	ConnectTimeout time.Duration
-	Target         string
-	Failover       *DiscoveryFailover
-}
-
-func (r *DiscoveryResolver) MarshalJSON() ([]byte, error) {
-	type Alias DiscoveryResolver
-	exported := &struct {
-		ConnectTimeout string `json:",omitempty"`
-		*Alias
-	}{
-		ConnectTimeout: r.ConnectTimeout.String(),
-		Alias:          (*Alias)(r),
-	}
-	if r.ConnectTimeout == 0 {
-		exported.ConnectTimeout = ""
-	}
-
-	return json.Marshal(exported)
-}
-
-func (r *DiscoveryResolver) UnmarshalJSON(data []byte) error {
-	type Alias DiscoveryResolver
-	aux := &struct {
-		ConnectTimeout string
-		*Alias
-	}{
-		Alias: (*Alias)(r),
-	}
-	if err := json.Unmarshal(data, &aux); err != nil {
-		return err
-	}
-	var err error
-	if aux.ConnectTimeout != "" {
-		if r.ConnectTimeout, err = time.ParseDuration(aux.ConnectTimeout); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// compiled form of ServiceResolverFailover
-type DiscoveryFailover struct {
-	Targets []string
-}
-
-// DiscoveryTarget represents all of the inputs necessary to use a resolver
-// config entry to execute a catalog query to generate a list of service
-// instances during discovery.
-type DiscoveryTarget struct {
-	ID string
-
-	Service       string
-	ServiceSubset string
-	Namespace     string
-	Datacenter    string
-
-	MeshGateway MeshGatewayConfig
-	Subset      ServiceResolverSubset
-	External    bool
-	SNI         string
-	Name        string
-}
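
A sketch of walking a compiled discovery chain as described above: start at StartNode and follow NextNode references until a resolver terminates the path. The service name "voltha-api" is illustrative, and only the first route/split is followed for brevity:

    package main

    import (
        "fmt"
        "log"

        consulapi "github.com/hashicorp/consul/api"
    )

    func main() {
        client, err := consulapi.NewClient(consulapi.DefaultConfig())
        if err != nil {
            log.Fatal(err)
        }

        resp, _, err := client.DiscoveryChain().Get("voltha-api", nil, nil)
        if err != nil {
            log.Fatal(err)
        }
        chain := resp.Chain

        // Walk the graph starting at StartNode; resolver nodes terminate a path.
        node := chain.Nodes[chain.StartNode]
        for node != nil {
            fmt.Println("visiting", node.Type, node.Name)
            switch node.Type {
            case consulapi.DiscoveryGraphNodeTypeResolver:
                fmt.Println("target:", node.Resolver.Target)
                node = nil
            case consulapi.DiscoveryGraphNodeTypeSplitter:
                // Follow the first split only, for illustration.
                node = chain.Nodes[node.Splits[0].NextNode]
            case consulapi.DiscoveryGraphNodeTypeRouter:
                // Follow the first route only, for illustration.
                node = chain.Nodes[node.Routes[0].NextNode]
            default:
                node = nil
            }
        }
    }
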
diff --git a/vendor/github.com/hashicorp/consul/api/event.go b/vendor/github.com/hashicorp/consul/api/event.go
deleted file mode 100644
index 85b5b06..0000000
--- a/vendor/github.com/hashicorp/consul/api/event.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package api
-
-import (
-	"bytes"
-	"strconv"
-)
-
-// Event can be used to query the Event endpoints
-type Event struct {
-	c *Client
-}
-
-// UserEvent represents an event that was fired by the user
-type UserEvent struct {
-	ID            string
-	Name          string
-	Payload       []byte
-	NodeFilter    string
-	ServiceFilter string
-	TagFilter     string
-	Version       int
-	LTime         uint64
-}
-
-// Event returns a handle to the event endpoints
-func (c *Client) Event() *Event {
-	return &Event{c}
-}
-
-// Fire is used to fire a new user event. Only the Name, Payload and Filters
-// are respected. This returns the ID or an associated error. Cross DC requests
-// are supported.
-func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
-	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
-	r.setWriteOptions(q)
-	if params.NodeFilter != "" {
-		r.params.Set("node", params.NodeFilter)
-	}
-	if params.ServiceFilter != "" {
-		r.params.Set("service", params.ServiceFilter)
-	}
-	if params.TagFilter != "" {
-		r.params.Set("tag", params.TagFilter)
-	}
-	if params.Payload != nil {
-		r.body = bytes.NewReader(params.Payload)
-	}
-
-	rtt, resp, err := requireOK(e.c.doRequest(r))
-	if err != nil {
-		return "", nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-	var out UserEvent
-	if err := decodeBody(resp, &out); err != nil {
-		return "", nil, err
-	}
-	return out.ID, wm, nil
-}
-
-// List is used to get the most recent events an agent has received.
-// This list can be optionally filtered by the name. This endpoint supports
-// quasi-blocking queries. The index is not monotonic, nor does it provide
-// LastContact or KnownLeader.
-func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
-	r := e.c.newRequest("GET", "/v1/event/list")
-	r.setQueryOptions(q)
-	if name != "" {
-		r.params.Set("name", name)
-	}
-	rtt, resp, err := requireOK(e.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var entries []*UserEvent
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-// IDToIndex is a bit of a hack. This simulates the index generation to
-// convert an event ID into a WaitIndex.
-func (e *Event) IDToIndex(uuid string) uint64 {
-	lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
-	upper := uuid[19:23] + uuid[24:36]
-	lowVal, err := strconv.ParseUint(lower, 16, 64)
-	if err != nil {
-		panic("Failed to convert " + lower)
-	}
-	highVal, err := strconv.ParseUint(upper, 16, 64)
-	if err != nil {
-		panic("Failed to convert " + upper)
-	}
-	return lowVal ^ highVal
-}
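
For reviewers less familiar with the vendored Consul client removed here, this is roughly how the event endpoints above are used. The agent address (api.DefaultConfig), event name and payload are illustrative assumptions, not part of this change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Assumes a local Consul agent on the default address (127.0.0.1:8500).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Fire a user event; only Name, Payload and the filters are sent.
	id, _, err := client.Event().Fire(&api.UserEvent{
		Name:    "deploy",
		Payload: []byte("v1.5.0"),
	}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("fired event", id)

	// List the most recent events the agent has received, filtered by name.
	events, _, err := client.Event().List("deploy", nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, ev := range events {
		fmt.Printf("%s %s -> %s\n", ev.ID, ev.Name, string(ev.Payload))
	}
}
```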
diff --git a/vendor/github.com/hashicorp/consul/api/go.mod b/vendor/github.com/hashicorp/consul/api/go.mod
deleted file mode 100644
index 78fe8a3..0000000
--- a/vendor/github.com/hashicorp/consul/api/go.mod
+++ /dev/null
@@ -1,16 +0,0 @@
-module github.com/hashicorp/consul/api
-
-go 1.12
-
-replace github.com/hashicorp/consul/sdk => ../sdk
-
-require (
-	github.com/hashicorp/consul/sdk v0.2.0
-	github.com/hashicorp/go-cleanhttp v0.5.1
-	github.com/hashicorp/go-rootcerts v1.0.0
-	github.com/hashicorp/go-uuid v1.0.1
-	github.com/hashicorp/serf v0.8.2
-	github.com/mitchellh/mapstructure v1.1.2
-	github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c
-	github.com/stretchr/testify v1.3.0
-)
diff --git a/vendor/github.com/hashicorp/consul/api/go.sum b/vendor/github.com/hashicorp/consul/api/go.sum
deleted file mode 100644
index 01591f9..0000000
--- a/vendor/github.com/hashicorp/consul/api/go.sum
+++ /dev/null
@@ -1,78 +0,0 @@
-github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I=
-github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
-github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
-github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
-github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
-github.com/hashicorp/consul/sdk v0.2.0 h1:GWFYFmry/k4b1hEoy7kSkmU8e30GAyI4VZHk0fRxeL4=
-github.com/hashicorp/consul/sdk v0.2.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
-github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
-github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
-github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0=
-github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
-github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
-github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
-github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o=
-github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI=
-github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
-github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
-github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
-github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
-github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90=
-github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
-github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
-github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
-github.com/hashicorp/memberlist v0.1.3 h1:EmmoJme1matNzb+hMpDuR/0sbJSUisxyqBGG676r31M=
-github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
-github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0=
-github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
-github.com/miekg/dns v1.0.14 h1:9jZdLNd/P4+SfEJ0TNyxYpsK8N4GtfylBLqtbYN1sbA=
-github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
-github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
-github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0=
-github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
-github.com/mitchellh/go-testing-interface v1.0.0 h1:fzU/JVNcaqHQEcVFAKeR41fkiLdIPrefOvVG1VZ96U0=
-github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
-github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
-github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
-github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
-github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c h1:Lgl0gzECD8GnQ5QCWA8o6BtfL6mDH5rQgM4/fX3avOs=
-github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
-github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
-github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
-github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
-github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3 h1:KYQXGkl6vs02hK7pK4eIbw0NpNPedieTSTEiJ//bwGs=
-golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc h1:a3CU5tJYVj92DY2LaA1kUkrsqD5/3mLDhx2NcNqyW+0=
-golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw=
-golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5 h1:x6r4Jo0KNzOOzYd8lbcRsqjuqEASK6ob3auvWYM4/8U=
-golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/hashicorp/consul/api/health.go b/vendor/github.com/hashicorp/consul/api/health.go
deleted file mode 100644
index 9faf6b6..0000000
--- a/vendor/github.com/hashicorp/consul/api/health.go
+++ /dev/null
@@ -1,330 +0,0 @@
-package api
-
-import (
-	"encoding/json"
-	"fmt"
-	"strings"
-	"time"
-)
-
-const (
-	// HealthAny is special, and is used as a wild card,
-	// not as a specific state.
-	HealthAny      = "any"
-	HealthPassing  = "passing"
-	HealthWarning  = "warning"
-	HealthCritical = "critical"
-	HealthMaint    = "maintenance"
-)
-
-const (
-	// NodeMaint is the special key set by a node in maintenance mode.
-	NodeMaint = "_node_maintenance"
-
-	// ServiceMaintPrefix is the prefix for a service in maintenance mode.
-	ServiceMaintPrefix = "_service_maintenance:"
-)
-
-// HealthCheck is used to represent a single check
-type HealthCheck struct {
-	Node        string
-	CheckID     string
-	Name        string
-	Status      string
-	Notes       string
-	Output      string
-	ServiceID   string
-	ServiceName string
-	ServiceTags []string
-
-	Definition HealthCheckDefinition
-
-	CreateIndex uint64
-	ModifyIndex uint64
-}
-
-// HealthCheckDefinition is used to store the details about
-// a health check's execution.
-type HealthCheckDefinition struct {
-	HTTP                                   string
-	Header                                 map[string][]string
-	Method                                 string
-	TLSSkipVerify                          bool
-	TCP                                    string
-	IntervalDuration                       time.Duration `json:"-"`
-	TimeoutDuration                        time.Duration `json:"-"`
-	DeregisterCriticalServiceAfterDuration time.Duration `json:"-"`
-
-	// DEPRECATED in Consul 1.4.1. Use the above time.Duration fields instead.
-	Interval                       ReadableDuration
-	Timeout                        ReadableDuration
-	DeregisterCriticalServiceAfter ReadableDuration
-}
-
-func (d *HealthCheckDefinition) MarshalJSON() ([]byte, error) {
-	type Alias HealthCheckDefinition
-	out := &struct {
-		Interval                       string
-		Timeout                        string
-		DeregisterCriticalServiceAfter string
-		*Alias
-	}{
-		Interval:                       d.Interval.String(),
-		Timeout:                        d.Timeout.String(),
-		DeregisterCriticalServiceAfter: d.DeregisterCriticalServiceAfter.String(),
-		Alias:                          (*Alias)(d),
-	}
-
-	if d.IntervalDuration != 0 {
-		out.Interval = d.IntervalDuration.String()
-	} else if d.Interval != 0 {
-		out.Interval = d.Interval.String()
-	}
-	if d.TimeoutDuration != 0 {
-		out.Timeout = d.TimeoutDuration.String()
-	} else if d.Timeout != 0 {
-		out.Timeout = d.Timeout.String()
-	}
-	if d.DeregisterCriticalServiceAfterDuration != 0 {
-		out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfterDuration.String()
-	} else if d.DeregisterCriticalServiceAfter != 0 {
-		out.DeregisterCriticalServiceAfter = d.DeregisterCriticalServiceAfter.String()
-	}
-
-	return json.Marshal(out)
-}
-
-func (d *HealthCheckDefinition) UnmarshalJSON(data []byte) error {
-	type Alias HealthCheckDefinition
-	aux := &struct {
-		Interval                       string
-		Timeout                        string
-		DeregisterCriticalServiceAfter string
-		*Alias
-	}{
-		Alias: (*Alias)(d),
-	}
-	if err := json.Unmarshal(data, &aux); err != nil {
-		return err
-	}
-
-	// Parse the values into both the time.Duration and old ReadableDuration fields.
-	var err error
-	if aux.Interval != "" {
-		if d.IntervalDuration, err = time.ParseDuration(aux.Interval); err != nil {
-			return err
-		}
-		d.Interval = ReadableDuration(d.IntervalDuration)
-	}
-	if aux.Timeout != "" {
-		if d.TimeoutDuration, err = time.ParseDuration(aux.Timeout); err != nil {
-			return err
-		}
-		d.Timeout = ReadableDuration(d.TimeoutDuration)
-	}
-	if aux.DeregisterCriticalServiceAfter != "" {
-		if d.DeregisterCriticalServiceAfterDuration, err = time.ParseDuration(aux.DeregisterCriticalServiceAfter); err != nil {
-			return err
-		}
-		d.DeregisterCriticalServiceAfter = ReadableDuration(d.DeregisterCriticalServiceAfterDuration)
-	}
-	return nil
-}
-
-// HealthChecks is a collection of HealthCheck structs.
-type HealthChecks []*HealthCheck
-
-// AggregatedStatus returns the "best" status for the list of health checks.
-// Because a given entry may have many service and node-level health checks
-// attached, this function determines the best representative of the status as
-// a single string using the following heuristic:
-//
-//  maintenance > critical > warning > passing
-//
-func (c HealthChecks) AggregatedStatus() string {
-	var passing, warning, critical, maintenance bool
-	for _, check := range c {
-		id := string(check.CheckID)
-		if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
-			maintenance = true
-			continue
-		}
-
-		switch check.Status {
-		case HealthPassing:
-			passing = true
-		case HealthWarning:
-			warning = true
-		case HealthCritical:
-			critical = true
-		default:
-			return ""
-		}
-	}
-
-	switch {
-	case maintenance:
-		return HealthMaint
-	case critical:
-		return HealthCritical
-	case warning:
-		return HealthWarning
-	case passing:
-		return HealthPassing
-	default:
-		return HealthPassing
-	}
-}
-
-// ServiceEntry is used for the health service endpoint
-type ServiceEntry struct {
-	Node    *Node
-	Service *AgentService
-	Checks  HealthChecks
-}
-
-// Health can be used to query the Health endpoints
-type Health struct {
-	c *Client
-}
-
-// Health returns a handle to the health endpoints
-func (c *Client) Health() *Health {
-	return &Health{c}
-}
-
-// Node is used to query for checks belonging to a given node
-func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
-	r := h.c.newRequest("GET", "/v1/health/node/"+node)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out HealthChecks
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// Checks is used to return the checks associated with a service
-func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
-	r := h.c.newRequest("GET", "/v1/health/checks/"+service)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out HealthChecks
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// Service is used to query health information along with service info
-// for a given service. It can optionally do server-side filtering on a tag
-// or nodes with passing health checks only.
-func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
-	var tags []string
-	if tag != "" {
-		tags = []string{tag}
-	}
-	return h.service(service, tags, passingOnly, q, false)
-}
-
-func (h *Health) ServiceMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
-	return h.service(service, tags, passingOnly, q, false)
-}
-
-// Connect is equivalent to Service except that it will only return services
-// which are Connect-enabled and will return the connection address for Connect
-// clients to use, which may be a proxy in front of the named service. If
-// passingOnly is true only instances where both the service and any proxy are
-// healthy will be returned.
-func (h *Health) Connect(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
-	var tags []string
-	if tag != "" {
-		tags = []string{tag}
-	}
-	return h.service(service, tags, passingOnly, q, true)
-}
-
-func (h *Health) ConnectMultipleTags(service string, tags []string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
-	return h.service(service, tags, passingOnly, q, true)
-}
-
-func (h *Health) service(service string, tags []string, passingOnly bool, q *QueryOptions, connect bool) ([]*ServiceEntry, *QueryMeta, error) {
-	path := "/v1/health/service/" + service
-	if connect {
-		path = "/v1/health/connect/" + service
-	}
-	r := h.c.newRequest("GET", path)
-	r.setQueryOptions(q)
-	if len(tags) > 0 {
-		for _, tag := range tags {
-			r.params.Add("tag", tag)
-		}
-	}
-	if passingOnly {
-		r.params.Set(HealthPassing, "1")
-	}
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out []*ServiceEntry
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// State is used to retrieve all the checks in a given state.
-// The wildcard "any" state can also be used for all checks.
-func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
-	switch state {
-	case HealthAny:
-	case HealthWarning:
-	case HealthCritical:
-	case HealthPassing:
-	default:
-		return nil, nil, fmt.Errorf("Unsupported state: %v", state)
-	}
-	r := h.c.newRequest("GET", "/v1/health/state/"+state)
-	r.setQueryOptions(q)
-	rtt, resp, err := requireOK(h.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	var out HealthChecks
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
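
The health endpoints above back the common "give me healthy instances of service X" query. A small hedged sketch, assuming a local agent and an illustrative service name "web":

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Health().Service corresponds to GET /v1/health/service/<name>;
	// passingOnly=true returns only instances whose checks are passing.
	entries, _, err := client.Health().Service("web", "", true, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Printf("%s:%d -> %s\n", e.Service.Address, e.Service.Port,
			e.Checks.AggregatedStatus())
	}
}
```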
diff --git a/vendor/github.com/hashicorp/consul/api/kv.go b/vendor/github.com/hashicorp/consul/api/kv.go
deleted file mode 100644
index bd45a06..0000000
--- a/vendor/github.com/hashicorp/consul/api/kv.go
+++ /dev/null
@@ -1,286 +0,0 @@
-package api
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"net/http"
-	"strconv"
-	"strings"
-)
-
-// KVPair is used to represent a single K/V entry
-type KVPair struct {
-	// Key is the name of the key. It is also part of the URL path when accessed
-	// via the API.
-	Key string
-
-	// CreateIndex holds the index corresponding to the creation of this KVPair. This
-	// is a read-only field.
-	CreateIndex uint64
-
-	// ModifyIndex is used for the Check-And-Set operations and can also be fed
-	// back into the WaitIndex of the QueryOptions in order to perform blocking
-	// queries.
-	ModifyIndex uint64
-
-	// LockIndex holds the index corresponding to a lock on this key, if any. This
-	// is a read-only field.
-	LockIndex uint64
-
-	// Flags are any user-defined flags on the key. It is up to the implementer
-	// to check these values, since Consul does not treat them specially.
-	Flags uint64
-
-	// Value is the value for the key. This can be any value, but it will be
-	// base64 encoded upon transport.
-	Value []byte
-
-	// Session is a string representing the ID of the session. Any other
-	// interactions with this key over the same session must specify the same
-	// session ID.
-	Session string
-}
-
-// KVPairs is a list of KVPair objects
-type KVPairs []*KVPair
-
-// KV is used to manipulate the K/V API
-type KV struct {
-	c *Client
-}
-
-// KV is used to return a handle to the K/V apis
-func (c *Client) KV() *KV {
-	return &KV{c}
-}
-
-// Get is used to lookup a single key. The returned pointer
-// to the KVPair will be nil if the key does not exist.
-func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
-	resp, qm, err := k.getInternal(key, nil, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	if resp == nil {
-		return nil, qm, nil
-	}
-	defer resp.Body.Close()
-
-	var entries []*KVPair
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	if len(entries) > 0 {
-		return entries[0], qm, nil
-	}
-	return nil, qm, nil
-}
-
-// List is used to lookup all keys under a prefix
-func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
-	resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	if resp == nil {
-		return nil, qm, nil
-	}
-	defer resp.Body.Close()
-
-	var entries []*KVPair
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-// Keys is used to list all the keys under a prefix. Optionally,
-// a separator can be used to limit the responses.
-func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
-	params := map[string]string{"keys": ""}
-	if separator != "" {
-		params["separator"] = separator
-	}
-	resp, qm, err := k.getInternal(prefix, params, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	if resp == nil {
-		return nil, qm, nil
-	}
-	defer resp.Body.Close()
-
-	var entries []string
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
-	r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/"))
-	r.setQueryOptions(q)
-	for param, val := range params {
-		r.params.Set(param, val)
-	}
-	rtt, resp, err := k.c.doRequest(r)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	if resp.StatusCode == 404 {
-		resp.Body.Close()
-		return nil, qm, nil
-	} else if resp.StatusCode != 200 {
-		resp.Body.Close()
-		return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
-	}
-	return resp, qm, nil
-}
-
-// Put is used to write a new value. Only the
-// Key, Flags and Value are respected.
-func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
-	params := make(map[string]string, 1)
-	if p.Flags != 0 {
-		params["flags"] = strconv.FormatUint(p.Flags, 10)
-	}
-	_, wm, err := k.put(p.Key, params, p.Value, q)
-	return wm, err
-}
-
-// CAS is used for a Check-And-Set operation. The Key,
-// ModifyIndex, Flags and Value are respected. Returns true
-// on success or false on failures.
-func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
-	params := make(map[string]string, 2)
-	if p.Flags != 0 {
-		params["flags"] = strconv.FormatUint(p.Flags, 10)
-	}
-	params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
-	return k.put(p.Key, params, p.Value, q)
-}
-
-// Acquire is used for a lock acquisition operation. The Key,
-// Flags, Value and Session are respected. Returns true
-// on success or false on failures.
-func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
-	params := make(map[string]string, 2)
-	if p.Flags != 0 {
-		params["flags"] = strconv.FormatUint(p.Flags, 10)
-	}
-	params["acquire"] = p.Session
-	return k.put(p.Key, params, p.Value, q)
-}
-
-// Release is used for a lock release operation. The Key,
-// Flags, Value and Session are respected. Returns true
-// on success or false on failures.
-func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
-	params := make(map[string]string, 2)
-	if p.Flags != 0 {
-		params["flags"] = strconv.FormatUint(p.Flags, 10)
-	}
-	params["release"] = p.Session
-	return k.put(p.Key, params, p.Value, q)
-}
-
-func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
-	if len(key) > 0 && key[0] == '/' {
-		return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key)
-	}
-
-	r := k.c.newRequest("PUT", "/v1/kv/"+key)
-	r.setWriteOptions(q)
-	for param, val := range params {
-		r.params.Set(param, val)
-	}
-	r.body = bytes.NewReader(body)
-	rtt, resp, err := requireOK(k.c.doRequest(r))
-	if err != nil {
-		return false, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &WriteMeta{}
-	qm.RequestTime = rtt
-
-	var buf bytes.Buffer
-	if _, err := io.Copy(&buf, resp.Body); err != nil {
-		return false, nil, fmt.Errorf("Failed to read response: %v", err)
-	}
-	res := strings.Contains(buf.String(), "true")
-	return res, qm, nil
-}
-
-// Delete is used to delete a single key
-func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
-	_, qm, err := k.deleteInternal(key, nil, w)
-	return qm, err
-}
-
-// DeleteCAS is used for a Delete Check-And-Set operation. The Key
-// and ModifyIndex are respected. Returns true on success or false on failures.
-func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
-	params := map[string]string{
-		"cas": strconv.FormatUint(p.ModifyIndex, 10),
-	}
-	return k.deleteInternal(p.Key, params, q)
-}
-
-// DeleteTree is used to delete all keys under a prefix
-func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
-	_, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w)
-	return qm, err
-}
-
-func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) {
-	r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/"))
-	r.setWriteOptions(q)
-	for param, val := range params {
-		r.params.Set(param, val)
-	}
-	rtt, resp, err := requireOK(k.c.doRequest(r))
-	if err != nil {
-		return false, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &WriteMeta{}
-	qm.RequestTime = rtt
-
-	var buf bytes.Buffer
-	if _, err := io.Copy(&buf, resp.Body); err != nil {
-		return false, nil, fmt.Errorf("Failed to read response: %v", err)
-	}
-	res := strings.Contains(buf.String(), "true")
-	return res, qm, nil
-}
-
-// The Txn function has been deprecated from the KV object; please see the Txn
-// object for more information about Transactions.
-func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
-	var ops TxnOps
-	for _, op := range txn {
-		ops = append(ops, &TxnOp{KV: op})
-	}
-
-	respOk, txnResp, qm, err := k.c.txn(ops, q)
-	if err != nil {
-		return false, nil, nil, err
-	}
-
-	// Convert from the internal format.
-	kvResp := KVTxnResponse{
-		Errors: txnResp.Errors,
-	}
-	for _, result := range txnResp.Results {
-		kvResp.Results = append(kvResp.Results, result.KV)
-	}
-	return respOk, &kvResp, qm, nil
-}
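
The KV functions above map directly onto /v1/kv: Put and Get for plain writes and reads, CAS for optimistic concurrency keyed on ModifyIndex. A minimal sketch of that flow; the agent address, key and values are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	kv := client.KV()

	// Put writes the value; only Key, Flags and Value are respected.
	if _, err := kv.Put(&api.KVPair{Key: "service/voltha/config", Value: []byte("enabled")}, nil); err != nil {
		log.Fatal(err)
	}

	// Get returns a nil pair (not an error) when the key does not exist.
	pair, _, err := kv.Get("service/voltha/config", nil)
	if err != nil {
		log.Fatal(err)
	}
	if pair != nil {
		fmt.Printf("%s = %s (ModifyIndex %d)\n", pair.Key, pair.Value, pair.ModifyIndex)

		// CAS only succeeds if ModifyIndex still matches what we read.
		ok, _, err := kv.CAS(&api.KVPair{
			Key:         pair.Key,
			Value:       []byte("disabled"),
			ModifyIndex: pair.ModifyIndex,
		}, nil)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println("check-and-set succeeded:", ok)
	}
}
```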
diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go
deleted file mode 100644
index 82339cb..0000000
--- a/vendor/github.com/hashicorp/consul/api/lock.go
+++ /dev/null
@@ -1,386 +0,0 @@
-package api
-
-import (
-	"fmt"
-	"sync"
-	"time"
-)
-
-const (
-	// DefaultLockSessionName is the Session Name we assign if none is provided
-	DefaultLockSessionName = "Consul API Lock"
-
-	// DefaultLockSessionTTL is the default session TTL if no Session is provided
-	// when creating a new Lock. This is used because we do not have another
-	// other check to depend upon.
-	DefaultLockSessionTTL = "15s"
-
-	// DefaultLockWaitTime is how long we block for at a time to check if lock
-	// acquisition is possible. This affects the minimum time it takes to cancel
-	// a Lock acquisition.
-	DefaultLockWaitTime = 15 * time.Second
-
-	// DefaultLockRetryTime is how long we wait after a failed lock acquisition
-	// before attempting to do the lock again. This is so that once a lock-delay
-	// is in effect, we do not hot loop retrying the acquisition.
-	DefaultLockRetryTime = 5 * time.Second
-
-	// DefaultMonitorRetryTime is how long we wait after a failed monitor check
-	// of a lock (500 response code). This allows the monitor to ride out brief
-	// periods of unavailability, subject to the MonitorRetries setting in the
-	// lock options which is by default set to 0, disabling this feature. This
-	// affects locks and semaphores.
-	DefaultMonitorRetryTime = 2 * time.Second
-
-	// LockFlagValue is a magic flag we set to indicate a key
-	// is being used for a lock. It is used to detect a potential
-	// conflict with a semaphore.
-	LockFlagValue = 0x2ddccbc058a50c18
-)
-
-var (
-	// ErrLockHeld is returned if we attempt to double lock
-	ErrLockHeld = fmt.Errorf("Lock already held")
-
-	// ErrLockNotHeld is returned if we attempt to unlock a lock
-	// that we do not hold.
-	ErrLockNotHeld = fmt.Errorf("Lock not held")
-
-	// ErrLockInUse is returned if we attempt to destroy a lock
-	// that is in use.
-	ErrLockInUse = fmt.Errorf("Lock in use")
-
-	// ErrLockConflict is returned if the flags on a key
-	// used for a lock do not match expectation
-	ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
-)
-
-// Lock is used to implement client-side leader election. It follows the
-// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html.
-type Lock struct {
-	c    *Client
-	opts *LockOptions
-
-	isHeld       bool
-	sessionRenew chan struct{}
-	lockSession  string
-	l            sync.Mutex
-}
-
-// LockOptions is used to parameterize the Lock behavior.
-type LockOptions struct {
-	Key              string        // Must be set and have write permissions
-	Value            []byte        // Optional, value to associate with the lock
-	Session          string        // Optional, created if not specified
-	SessionOpts      *SessionEntry // Optional, options to use when creating a session
-	SessionName      string        // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given)
-	SessionTTL       string        // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given)
-	MonitorRetries   int           // Optional, defaults to 0 which means no retries
-	MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
-	LockWaitTime     time.Duration // Optional, defaults to DefaultLockWaitTime
-	LockTryOnce      bool          // Optional, defaults to false which means try forever
-}
-
-// LockKey returns a handle to a lock struct which can be used
-// to acquire and release the mutex. The key used must have
-// write permissions.
-func (c *Client) LockKey(key string) (*Lock, error) {
-	opts := &LockOptions{
-		Key: key,
-	}
-	return c.LockOpts(opts)
-}
-
-// LockOpts returns a handle to a lock struct which can be used
-// to acquire and release the mutex. The key used must have
-// write permissions.
-func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
-	if opts.Key == "" {
-		return nil, fmt.Errorf("missing key")
-	}
-	if opts.SessionName == "" {
-		opts.SessionName = DefaultLockSessionName
-	}
-	if opts.SessionTTL == "" {
-		opts.SessionTTL = DefaultLockSessionTTL
-	} else {
-		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
-			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
-		}
-	}
-	if opts.MonitorRetryTime == 0 {
-		opts.MonitorRetryTime = DefaultMonitorRetryTime
-	}
-	if opts.LockWaitTime == 0 {
-		opts.LockWaitTime = DefaultLockWaitTime
-	}
-	l := &Lock{
-		c:    c,
-		opts: opts,
-	}
-	return l, nil
-}
-
-// Lock attempts to acquire the lock and blocks while doing so.
-// Providing a non-nil stopCh can be used to abort the lock attempt.
-// Returns a channel that is closed if our lock is lost or an error.
-// This channel could be closed at any time due to session invalidation,
-// communication errors, operator intervention, etc. It is NOT safe to
-// assume that the lock is held until Unlock() unless the Session is specifically
-// created without any associated health checks. By default Consul sessions
-// prefer liveness over safety and an application must be able to handle
-// the lock being lost.
-func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
-	// Hold the lock as we try to acquire
-	l.l.Lock()
-	defer l.l.Unlock()
-
-	// Check if we already hold the lock
-	if l.isHeld {
-		return nil, ErrLockHeld
-	}
-
-	// Check if we need to create a session first
-	l.lockSession = l.opts.Session
-	if l.lockSession == "" {
-		s, err := l.createSession()
-		if err != nil {
-			return nil, fmt.Errorf("failed to create session: %v", err)
-		}
-
-		l.sessionRenew = make(chan struct{})
-		l.lockSession = s
-		session := l.c.Session()
-		go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew)
-
-		// If we fail to acquire the lock, cleanup the session
-		defer func() {
-			if !l.isHeld {
-				close(l.sessionRenew)
-				l.sessionRenew = nil
-			}
-		}()
-	}
-
-	// Setup the query options
-	kv := l.c.KV()
-	qOpts := &QueryOptions{
-		WaitTime: l.opts.LockWaitTime,
-	}
-
-	start := time.Now()
-	attempts := 0
-WAIT:
-	// Check if we should quit
-	select {
-	case <-stopCh:
-		return nil, nil
-	default:
-	}
-
-	// Handle the one-shot mode.
-	if l.opts.LockTryOnce && attempts > 0 {
-		elapsed := time.Since(start)
-		if elapsed > l.opts.LockWaitTime {
-			return nil, nil
-		}
-
-		// Query wait time should not exceed the lock wait time
-		qOpts.WaitTime = l.opts.LockWaitTime - elapsed
-	}
-	attempts++
-
-	// Look for an existing lock, blocking until not taken
-	pair, meta, err := kv.Get(l.opts.Key, qOpts)
-	if err != nil {
-		return nil, fmt.Errorf("failed to read lock: %v", err)
-	}
-	if pair != nil && pair.Flags != LockFlagValue {
-		return nil, ErrLockConflict
-	}
-	locked := false
-	if pair != nil && pair.Session == l.lockSession {
-		goto HELD
-	}
-	if pair != nil && pair.Session != "" {
-		qOpts.WaitIndex = meta.LastIndex
-		goto WAIT
-	}
-
-	// Try to acquire the lock
-	pair = l.lockEntry(l.lockSession)
-	locked, _, err = kv.Acquire(pair, nil)
-	if err != nil {
-		return nil, fmt.Errorf("failed to acquire lock: %v", err)
-	}
-
-	// Handle the case of not getting the lock
-	if !locked {
-		// Determine why the lock failed
-		qOpts.WaitIndex = 0
-		pair, meta, err = kv.Get(l.opts.Key, qOpts)
-		if pair != nil && pair.Session != "" {
-			//If the session is not null, this means that a wait can safely happen
-			//using a long poll
-			qOpts.WaitIndex = meta.LastIndex
-			goto WAIT
-		} else {
-			// If the session is empty and the lock failed to acquire, then it means
-			// a lock-delay is in effect and a timed wait must be used
-			select {
-			case <-time.After(DefaultLockRetryTime):
-				goto WAIT
-			case <-stopCh:
-				return nil, nil
-			}
-		}
-	}
-
-HELD:
-	// Watch to ensure we maintain leadership
-	leaderCh := make(chan struct{})
-	go l.monitorLock(l.lockSession, leaderCh)
-
-	// Set that we own the lock
-	l.isHeld = true
-
-	// Locked! All done
-	return leaderCh, nil
-}
-
-// Unlock releases the lock. It is an error to call this
-// if the lock is not currently held.
-func (l *Lock) Unlock() error {
-	// Hold the lock as we try to release
-	l.l.Lock()
-	defer l.l.Unlock()
-
-	// Ensure the lock is actually held
-	if !l.isHeld {
-		return ErrLockNotHeld
-	}
-
-	// Set that we no longer own the lock
-	l.isHeld = false
-
-	// Stop the session renew
-	if l.sessionRenew != nil {
-		defer func() {
-			close(l.sessionRenew)
-			l.sessionRenew = nil
-		}()
-	}
-
-	// Get the lock entry, and clear the lock session
-	lockEnt := l.lockEntry(l.lockSession)
-	l.lockSession = ""
-
-	// Release the lock explicitly
-	kv := l.c.KV()
-	_, _, err := kv.Release(lockEnt, nil)
-	if err != nil {
-		return fmt.Errorf("failed to release lock: %v", err)
-	}
-	return nil
-}
-
-// Destroy is used to cleanup the lock entry. It is not necessary
-// to invoke. It will fail if the lock is in use.
-func (l *Lock) Destroy() error {
-	// Hold the lock as we try to release
-	l.l.Lock()
-	defer l.l.Unlock()
-
-	// Check if we already hold the lock
-	if l.isHeld {
-		return ErrLockHeld
-	}
-
-	// Look for an existing lock
-	kv := l.c.KV()
-	pair, _, err := kv.Get(l.opts.Key, nil)
-	if err != nil {
-		return fmt.Errorf("failed to read lock: %v", err)
-	}
-
-	// Nothing to do if the lock does not exist
-	if pair == nil {
-		return nil
-	}
-
-	// Check for possible flag conflict
-	if pair.Flags != LockFlagValue {
-		return ErrLockConflict
-	}
-
-	// Check if it is in use
-	if pair.Session != "" {
-		return ErrLockInUse
-	}
-
-	// Attempt the delete
-	didRemove, _, err := kv.DeleteCAS(pair, nil)
-	if err != nil {
-		return fmt.Errorf("failed to remove lock: %v", err)
-	}
-	if !didRemove {
-		return ErrLockInUse
-	}
-	return nil
-}
-
-// createSession is used to create a new managed session
-func (l *Lock) createSession() (string, error) {
-	session := l.c.Session()
-	se := l.opts.SessionOpts
-	if se == nil {
-		se = &SessionEntry{
-			Name: l.opts.SessionName,
-			TTL:  l.opts.SessionTTL,
-		}
-	}
-	id, _, err := session.Create(se, nil)
-	if err != nil {
-		return "", err
-	}
-	return id, nil
-}
-
-// lockEntry returns a formatted KVPair for the lock
-func (l *Lock) lockEntry(session string) *KVPair {
-	return &KVPair{
-		Key:     l.opts.Key,
-		Value:   l.opts.Value,
-		Session: session,
-		Flags:   LockFlagValue,
-	}
-}
-
-// monitorLock is a long-running routine to monitor lock ownership.
-// It closes the stopCh if we lose our leadership.
-func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
-	defer close(stopCh)
-	kv := l.c.KV()
-	opts := &QueryOptions{RequireConsistent: true}
-WAIT:
-	retries := l.opts.MonitorRetries
-RETRY:
-	pair, meta, err := kv.Get(l.opts.Key, opts)
-	if err != nil {
-		// If configured we can try to ride out a brief Consul unavailability
-		// by doing retries. Note that we have to attempt the retry in a non-
-		// blocking fashion so that we have a clean place to reset the retry
-		// counter if service is restored.
-		if retries > 0 && IsRetryableError(err) {
-			time.Sleep(l.opts.MonitorRetryTime)
-			retries--
-			opts.WaitIndex = 0
-			goto RETRY
-		}
-		return
-	}
-	if pair != nil && pair.Session == session {
-		opts.WaitIndex = meta.LastIndex
-		goto WAIT
-	}
-}
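
Lock above implements client-side leader election on top of sessions plus KV acquire/release. A hedged sketch of typical usage, assuming a local agent and an illustrative lock key:

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	lock, err := client.LockKey("service/voltha/leader")
	if err != nil {
		log.Fatal(err)
	}

	// Lock blocks until acquired (or until stopCh fires); the returned
	// channel is closed if leadership is lost, e.g. via session invalidation.
	lostCh, err := lock.Lock(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer lock.Unlock()

	select {
	case <-lostCh:
		log.Println("lock lost; stop acting as leader")
	default:
		log.Println("holding lock; safe to do leader-only work")
	}
}
```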
diff --git a/vendor/github.com/hashicorp/consul/api/operator.go b/vendor/github.com/hashicorp/consul/api/operator.go
deleted file mode 100644
index 079e224..0000000
--- a/vendor/github.com/hashicorp/consul/api/operator.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package api
-
-// Operator can be used to perform low-level operator tasks for Consul.
-type Operator struct {
-	c *Client
-}
-
-// Operator returns a handle to the operator endpoints.
-func (c *Client) Operator() *Operator {
-	return &Operator{c}
-}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_area.go b/vendor/github.com/hashicorp/consul/api/operator_area.go
deleted file mode 100644
index 5cf7e49..0000000
--- a/vendor/github.com/hashicorp/consul/api/operator_area.go
+++ /dev/null
@@ -1,194 +0,0 @@
-package api
-
-// The /v1/operator/area endpoints are available only in Consul Enterprise and
-// interact with its network area subsystem. Network areas are used to link
-// together Consul servers in different Consul datacenters. With network areas,
-// Consul datacenters can be linked together in ways other than a fully-connected
-// mesh, as is required for Consul's WAN.
-
-import (
-	"net"
-	"time"
-)
-
-// Area defines a network area.
-type Area struct {
-	// ID is the identifier for an area (a UUID). This must be left empty
-	// when creating a new area.
-	ID string
-
-	// PeerDatacenter is the peer Consul datacenter that will make up the
-	// other side of this network area. Network areas always involve a pair
-	// of datacenters: the datacenter where the area was created, and the
-	// peer datacenter. This is required.
-	PeerDatacenter string
-
-	// RetryJoin specifies the addresses of Consul servers to join, such as
-	// IPs or hostnames with an optional port number. This is optional.
-	RetryJoin []string
-
-	// UseTLS specifies whether gossip over this area should be encrypted with TLS
-	// if possible.
-	UseTLS bool
-}
-
-// AreaJoinResponse is returned when a join occurs and gives the result for each
-// address.
-type AreaJoinResponse struct {
-	// The address that was joined.
-	Address string
-
-	// Whether or not the join was a success.
-	Joined bool
-
-	// If we couldn't join, this is the message with information.
-	Error string
-}
-
-// SerfMember is a generic structure for reporting information about members in
-// a Serf cluster. This is only used by the area endpoints right now, but this
-// could be expanded to other endpoints in the future.
-type SerfMember struct {
-	// ID is the node identifier (a UUID).
-	ID string
-
-	// Name is the node name.
-	Name string
-
-	// Addr has the IP address.
-	Addr net.IP
-
-	// Port is the RPC port.
-	Port uint16
-
-	// Datacenter is the DC name.
-	Datacenter string
-
-	// Role is "client", "server", or "unknown".
-	Role string
-
-	// Build has the version of the Consul agent.
-	Build string
-
-	// Protocol is the protocol of the Consul agent.
-	Protocol int
-
-	// Status is the Serf health status "none", "alive", "leaving", "left",
-	// or "failed".
-	Status string
-
-	// RTT is the estimated round trip time from the server handling the
-	// request to this member. This will be negative if no RTT estimate
-	// is available.
-	RTT time.Duration
-}
-
-// AreaCreate will create a new network area. The ID in the given structure must
-// be empty and a generated ID will be returned on success.
-func (op *Operator) AreaCreate(area *Area, q *WriteOptions) (string, *WriteMeta, error) {
-	r := op.c.newRequest("POST", "/v1/operator/area")
-	r.setWriteOptions(q)
-	r.obj = area
-	rtt, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return "", nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{}
-	wm.RequestTime = rtt
-
-	var out struct{ ID string }
-	if err := decodeBody(resp, &out); err != nil {
-		return "", nil, err
-	}
-	return out.ID, wm, nil
-}
-
-// AreaUpdate will update the configuration of the network area with the given ID.
-func (op *Operator) AreaUpdate(areaID string, area *Area, q *WriteOptions) (string, *WriteMeta, error) {
-	r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID)
-	r.setWriteOptions(q)
-	r.obj = area
-	rtt, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return "", nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{}
-	wm.RequestTime = rtt
-
-	var out struct{ ID string }
-	if err := decodeBody(resp, &out); err != nil {
-		return "", nil, err
-	}
-	return out.ID, wm, nil
-}
-
-// AreaGet returns a single network area.
-func (op *Operator) AreaGet(areaID string, q *QueryOptions) ([]*Area, *QueryMeta, error) {
-	var out []*Area
-	qm, err := op.c.query("/v1/operator/area/"+areaID, &out, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// AreaList returns all the available network areas.
-func (op *Operator) AreaList(q *QueryOptions) ([]*Area, *QueryMeta, error) {
-	var out []*Area
-	qm, err := op.c.query("/v1/operator/area", &out, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// AreaDelete deletes the given network area.
-func (op *Operator) AreaDelete(areaID string, q *WriteOptions) (*WriteMeta, error) {
-	r := op.c.newRequest("DELETE", "/v1/operator/area/"+areaID)
-	r.setWriteOptions(q)
-	rtt, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{}
-	wm.RequestTime = rtt
-	return wm, nil
-}
-
-// AreaJoin attempts to join the given set of join addresses to the given
-// network area. See the Area structure for details about join addresses.
-func (op *Operator) AreaJoin(areaID string, addresses []string, q *WriteOptions) ([]*AreaJoinResponse, *WriteMeta, error) {
-	r := op.c.newRequest("PUT", "/v1/operator/area/"+areaID+"/join")
-	r.setWriteOptions(q)
-	r.obj = addresses
-	rtt, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{}
-	wm.RequestTime = rtt
-
-	var out []*AreaJoinResponse
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, nil, err
-	}
-	return out, wm, nil
-}
-
-// AreaMembers lists the Serf information about the members in the given area.
-func (op *Operator) AreaMembers(areaID string, q *QueryOptions) ([]*SerfMember, *QueryMeta, error) {
-	var out []*SerfMember
-	qm, err := op.c.query("/v1/operator/area/"+areaID+"/members", &out, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
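
The operator area endpoints above are Consul Enterprise only. Assuming such a cluster, listing areas and their Serf members looks roughly like this:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// List every configured network area, then the members visible in each.
	areas, _, err := op.AreaList(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, area := range areas {
		fmt.Printf("area %s <-> %s\n", area.ID, area.PeerDatacenter)
		members, _, err := op.AreaMembers(area.ID, nil)
		if err != nil {
			log.Fatal(err)
		}
		for _, m := range members {
			fmt.Printf("  %s (%s) %s\n", m.Name, m.Datacenter, m.Status)
		}
	}
}
```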
diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
deleted file mode 100644
index b179406..0000000
--- a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package api
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// AutopilotConfiguration is used for querying/setting the Autopilot configuration.
-// Autopilot helps manage operator tasks related to Consul servers like removing
-// failed servers from the Raft quorum.
-type AutopilotConfiguration struct {
-	// CleanupDeadServers controls whether to remove dead servers from the Raft
-	// peer list when a new server joins
-	CleanupDeadServers bool
-
-	// LastContactThreshold is the limit on the amount of time a server can go
-	// without leader contact before being considered unhealthy.
-	LastContactThreshold *ReadableDuration
-
-	// MaxTrailingLogs is the number of entries in the Raft log that a server can
-	// be behind before being considered unhealthy.
-	MaxTrailingLogs uint64
-
-	// ServerStabilizationTime is the minimum amount of time a server must be
-	// in a stable, healthy state before it can be added to the cluster. Only
-	// applicable with Raft protocol version 3 or higher.
-	ServerStabilizationTime *ReadableDuration
-
-	// (Enterprise-only) RedundancyZoneTag is the node tag to use for separating
-	// servers into zones for redundancy. If left blank, this feature will be disabled.
-	RedundancyZoneTag string
-
-	// (Enterprise-only) DisableUpgradeMigration will disable Autopilot's upgrade migration
-	// strategy of waiting until enough newer-versioned servers have been added to the
-	// cluster before promoting them to voters.
-	DisableUpgradeMigration bool
-
-	// (Enterprise-only) UpgradeVersionTag is the node tag to use for version info when
-	// performing upgrade migrations. If left blank, the Consul version will be used.
-	UpgradeVersionTag string
-
-	// CreateIndex holds the index corresponding to the creation of this configuration.
-	// This is a read-only field.
-	CreateIndex uint64
-
-	// ModifyIndex will be set to the index of the last update when retrieving the
-	// Autopilot configuration. Resubmitting a configuration with
-	// AutopilotCASConfiguration will perform a check-and-set operation which ensures
-	// there hasn't been a subsequent update since the configuration was retrieved.
-	ModifyIndex uint64
-}
-
-// ServerHealth is the health (from the leader's point of view) of a server.
-type ServerHealth struct {
-	// ID is the raft ID of the server.
-	ID string
-
-	// Name is the node name of the server.
-	Name string
-
-	// Address is the address of the server.
-	Address string
-
-	// The status of the SerfHealth check for the server.
-	SerfStatus string
-
-	// Version is the Consul version of the server.
-	Version string
-
-	// Leader is whether this server is currently the leader.
-	Leader bool
-
-	// LastContact is the time since this node's last contact with the leader.
-	LastContact *ReadableDuration
-
-	// LastTerm is the highest leader term this server has a record of in its Raft log.
-	LastTerm uint64
-
-	// LastIndex is the last log index this server has a record of in its Raft log.
-	LastIndex uint64
-
-	// Healthy is whether or not the server is healthy according to the current
-	// Autopilot config.
-	Healthy bool
-
-	// Voter is whether this is a voting server.
-	Voter bool
-
-	// StableSince is the last time this server's Healthy value changed.
-	StableSince time.Time
-}
-
-// OperatorHealthReply is a representation of the overall health of the cluster
-type OperatorHealthReply struct {
-	// Healthy is true if all the servers in the cluster are healthy.
-	Healthy bool
-
-	// FailureTolerance is the number of healthy servers that could be lost without
-	// an outage occurring.
-	FailureTolerance int
-
-	// Servers holds the health of each server.
-	Servers []ServerHealth
-}
-
-// ReadableDuration is a duration type that is serialized to JSON in human readable format.
-type ReadableDuration time.Duration
-
-func NewReadableDuration(dur time.Duration) *ReadableDuration {
-	d := ReadableDuration(dur)
-	return &d
-}
-
-func (d *ReadableDuration) String() string {
-	return d.Duration().String()
-}
-
-func (d *ReadableDuration) Duration() time.Duration {
-	if d == nil {
-		return time.Duration(0)
-	}
-	return time.Duration(*d)
-}
-
-func (d *ReadableDuration) MarshalJSON() ([]byte, error) {
-	return []byte(fmt.Sprintf(`"%s"`, d.Duration().String())), nil
-}
-
-func (d *ReadableDuration) UnmarshalJSON(raw []byte) error {
-	if d == nil {
-		return fmt.Errorf("cannot unmarshal to nil pointer")
-	}
-
-	str := string(raw)
-	if len(str) < 2 || str[0] != '"' || str[len(str)-1] != '"' {
-		return fmt.Errorf("must be enclosed with quotes: %s", str)
-	}
-	dur, err := time.ParseDuration(str[1 : len(str)-1])
-	if err != nil {
-		return err
-	}
-	*d = ReadableDuration(dur)
-	return nil
-}
-
-// AutopilotGetConfiguration is used to query the current Autopilot configuration.
-func (op *Operator) AutopilotGetConfiguration(q *QueryOptions) (*AutopilotConfiguration, error) {
-	r := op.c.newRequest("GET", "/v1/operator/autopilot/configuration")
-	r.setQueryOptions(q)
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out AutopilotConfiguration
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-
-	return &out, nil
-}
-
-// AutopilotSetConfiguration is used to set the current Autopilot configuration.
-func (op *Operator) AutopilotSetConfiguration(conf *AutopilotConfiguration, q *WriteOptions) error {
-	r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
-	r.setWriteOptions(q)
-	r.obj = conf
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// AutopilotCASConfiguration is used to perform a Check-And-Set update on the
-// Autopilot configuration. The ModifyIndex value will be respected. Returns
-// true on success or false on failures.
-func (op *Operator) AutopilotCASConfiguration(conf *AutopilotConfiguration, q *WriteOptions) (bool, error) {
-	r := op.c.newRequest("PUT", "/v1/operator/autopilot/configuration")
-	r.setWriteOptions(q)
-	r.params.Set("cas", strconv.FormatUint(conf.ModifyIndex, 10))
-	r.obj = conf
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return false, err
-	}
-	defer resp.Body.Close()
-
-	var buf bytes.Buffer
-	if _, err := io.Copy(&buf, resp.Body); err != nil {
-		return false, fmt.Errorf("Failed to read response: %v", err)
-	}
-	res := strings.Contains(buf.String(), "true")
-
-	return res, nil
-}
-
-// AutopilotServerHealth is used to query the health of the servers in the cluster.
-func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply, error) {
-	r := op.c.newRequest("GET", "/v1/operator/autopilot/health")
-	r.setQueryOptions(q)
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out OperatorHealthReply
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return &out, nil
-}
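
The Autopilot helpers above follow a read-modify-CAS pattern keyed on ModifyIndex, the same idea as the KV check-and-set earlier in this file set. A hedged sketch; the local agent and the setting being toggled are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// Read the current Autopilot configuration (includes ModifyIndex).
	conf, err := op.AutopilotGetConfiguration(nil)
	if err != nil {
		log.Fatal(err)
	}

	// Flip a setting and write it back with a check-and-set; the update is
	// rejected if someone else changed the configuration in the meantime.
	conf.CleanupDeadServers = true
	ok, err := op.AutopilotCASConfiguration(conf, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("autopilot CAS applied:", ok)

	health, err := op.AutopilotServerHealth(nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cluster healthy: %v (failure tolerance %d)\n",
		health.Healthy, health.FailureTolerance)
}
```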
diff --git a/vendor/github.com/hashicorp/consul/api/operator_keyring.go b/vendor/github.com/hashicorp/consul/api/operator_keyring.go
deleted file mode 100644
index 038d5d5..0000000
--- a/vendor/github.com/hashicorp/consul/api/operator_keyring.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package api
-
-// keyringRequest is used for performing Keyring operations
-type keyringRequest struct {
-	Key string
-}
-
-// KeyringResponse is returned when listing the gossip encryption keys
-type KeyringResponse struct {
-	// Whether this response is for a WAN ring
-	WAN bool
-
-	// The datacenter name this request corresponds to
-	Datacenter string
-
-	// Segment has the network segment this request corresponds to.
-	Segment string
-
-	// Messages has information or errors from serf
-	Messages map[string]string `json:",omitempty"`
-
-	// A map of the encryption keys to the number of nodes they're installed on
-	Keys map[string]int
-
-	// The total number of nodes in this ring
-	NumNodes int
-}
-
-// KeyringInstall is used to install a new gossip encryption key into the cluster
-func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
-	r := op.c.newRequest("POST", "/v1/operator/keyring")
-	r.setWriteOptions(q)
-	r.obj = keyringRequest{
-		Key: key,
-	}
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// KeyringList is used to list the gossip keys installed in the cluster
-func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
-	r := op.c.newRequest("GET", "/v1/operator/keyring")
-	r.setQueryOptions(q)
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out []*KeyringResponse
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-// KeyringRemove is used to remove a gossip encryption key from the cluster
-func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
-	r := op.c.newRequest("DELETE", "/v1/operator/keyring")
-	r.setWriteOptions(q)
-	r.obj = keyringRequest{
-		Key: key,
-	}
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
-
-// KeyringUse is used to change the active gossip encryption key
-func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
-	r := op.c.newRequest("PUT", "/v1/operator/keyring")
-	r.setWriteOptions(q)
-	r.obj = keyringRequest{
-		Key: key,
-	}
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-	return nil
-}
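
The keyring helpers above support the usual gossip key rotation sequence: install the new key everywhere, switch to it, then remove the old key once every node has it. A sketch under the assumption of a cluster with gossip encryption enabled; the key string is a placeholder for a real 32-byte base64 key (e.g. from `consul keygen`):

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	// Placeholder key for illustration only.
	newKey := "pUqJrVyVRj5jsiYEkM/tFQYfWyJIv4s3XkvDwy7Cu5s="

	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// Install the new key, then make it the active encryption key.
	if err := op.KeyringInstall(newKey, nil); err != nil {
		log.Fatal(err)
	}
	if err := op.KeyringUse(newKey, nil); err != nil {
		log.Fatal(err)
	}

	// List the rings to confirm how many nodes have each key installed.
	rings, err := op.KeyringList(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, ring := range rings {
		fmt.Printf("dc=%s wan=%v keys=%v\n", ring.Datacenter, ring.WAN, ring.Keys)
	}
}
```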
diff --git a/vendor/github.com/hashicorp/consul/api/operator_license.go b/vendor/github.com/hashicorp/consul/api/operator_license.go
deleted file mode 100644
index 25aa702..0000000
--- a/vendor/github.com/hashicorp/consul/api/operator_license.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package api
-
-import (
-	"io/ioutil"
-	"strings"
-	"time"
-)
-
-type License struct {
-	// The unique identifier of the license
-	LicenseID string `json:"license_id"`
-
-	// The customer ID associated with the license
-	CustomerID string `json:"customer_id"`
-
-	// If set, an identifier that should be used to lock the license to a
-	// particular site, cluster, etc.
-	InstallationID string `json:"installation_id"`
-
-	// The time at which the license was issued
-	IssueTime time.Time `json:"issue_time"`
-
-	// The time at which the license starts being valid
-	StartTime time.Time `json:"start_time"`
-
-	// The time after which the license expires
-	ExpirationTime time.Time `json:"expiration_time"`
-
-	// The time at which the license ceases to function and can
-	// no longer be used in any capacity
-	TerminationTime time.Time `json:"termination_time"`
-
-	// The product the license is valid for
-	Product string `json:"product"`
-
-	// License Specific Flags
-	Flags map[string]interface{} `json:"flags"`
-
-	// List of features enabled by the license
-	Features []string `json:"features"`
-}
-
-type LicenseReply struct {
-	Valid    bool
-	License  *License
-	Warnings []string
-}
-
-func (op *Operator) LicenseGet(q *QueryOptions) (*LicenseReply, error) {
-	var reply LicenseReply
-	if _, err := op.c.query("/v1/operator/license", &reply, q); err != nil {
-		return nil, err
-	} else {
-		return &reply, nil
-	}
-}
-
-func (op *Operator) LicenseGetSigned(q *QueryOptions) (string, error) {
-	r := op.c.newRequest("GET", "/v1/operator/license")
-	r.params.Set("signed", "1")
-	r.setQueryOptions(q)
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return "", err
-	}
-	defer resp.Body.Close()
-
-	data, err := ioutil.ReadAll(resp.Body)
-	if err != nil {
-		return "", err
-	}
-
-	return string(data), nil
-}
-
-// LicenseReset will reset the license to the builtin one if it is still valid.
-// If the builtin license is invalid, the current license stays active.
-func (op *Operator) LicenseReset(opts *WriteOptions) (*LicenseReply, error) {
-	var reply LicenseReply
-	r := op.c.newRequest("DELETE", "/v1/operator/license")
-	r.setWriteOptions(opts)
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	if err := decodeBody(resp, &reply); err != nil {
-		return nil, err
-	}
-
-	return &reply, nil
-}
-
-func (op *Operator) LicensePut(license string, opts *WriteOptions) (*LicenseReply, error) {
-	var reply LicenseReply
-	r := op.c.newRequest("PUT", "/v1/operator/license")
-	r.setWriteOptions(opts)
-	r.body = strings.NewReader(license)
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	if err := decodeBody(resp, &reply); err != nil {
-		return nil, err
-	}
-
-	return &reply, nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/operator_raft.go b/vendor/github.com/hashicorp/consul/api/operator_raft.go
deleted file mode 100644
index a9844df..0000000
--- a/vendor/github.com/hashicorp/consul/api/operator_raft.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package api
-
-// RaftServer has information about a server in the Raft configuration.
-type RaftServer struct {
-	// ID is the unique ID for the server. These are currently the same
-	// as the address, but they will be changed to a real GUID in a future
-	// release of Consul.
-	ID string
-
-	// Node is the node name of the server, as known by Consul, or this
-	// will be set to "(unknown)" otherwise.
-	Node string
-
-	// Address is the IP:port of the server, used for Raft communications.
-	Address string
-
-	// Leader is true if this server is the current cluster leader.
-	Leader bool
-
-	// ProtocolVersion is the raft protocol version used by the server
-	ProtocolVersion string
-
-	// Voter is true if this server has a vote in the cluster. This might
-	// be false if the server is staging and still coming online, or if
-	// it's a non-voting server, which will be added in a future release of
-	// Consul.
-	Voter bool
-}
-
-// RaftConfiguration is returned when querying for the current Raft configuration.
-type RaftConfiguration struct {
-	// Servers has the list of servers in the Raft configuration.
-	Servers []*RaftServer
-
-	// Index has the Raft index of this configuration.
-	Index uint64
-}
-
-// RaftGetConfiguration is used to query the current Raft peer set.
-func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
-	r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
-	r.setQueryOptions(q)
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var out RaftConfiguration
-	if err := decodeBody(resp, &out); err != nil {
-		return nil, err
-	}
-	return &out, nil
-}
-
-// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
-// quorum but no longer known to Serf or the catalog) by address in the form of
-// "IP:port".
-func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
-	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
-	r.setWriteOptions(q)
-
-	r.params.Set("address", string(address))
-
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-
-	resp.Body.Close()
-	return nil
-}
-
-// RaftRemovePeerByID is used to kick a stale peer (one that is in the Raft
-// quorum but no longer known to Serf or the catalog) by ID.
-func (op *Operator) RaftRemovePeerByID(id string, q *WriteOptions) error {
-	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
-	r.setWriteOptions(q)
-
-	r.params.Set("id", string(id))
-
-	_, resp, err := requireOK(op.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-
-	resp.Body.Close()
-	return nil
-}
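
Similarly, the raft operator endpoints removed here are normally used to inspect the peer set before kicking a stale member. A minimal sketch follows (client setup as in the earlier example; the commented-out peer address is a placeholder).

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	op := client.Operator()

	// Read the current Raft configuration and print each server.
	cfg, err := op.RaftGetConfiguration(nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range cfg.Servers {
		fmt.Printf("node=%s addr=%s leader=%v voter=%v\n", s.Node, s.Address, s.Leader, s.Voter)
	}

	// A stale peer could then be removed by its "IP:port" address, e.g.:
	// err = op.RaftRemovePeerByAddress("10.0.0.5:8300", nil)
}
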
diff --git a/vendor/github.com/hashicorp/consul/api/operator_segment.go b/vendor/github.com/hashicorp/consul/api/operator_segment.go
deleted file mode 100644
index 92b05d3..0000000
--- a/vendor/github.com/hashicorp/consul/api/operator_segment.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package api
-
-// SegmentList returns all the available LAN segments.
-func (op *Operator) SegmentList(q *QueryOptions) ([]string, *QueryMeta, error) {
-	var out []string
-	qm, err := op.c.query("/v1/operator/segment", &out, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/prepared_query.go b/vendor/github.com/hashicorp/consul/api/prepared_query.go
deleted file mode 100644
index 0204581..0000000
--- a/vendor/github.com/hashicorp/consul/api/prepared_query.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package api
-
-// QueryDatacenterOptions sets options about how we fail over if there are no
-// healthy nodes in the local datacenter.
-type QueryDatacenterOptions struct {
-	// NearestN is set to the number of remote datacenters to try, based on
-	// network coordinates.
-	NearestN int
-
-	// Datacenters is a fixed list of datacenters to try after NearestN. We
-	// never try a datacenter multiple times, so those are subtracted from
-	// this list before proceeding.
-	Datacenters []string
-}
-
-// QueryDNSOptions controls settings when query results are served over DNS.
-type QueryDNSOptions struct {
-	// TTL is the time to live for the served DNS results.
-	TTL string
-}
-
-// ServiceQuery is used to query for a set of healthy nodes offering a specific
-// service.
-type ServiceQuery struct {
-	// Service is the service to query.
-	Service string
-
-	// Near allows baking in the name of a node to automatically distance-
-	// sort from. The magic "_agent" value is supported, which sorts near
-	// the agent which initiated the request by default.
-	Near string
-
-	// Failover controls what we do if there are no healthy nodes in the
-	// local datacenter.
-	Failover QueryDatacenterOptions
-
-	// IgnoreCheckIDs is an optional list of health check IDs to ignore when
-	// considering which nodes are healthy. It is useful as an emergency measure
-	// to temporarily override some health check that is producing false negatives
-	// for example.
-	IgnoreCheckIDs []string
-
-	// If OnlyPassing is true then we will only include nodes with passing
-	// health checks (critical AND warning checks will cause a node to be
-	// discarded)
-	OnlyPassing bool
-
-	// Tags are a set of required and/or disallowed tags. If a tag is in
-	// this list it must be present. If the tag is preceded with "!" then
-	// it is disallowed.
-	Tags []string
-
-	// NodeMeta is a map of required node metadata fields. If a key/value
-	// pair is in this map it must be present on the node in order for the
-	// service entry to be returned.
-	NodeMeta map[string]string
-
-	// ServiceMeta is a map of required service metadata fields. If a key/value
-	// pair is in this map it must be present on the node in order for the
-	// service entry to be returned.
-	ServiceMeta map[string]string
-
-	// Connect if true will filter the prepared query results to only
-	// include Connect-capable services. These include both native services
-	// and proxies for matching services. Note that if a proxy matches,
-	// the constraints in the query above (Near, OnlyPassing, etc.) apply
-	// to the _proxy_ and not the service being proxied. In practice, proxies
-	// should be directly next to their services so this isn't an issue.
-	Connect bool
-}
-
-// QueryTemplate carries the arguments for creating a templated query.
-type QueryTemplate struct {
-	// Type specifies the type of the query template. Currently only
-	// "name_prefix_match" is supported. This field is required.
-	Type string
-
-	// Regexp allows specifying a regex pattern to match against the name
-	// of the query being executed.
-	Regexp string
-}
-
-// PreparedQueryDefinition defines a complete prepared query.
-type PreparedQueryDefinition struct {
-	// ID is this UUID-based ID for the query, always generated by Consul.
-	ID string
-
-	// Name is an optional friendly name for the query supplied by the
-	// user. NOTE - if this feature is used then it will reduce the security
-	// of any read ACL associated with this query/service since this name
-	// can be used to locate nodes without supplying any ACL.
-	Name string
-
-	// Session is an optional session to tie this query's lifetime to. If
-	// this is omitted then the query will not expire.
-	Session string
-
-	// Token is the ACL token used when the query was created, and it is
-	// used when a query is subsequently executed. This token, or a token
-	// with management privileges, must be used to change the query later.
-	Token string
-
-	// Service defines a service query (leaving things open for other types
-	// later).
-	Service ServiceQuery
-
-	// DNS has options that control how the results of this query are
-	// served over DNS.
-	DNS QueryDNSOptions
-
-	// Template is used to pass through the arguments for creating a
-	// prepared query with an attached template. If a template is given,
-	// interpolations are possible in other struct fields.
-	Template QueryTemplate
-}
-
-// PreparedQueryExecuteResponse has the results of executing a query.
-type PreparedQueryExecuteResponse struct {
-	// Service is the service that was queried.
-	Service string
-
-	// Nodes has the nodes that were output by the query.
-	Nodes []ServiceEntry
-
-	// DNS has the options for serving these results over DNS.
-	DNS QueryDNSOptions
-
-	// Datacenter is the datacenter that these results came from.
-	Datacenter string
-
-	// Failovers is a count of how many times we had to query a remote
-	// datacenter.
-	Failovers int
-}
-
-// PreparedQuery can be used to query the prepared query endpoints.
-type PreparedQuery struct {
-	c *Client
-}
-
-// PreparedQuery returns a handle to the prepared query endpoints.
-func (c *Client) PreparedQuery() *PreparedQuery {
-	return &PreparedQuery{c}
-}
-
-// Create makes a new prepared query. The ID of the new query is returned.
-func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) {
-	r := c.c.newRequest("POST", "/v1/query")
-	r.setWriteOptions(q)
-	r.obj = query
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return "", nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{}
-	wm.RequestTime = rtt
-
-	var out struct{ ID string }
-	if err := decodeBody(resp, &out); err != nil {
-		return "", nil, err
-	}
-	return out.ID, wm, nil
-}
-
-// Update makes updates to an existing prepared query.
-func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) {
-	return c.c.write("/v1/query/"+query.ID, query, nil, q)
-}
-
-// List is used to fetch all the prepared queries (always requires a management
-// token).
-func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
-	var out []*PreparedQueryDefinition
-	qm, err := c.c.query("/v1/query", &out, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// Get is used to fetch a specific prepared query.
-func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
-	var out []*PreparedQueryDefinition
-	qm, err := c.c.query("/v1/query/"+queryID, &out, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
-
-// Delete is used to delete a specific prepared query.
-func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) {
-	r := c.c.newRequest("DELETE", "/v1/query/"+queryID)
-	r.setWriteOptions(q)
-	rtt, resp, err := requireOK(c.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{}
-	wm.RequestTime = rtt
-	return wm, nil
-}
-
-// Execute is used to execute a specific prepared query. You can execute using
-// a query ID or name.
-func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) {
-	var out *PreparedQueryExecuteResponse
-	qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return out, qm, nil
-}
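
The prepared-query API deleted above follows a create/execute pattern. A minimal sketch, assuming a registered "web" service; the query name and service are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	pq := client.PreparedQuery()

	// Create a query that returns passing instances of "web", sorted by
	// distance from the agent that executes it.
	id, _, err := pq.Create(&api.PreparedQueryDefinition{
		Name: "nearest-web",
		Service: api.ServiceQuery{
			Service:     "web",
			Near:        "_agent",
			OnlyPassing: true,
		},
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Execute by ID (a query name works as well).
	resp, _, err := pq.Execute(id, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d healthy nodes from datacenter %s\n", len(resp.Nodes), resp.Datacenter)
}
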
diff --git a/vendor/github.com/hashicorp/consul/api/raw.go b/vendor/github.com/hashicorp/consul/api/raw.go
deleted file mode 100644
index 745a208..0000000
--- a/vendor/github.com/hashicorp/consul/api/raw.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package api
-
-// Raw can be used to do raw queries against custom endpoints
-type Raw struct {
-	c *Client
-}
-
-// Raw returns a handle to query endpoints
-func (c *Client) Raw() *Raw {
-	return &Raw{c}
-}
-
-// Query is used to do a GET request against an endpoint
-// and deserialize the response into an interface using
-// standard Consul conventions.
-func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
-	return raw.c.query(endpoint, out, q)
-}
-
-// Write is used to do a PUT request against an endpoint
-// and serialize/deserialize using the standard Consul conventions.
-func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
-	return raw.c.write(endpoint, in, out, q)
-}
diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go
deleted file mode 100644
index bc4f885..0000000
--- a/vendor/github.com/hashicorp/consul/api/semaphore.go
+++ /dev/null
@@ -1,514 +0,0 @@
-package api
-
-import (
-	"encoding/json"
-	"fmt"
-	"path"
-	"sync"
-	"time"
-)
-
-const (
-	// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
-	DefaultSemaphoreSessionName = "Consul API Semaphore"
-
-	// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
-	// when creating a new Semaphore. This is used because we do not have any
-	// other check to depend upon.
-	DefaultSemaphoreSessionTTL = "15s"
-
-	// DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
-	// acquisition is possible. This affects the minimum time it takes to cancel
-	// a Semaphore acquisition.
-	DefaultSemaphoreWaitTime = 15 * time.Second
-
-	// DefaultSemaphoreKey is the key used within the prefix for
-	// coordination between all the contenders.
-	DefaultSemaphoreKey = ".lock"
-
-	// SemaphoreFlagValue is a magic flag we set to indicate a key
-	// is being used for a semaphore. It is used to detect a potential
-	// conflict with a lock.
-	SemaphoreFlagValue = 0xe0f69a2baa414de0
-)
-
-var (
-	// ErrSemaphoreHeld is returned if we attempt to double lock
-	ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
-
-	// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
-	// that we do not hold.
-	ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
-
-	// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
-	// that is in use.
-	ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
-
-	// ErrSemaphoreConflict is returned if the flags on a key
-	// used for a semaphore do not match expectation
-	ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
-)
-
-// Semaphore is used to implement a distributed semaphore
-// using the Consul KV primitives.
-type Semaphore struct {
-	c    *Client
-	opts *SemaphoreOptions
-
-	isHeld       bool
-	sessionRenew chan struct{}
-	lockSession  string
-	l            sync.Mutex
-}
-
-// SemaphoreOptions is used to parameterize the Semaphore
-type SemaphoreOptions struct {
-	Prefix            string        // Must be set and have write permissions
-	Limit             int           // Must be set, and be positive
-	Value             []byte        // Optional, value to associate with the contender entry
-	Session           string        // Optional, created if not specified
-	SessionName       string        // Optional, defaults to DefaultSemaphoreSessionName
-	SessionTTL        string        // Optional, defaults to DefaultSemaphoreSessionTTL
-	MonitorRetries    int           // Optional, defaults to 0 which means no retries
-	MonitorRetryTime  time.Duration // Optional, defaults to DefaultMonitorRetryTime
-	SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
-	SemaphoreTryOnce  bool          // Optional, defaults to false which means try forever
-}
-
-// semaphoreLock is written under the DefaultSemaphoreKey and
-// is used to coordinate between all the contenders.
-type semaphoreLock struct {
-	// Limit is the integer limit of holders. This is used to
-	// verify that all the holders agree on the value.
-	Limit int
-
-	// Holders is a list of all the semaphore holders.
-	// It maps the session ID to true. It is used as a set effectively.
-	Holders map[string]bool
-}
-
-// SemaphorePrefix is used to create a Semaphore which will operate
-// at the given KV prefix and uses the given limit for the semaphore.
-// The prefix must have write privileges, and the limit must be agreed
-// upon by all contenders.
-func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
-	opts := &SemaphoreOptions{
-		Prefix: prefix,
-		Limit:  limit,
-	}
-	return c.SemaphoreOpts(opts)
-}
-
-// SemaphoreOpts is used to create a Semaphore with the given options.
-// The prefix must have write privileges, and the limit must be agreed
-// upon by all contenders. If a Session is not provided, one will be created.
-func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
-	if opts.Prefix == "" {
-		return nil, fmt.Errorf("missing prefix")
-	}
-	if opts.Limit <= 0 {
-		return nil, fmt.Errorf("semaphore limit must be positive")
-	}
-	if opts.SessionName == "" {
-		opts.SessionName = DefaultSemaphoreSessionName
-	}
-	if opts.SessionTTL == "" {
-		opts.SessionTTL = DefaultSemaphoreSessionTTL
-	} else {
-		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
-			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
-		}
-	}
-	if opts.MonitorRetryTime == 0 {
-		opts.MonitorRetryTime = DefaultMonitorRetryTime
-	}
-	if opts.SemaphoreWaitTime == 0 {
-		opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
-	}
-	s := &Semaphore{
-		c:    c,
-		opts: opts,
-	}
-	return s, nil
-}
-
-// Acquire attempts to reserve a slot in the semaphore, blocking until
-// success, interruption via the stopCh, or an error is encountered.
-// Providing a non-nil stopCh can be used to abort the attempt.
-// On success, a channel is returned that represents our slot.
-// This channel could be closed at any time due to session invalidation,
-// communication errors, operator intervention, etc. It is NOT safe to
-// assume that the slot is held until Release() unless the Session is specifically
-// created without any associated health checks. By default Consul sessions
-// prefer liveness over safety and an application must be able to handle
-// the session being lost.
-func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
-	// Hold the lock as we try to acquire
-	s.l.Lock()
-	defer s.l.Unlock()
-
-	// Check if we already hold the semaphore
-	if s.isHeld {
-		return nil, ErrSemaphoreHeld
-	}
-
-	// Check if we need to create a session first
-	s.lockSession = s.opts.Session
-	if s.lockSession == "" {
-		sess, err := s.createSession()
-		if err != nil {
-			return nil, fmt.Errorf("failed to create session: %v", err)
-		}
-
-		s.sessionRenew = make(chan struct{})
-		s.lockSession = sess
-		session := s.c.Session()
-		go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew)
-
-		// If we fail to acquire the lock, cleanup the session
-		defer func() {
-			if !s.isHeld {
-				close(s.sessionRenew)
-				s.sessionRenew = nil
-			}
-		}()
-	}
-
-	// Create the contender entry
-	kv := s.c.KV()
-	made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil)
-	if err != nil || !made {
-		return nil, fmt.Errorf("failed to make contender entry: %v", err)
-	}
-
-	// Setup the query options
-	qOpts := &QueryOptions{
-		WaitTime: s.opts.SemaphoreWaitTime,
-	}
-
-	start := time.Now()
-	attempts := 0
-WAIT:
-	// Check if we should quit
-	select {
-	case <-stopCh:
-		return nil, nil
-	default:
-	}
-
-	// Handle the one-shot mode.
-	if s.opts.SemaphoreTryOnce && attempts > 0 {
-		elapsed := time.Since(start)
-		if elapsed > s.opts.SemaphoreWaitTime {
-			return nil, nil
-		}
-
-		// Query wait time should not exceed the semaphore wait time
-		qOpts.WaitTime = s.opts.SemaphoreWaitTime - elapsed
-	}
-	attempts++
-
-	// Read the prefix
-	pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
-	if err != nil {
-		return nil, fmt.Errorf("failed to read prefix: %v", err)
-	}
-
-	// Decode the lock
-	lockPair := s.findLock(pairs)
-	if lockPair.Flags != SemaphoreFlagValue {
-		return nil, ErrSemaphoreConflict
-	}
-	lock, err := s.decodeLock(lockPair)
-	if err != nil {
-		return nil, err
-	}
-
-	// Verify we agree with the limit
-	if lock.Limit != s.opts.Limit {
-		return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)",
-			lock.Limit, s.opts.Limit)
-	}
-
-	// Prune the dead holders
-	s.pruneDeadHolders(lock, pairs)
-
-	// Check if the lock is held
-	if len(lock.Holders) >= lock.Limit {
-		qOpts.WaitIndex = meta.LastIndex
-		goto WAIT
-	}
-
-	// Create a new lock with us as a holder
-	lock.Holders[s.lockSession] = true
-	newLock, err := s.encodeLock(lock, lockPair.ModifyIndex)
-	if err != nil {
-		return nil, err
-	}
-
-	// Attempt the acquisition
-	didSet, _, err := kv.CAS(newLock, nil)
-	if err != nil {
-		return nil, fmt.Errorf("failed to update lock: %v", err)
-	}
-	if !didSet {
-		// Update failed, could have been a race with another contender,
-		// retry the operation
-		goto WAIT
-	}
-
-	// Watch to ensure we maintain ownership of the slot
-	lockCh := make(chan struct{})
-	go s.monitorLock(s.lockSession, lockCh)
-
-	// Set that we own the lock
-	s.isHeld = true
-
-	// Acquired! All done
-	return lockCh, nil
-}
-
-// Release is used to voluntarily give up our semaphore slot. It is
-// an error to call this if the semaphore has not been acquired.
-func (s *Semaphore) Release() error {
-	// Hold the lock as we try to release
-	s.l.Lock()
-	defer s.l.Unlock()
-
-	// Ensure the lock is actually held
-	if !s.isHeld {
-		return ErrSemaphoreNotHeld
-	}
-
-	// Set that we no longer own the lock
-	s.isHeld = false
-
-	// Stop the session renew
-	if s.sessionRenew != nil {
-		defer func() {
-			close(s.sessionRenew)
-			s.sessionRenew = nil
-		}()
-	}
-
-	// Get and clear the lock session
-	lockSession := s.lockSession
-	s.lockSession = ""
-
-	// Remove ourselves as a lock holder
-	kv := s.c.KV()
-	key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
-READ:
-	pair, _, err := kv.Get(key, nil)
-	if err != nil {
-		return err
-	}
-	if pair == nil {
-		pair = &KVPair{}
-	}
-	lock, err := s.decodeLock(pair)
-	if err != nil {
-		return err
-	}
-
-	// Create a new lock without us as a holder
-	if _, ok := lock.Holders[lockSession]; ok {
-		delete(lock.Holders, lockSession)
-		newLock, err := s.encodeLock(lock, pair.ModifyIndex)
-		if err != nil {
-			return err
-		}
-
-		// Swap the locks
-		didSet, _, err := kv.CAS(newLock, nil)
-		if err != nil {
-			return fmt.Errorf("failed to update lock: %v", err)
-		}
-		if !didSet {
-			goto READ
-		}
-	}
-
-	// Destroy the contender entry
-	contenderKey := path.Join(s.opts.Prefix, lockSession)
-	if _, err := kv.Delete(contenderKey, nil); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Destroy is used to clean up the semaphore entry. It is not necessary
-// to invoke it, and it will fail if the semaphore is in use.
-func (s *Semaphore) Destroy() error {
-	// Hold the lock as we try to acquire
-	s.l.Lock()
-	defer s.l.Unlock()
-
-	// Check if we already hold the semaphore
-	if s.isHeld {
-		return ErrSemaphoreHeld
-	}
-
-	// List for the semaphore
-	kv := s.c.KV()
-	pairs, _, err := kv.List(s.opts.Prefix, nil)
-	if err != nil {
-		return fmt.Errorf("failed to read prefix: %v", err)
-	}
-
-	// Find the lock pair, bail if it doesn't exist
-	lockPair := s.findLock(pairs)
-	if lockPair.ModifyIndex == 0 {
-		return nil
-	}
-	if lockPair.Flags != SemaphoreFlagValue {
-		return ErrSemaphoreConflict
-	}
-
-	// Decode the lock
-	lock, err := s.decodeLock(lockPair)
-	if err != nil {
-		return err
-	}
-
-	// Prune the dead holders
-	s.pruneDeadHolders(lock, pairs)
-
-	// Check if there are any holders
-	if len(lock.Holders) > 0 {
-		return ErrSemaphoreInUse
-	}
-
-	// Attempt the delete
-	didRemove, _, err := kv.DeleteCAS(lockPair, nil)
-	if err != nil {
-		return fmt.Errorf("failed to remove semaphore: %v", err)
-	}
-	if !didRemove {
-		return ErrSemaphoreInUse
-	}
-	return nil
-}
-
-// createSession is used to create a new managed session
-func (s *Semaphore) createSession() (string, error) {
-	session := s.c.Session()
-	se := &SessionEntry{
-		Name:     s.opts.SessionName,
-		TTL:      s.opts.SessionTTL,
-		Behavior: SessionBehaviorDelete,
-	}
-	id, _, err := session.Create(se, nil)
-	if err != nil {
-		return "", err
-	}
-	return id, nil
-}
-
-// contenderEntry returns a formatted KVPair for the contender
-func (s *Semaphore) contenderEntry(session string) *KVPair {
-	return &KVPair{
-		Key:     path.Join(s.opts.Prefix, session),
-		Value:   s.opts.Value,
-		Session: session,
-		Flags:   SemaphoreFlagValue,
-	}
-}
-
-// findLock is used to find the KV Pair which is used for coordination
-func (s *Semaphore) findLock(pairs KVPairs) *KVPair {
-	key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
-	for _, pair := range pairs {
-		if pair.Key == key {
-			return pair
-		}
-	}
-	return &KVPair{Flags: SemaphoreFlagValue}
-}
-
-// decodeLock is used to decode a semaphoreLock from an
-// entry in Consul
-func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) {
-	// Handle if there is no lock
-	if pair == nil || pair.Value == nil {
-		return &semaphoreLock{
-			Limit:   s.opts.Limit,
-			Holders: make(map[string]bool),
-		}, nil
-	}
-
-	l := &semaphoreLock{}
-	if err := json.Unmarshal(pair.Value, l); err != nil {
-		return nil, fmt.Errorf("lock decoding failed: %v", err)
-	}
-	return l, nil
-}
-
-// encodeLock is used to encode a semaphoreLock into a KVPair
-// that can be PUT
-func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) {
-	enc, err := json.Marshal(l)
-	if err != nil {
-		return nil, fmt.Errorf("lock encoding failed: %v", err)
-	}
-	pair := &KVPair{
-		Key:         path.Join(s.opts.Prefix, DefaultSemaphoreKey),
-		Value:       enc,
-		Flags:       SemaphoreFlagValue,
-		ModifyIndex: oldIndex,
-	}
-	return pair, nil
-}
-
-// pruneDeadHolders is used to remove all the dead lock holders
-func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) {
-	// Gather all the live holders
-	alive := make(map[string]struct{}, len(pairs))
-	for _, pair := range pairs {
-		if pair.Session != "" {
-			alive[pair.Session] = struct{}{}
-		}
-	}
-
-	// Remove any holders that are dead
-	for holder := range lock.Holders {
-		if _, ok := alive[holder]; !ok {
-			delete(lock.Holders, holder)
-		}
-	}
-}
-
-// monitorLock is a long-running routine to monitor semaphore ownership.
-// It closes the stopCh if we lose our slot.
-func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
-	defer close(stopCh)
-	kv := s.c.KV()
-	opts := &QueryOptions{RequireConsistent: true}
-WAIT:
-	retries := s.opts.MonitorRetries
-RETRY:
-	pairs, meta, err := kv.List(s.opts.Prefix, opts)
-	if err != nil {
-		// If configured we can try to ride out a brief Consul unavailability
-		// by doing retries. Note that we have to attempt the retry in a non-
-		// blocking fashion so that we have a clean place to reset the retry
-		// counter if service is restored.
-		if retries > 0 && IsRetryableError(err) {
-			time.Sleep(s.opts.MonitorRetryTime)
-			retries--
-			opts.WaitIndex = 0
-			goto RETRY
-		}
-		return
-	}
-	lockPair := s.findLock(pairs)
-	lock, err := s.decodeLock(lockPair)
-	if err != nil {
-		return
-	}
-	s.pruneDeadHolders(lock, pairs)
-	if _, ok := lock.Holders[session]; ok {
-		opts.WaitIndex = meta.LastIndex
-		goto WAIT
-	}
-}
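
For reference, the distributed semaphore removed above is normally used as in the minimal sketch below; the KV prefix and limit are placeholders, and handling of a lost slot is application-specific.

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Allow at most 3 concurrent holders under the given KV prefix.
	sem, err := client.SemaphorePrefix("service/worker/sem", 3)
	if err != nil {
		log.Fatal(err)
	}

	// Acquire blocks until a slot is free; the returned channel is closed
	// if the slot is later lost (e.g. session invalidation).
	lostCh, err := sem.Acquire(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer sem.Release()

	select {
	case <-lostCh:
		log.Println("lost semaphore slot, stopping work")
	default:
		// do protected work while the slot is held
	}
}
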
diff --git a/vendor/github.com/hashicorp/consul/api/session.go b/vendor/github.com/hashicorp/consul/api/session.go
deleted file mode 100644
index 1613f11..0000000
--- a/vendor/github.com/hashicorp/consul/api/session.go
+++ /dev/null
@@ -1,224 +0,0 @@
-package api
-
-import (
-	"errors"
-	"fmt"
-	"time"
-)
-
-const (
-	// SessionBehaviorRelease is the default behavior and causes
-	// all associated locks to be released on session invalidation.
-	SessionBehaviorRelease = "release"
-
-	// SessionBehaviorDelete is new in Consul 0.5 and changes the
-	// behavior to delete all associated locks on session invalidation.
-	// It can be used in a way similar to Ephemeral Nodes in ZooKeeper.
-	SessionBehaviorDelete = "delete"
-)
-
-var ErrSessionExpired = errors.New("session expired")
-
-// SessionEntry represents a session in consul
-type SessionEntry struct {
-	CreateIndex uint64
-	ID          string
-	Name        string
-	Node        string
-	Checks      []string
-	LockDelay   time.Duration
-	Behavior    string
-	TTL         string
-}
-
-// Session can be used to query the Session endpoints
-type Session struct {
-	c *Client
-}
-
-// Session returns a handle to the session endpoints
-func (c *Client) Session() *Session {
-	return &Session{c}
-}
-
-// CreateNoChecks is like Create but is used specifically to create
-// a session with no associated health checks.
-func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
-	body := make(map[string]interface{})
-	body["Checks"] = []string{}
-	if se != nil {
-		if se.Name != "" {
-			body["Name"] = se.Name
-		}
-		if se.Node != "" {
-			body["Node"] = se.Node
-		}
-		if se.LockDelay != 0 {
-			body["LockDelay"] = durToMsec(se.LockDelay)
-		}
-		if se.Behavior != "" {
-			body["Behavior"] = se.Behavior
-		}
-		if se.TTL != "" {
-			body["TTL"] = se.TTL
-		}
-	}
-	return s.create(body, q)
-
-}
-
-// Create makes a new session. Providing a session entry can
-// customize the session. It can also be nil to use defaults.
-func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
-	var obj interface{}
-	if se != nil {
-		body := make(map[string]interface{})
-		obj = body
-		if se.Name != "" {
-			body["Name"] = se.Name
-		}
-		if se.Node != "" {
-			body["Node"] = se.Node
-		}
-		if se.LockDelay != 0 {
-			body["LockDelay"] = durToMsec(se.LockDelay)
-		}
-		if len(se.Checks) > 0 {
-			body["Checks"] = se.Checks
-		}
-		if se.Behavior != "" {
-			body["Behavior"] = se.Behavior
-		}
-		if se.TTL != "" {
-			body["TTL"] = se.TTL
-		}
-	}
-	return s.create(obj, q)
-}
-
-func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
-	var out struct{ ID string }
-	wm, err := s.c.write("/v1/session/create", obj, &out, q)
-	if err != nil {
-		return "", nil, err
-	}
-	return out.ID, wm, nil
-}
-
-// Destroy invalidates a given session
-func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
-	wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
-	if err != nil {
-		return nil, err
-	}
-	return wm, nil
-}
-
-// Renew renews the TTL on a given session
-func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
-	r := s.c.newRequest("PUT", "/v1/session/renew/"+id)
-	r.setWriteOptions(q)
-	rtt, resp, err := s.c.doRequest(r)
-	if err != nil {
-		return nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	wm := &WriteMeta{RequestTime: rtt}
-
-	if resp.StatusCode == 404 {
-		return nil, wm, nil
-	} else if resp.StatusCode != 200 {
-		return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
-	}
-
-	var entries []*SessionEntry
-	if err := decodeBody(resp, &entries); err != nil {
-		return nil, nil, fmt.Errorf("Failed to read response: %v", err)
-	}
-	if len(entries) > 0 {
-		return entries[0], wm, nil
-	}
-	return nil, wm, nil
-}
-
-// RenewPeriodic is used to periodically invoke Session.Renew on a
-// session until a doneCh is closed. This is meant to be used in a long running
-// goroutine to ensure a session stays valid.
-func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh <-chan struct{}) error {
-	ctx := q.Context()
-
-	ttl, err := time.ParseDuration(initialTTL)
-	if err != nil {
-		return err
-	}
-
-	waitDur := ttl / 2
-	lastRenewTime := time.Now()
-	var lastErr error
-	for {
-		if time.Since(lastRenewTime) > ttl {
-			return lastErr
-		}
-		select {
-		case <-time.After(waitDur):
-			entry, _, err := s.Renew(id, q)
-			if err != nil {
-				waitDur = time.Second
-				lastErr = err
-				continue
-			}
-			if entry == nil {
-				return ErrSessionExpired
-			}
-
-			// Handle the server updating the TTL
-			ttl, _ = time.ParseDuration(entry.TTL)
-			waitDur = ttl / 2
-			lastRenewTime = time.Now()
-
-		case <-doneCh:
-			// Attempt a session destroy
-			s.Destroy(id, q)
-			return nil
-
-		case <-ctx.Done():
-			// Bail immediately since attempting the destroy would
-			// use the canceled context in q, which would just bail.
-			return ctx.Err()
-		}
-	}
-}
-
-// Info looks up a single session
-func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
-	var entries []*SessionEntry
-	qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	if len(entries) > 0 {
-		return entries[0], qm, nil
-	}
-	return nil, qm, nil
-}
-
-// Node gets sessions for a node
-func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
-	var entries []*SessionEntry
-	qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
-
-// List gets all active sessions
-func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
-	var entries []*SessionEntry
-	qm, err := s.c.query("/v1/session/list", &entries, q)
-	if err != nil {
-		return nil, nil, err
-	}
-	return entries, qm, nil
-}
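
The session endpoints deleted here are usually paired with RenewPeriodic to keep a TTL session alive. A minimal sketch; the session name and TTL are placeholders.

package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	s := client.Session()

	// Create a TTL session whose associated locks are released on invalidation.
	id, _, err := s.Create(&api.SessionEntry{
		Name:     "example-lock-session",
		TTL:      "15s",
		Behavior: api.SessionBehaviorRelease,
	}, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Keep the session renewed in the background until doneCh is closed;
	// RenewPeriodic destroys the session on its way out.
	doneCh := make(chan struct{})
	go s.RenewPeriodic("15s", id, nil, doneCh)

	// ... use the session (locks, semaphores, KV acquire) ...

	close(doneCh)
}
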
diff --git a/vendor/github.com/hashicorp/consul/api/snapshot.go b/vendor/github.com/hashicorp/consul/api/snapshot.go
deleted file mode 100644
index e902377..0000000
--- a/vendor/github.com/hashicorp/consul/api/snapshot.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package api
-
-import (
-	"io"
-)
-
-// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of
-// Consul's internal state and restore snapshots for disaster recovery.
-type Snapshot struct {
-	c *Client
-}
-
-// Snapshot returns a handle that exposes the snapshot endpoints.
-func (c *Client) Snapshot() *Snapshot {
-	return &Snapshot{c}
-}
-
-// Save requests a new snapshot and provides an io.ReadCloser with the snapshot
-// data to save. If this doesn't return an error, then it's the responsibility
-// of the caller to close it. Only a subset of the QueryOptions are supported:
-// Datacenter, AllowStale, and Token.
-func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
-	r := s.c.newRequest("GET", "/v1/snapshot")
-	r.setQueryOptions(q)
-
-	rtt, resp, err := requireOK(s.c.doRequest(r))
-	if err != nil {
-		return nil, nil, err
-	}
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-	return resp.Body, qm, nil
-}
-
-// Restore streams in an existing snapshot and attempts to restore it.
-func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error {
-	r := s.c.newRequest("PUT", "/v1/snapshot")
-	r.body = in
-	r.setWriteOptions(q)
-	_, _, err := requireOK(s.c.doRequest(r))
-	if err != nil {
-		return err
-	}
-	return nil
-}
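
Snapshot save/restore is a stream-oriented API. A minimal sketch that writes a snapshot to a local file; the file name is a placeholder.

package main

import (
	"io"
	"log"
	"os"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	snap := client.Snapshot()

	// Save streams the snapshot; the caller must close the reader.
	rc, _, err := snap.Save(nil)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()

	f, err := os.Create("consul-backup.snap")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	if _, err := io.Copy(f, rc); err != nil {
		log.Fatal(err)
	}

	// Restore would later stream the same file back:
	//   in, _ := os.Open("consul-backup.snap")
	//   err = snap.Restore(nil, in)
}
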
diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go
deleted file mode 100644
index 74ef61a..0000000
--- a/vendor/github.com/hashicorp/consul/api/status.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package api
-
-// Status can be used to query the Status endpoints
-type Status struct {
-	c *Client
-}
-
-// Status returns a handle to the status endpoints
-func (c *Client) Status() *Status {
-	return &Status{c}
-}
-
-// Leader is used to query for a known leader
-func (s *Status) Leader() (string, error) {
-	r := s.c.newRequest("GET", "/v1/status/leader")
-	_, resp, err := requireOK(s.c.doRequest(r))
-	if err != nil {
-		return "", err
-	}
-	defer resp.Body.Close()
-
-	var leader string
-	if err := decodeBody(resp, &leader); err != nil {
-		return "", err
-	}
-	return leader, nil
-}
-
-// Peers is used to query for known raft peers
-func (s *Status) Peers() ([]string, error) {
-	r := s.c.newRequest("GET", "/v1/status/peers")
-	_, resp, err := requireOK(s.c.doRequest(r))
-	if err != nil {
-		return nil, err
-	}
-	defer resp.Body.Close()
-
-	var peers []string
-	if err := decodeBody(resp, &peers); err != nil {
-		return nil, err
-	}
-	return peers, nil
-}
diff --git a/vendor/github.com/hashicorp/consul/api/txn.go b/vendor/github.com/hashicorp/consul/api/txn.go
deleted file mode 100644
index 65d7a16..0000000
--- a/vendor/github.com/hashicorp/consul/api/txn.go
+++ /dev/null
@@ -1,230 +0,0 @@
-package api
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"net/http"
-)
-
-// Txn is used to manipulate the Txn API
-type Txn struct {
-	c *Client
-}
-
-// Txn is used to return a handle to the transaction (txn) endpoints
-func (c *Client) Txn() *Txn {
-	return &Txn{c}
-}
-
-// TxnOp is the internal format we send to Consul. K/V, node, service, and
-// check operations are supported.
-type TxnOp struct {
-	KV      *KVTxnOp
-	Node    *NodeTxnOp
-	Service *ServiceTxnOp
-	Check   *CheckTxnOp
-}
-
-// TxnOps is a list of transaction operations.
-type TxnOps []*TxnOp
-
-// TxnResult is the internal format we receive from Consul.
-type TxnResult struct {
-	KV      *KVPair
-	Node    *Node
-	Service *CatalogService
-	Check   *HealthCheck
-}
-
-// TxnResults is a list of TxnResult objects.
-type TxnResults []*TxnResult
-
-// TxnError is used to return information about an operation in a transaction.
-type TxnError struct {
-	OpIndex int
-	What    string
-}
-
-// TxnErrors is a list of TxnError objects.
-type TxnErrors []*TxnError
-
-// TxnResponse is the internal format we receive from Consul.
-type TxnResponse struct {
-	Results TxnResults
-	Errors  TxnErrors
-}
-
-// KVOp constants give possible operations available in a transaction.
-type KVOp string
-
-const (
-	KVSet            KVOp = "set"
-	KVDelete         KVOp = "delete"
-	KVDeleteCAS      KVOp = "delete-cas"
-	KVDeleteTree     KVOp = "delete-tree"
-	KVCAS            KVOp = "cas"
-	KVLock           KVOp = "lock"
-	KVUnlock         KVOp = "unlock"
-	KVGet            KVOp = "get"
-	KVGetTree        KVOp = "get-tree"
-	KVCheckSession   KVOp = "check-session"
-	KVCheckIndex     KVOp = "check-index"
-	KVCheckNotExists KVOp = "check-not-exists"
-)
-
-// KVTxnOp defines a single operation inside a transaction.
-type KVTxnOp struct {
-	Verb    KVOp
-	Key     string
-	Value   []byte
-	Flags   uint64
-	Index   uint64
-	Session string
-}
-
-// KVTxnOps defines a set of operations to be performed inside a single
-// transaction.
-type KVTxnOps []*KVTxnOp
-
-// KVTxnResponse has the outcome of a transaction.
-type KVTxnResponse struct {
-	Results []*KVPair
-	Errors  TxnErrors
-}
-
-// NodeOp constants give possible operations available in a transaction.
-type NodeOp string
-
-const (
-	NodeGet       NodeOp = "get"
-	NodeSet       NodeOp = "set"
-	NodeCAS       NodeOp = "cas"
-	NodeDelete    NodeOp = "delete"
-	NodeDeleteCAS NodeOp = "delete-cas"
-)
-
-// NodeTxnOp defines a single operation inside a transaction.
-type NodeTxnOp struct {
-	Verb NodeOp
-	Node Node
-}
-
-// ServiceOp constants give possible operations available in a transaction.
-type ServiceOp string
-
-const (
-	ServiceGet       ServiceOp = "get"
-	ServiceSet       ServiceOp = "set"
-	ServiceCAS       ServiceOp = "cas"
-	ServiceDelete    ServiceOp = "delete"
-	ServiceDeleteCAS ServiceOp = "delete-cas"
-)
-
-// ServiceTxnOp defines a single operation inside a transaction.
-type ServiceTxnOp struct {
-	Verb    ServiceOp
-	Node    string
-	Service AgentService
-}
-
-// CheckOp constants give possible operations available in a transaction.
-type CheckOp string
-
-const (
-	CheckGet       CheckOp = "get"
-	CheckSet       CheckOp = "set"
-	CheckCAS       CheckOp = "cas"
-	CheckDelete    CheckOp = "delete"
-	CheckDeleteCAS CheckOp = "delete-cas"
-)
-
-// CheckTxnOp defines a single operation inside a transaction.
-type CheckTxnOp struct {
-	Verb  CheckOp
-	Check HealthCheck
-}
-
-// Txn is used to apply multiple Consul operations in a single, atomic transaction.
-//
-// Note that Go will perform the required base64 encoding on the values
-// automatically because the type is a byte slice. Transactions are defined as a
-// list of operations to perform, using the different fields in the TxnOp structure
-// to define operations. If any operation fails, none of the changes are applied
-// to the state store.
-//
-// Even though this is generally a write operation, we take a QueryOptions input
-// and return a QueryMeta output. If the transaction contains only read ops, then
-// Consul will fast-path it to a different endpoint internally which supports
-// consistency controls, but not blocking. If there are write operations then
-// the request will always be routed through raft and any consistency settings
-// will be ignored.
-//
-// Here's an example:
-//
-//	   ops := TxnOps{
-//		   &TxnOp{
-//			   KV: &KVTxnOp{
-//				   Verb:    KVLock,
-//				   Key:     "test/lock",
-//				   Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
-//				   Value:   []byte("hello"),
-//			   },
-//		   },
-//		   &TxnOp{
-//			   KV: &KVTxnOp{
-//				   Verb: KVGet,
-//				   Key:  "another/key",
-//			   },
-//		   },
-//		   &TxnOp{
-//			   Check: &CheckTxnOp{
-//				   Verb: CheckSet,
-//				   Check: HealthCheck{
-//					   Node:    "foo",
-//					   CheckID: "redis:a",
-//					   Name:    "Redis Health Check",
-//					   Status:  "passing",
-//				   },
-//			   },
-//		   },
-//	   }
-//	   ok, response, _, err := txn.Txn(ops, nil)
-//
-// If there is a problem making the transaction request then an error will be
-// returned. Otherwise, the ok value will be true if the transaction succeeded
-// or false if it was rolled back. The response is a structured return value which
-// will have the outcome of the transaction. Its Results member will have entries
-// for each operation. For KV operations, Deleted keys will have a nil entry in the
-// results, and to save space, the Value of each key in the Results will be nil
-// unless the operation is a KVGet. If the transaction was rolled back, the Errors
-// member will have entries referencing the index of the operation that failed
-// along with an error message.
-func (t *Txn) Txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) {
-	return t.c.txn(txn, q)
-}
-
-func (c *Client) txn(txn TxnOps, q *QueryOptions) (bool, *TxnResponse, *QueryMeta, error) {
-	r := c.newRequest("PUT", "/v1/txn")
-	r.setQueryOptions(q)
-
-	r.obj = txn
-	rtt, resp, err := c.doRequest(r)
-	if err != nil {
-		return false, nil, nil, err
-	}
-	defer resp.Body.Close()
-
-	qm := &QueryMeta{}
-	parseQueryMeta(resp, qm)
-	qm.RequestTime = rtt
-
-	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
-		var txnResp TxnResponse
-		if err := decodeBody(resp, &txnResp); err != nil {
-			return false, nil, nil, err
-		}
-
-		return resp.StatusCode == http.StatusOK, &txnResp, qm, nil
-	}
-
-	var buf bytes.Buffer
-	if _, err := io.Copy(&buf, resp.Body); err != nil {
-		return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
-	}
-	return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
-}
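
Finally, the transaction endpoint above takes a list of TxnOp wrappers. A minimal KV-only sketch; the keys and values are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	ops := api.TxnOps{
		&api.TxnOp{KV: &api.KVTxnOp{Verb: api.KVSet, Key: "app/config", Value: []byte("v1")}},
		&api.TxnOp{KV: &api.KVTxnOp{Verb: api.KVGet, Key: "app/config"}},
	}

	// ok is false if the transaction was rolled back; resp.Errors then
	// identifies the failing operation(s).
	ok, resp, _, err := client.Txn().Txn(ops, nil)
	if err != nil {
		log.Fatal(err)
	}
	if !ok {
		for _, e := range resp.Errors {
			fmt.Printf("op %d failed: %s\n", e.OpIndex, e.What)
		}
		return
	}
	fmt.Printf("%d results\n", len(resp.Results))
}
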
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
deleted file mode 100644
index e87a115..0000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
+++ /dev/null
@@ -1,363 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. "Contributor"
-
-     means each individual or legal entity that creates, contributes to the
-     creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
-
-     means the combination of the Contributions of others (if any) used by a
-     Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
-
-     means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
-
-     means Source Code Form to which the initial Contributor has attached the
-     notice in Exhibit A, the Executable Form of such Source Code Form, and
-     Modifications of such Source Code Form, in each case including portions
-     thereof.
-
-1.5. "Incompatible With Secondary Licenses"
-     means
-
-     a. that the initial Contributor has attached the notice described in
-        Exhibit B to the Covered Software; or
-
-     b. that the Covered Software was made available under the terms of
-        version 1.1 or earlier of the License, but not also under the terms of
-        a Secondary License.
-
-1.6. "Executable Form"
-
-     means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
-
-     means a work that combines Covered Software with other material, in a
-     separate file or files, that is not Covered Software.
-
-1.8. "License"
-
-     means this document.
-
-1.9. "Licensable"
-
-     means having the right to grant, to the maximum extent possible, whether
-     at the time of the initial grant or subsequently, any and all of the
-     rights conveyed by this License.
-
-1.10. "Modifications"
-
-     means any of the following:
-
-     a. any file in Source Code Form that results from an addition to,
-        deletion from, or modification of the contents of Covered Software; or
-
-     b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. "Patent Claims" of a Contributor
-
-      means any patent claim(s), including without limitation, method,
-      process, and apparatus claims, in any patent Licensable by such
-      Contributor that would be infringed, but for the grant of the License,
-      by the making, using, selling, offering for sale, having made, import,
-      or transfer of either its Contributions or its Contributor Version.
-
-1.12. "Secondary License"
-
-      means either the GNU General Public License, Version 2.0, the GNU Lesser
-      General Public License, Version 2.1, the GNU Affero General Public
-      License, Version 3.0, or any later versions of those licenses.
-
-1.13. "Source Code Form"
-
-      means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
-
-      means an individual or a legal entity exercising rights under this
-      License. For legal entities, "You" includes any entity that controls, is
-      controlled by, or is under common control with You. For purposes of this
-      definition, "control" means (a) the power, direct or indirect, to cause
-      the direction or management of such entity, whether by contract or
-      otherwise, or (b) ownership of more than fifty percent (50%) of the
-      outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
-     Each Contributor hereby grants You a world-wide, royalty-free,
-     non-exclusive license:
-
-     a. under intellectual property rights (other than patent or trademark)
-        Licensable by such Contributor to use, reproduce, make available,
-        modify, display, perform, distribute, and otherwise exploit its
-        Contributions, either on an unmodified basis, with Modifications, or
-        as part of a Larger Work; and
-
-     b. under Patent Claims of such Contributor to make, use, sell, offer for
-        sale, have made, import, and otherwise transfer either its
-        Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-     The licenses granted in Section 2.1 with respect to any Contribution
-     become effective for each Contribution on the date the Contributor first
-     distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-     The licenses granted in this Section 2 are the only rights granted under
-     this License. No additional rights or licenses will be implied from the
-     distribution or licensing of Covered Software under this License.
-     Notwithstanding Section 2.1(b) above, no patent license is granted by a
-     Contributor:
-
-     a. for any code that a Contributor has removed from Covered Software; or
-
-     b. for infringements caused by: (i) Your and any other third party's
-        modifications of Covered Software, or (ii) the combination of its
-        Contributions with other software (except as part of its Contributor
-        Version); or
-
-     c. under Patent Claims infringed by Covered Software in the absence of
-        its Contributions.
-
-     This License does not grant any rights in the trademarks, service marks,
-     or logos of any Contributor (except as may be necessary to comply with
-     the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-     No Contributor makes additional grants as a result of Your choice to
-     distribute the Covered Software under a subsequent version of this
-     License (see Section 10.2) or under the terms of a Secondary License (if
-     permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-     Each Contributor represents that the Contributor believes its
-     Contributions are its original creation(s) or it has sufficient rights to
-     grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-     This License is not intended to limit any rights You have under
-     applicable copyright doctrines of fair use, fair dealing, or other
-     equivalents.
-
-2.7. Conditions
-
-     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
-     Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
-     All distribution of Covered Software in Source Code Form, including any
-     Modifications that You create or to which You contribute, must be under
-     the terms of this License. You must inform recipients that the Source
-     Code Form of the Covered Software is governed by the terms of this
-     License, and how they can obtain a copy of this License. You may not
-     attempt to alter or restrict the recipients' rights in the Source Code
-     Form.
-
-3.2. Distribution of Executable Form
-
-     If You distribute Covered Software in Executable Form then:
-
-     a. such Covered Software must also be made available in Source Code Form,
-        as described in Section 3.1, and You must inform recipients of the
-        Executable Form how they can obtain a copy of such Source Code Form by
-        reasonable means in a timely manner, at a charge no more than the cost
-        of distribution to the recipient; and
-
-     b. You may distribute such Executable Form under the terms of this
-        License, or sublicense it under different terms, provided that the
-        license for the Executable Form does not attempt to limit or alter the
-        recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-     You may create and distribute a Larger Work under terms of Your choice,
-     provided that You also comply with the requirements of this License for
-     the Covered Software. If the Larger Work is a combination of Covered
-     Software with a work governed by one or more Secondary Licenses, and the
-     Covered Software is not Incompatible With Secondary Licenses, this
-     License permits You to additionally distribute such Covered Software
-     under the terms of such Secondary License(s), so that the recipient of
-     the Larger Work may, at their option, further distribute the Covered
-     Software under the terms of either this License or such Secondary
-     License(s).
-
-3.4. Notices
-
-     You may not remove or alter the substance of any license notices
-     (including copyright notices, patent notices, disclaimers of warranty, or
-     limitations of liability) contained within the Source Code Form of the
-     Covered Software, except that You may alter any license notices to the
-     extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-     You may choose to offer, and to charge a fee for, warranty, support,
-     indemnity or liability obligations to one or more recipients of Covered
-     Software. However, You may do so only on Your own behalf, and not on
-     behalf of any Contributor. You must make it absolutely clear that any
-     such warranty, support, indemnity, or liability obligation is offered by
-     You alone, and You hereby agree to indemnify every Contributor for any
-     liability incurred by such Contributor as a result of warranty, support,
-     indemnity or liability terms You offer. You may include additional
-     disclaimers of warranty and limitations of liability specific to any
-     jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
-   If it is impossible for You to comply with any of the terms of this License
-   with respect to some or all of the Covered Software due to statute,
-   judicial order, or regulation then You must: (a) comply with the terms of
-   this License to the maximum extent possible; and (b) describe the
-   limitations and the code they affect. Such description must be placed in a
-   text file included with all distributions of the Covered Software under
-   this License. Except to the extent prohibited by statute or regulation,
-   such description must be sufficiently detailed for a recipient of ordinary
-   skill to be able to understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
-     fail to comply with any of its terms. However, if You become compliant,
-     then the rights granted under this License from a particular Contributor
-     are reinstated (a) provisionally, unless and until such Contributor
-     explicitly and finally terminates Your grants, and (b) on an ongoing
-     basis, if such Contributor fails to notify You of the non-compliance by
-     some reasonable means prior to 60 days after You have come back into
-     compliance. Moreover, Your grants from a particular Contributor are
-     reinstated on an ongoing basis if such Contributor notifies You of the
-     non-compliance by some reasonable means, this is the first time You have
-     received notice of non-compliance with this License from such
-     Contributor, and You become compliant prior to 30 days after Your receipt
-     of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-     infringement claim (excluding declaratory judgment actions,
-     counter-claims, and cross-claims) alleging that a Contributor Version
-     directly or indirectly infringes any patent, then the rights granted to
-     You by any and all Contributors for the Covered Software under Section
-     2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
-     license agreements (excluding distributors and resellers) which have been
-     validly granted by You or Your distributors under this License prior to
-     termination shall survive termination.
-
-6. Disclaimer of Warranty
-
-   Covered Software is provided under this License on an "as is" basis,
-   without warranty of any kind, either expressed, implied, or statutory,
-   including, without limitation, warranties that the Covered Software is free
-   of defects, merchantable, fit for a particular purpose or non-infringing.
-   The entire risk as to the quality and performance of the Covered Software
-   is with You. Should any Covered Software prove defective in any respect,
-   You (not any Contributor) assume the cost of any necessary servicing,
-   repair, or correction. This disclaimer of warranty constitutes an essential
-   part of this License. No use of  any Covered Software is authorized under
-   this License except under this disclaimer.
-
-7. Limitation of Liability
-
-   Under no circumstances and under no legal theory, whether tort (including
-   negligence), contract, or otherwise, shall any Contributor, or anyone who
-   distributes Covered Software as permitted above, be liable to You for any
-   direct, indirect, special, incidental, or consequential damages of any
-   character including, without limitation, damages for lost profits, loss of
-   goodwill, work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses, even if such party shall have been
-   informed of the possibility of such damages. This limitation of liability
-   shall not apply to liability for death or personal injury resulting from
-   such party's negligence to the extent applicable law prohibits such
-   limitation. Some jurisdictions do not allow the exclusion or limitation of
-   incidental or consequential damages, so this exclusion and limitation may
-   not apply to You.
-
-8. Litigation
-
-   Any litigation relating to this License may be brought only in the courts
-   of a jurisdiction where the defendant maintains its principal place of
-   business and such litigation shall be governed by laws of that
-   jurisdiction, without reference to its conflict-of-law provisions. Nothing
-   in this Section shall prevent a party's ability to bring cross-claims or
-   counter-claims.
-
-9. Miscellaneous
-
-   This License represents the complete agreement concerning the subject
-   matter hereof. If any provision of this License is held to be
-   unenforceable, such provision shall be reformed only to the extent
-   necessary to make it enforceable. Any law or regulation which provides that
-   the language of a contract shall be construed against the drafter shall not
-   be used to construe this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
-      Mozilla Foundation is the license steward. Except as provided in Section
-      10.3, no one other than the license steward has the right to modify or
-      publish new versions of this License. Each version will be given a
-      distinguishing version number.
-
-10.2. Effect of New Versions
-
-      You may distribute the Covered Software under the terms of the version
-      of the License under which You originally received the Covered Software,
-      or under the terms of any subsequent version published by the license
-      steward.
-
-10.3. Modified Versions
-
-      If you create software not governed by this License, and you want to
-      create a new license for such software, you may create and use a
-      modified version of this License if you rename the license and remove
-      any references to the name of the license steward (except to note that
-      such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-      Licenses If You choose to distribute Source Code Form that is
-      Incompatible With Secondary Licenses under the terms of this version of
-      the License, the notice described in Exhibit B of this License must be
-      attached.
-
-Exhibit A - Source Code Form License Notice
-
-      This Source Code Form is subject to the
-      terms of the Mozilla Public License, v.
-      2.0. If a copy of the MPL was not
-      distributed with this file, You can
-      obtain one at
-      http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file,
-then You may include the notice in a location (such as a LICENSE file in a
-relevant directory) where a recipient would be likely to look for such a
-notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
-
-      This Source Code Form is "Incompatible
-      With Secondary Licenses", as defined by
-      the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/README.md b/vendor/github.com/hashicorp/go-cleanhttp/README.md
deleted file mode 100644
index 036e531..0000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/README.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# cleanhttp
-
-Functions for accessing "clean" Go http.Client values
-
--------------
-
-The Go standard library contains a default `http.Client` called
-`http.DefaultClient`. It is a common idiom in Go code to start with
-`http.DefaultClient` and tweak it as necessary, and in fact, this is
-encouraged; from the `http` package documentation:
-
-> The Client's Transport typically has internal state (cached TCP connections),
-so Clients should be reused instead of created as needed. Clients are safe for
-concurrent use by multiple goroutines.
-
-Unfortunately, this is a shared value, and it is not uncommon for libraries to
-assume that they are free to modify it at will. With enough dependencies, it
-can be very easy to encounter strange problems and race conditions due to
-manipulation of this shared value across libraries and goroutines (clients are
-safe for concurrent use, but writing values to the client struct itself is not
-protected).
-
-Making things worse is the fact that a bare `http.Client` will use a default
-`http.Transport` called `http.DefaultTransport`, which is another global value
-that behaves the same way. So it is not simply enough to replace
-`http.DefaultClient` with `&http.Client{}`.
-
-This repository provides some simple functions to get a "clean" `http.Client`
--- one that uses the same default values as the Go standard library, but
-returns a client that does not share any state with other clients.
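The deleted README above describes the package's purpose without showing usage; for context, a minimal sketch of how its constructors were typically consumed (not part of this change; the request URL is illustrative) might look like:

```go
package main

import (
	"fmt"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	// DefaultPooledClient returns a client with its own pooled Transport, so
	// settings applied to it cannot leak into http.DefaultClient or
	// http.DefaultTransport.
	client := cleanhttp.DefaultPooledClient()

	// Illustrative endpoint only.
	resp, err := client.Get("https://example.org/health")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```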
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
deleted file mode 100644
index 8d306bf..0000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
+++ /dev/null
@@ -1,57 +0,0 @@
-package cleanhttp
-
-import (
-	"net"
-	"net/http"
-	"runtime"
-	"time"
-)
-
-// DefaultTransport returns a new http.Transport with similar default values to
-// http.DefaultTransport, but with idle connections and keepalives disabled.
-func DefaultTransport() *http.Transport {
-	transport := DefaultPooledTransport()
-	transport.DisableKeepAlives = true
-	transport.MaxIdleConnsPerHost = -1
-	return transport
-}
-
-// DefaultPooledTransport returns a new http.Transport with similar default
-// values to http.DefaultTransport. Do not use this for transient transports as
-// it can leak file descriptors over time. Only use this for transports that
-// will be re-used for the same host(s).
-func DefaultPooledTransport() *http.Transport {
-	transport := &http.Transport{
-		Proxy: http.ProxyFromEnvironment,
-		DialContext: (&net.Dialer{
-			Timeout:   30 * time.Second,
-			KeepAlive: 30 * time.Second,
-			DualStack: true,
-		}).DialContext,
-		MaxIdleConns:          100,
-		IdleConnTimeout:       90 * time.Second,
-		TLSHandshakeTimeout:   10 * time.Second,
-		ExpectContinueTimeout: 1 * time.Second,
-		MaxIdleConnsPerHost:   runtime.GOMAXPROCS(0) + 1,
-	}
-	return transport
-}
-
-// DefaultClient returns a new http.Client with similar default values to
-// http.Client, but with a non-shared Transport, idle connections disabled, and
-// keepalives disabled.
-func DefaultClient() *http.Client {
-	return &http.Client{
-		Transport: DefaultTransport(),
-	}
-}
-
-// DefaultPooledClient returns a new http.Client with similar default values to
-// http.Client, but with a shared Transport. Do not use this function for
-// transient clients as it can leak file descriptors over time. Only use this
-// for clients that will be re-used for the same host(s).
-func DefaultPooledClient() *http.Client {
-	return &http.Client{
-		Transport: DefaultPooledTransport(),
-	}
-}
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/vendor/github.com/hashicorp/go-cleanhttp/doc.go
deleted file mode 100644
index 0584109..0000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// Package cleanhttp offers convenience utilities for acquiring "clean"
-// http.Transport and http.Client structs.
-//
-// Values set on http.DefaultClient and http.DefaultTransport affect all
-// callers. This can have detrimental effects, especially in TLS contexts,
-// where client or root certificates set to talk to multiple endpoints can end
-// up displacing each other, leading to hard-to-debug issues. This package
-// provides non-shared http.Client and http.Transport structs to ensure that
-// the configuration will not be overwritten by other parts of the application
-// or dependencies.
-//
-// The DefaultClient and DefaultTransport functions disable idle connections
-// and keepalives. Without ensuring that idle connections are closed before
-// garbage collection, short-term clients/transports can leak file descriptors,
-// eventually leading to "too many open files" errors. If you will be
-// connecting to the same hosts repeatedly from the same client, you can use
-// DefaultPooledClient to receive a client that has connection pooling
-// semantics similar to http.DefaultClient.
-//
-package cleanhttp
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/go.mod b/vendor/github.com/hashicorp/go-cleanhttp/go.mod
deleted file mode 100644
index 310f075..0000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/hashicorp/go-cleanhttp
diff --git a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go b/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
deleted file mode 100644
index 3c845dc..0000000
--- a/vendor/github.com/hashicorp/go-cleanhttp/handlers.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package cleanhttp
-
-import (
-	"net/http"
-	"strings"
-	"unicode"
-)
-
-// HandlerInput provides input options to cleanhttp's handlers
-type HandlerInput struct {
-	ErrStatus int
-}
-
-// PrintablePathCheckHandler is a middleware that ensures the request path
-// contains only printable runes.
-func PrintablePathCheckHandler(next http.Handler, input *HandlerInput) http.Handler {
-	// Nil-check on input to make it optional
-	if input == nil {
-		input = &HandlerInput{
-			ErrStatus: http.StatusBadRequest,
-		}
-	}
-
-	// Default to http.StatusBadRequest on error
-	if input.ErrStatus == 0 {
-		input.ErrStatus = http.StatusBadRequest
-	}
-
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if r != nil {
-			// Check URL path for non-printable characters
-			idx := strings.IndexFunc(r.URL.Path, func(c rune) bool {
-				return !unicode.IsPrint(c)
-			})
-
-			if idx != -1 {
-				w.WriteHeader(input.ErrStatus)
-				return
-			}
-
-			if next != nil {
-				next.ServeHTTP(w, r)
-			}
-		}
-
-		return
-	})
-}
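For reference, a small sketch of wrapping a handler with the PrintablePathCheckHandler middleware removed above (the mux and listen address are illustrative, not part of this change):

```go
package main

import (
	"fmt"
	"net/http"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})

	// Passing nil for HandlerInput falls back to http.StatusBadRequest when a
	// request path contains non-printable runes.
	handler := cleanhttp.PrintablePathCheckHandler(mux, nil)

	// Listen address is illustrative.
	if err := http.ListenAndServe(":8080", handler); err != nil {
		fmt.Println("server error:", err)
	}
}
```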
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore b/vendor/github.com/hashicorp/go-immutable-radix/.gitignore
deleted file mode 100644
index daf913b..0000000
--- a/vendor/github.com/hashicorp/go-immutable-radix/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml b/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml
deleted file mode 100644
index 1a0bbea..0000000
--- a/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-language: go
-go:
-  - tip
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
deleted file mode 100644
index dd7c0ef..0000000
--- a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md
+++ /dev/null
@@ -1,9 +0,0 @@
-# 1.1.0 (May 22nd, 2019)
-
-FEATURES
-
-* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)]
-
-# 1.0.0 (August 30th, 2018)
-
-* go mod adopted
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE b/vendor/github.com/hashicorp/go-immutable-radix/LICENSE
deleted file mode 100644
index e87a115..0000000
--- a/vendor/github.com/hashicorp/go-immutable-radix/LICENSE
+++ /dev/null
@@ -1,363 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. "Contributor"
-
-     means each individual or legal entity that creates, contributes to the
-     creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
-
-     means the combination of the Contributions of others (if any) used by a
-     Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
-
-     means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
-
-     means Source Code Form to which the initial Contributor has attached the
-     notice in Exhibit A, the Executable Form of such Source Code Form, and
-     Modifications of such Source Code Form, in each case including portions
-     thereof.
-
-1.5. "Incompatible With Secondary Licenses"
-     means
-
-     a. that the initial Contributor has attached the notice described in
-        Exhibit B to the Covered Software; or
-
-     b. that the Covered Software was made available under the terms of
-        version 1.1 or earlier of the License, but not also under the terms of
-        a Secondary License.
-
-1.6. "Executable Form"
-
-     means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
-
-     means a work that combines Covered Software with other material, in a
-     separate file or files, that is not Covered Software.
-
-1.8. "License"
-
-     means this document.
-
-1.9. "Licensable"
-
-     means having the right to grant, to the maximum extent possible, whether
-     at the time of the initial grant or subsequently, any and all of the
-     rights conveyed by this License.
-
-1.10. "Modifications"
-
-     means any of the following:
-
-     a. any file in Source Code Form that results from an addition to,
-        deletion from, or modification of the contents of Covered Software; or
-
-     b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. "Patent Claims" of a Contributor
-
-      means any patent claim(s), including without limitation, method,
-      process, and apparatus claims, in any patent Licensable by such
-      Contributor that would be infringed, but for the grant of the License,
-      by the making, using, selling, offering for sale, having made, import,
-      or transfer of either its Contributions or its Contributor Version.
-
-1.12. "Secondary License"
-
-      means either the GNU General Public License, Version 2.0, the GNU Lesser
-      General Public License, Version 2.1, the GNU Affero General Public
-      License, Version 3.0, or any later versions of those licenses.
-
-1.13. "Source Code Form"
-
-      means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
-
-      means an individual or a legal entity exercising rights under this
-      License. For legal entities, "You" includes any entity that controls, is
-      controlled by, or is under common control with You. For purposes of this
-      definition, "control" means (a) the power, direct or indirect, to cause
-      the direction or management of such entity, whether by contract or
-      otherwise, or (b) ownership of more than fifty percent (50%) of the
-      outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
-     Each Contributor hereby grants You a world-wide, royalty-free,
-     non-exclusive license:
-
-     a. under intellectual property rights (other than patent or trademark)
-        Licensable by such Contributor to use, reproduce, make available,
-        modify, display, perform, distribute, and otherwise exploit its
-        Contributions, either on an unmodified basis, with Modifications, or
-        as part of a Larger Work; and
-
-     b. under Patent Claims of such Contributor to make, use, sell, offer for
-        sale, have made, import, and otherwise transfer either its
-        Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-     The licenses granted in Section 2.1 with respect to any Contribution
-     become effective for each Contribution on the date the Contributor first
-     distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-     The licenses granted in this Section 2 are the only rights granted under
-     this License. No additional rights or licenses will be implied from the
-     distribution or licensing of Covered Software under this License.
-     Notwithstanding Section 2.1(b) above, no patent license is granted by a
-     Contributor:
-
-     a. for any code that a Contributor has removed from Covered Software; or
-
-     b. for infringements caused by: (i) Your and any other third party's
-        modifications of Covered Software, or (ii) the combination of its
-        Contributions with other software (except as part of its Contributor
-        Version); or
-
-     c. under Patent Claims infringed by Covered Software in the absence of
-        its Contributions.
-
-     This License does not grant any rights in the trademarks, service marks,
-     or logos of any Contributor (except as may be necessary to comply with
-     the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-     No Contributor makes additional grants as a result of Your choice to
-     distribute the Covered Software under a subsequent version of this
-     License (see Section 10.2) or under the terms of a Secondary License (if
-     permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-     Each Contributor represents that the Contributor believes its
-     Contributions are its original creation(s) or it has sufficient rights to
-     grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-     This License is not intended to limit any rights You have under
-     applicable copyright doctrines of fair use, fair dealing, or other
-     equivalents.
-
-2.7. Conditions
-
-     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
-     Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
-     All distribution of Covered Software in Source Code Form, including any
-     Modifications that You create or to which You contribute, must be under
-     the terms of this License. You must inform recipients that the Source
-     Code Form of the Covered Software is governed by the terms of this
-     License, and how they can obtain a copy of this License. You may not
-     attempt to alter or restrict the recipients' rights in the Source Code
-     Form.
-
-3.2. Distribution of Executable Form
-
-     If You distribute Covered Software in Executable Form then:
-
-     a. such Covered Software must also be made available in Source Code Form,
-        as described in Section 3.1, and You must inform recipients of the
-        Executable Form how they can obtain a copy of such Source Code Form by
-        reasonable means in a timely manner, at a charge no more than the cost
-        of distribution to the recipient; and
-
-     b. You may distribute such Executable Form under the terms of this
-        License, or sublicense it under different terms, provided that the
-        license for the Executable Form does not attempt to limit or alter the
-        recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-     You may create and distribute a Larger Work under terms of Your choice,
-     provided that You also comply with the requirements of this License for
-     the Covered Software. If the Larger Work is a combination of Covered
-     Software with a work governed by one or more Secondary Licenses, and the
-     Covered Software is not Incompatible With Secondary Licenses, this
-     License permits You to additionally distribute such Covered Software
-     under the terms of such Secondary License(s), so that the recipient of
-     the Larger Work may, at their option, further distribute the Covered
-     Software under the terms of either this License or such Secondary
-     License(s).
-
-3.4. Notices
-
-     You may not remove or alter the substance of any license notices
-     (including copyright notices, patent notices, disclaimers of warranty, or
-     limitations of liability) contained within the Source Code Form of the
-     Covered Software, except that You may alter any license notices to the
-     extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-     You may choose to offer, and to charge a fee for, warranty, support,
-     indemnity or liability obligations to one or more recipients of Covered
-     Software. However, You may do so only on Your own behalf, and not on
-     behalf of any Contributor. You must make it absolutely clear that any
-     such warranty, support, indemnity, or liability obligation is offered by
-     You alone, and You hereby agree to indemnify every Contributor for any
-     liability incurred by such Contributor as a result of warranty, support,
-     indemnity or liability terms You offer. You may include additional
-     disclaimers of warranty and limitations of liability specific to any
-     jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
-   If it is impossible for You to comply with any of the terms of this License
-   with respect to some or all of the Covered Software due to statute,
-   judicial order, or regulation then You must: (a) comply with the terms of
-   this License to the maximum extent possible; and (b) describe the
-   limitations and the code they affect. Such description must be placed in a
-   text file included with all distributions of the Covered Software under
-   this License. Except to the extent prohibited by statute or regulation,
-   such description must be sufficiently detailed for a recipient of ordinary
-   skill to be able to understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
-     fail to comply with any of its terms. However, if You become compliant,
-     then the rights granted under this License from a particular Contributor
-     are reinstated (a) provisionally, unless and until such Contributor
-     explicitly and finally terminates Your grants, and (b) on an ongoing
-     basis, if such Contributor fails to notify You of the non-compliance by
-     some reasonable means prior to 60 days after You have come back into
-     compliance. Moreover, Your grants from a particular Contributor are
-     reinstated on an ongoing basis if such Contributor notifies You of the
-     non-compliance by some reasonable means, this is the first time You have
-     received notice of non-compliance with this License from such
-     Contributor, and You become compliant prior to 30 days after Your receipt
-     of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-     infringement claim (excluding declaratory judgment actions,
-     counter-claims, and cross-claims) alleging that a Contributor Version
-     directly or indirectly infringes any patent, then the rights granted to
-     You by any and all Contributors for the Covered Software under Section
-     2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
-     license agreements (excluding distributors and resellers) which have been
-     validly granted by You or Your distributors under this License prior to
-     termination shall survive termination.
-
-6. Disclaimer of Warranty
-
-   Covered Software is provided under this License on an "as is" basis,
-   without warranty of any kind, either expressed, implied, or statutory,
-   including, without limitation, warranties that the Covered Software is free
-   of defects, merchantable, fit for a particular purpose or non-infringing.
-   The entire risk as to the quality and performance of the Covered Software
-   is with You. Should any Covered Software prove defective in any respect,
-   You (not any Contributor) assume the cost of any necessary servicing,
-   repair, or correction. This disclaimer of warranty constitutes an essential
-   part of this License. No use of  any Covered Software is authorized under
-   this License except under this disclaimer.
-
-7. Limitation of Liability
-
-   Under no circumstances and under no legal theory, whether tort (including
-   negligence), contract, or otherwise, shall any Contributor, or anyone who
-   distributes Covered Software as permitted above, be liable to You for any
-   direct, indirect, special, incidental, or consequential damages of any
-   character including, without limitation, damages for lost profits, loss of
-   goodwill, work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses, even if such party shall have been
-   informed of the possibility of such damages. This limitation of liability
-   shall not apply to liability for death or personal injury resulting from
-   such party's negligence to the extent applicable law prohibits such
-   limitation. Some jurisdictions do not allow the exclusion or limitation of
-   incidental or consequential damages, so this exclusion and limitation may
-   not apply to You.
-
-8. Litigation
-
-   Any litigation relating to this License may be brought only in the courts
-   of a jurisdiction where the defendant maintains its principal place of
-   business and such litigation shall be governed by laws of that
-   jurisdiction, without reference to its conflict-of-law provisions. Nothing
-   in this Section shall prevent a party's ability to bring cross-claims or
-   counter-claims.
-
-9. Miscellaneous
-
-   This License represents the complete agreement concerning the subject
-   matter hereof. If any provision of this License is held to be
-   unenforceable, such provision shall be reformed only to the extent
-   necessary to make it enforceable. Any law or regulation which provides that
-   the language of a contract shall be construed against the drafter shall not
-   be used to construe this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
-      Mozilla Foundation is the license steward. Except as provided in Section
-      10.3, no one other than the license steward has the right to modify or
-      publish new versions of this License. Each version will be given a
-      distinguishing version number.
-
-10.2. Effect of New Versions
-
-      You may distribute the Covered Software under the terms of the version
-      of the License under which You originally received the Covered Software,
-      or under the terms of any subsequent version published by the license
-      steward.
-
-10.3. Modified Versions
-
-      If you create software not governed by this License, and you want to
-      create a new license for such software, you may create and use a
-      modified version of this License if you rename the license and remove
-      any references to the name of the license steward (except to note that
-      such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-      Licenses If You choose to distribute Source Code Form that is
-      Incompatible With Secondary Licenses under the terms of this version of
-      the License, the notice described in Exhibit B of this License must be
-      attached.
-
-Exhibit A - Source Code Form License Notice
-
-      This Source Code Form is subject to the
-      terms of the Mozilla Public License, v.
-      2.0. If a copy of the MPL was not
-      distributed with this file, You can
-      obtain one at
-      http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file,
-then You may include the notice in a location (such as a LICENSE file in a
-relevant directory) where a recipient would be likely to look for such a
-notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
-
-      This Source Code Form is "Incompatible
-      With Secondary Licenses", as defined by
-      the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md
deleted file mode 100644
index 4b6338b..0000000
--- a/vendor/github.com/hashicorp/go-immutable-radix/README.md
+++ /dev/null
@@ -1,66 +0,0 @@
-go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix)
-=========
-
-Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
-The package only provides a single `Tree` implementation, optimized for sparse nodes.
-
-As a radix tree, it provides the following:
- * O(k) operations. In many cases, this can be faster than a hash table since
-   the hash function is an O(k) operation, and hash tables have very poor cache locality.
- * Minimum / Maximum value lookups
- * Ordered iteration
-
-A tree supports using a transaction to batch multiple updates (insert, delete)
-in a more efficient manner than performing each operation one at a time.
-
-For a mutable variant, see [go-radix](https://github.com/armon/go-radix).
-
-Documentation
-=============
-
-The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/go-immutable-radix).
-
-Example
-=======
-
-Below is a simple example of usage
-
-```go
-// Create a tree
-r := iradix.New()
-r, _, _ = r.Insert([]byte("foo"), 1)
-r, _, _ = r.Insert([]byte("bar"), 2)
-r, _, _ = r.Insert([]byte("foobar"), 2)
-
-// Find the longest prefix match
-m, _, _ := r.Root().LongestPrefix([]byte("foozip"))
-if string(m) != "foo" {
-    panic("should be foo")
-}
-```
-
-Here is an example of performing a range scan of the keys.
-
-```go
-// Create a tree
-r := iradix.New()
-r, _, _ = r.Insert([]byte("001"), 1)
-r, _, _ = r.Insert([]byte("002"), 2)
-r, _, _ = r.Insert([]byte("005"), 5)
-r, _, _ = r.Insert([]byte("010"), 10)
-r, _, _ = r.Insert([]byte("100"), 10)
-
-// Range scan over the keys that sort lexicographically between [003, 050)
-it := r.Root().Iterator()
-it.SeekLowerBound([]byte("003"))
-for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
-  if string(key) >= "050" {
-      break
-  }
-  fmt.Println(string(key))
-}
-// Output:
-//  005
-//  010
-```
-
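The README above mentions batching updates through a transaction but does not show one; a minimal sketch against the deleted iradix API (keys and values are illustrative) might look like:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()

	// Batch several updates in one transaction; the original tree r is left
	// untouched until Commit returns the new, modified tree.
	txn := r.Txn()
	txn.Insert([]byte("alpha"), 1)
	txn.Insert([]byte("beta"), 2)
	txn.Delete([]byte("alpha"))
	r2 := txn.Commit()

	if v, ok := r2.Get([]byte("beta")); ok {
		fmt.Println("beta =", v) // beta = 2
	}
	fmt.Println("old size:", r.Len(), "new size:", r2.Len()) // old size: 0 new size: 1
}
```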
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/edges.go b/vendor/github.com/hashicorp/go-immutable-radix/edges.go
deleted file mode 100644
index a636747..0000000
--- a/vendor/github.com/hashicorp/go-immutable-radix/edges.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package iradix
-
-import "sort"
-
-type edges []edge
-
-func (e edges) Len() int {
-	return len(e)
-}
-
-func (e edges) Less(i, j int) bool {
-	return e[i].label < e[j].label
-}
-
-func (e edges) Swap(i, j int) {
-	e[i], e[j] = e[j], e[i]
-}
-
-func (e edges) Sort() {
-	sort.Sort(e)
-}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/go.mod b/vendor/github.com/hashicorp/go-immutable-radix/go.mod
deleted file mode 100644
index 27e7b7c..0000000
--- a/vendor/github.com/hashicorp/go-immutable-radix/go.mod
+++ /dev/null
@@ -1,6 +0,0 @@
-module github.com/hashicorp/go-immutable-radix
-
-require (
-	github.com/hashicorp/go-uuid v1.0.0
-	github.com/hashicorp/golang-lru v0.5.0
-)
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/go.sum b/vendor/github.com/hashicorp/go-immutable-radix/go.sum
deleted file mode 100644
index 7de5dfc..0000000
--- a/vendor/github.com/hashicorp/go-immutable-radix/go.sum
+++ /dev/null
@@ -1,4 +0,0 @@
-github.com/hashicorp/go-uuid v1.0.0 h1:RS8zrF7PhGwyNPOtxSClXXj9HA8feRnJzgnI1RJCSnM=
-github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
-github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
deleted file mode 100644
index e5e6e57..0000000
--- a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
+++ /dev/null
@@ -1,662 +0,0 @@
-package iradix
-
-import (
-	"bytes"
-	"strings"
-
-	"github.com/hashicorp/golang-lru/simplelru"
-)
-
-const (
-	// defaultModifiedCache is the default size of the modified node
-	// cache used per transaction. This is used to cache the updates
-	// to the nodes near the root, while the leaves do not need to be
-	// cached. This is important for very large transactions to prevent
-	// the modified cache from growing to be enormous. This is also used
-	// to set the max size of the mutation notify maps since those should
-	// also be bounded in a similar way.
-	defaultModifiedCache = 8192
-)
-
-// Tree implements an immutable radix tree. This can be treated as a
-// Dictionary abstract data type. The main advantage over a standard
-// hash map is prefix-based lookups and ordered iteration. The immutability
-// means that it is safe to concurrently read from a Tree without any
-// coordination.
-type Tree struct {
-	root *Node
-	size int
-}
-
-// New returns an empty Tree
-func New() *Tree {
-	t := &Tree{
-		root: &Node{
-			mutateCh: make(chan struct{}),
-		},
-	}
-	return t
-}
-
-// Len is used to return the number of elements in the tree
-func (t *Tree) Len() int {
-	return t.size
-}
-
-// Txn is a transaction on the tree. This transaction is applied
-// atomically and returns a new tree when committed. A transaction
-// is not thread safe, and should only be used by a single goroutine.
-type Txn struct {
-	// root is the modified root for the transaction.
-	root *Node
-
-	// snap is a snapshot of the root node for use if we have to run the
-	// slow notify algorithm.
-	snap *Node
-
-	// size tracks the size of the tree as it is modified during the
-	// transaction.
-	size int
-
-	// writable is a cache of writable nodes that have been created during
-	// the course of the transaction. This allows us to re-use the same
-	// nodes for further writes and avoid unnecessary copies of nodes that
-	// have never been exposed outside the transaction. This will only hold
-	// up to defaultModifiedCache number of entries.
-	writable *simplelru.LRU
-
-	// trackChannels is used to hold channels that need to be notified to
-	// signal mutation of the tree. This will only hold up to
-	// defaultModifiedCache number of entries, after which we will set the
-	// trackOverflow flag, which will cause us to use a more expensive
-	// algorithm to perform the notifications. Mutation tracking is only
-	// performed if trackMutate is true.
-	trackChannels map[chan struct{}]struct{}
-	trackOverflow bool
-	trackMutate   bool
-}
-
-// Txn starts a new transaction that can be used to mutate the tree
-func (t *Tree) Txn() *Txn {
-	txn := &Txn{
-		root: t.root,
-		snap: t.root,
-		size: t.size,
-	}
-	return txn
-}
-
-// TrackMutate can be used to toggle if mutations are tracked. If this is enabled
-// then notifications will be issued for affected internal nodes and leaves when
-// the transaction is committed.
-func (t *Txn) TrackMutate(track bool) {
-	t.trackMutate = track
-}
-
-// trackChannel safely attempts to track the given mutation channel, setting the
-// overflow flag if we can no longer track any more. This limits the amount of
-// state that will accumulate during a transaction and we have a slower algorithm
-// to switch to if we overflow.
-func (t *Txn) trackChannel(ch chan struct{}) {
-	// In overflow, make sure we don't store any more objects.
-	if t.trackOverflow {
-		return
-	}
-
-	// If this would overflow the state we reject it and set the flag (since
-	// we aren't tracking everything that's required any longer).
-	if len(t.trackChannels) >= defaultModifiedCache {
-		// Mark that we are in the overflow state
-		t.trackOverflow = true
-
-		// Clear the map so that the channels can be garbage collected. It is
-		// safe to do this since we have already overflowed and will be using
-		// the slow notify algorithm.
-		t.trackChannels = nil
-		return
-	}
-
-	// Create the map on the fly when we need it.
-	if t.trackChannels == nil {
-		t.trackChannels = make(map[chan struct{}]struct{})
-	}
-
-	// Otherwise we are good to track it.
-	t.trackChannels[ch] = struct{}{}
-}
-
-// writeNode returns a node to be modified; if the current node has already been
-// modified during the course of the transaction, it is used in-place. Set
-// forLeafUpdate to true if you are getting a write node to update the leaf,
-// which will set leaf mutation tracking appropriately as well.
-func (t *Txn) writeNode(n *Node, forLeafUpdate bool) *Node {
-	// Ensure the writable set exists.
-	if t.writable == nil {
-		lru, err := simplelru.NewLRU(defaultModifiedCache, nil)
-		if err != nil {
-			panic(err)
-		}
-		t.writable = lru
-	}
-
-	// If this node has already been modified, we can continue to use it
-	// during this transaction. We know that we don't need to track it for
-	// a node update since the node is writable, but if this is for a leaf
-	// update we track it, in case the initial write to this node didn't
-	// update the leaf.
-	if _, ok := t.writable.Get(n); ok {
-		if t.trackMutate && forLeafUpdate && n.leaf != nil {
-			t.trackChannel(n.leaf.mutateCh)
-		}
-		return n
-	}
-
-	// Mark this node as being mutated.
-	if t.trackMutate {
-		t.trackChannel(n.mutateCh)
-	}
-
-	// Mark its leaf as being mutated, if appropriate.
-	if t.trackMutate && forLeafUpdate && n.leaf != nil {
-		t.trackChannel(n.leaf.mutateCh)
-	}
-
-	// Copy the existing node. If you have set forLeafUpdate it will be
-	// safe to replace this leaf with another after you get your node for
-	// writing. You MUST replace it, because the channel associated with
-	// this leaf will be closed when this transaction is committed.
-	nc := &Node{
-		mutateCh: make(chan struct{}),
-		leaf:     n.leaf,
-	}
-	if n.prefix != nil {
-		nc.prefix = make([]byte, len(n.prefix))
-		copy(nc.prefix, n.prefix)
-	}
-	if len(n.edges) != 0 {
-		nc.edges = make([]edge, len(n.edges))
-		copy(nc.edges, n.edges)
-	}
-
-	// Mark this node as writable.
-	t.writable.Add(nc, nil)
-	return nc
-}
-
-// Visit all the nodes in the tree under n, and add their mutateChannels to the transaction
-// Returns the size of the subtree visited
-func (t *Txn) trackChannelsAndCount(n *Node) int {
-	// Count only leaf nodes
-	leaves := 0
-	if n.leaf != nil {
-		leaves = 1
-	}
-	// Mark this node as being mutated.
-	if t.trackMutate {
-		t.trackChannel(n.mutateCh)
-	}
-
-	// Mark its leaf as being mutated, if appropriate.
-	if t.trackMutate && n.leaf != nil {
-		t.trackChannel(n.leaf.mutateCh)
-	}
-
-	// Recurse on the children
-	for _, e := range n.edges {
-		leaves += t.trackChannelsAndCount(e.node)
-	}
-	return leaves
-}
-
-// mergeChild is called to collapse the given node with its child. This is only
-// called when the given node is not a leaf and has a single edge.
-func (t *Txn) mergeChild(n *Node) {
-	// Mark the child node as being mutated since we are about to abandon
-	// it. We don't need to mark the leaf since we are retaining it if it
-	// is there.
-	e := n.edges[0]
-	child := e.node
-	if t.trackMutate {
-		t.trackChannel(child.mutateCh)
-	}
-
-	// Merge the nodes.
-	n.prefix = concat(n.prefix, child.prefix)
-	n.leaf = child.leaf
-	if len(child.edges) != 0 {
-		n.edges = make([]edge, len(child.edges))
-		copy(n.edges, child.edges)
-	} else {
-		n.edges = nil
-	}
-}
-
-// insert does a recursive insertion
-func (t *Txn) insert(n *Node, k, search []byte, v interface{}) (*Node, interface{}, bool) {
-	// Handle key exhaustion
-	if len(search) == 0 {
-		var oldVal interface{}
-		didUpdate := false
-		if n.isLeaf() {
-			oldVal = n.leaf.val
-			didUpdate = true
-		}
-
-		nc := t.writeNode(n, true)
-		nc.leaf = &leafNode{
-			mutateCh: make(chan struct{}),
-			key:      k,
-			val:      v,
-		}
-		return nc, oldVal, didUpdate
-	}
-
-	// Look for the edge
-	idx, child := n.getEdge(search[0])
-
-	// No edge, create one
-	if child == nil {
-		e := edge{
-			label: search[0],
-			node: &Node{
-				mutateCh: make(chan struct{}),
-				leaf: &leafNode{
-					mutateCh: make(chan struct{}),
-					key:      k,
-					val:      v,
-				},
-				prefix: search,
-			},
-		}
-		nc := t.writeNode(n, false)
-		nc.addEdge(e)
-		return nc, nil, false
-	}
-
-	// Determine longest prefix of the search key on match
-	commonPrefix := longestPrefix(search, child.prefix)
-	if commonPrefix == len(child.prefix) {
-		search = search[commonPrefix:]
-		newChild, oldVal, didUpdate := t.insert(child, k, search, v)
-		if newChild != nil {
-			nc := t.writeNode(n, false)
-			nc.edges[idx].node = newChild
-			return nc, oldVal, didUpdate
-		}
-		return nil, oldVal, didUpdate
-	}
-
-	// Split the node
-	nc := t.writeNode(n, false)
-	splitNode := &Node{
-		mutateCh: make(chan struct{}),
-		prefix:   search[:commonPrefix],
-	}
-	nc.replaceEdge(edge{
-		label: search[0],
-		node:  splitNode,
-	})
-
-	// Restore the existing child node
-	modChild := t.writeNode(child, false)
-	splitNode.addEdge(edge{
-		label: modChild.prefix[commonPrefix],
-		node:  modChild,
-	})
-	modChild.prefix = modChild.prefix[commonPrefix:]
-
-	// Create a new leaf node
-	leaf := &leafNode{
-		mutateCh: make(chan struct{}),
-		key:      k,
-		val:      v,
-	}
-
-	// If the new key is a subset, add it to this node
-	search = search[commonPrefix:]
-	if len(search) == 0 {
-		splitNode.leaf = leaf
-		return nc, nil, false
-	}
-
-	// Create a new edge for the node
-	splitNode.addEdge(edge{
-		label: search[0],
-		node: &Node{
-			mutateCh: make(chan struct{}),
-			leaf:     leaf,
-			prefix:   search,
-		},
-	})
-	return nc, nil, false
-}
-
-// delete does a recursive deletion
-func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
-	// Check for key exhaustion
-	if len(search) == 0 {
-		if !n.isLeaf() {
-			return nil, nil
-		}
-		// Copy the pointer in case we are in a transaction that already
-		// modified this node since the node will be reused. Any changes
-		// made to the node will not affect returning the original leaf
-		// value.
-		oldLeaf := n.leaf
-
-		// Remove the leaf node
-		nc := t.writeNode(n, true)
-		nc.leaf = nil
-
-		// Check if this node should be merged
-		if n != t.root && len(nc.edges) == 1 {
-			t.mergeChild(nc)
-		}
-		return nc, oldLeaf
-	}
-
-	// Look for an edge
-	label := search[0]
-	idx, child := n.getEdge(label)
-	if child == nil || !bytes.HasPrefix(search, child.prefix) {
-		return nil, nil
-	}
-
-	// Consume the search prefix
-	search = search[len(child.prefix):]
-	newChild, leaf := t.delete(n, child, search)
-	if newChild == nil {
-		return nil, nil
-	}
-
-	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
-	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
-	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
-	// so be careful if you change any of the logic here.
-	nc := t.writeNode(n, false)
-
-	// Delete the edge if the node has no edges
-	if newChild.leaf == nil && len(newChild.edges) == 0 {
-		nc.delEdge(label)
-		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
-			t.mergeChild(nc)
-		}
-	} else {
-		nc.edges[idx].node = newChild
-	}
-	return nc, leaf
-}
-
-// deletePrefix does a recursive deletion of everything under the given prefix
-func (t *Txn) deletePrefix(parent, n *Node, search []byte) (*Node, int) {
-	// Check for key exhaustion
-	if len(search) == 0 {
-		nc := t.writeNode(n, true)
-		if n.isLeaf() {
-			nc.leaf = nil
-		}
-		nc.edges = nil
-		return nc, t.trackChannelsAndCount(n)
-	}
-
-	// Look for an edge
-	label := search[0]
-	idx, child := n.getEdge(label)
-	// We make sure that either the child node's prefix starts with the search term, or the search term starts with the child node's prefix
-	// Need to do both so that we can delete prefixes that don't correspond to any node in the tree
-	if child == nil || (!bytes.HasPrefix(child.prefix, search) && !bytes.HasPrefix(search, child.prefix)) {
-		return nil, 0
-	}
-
-	// Consume the search prefix
-	if len(child.prefix) > len(search) {
-		search = []byte("")
-	} else {
-		search = search[len(child.prefix):]
-	}
-	newChild, numDeletions := t.deletePrefix(n, child, search)
-	if newChild == nil {
-		return nil, 0
-	}
-	// Copy this node. WATCH OUT - it's safe to pass "false" here because we
-	// will only ADD a leaf via nc.mergeChild() if there isn't one due to
-	// the !nc.isLeaf() check in the logic just below. This is pretty subtle,
-	// so be careful if you change any of the logic here.
-
-	nc := t.writeNode(n, false)
-
-	// Delete the edge if the node has no edges
-	if newChild.leaf == nil && len(newChild.edges) == 0 {
-		nc.delEdge(label)
-		if n != t.root && len(nc.edges) == 1 && !nc.isLeaf() {
-			t.mergeChild(nc)
-		}
-	} else {
-		nc.edges[idx].node = newChild
-	}
-	return nc, numDeletions
-}
-
-// Insert is used to add or update a given key. The return provides
-// the previous value and a bool indicating if any was set.
-func (t *Txn) Insert(k []byte, v interface{}) (interface{}, bool) {
-	newRoot, oldVal, didUpdate := t.insert(t.root, k, k, v)
-	if newRoot != nil {
-		t.root = newRoot
-	}
-	if !didUpdate {
-		t.size++
-	}
-	return oldVal, didUpdate
-}
-
-// Delete is used to delete a given key. Returns the old value if any,
-// and a bool indicating if the key was set.
-func (t *Txn) Delete(k []byte) (interface{}, bool) {
-	newRoot, leaf := t.delete(nil, t.root, k)
-	if newRoot != nil {
-		t.root = newRoot
-	}
-	if leaf != nil {
-		t.size--
-		return leaf.val, true
-	}
-	return nil, false
-}
-
-// DeletePrefix is used to delete an entire subtree that matches the prefix.
-// This will delete all nodes under that prefix
-func (t *Txn) DeletePrefix(prefix []byte) bool {
-	newRoot, numDeletions := t.deletePrefix(nil, t.root, prefix)
-	if newRoot != nil {
-		t.root = newRoot
-		t.size = t.size - numDeletions
-		return true
-	}
-	return false
-
-}
-
-// Root returns the current root of the radix tree within this
-// transaction. The root is not safe across insert and delete operations,
-// but can be used to read the current state during a transaction.
-func (t *Txn) Root() *Node {
-	return t.root
-}
-
-// Get is used to lookup a specific key, returning
-// the value and if it was found
-func (t *Txn) Get(k []byte) (interface{}, bool) {
-	return t.root.Get(k)
-}
-
-// GetWatch is used to lookup a specific key, returning
-// the watch channel, value and if it was found
-func (t *Txn) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
-	return t.root.GetWatch(k)
-}
-
-// Commit is used to finalize the transaction and return a new tree. If mutation
-// tracking is turned on then notifications will also be issued.
-func (t *Txn) Commit() *Tree {
-	nt := t.CommitOnly()
-	if t.trackMutate {
-		t.Notify()
-	}
-	return nt
-}
-
-// CommitOnly is used to finalize the transaction and return a new tree, but
-// does not issue any notifications until Notify is called.
-func (t *Txn) CommitOnly() *Tree {
-	nt := &Tree{t.root, t.size}
-	t.writable = nil
-	return nt
-}
-
-// slowNotify does a complete comparison of the before and after trees in order
-// to trigger notifications. This doesn't require any additional state but it
-// is very expensive to compute.
-func (t *Txn) slowNotify() {
-	snapIter := t.snap.rawIterator()
-	rootIter := t.root.rawIterator()
-	for snapIter.Front() != nil || rootIter.Front() != nil {
-		// If we've exhausted the nodes in the old snapshot, we know
-		// there's nothing remaining to notify.
-		if snapIter.Front() == nil {
-			return
-		}
-		snapElem := snapIter.Front()
-
-		// If we've exhausted the nodes in the new root, we know we need
-		// to invalidate everything that remains in the old snapshot. We
-		// know from the loop condition there's something in the old
-		// snapshot.
-		if rootIter.Front() == nil {
-			close(snapElem.mutateCh)
-			if snapElem.isLeaf() {
-				close(snapElem.leaf.mutateCh)
-			}
-			snapIter.Next()
-			continue
-		}
-
-		// Do one string compare so we can check the various conditions
-		// below without repeating the compare.
-		cmp := strings.Compare(snapIter.Path(), rootIter.Path())
-
-		// If the snapshot is behind the root, then we must have deleted
-		// this node during the transaction.
-		if cmp < 0 {
-			close(snapElem.mutateCh)
-			if snapElem.isLeaf() {
-				close(snapElem.leaf.mutateCh)
-			}
-			snapIter.Next()
-			continue
-		}
-
-		// If the snapshot is ahead of the root, then we must have added
-		// this node during the transaction.
-		if cmp > 0 {
-			rootIter.Next()
-			continue
-		}
-
-		// If we have the same path, then we need to see if we mutated a
-		// node and possibly the leaf.
-		rootElem := rootIter.Front()
-		if snapElem != rootElem {
-			close(snapElem.mutateCh)
-			if snapElem.leaf != nil && (snapElem.leaf != rootElem.leaf) {
-				close(snapElem.leaf.mutateCh)
-			}
-		}
-		snapIter.Next()
-		rootIter.Next()
-	}
-}
-
-// Notify is used along with TrackMutate to trigger notifications. This must
-// only be done once a transaction is committed via CommitOnly, and it is called
-// automatically by Commit.
-func (t *Txn) Notify() {
-	if !t.trackMutate {
-		return
-	}
-
-	// If we've overflowed the tracking state we can't use it in any way and
-	// need to do a full tree compare.
-	if t.trackOverflow {
-		t.slowNotify()
-	} else {
-		for ch := range t.trackChannels {
-			close(ch)
-		}
-	}
-
-	// Clean up the tracking state so that a re-notify is safe (will trigger
-	// the else clause above which will be a no-op).
-	t.trackChannels = nil
-	t.trackOverflow = false
-}
-
-// Insert is used to add or update a given key. The return provides
-// the new tree, previous value and a bool indicating if any was set.
-func (t *Tree) Insert(k []byte, v interface{}) (*Tree, interface{}, bool) {
-	txn := t.Txn()
-	old, ok := txn.Insert(k, v)
-	return txn.Commit(), old, ok
-}
-
-// Delete is used to delete a given key. Returns the new tree,
-// old value if any, and a bool indicating if the key was set.
-func (t *Tree) Delete(k []byte) (*Tree, interface{}, bool) {
-	txn := t.Txn()
-	old, ok := txn.Delete(k)
-	return txn.Commit(), old, ok
-}
-
-// DeletePrefix is used to delete all nodes starting with a given prefix. Returns the new tree,
-// and a bool indicating if the prefix matched any nodes
-func (t *Tree) DeletePrefix(k []byte) (*Tree, bool) {
-	txn := t.Txn()
-	ok := txn.DeletePrefix(k)
-	return txn.Commit(), ok
-}
-
-// Root returns the root node of the tree which can be used for richer
-// query operations.
-func (t *Tree) Root() *Node {
-	return t.root
-}
-
-// Get is used to lookup a specific key, returning
-// the value and if it was found
-func (t *Tree) Get(k []byte) (interface{}, bool) {
-	return t.root.Get(k)
-}
-
-// longestPrefix finds the length of the shared prefix
-// of two strings
-func longestPrefix(k1, k2 []byte) int {
-	max := len(k1)
-	if l := len(k2); l < max {
-		max = l
-	}
-	var i int
-	for i = 0; i < max; i++ {
-		if k1[i] != k2[i] {
-			break
-		}
-	}
-	return i
-}
-
-// concat two byte slices, returning a third new copy
-func concat(a, b []byte) []byte {
-	c := make([]byte, len(a)+len(b))
-	copy(c, a)
-	copy(c[len(a):], b)
-	return c
-}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
deleted file mode 100644
index 1ecaf83..0000000
--- a/vendor/github.com/hashicorp/go-immutable-radix/iter.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package iradix
-
-import (
-	"bytes"
-)
-
-// Iterator is used to iterate over a set of nodes
-// in pre-order
-type Iterator struct {
-	node  *Node
-	stack []edges
-}
-
-// SeekPrefixWatch is used to seek the iterator to a given prefix
-// and returns the watch channel of the finest granularity
-func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
-	// Wipe the stack
-	i.stack = nil
-	n := i.node
-	watch = n.mutateCh
-	search := prefix
-	for {
-		// Check for key exhaustion
-		if len(search) == 0 {
-			i.node = n
-			return
-		}
-
-		// Look for an edge
-		_, n = n.getEdge(search[0])
-		if n == nil {
-			i.node = nil
-			return
-		}
-
-		// Update to the finest granularity as the search makes progress
-		watch = n.mutateCh
-
-		// Consume the search prefix
-		if bytes.HasPrefix(search, n.prefix) {
-			search = search[len(n.prefix):]
-
-		} else if bytes.HasPrefix(n.prefix, search) {
-			i.node = n
-			return
-		} else {
-			i.node = nil
-			return
-		}
-	}
-}
-
-// SeekPrefix is used to seek the iterator to a given prefix
-func (i *Iterator) SeekPrefix(prefix []byte) {
-	i.SeekPrefixWatch(prefix)
-}
-
-func (i *Iterator) recurseMin(n *Node) *Node {
-	// Traverse to the minimum child
-	if n.leaf != nil {
-		return n
-	}
-	if len(n.edges) > 0 {
-		// Add all the other edges to the stack (the min node will be added as
-		// we recurse)
-		i.stack = append(i.stack, n.edges[1:])
-		return i.recurseMin(n.edges[0].node)
-	}
-	// Shouldn't be possible
-	return nil
-}
-
-// SeekLowerBound is used to seek the iterator to the smallest key that is
-// greater or equal to the given key. There is no watch variant as it's hard to
-// predict based on the radix structure which node(s) changes might affect the
-// result.
-func (i *Iterator) SeekLowerBound(key []byte) {
-	// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
-	// go because we need only a subset of edges of many nodes in the path to the
-	// leaf with the lower bound.
-	i.stack = []edges{}
-	n := i.node
-	search := key
-
-	found := func(n *Node) {
-		i.node = n
-		i.stack = append(i.stack, edges{edge{node: n}})
-	}
-
-	for {
-		// Compare current prefix with the search key's same-length prefix.
-		var prefixCmp int
-		if len(n.prefix) < len(search) {
-			prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
-		} else {
-			prefixCmp = bytes.Compare(n.prefix, search)
-		}
-
-		if prefixCmp > 0 {
-			// Prefix is larger, that means the lower bound is greater than the search
-			// and from now on we need to follow the minimum path to the smallest
-			// leaf under this subtree.
-			n = i.recurseMin(n)
-			if n != nil {
-				found(n)
-			}
-			return
-		}
-
-		if prefixCmp < 0 {
-			// Prefix is smaller than search prefix, that means there is no lower
-			// bound
-			i.node = nil
-			return
-		}
-
-		// Prefix is equal, we are still heading for an exact match. If this is a
-		// leaf we're done.
-		if n.leaf != nil {
-			if bytes.Compare(n.leaf.key, key) < 0 {
-				i.node = nil
-				return
-			}
-			found(n)
-			return
-		}
-
-		// Consume the search prefix
-		if len(n.prefix) > len(search) {
-			search = []byte{}
-		} else {
-			search = search[len(n.prefix):]
-		}
-
-		// Otherwise, take the lower bound next edge.
-		idx, lbNode := n.getLowerBoundEdge(search[0])
-		if lbNode == nil {
-			i.node = nil
-			return
-		}
-
-		// Create stack edges for all strictly higher edges in this node.
-		if idx+1 < len(n.edges) {
-			i.stack = append(i.stack, n.edges[idx+1:])
-		}
-
-		i.node = lbNode
-		// Recurse
-		n = lbNode
-	}
-}
-
-// Next returns the next node in order
-func (i *Iterator) Next() ([]byte, interface{}, bool) {
-	// Initialize our stack if needed
-	if i.stack == nil && i.node != nil {
-		i.stack = []edges{
-			edges{
-				edge{node: i.node},
-			},
-		}
-	}
-
-	for len(i.stack) > 0 {
-		// Inspect the last element of the stack
-		n := len(i.stack)
-		last := i.stack[n-1]
-		elem := last[0].node
-
-		// Update the stack
-		if len(last) > 1 {
-			i.stack[n-1] = last[1:]
-		} else {
-			i.stack = i.stack[:n-1]
-		}
-
-		// Push the edges onto the frontier
-		if len(elem.edges) > 0 {
-			i.stack = append(i.stack, elem.edges)
-		}
-
-		// Return the leaf values if any
-		if elem.leaf != nil {
-			return elem.leaf.key, elem.leaf.val, true
-		}
-	}
-	return nil, nil, false
-}
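
The iterator deleted above is normally driven through go-immutable-radix's public API. A minimal sketch of prefix iteration follows; the keys and values are purely illustrative and not taken from voltctl:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	// Each Insert returns a new immutable tree; the old one is unchanged.
	r := iradix.New()
	r, _, _ = r.Insert([]byte("service/voltha/core"), 1)
	r, _, _ = r.Insert([]byte("service/voltha/ofagent"), 2)
	r, _, _ = r.Insert([]byte("service/other"), 3)

	// Seek the iterator to a prefix, then walk the matching keys in order.
	it := r.Root().Iterator()
	it.SeekPrefix([]byte("service/voltha/"))
	for key, val, ok := it.Next(); ok; key, val, ok = it.Next() {
		fmt.Printf("%s = %v\n", key, val)
	}
}
```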
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go
deleted file mode 100644
index 3ab904e..0000000
--- a/vendor/github.com/hashicorp/go-immutable-radix/node.go
+++ /dev/null
@@ -1,304 +0,0 @@
-package iradix
-
-import (
-	"bytes"
-	"sort"
-)
-
-// WalkFn is used when walking the tree. Takes a
-// key and value, returning if iteration should
-// be terminated.
-type WalkFn func(k []byte, v interface{}) bool
-
-// leafNode is used to represent a value
-type leafNode struct {
-	mutateCh chan struct{}
-	key      []byte
-	val      interface{}
-}
-
-// edge is used to represent an edge node
-type edge struct {
-	label byte
-	node  *Node
-}
-
-// Node is an immutable node in the radix tree
-type Node struct {
-	// mutateCh is closed if this node is modified
-	mutateCh chan struct{}
-
-	// leaf is used to store possible leaf
-	leaf *leafNode
-
-	// prefix is the common prefix we ignore
-	prefix []byte
-
-	// Edges should be stored in-order for iteration.
-	// We avoid a fully materialized slice to save memory,
-	// since in most cases we expect to be sparse
-	edges edges
-}
-
-func (n *Node) isLeaf() bool {
-	return n.leaf != nil
-}
-
-func (n *Node) addEdge(e edge) {
-	num := len(n.edges)
-	idx := sort.Search(num, func(i int) bool {
-		return n.edges[i].label >= e.label
-	})
-	n.edges = append(n.edges, e)
-	if idx != num {
-		copy(n.edges[idx+1:], n.edges[idx:num])
-		n.edges[idx] = e
-	}
-}
-
-func (n *Node) replaceEdge(e edge) {
-	num := len(n.edges)
-	idx := sort.Search(num, func(i int) bool {
-		return n.edges[i].label >= e.label
-	})
-	if idx < num && n.edges[idx].label == e.label {
-		n.edges[idx].node = e.node
-		return
-	}
-	panic("replacing missing edge")
-}
-
-func (n *Node) getEdge(label byte) (int, *Node) {
-	num := len(n.edges)
-	idx := sort.Search(num, func(i int) bool {
-		return n.edges[i].label >= label
-	})
-	if idx < num && n.edges[idx].label == label {
-		return idx, n.edges[idx].node
-	}
-	return -1, nil
-}
-
-func (n *Node) getLowerBoundEdge(label byte) (int, *Node) {
-	num := len(n.edges)
-	idx := sort.Search(num, func(i int) bool {
-		return n.edges[i].label >= label
-	})
-	// we want lower bound behavior so return even if it's not an exact match
-	if idx < num {
-		return idx, n.edges[idx].node
-	}
-	return -1, nil
-}
-
-func (n *Node) delEdge(label byte) {
-	num := len(n.edges)
-	idx := sort.Search(num, func(i int) bool {
-		return n.edges[i].label >= label
-	})
-	if idx < num && n.edges[idx].label == label {
-		copy(n.edges[idx:], n.edges[idx+1:])
-		n.edges[len(n.edges)-1] = edge{}
-		n.edges = n.edges[:len(n.edges)-1]
-	}
-}
-
-func (n *Node) GetWatch(k []byte) (<-chan struct{}, interface{}, bool) {
-	search := k
-	watch := n.mutateCh
-	for {
-		// Check for key exhaustion
-		if len(search) == 0 {
-			if n.isLeaf() {
-				return n.leaf.mutateCh, n.leaf.val, true
-			}
-			break
-		}
-
-		// Look for an edge
-		_, n = n.getEdge(search[0])
-		if n == nil {
-			break
-		}
-
-		// Update to the finest granularity as the search makes progress
-		watch = n.mutateCh
-
-		// Consume the search prefix
-		if bytes.HasPrefix(search, n.prefix) {
-			search = search[len(n.prefix):]
-		} else {
-			break
-		}
-	}
-	return watch, nil, false
-}
-
-func (n *Node) Get(k []byte) (interface{}, bool) {
-	_, val, ok := n.GetWatch(k)
-	return val, ok
-}
-
-// LongestPrefix is like Get, but instead of an
-// exact match, it will return the longest prefix match.
-func (n *Node) LongestPrefix(k []byte) ([]byte, interface{}, bool) {
-	var last *leafNode
-	search := k
-	for {
-		// Look for a leaf node
-		if n.isLeaf() {
-			last = n.leaf
-		}
-
-		// Check for key exhaustion
-		if len(search) == 0 {
-			break
-		}
-
-		// Look for an edge
-		_, n = n.getEdge(search[0])
-		if n == nil {
-			break
-		}
-
-		// Consume the search prefix
-		if bytes.HasPrefix(search, n.prefix) {
-			search = search[len(n.prefix):]
-		} else {
-			break
-		}
-	}
-	if last != nil {
-		return last.key, last.val, true
-	}
-	return nil, nil, false
-}
-
-// Minimum is used to return the minimum value in the tree
-func (n *Node) Minimum() ([]byte, interface{}, bool) {
-	for {
-		if n.isLeaf() {
-			return n.leaf.key, n.leaf.val, true
-		}
-		if len(n.edges) > 0 {
-			n = n.edges[0].node
-		} else {
-			break
-		}
-	}
-	return nil, nil, false
-}
-
-// Maximum is used to return the maximum value in the tree
-func (n *Node) Maximum() ([]byte, interface{}, bool) {
-	for {
-		if num := len(n.edges); num > 0 {
-			n = n.edges[num-1].node
-			continue
-		}
-		if n.isLeaf() {
-			return n.leaf.key, n.leaf.val, true
-		} else {
-			break
-		}
-	}
-	return nil, nil, false
-}
-
-// Iterator is used to return an iterator at
-// the given node to walk the tree
-func (n *Node) Iterator() *Iterator {
-	return &Iterator{node: n}
-}
-
-// rawIterator is used to return a raw iterator at the given node to walk the
-// tree.
-func (n *Node) rawIterator() *rawIterator {
-	iter := &rawIterator{node: n}
-	iter.Next()
-	return iter
-}
-
-// Walk is used to walk the tree
-func (n *Node) Walk(fn WalkFn) {
-	recursiveWalk(n, fn)
-}
-
-// WalkPrefix is used to walk the tree under a prefix
-func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) {
-	search := prefix
-	for {
-		// Check for key exhaustion
-		if len(search) == 0 {
-			recursiveWalk(n, fn)
-			return
-		}
-
-		// Look for an edge
-		_, n = n.getEdge(search[0])
-		if n == nil {
-			break
-		}
-
-		// Consume the search prefix
-		if bytes.HasPrefix(search, n.prefix) {
-			search = search[len(n.prefix):]
-
-		} else if bytes.HasPrefix(n.prefix, search) {
-			// Child may be under our search prefix
-			recursiveWalk(n, fn)
-			return
-		} else {
-			break
-		}
-	}
-}
-
-// WalkPath is used to walk the tree, but only visiting nodes
-// from the root down to a given leaf. Where WalkPrefix walks
-// all the entries *under* the given prefix, this walks the
-// entries *above* the given prefix.
-func (n *Node) WalkPath(path []byte, fn WalkFn) {
-	search := path
-	for {
-		// Visit the leaf values if any
-		if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
-			return
-		}
-
-		// Check for key exhaustion
-		if len(search) == 0 {
-			return
-		}
-
-		// Look for an edge
-		_, n = n.getEdge(search[0])
-		if n == nil {
-			return
-		}
-
-		// Consume the search prefix
-		if bytes.HasPrefix(search, n.prefix) {
-			search = search[len(n.prefix):]
-		} else {
-			break
-		}
-	}
-}
-
-// recursiveWalk is used to do a pre-order walk of a node
-// recursively. Returns true if the walk should be aborted
-func recursiveWalk(n *Node, fn WalkFn) bool {
-	// Visit the leaf values if any
-	if n.leaf != nil && fn(n.leaf.key, n.leaf.val) {
-		return true
-	}
-
-	// Recurse on the children
-	for _, e := range n.edges {
-		if recursiveWalk(e.node, fn) {
-			return true
-		}
-	}
-	return false
-}
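
For reference, a small sketch of the exported Node helpers removed above (Get and WalkPrefix). The keys are made up for illustration; this is not voltctl code:

```go
package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	r := iradix.New()
	r, _, _ = r.Insert([]byte("adapter/openolt"), "registered")
	r, _, _ = r.Insert([]byte("adapter/openonu"), "registered")

	// Exact-match lookup through the root node.
	if v, ok := r.Root().Get([]byte("adapter/openolt")); ok {
		fmt.Println("found:", v)
	}

	// Pre-order walk of everything under a prefix; returning true stops the walk.
	r.Root().WalkPrefix([]byte("adapter/"), func(k []byte, v interface{}) bool {
		fmt.Printf("%s -> %v\n", k, v)
		return false
	})
}
```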
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
deleted file mode 100644
index 04814c1..0000000
--- a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package iradix
-
-// rawIterator visits each of the nodes in the tree, even the ones that are not
-// leaves. It keeps track of the effective path (what a leaf at a given node
-// would be called), which is useful for comparing trees.
-type rawIterator struct {
-	// node is the starting node in the tree for the iterator.
-	node *Node
-
-	// stack keeps track of edges in the frontier.
-	stack []rawStackEntry
-
-	// pos is the current position of the iterator.
-	pos *Node
-
-	// path is the effective path of the current iterator position,
-	// regardless of whether the current node is a leaf.
-	path string
-}
-
-// rawStackEntry is used to keep track of the cumulative common path as well as
-// its associated edges in the frontier.
-type rawStackEntry struct {
-	path  string
-	edges edges
-}
-
-// Front returns the current node that has been iterated to.
-func (i *rawIterator) Front() *Node {
-	return i.pos
-}
-
-// Path returns the effective path of the current node, even if it's not actually
-// a leaf.
-func (i *rawIterator) Path() string {
-	return i.path
-}
-
-// Next advances the iterator to the next node.
-func (i *rawIterator) Next() {
-	// Initialize our stack if needed.
-	if i.stack == nil && i.node != nil {
-		i.stack = []rawStackEntry{
-			rawStackEntry{
-				edges: edges{
-					edge{node: i.node},
-				},
-			},
-		}
-	}
-
-	for len(i.stack) > 0 {
-		// Inspect the last element of the stack.
-		n := len(i.stack)
-		last := i.stack[n-1]
-		elem := last.edges[0].node
-
-		// Update the stack.
-		if len(last.edges) > 1 {
-			i.stack[n-1].edges = last.edges[1:]
-		} else {
-			i.stack = i.stack[:n-1]
-		}
-
-		// Push the edges onto the frontier.
-		if len(elem.edges) > 0 {
-			path := last.path + string(elem.prefix)
-			i.stack = append(i.stack, rawStackEntry{path, elem.edges})
-		}
-
-		i.pos = elem
-		i.path = last.path + string(elem.prefix)
-		return
-	}
-
-	i.pos = nil
-	i.path = ""
-}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml
deleted file mode 100644
index 80e1de4..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-sudo: false
-
-language: go
-
-go:
-  - 1.6
-
-branches:
-  only:
-    - master
-
-script: make test
diff --git a/vendor/github.com/hashicorp/go-rootcerts/LICENSE b/vendor/github.com/hashicorp/go-rootcerts/LICENSE
deleted file mode 100644
index e87a115..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/LICENSE
+++ /dev/null
@@ -1,363 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. "Contributor"
-
-     means each individual or legal entity that creates, contributes to the
-     creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
-
-     means the combination of the Contributions of others (if any) used by a
-     Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
-
-     means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
-
-     means Source Code Form to which the initial Contributor has attached the
-     notice in Exhibit A, the Executable Form of such Source Code Form, and
-     Modifications of such Source Code Form, in each case including portions
-     thereof.
-
-1.5. "Incompatible With Secondary Licenses"
-     means
-
-     a. that the initial Contributor has attached the notice described in
-        Exhibit B to the Covered Software; or
-
-     b. that the Covered Software was made available under the terms of
-        version 1.1 or earlier of the License, but not also under the terms of
-        a Secondary License.
-
-1.6. "Executable Form"
-
-     means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
-
-     means a work that combines Covered Software with other material, in a
-     separate file or files, that is not Covered Software.
-
-1.8. "License"
-
-     means this document.
-
-1.9. "Licensable"
-
-     means having the right to grant, to the maximum extent possible, whether
-     at the time of the initial grant or subsequently, any and all of the
-     rights conveyed by this License.
-
-1.10. "Modifications"
-
-     means any of the following:
-
-     a. any file in Source Code Form that results from an addition to,
-        deletion from, or modification of the contents of Covered Software; or
-
-     b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. "Patent Claims" of a Contributor
-
-      means any patent claim(s), including without limitation, method,
-      process, and apparatus claims, in any patent Licensable by such
-      Contributor that would be infringed, but for the grant of the License,
-      by the making, using, selling, offering for sale, having made, import,
-      or transfer of either its Contributions or its Contributor Version.
-
-1.12. "Secondary License"
-
-      means either the GNU General Public License, Version 2.0, the GNU Lesser
-      General Public License, Version 2.1, the GNU Affero General Public
-      License, Version 3.0, or any later versions of those licenses.
-
-1.13. "Source Code Form"
-
-      means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
-
-      means an individual or a legal entity exercising rights under this
-      License. For legal entities, "You" includes any entity that controls, is
-      controlled by, or is under common control with You. For purposes of this
-      definition, "control" means (a) the power, direct or indirect, to cause
-      the direction or management of such entity, whether by contract or
-      otherwise, or (b) ownership of more than fifty percent (50%) of the
-      outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
-     Each Contributor hereby grants You a world-wide, royalty-free,
-     non-exclusive license:
-
-     a. under intellectual property rights (other than patent or trademark)
-        Licensable by such Contributor to use, reproduce, make available,
-        modify, display, perform, distribute, and otherwise exploit its
-        Contributions, either on an unmodified basis, with Modifications, or
-        as part of a Larger Work; and
-
-     b. under Patent Claims of such Contributor to make, use, sell, offer for
-        sale, have made, import, and otherwise transfer either its
-        Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-     The licenses granted in Section 2.1 with respect to any Contribution
-     become effective for each Contribution on the date the Contributor first
-     distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-     The licenses granted in this Section 2 are the only rights granted under
-     this License. No additional rights or licenses will be implied from the
-     distribution or licensing of Covered Software under this License.
-     Notwithstanding Section 2.1(b) above, no patent license is granted by a
-     Contributor:
-
-     a. for any code that a Contributor has removed from Covered Software; or
-
-     b. for infringements caused by: (i) Your and any other third party's
-        modifications of Covered Software, or (ii) the combination of its
-        Contributions with other software (except as part of its Contributor
-        Version); or
-
-     c. under Patent Claims infringed by Covered Software in the absence of
-        its Contributions.
-
-     This License does not grant any rights in the trademarks, service marks,
-     or logos of any Contributor (except as may be necessary to comply with
-     the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-     No Contributor makes additional grants as a result of Your choice to
-     distribute the Covered Software under a subsequent version of this
-     License (see Section 10.2) or under the terms of a Secondary License (if
-     permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-     Each Contributor represents that the Contributor believes its
-     Contributions are its original creation(s) or it has sufficient rights to
-     grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-     This License is not intended to limit any rights You have under
-     applicable copyright doctrines of fair use, fair dealing, or other
-     equivalents.
-
-2.7. Conditions
-
-     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
-     Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
-     All distribution of Covered Software in Source Code Form, including any
-     Modifications that You create or to which You contribute, must be under
-     the terms of this License. You must inform recipients that the Source
-     Code Form of the Covered Software is governed by the terms of this
-     License, and how they can obtain a copy of this License. You may not
-     attempt to alter or restrict the recipients' rights in the Source Code
-     Form.
-
-3.2. Distribution of Executable Form
-
-     If You distribute Covered Software in Executable Form then:
-
-     a. such Covered Software must also be made available in Source Code Form,
-        as described in Section 3.1, and You must inform recipients of the
-        Executable Form how they can obtain a copy of such Source Code Form by
-        reasonable means in a timely manner, at a charge no more than the cost
-        of distribution to the recipient; and
-
-     b. You may distribute such Executable Form under the terms of this
-        License, or sublicense it under different terms, provided that the
-        license for the Executable Form does not attempt to limit or alter the
-        recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-     You may create and distribute a Larger Work under terms of Your choice,
-     provided that You also comply with the requirements of this License for
-     the Covered Software. If the Larger Work is a combination of Covered
-     Software with a work governed by one or more Secondary Licenses, and the
-     Covered Software is not Incompatible With Secondary Licenses, this
-     License permits You to additionally distribute such Covered Software
-     under the terms of such Secondary License(s), so that the recipient of
-     the Larger Work may, at their option, further distribute the Covered
-     Software under the terms of either this License or such Secondary
-     License(s).
-
-3.4. Notices
-
-     You may not remove or alter the substance of any license notices
-     (including copyright notices, patent notices, disclaimers of warranty, or
-     limitations of liability) contained within the Source Code Form of the
-     Covered Software, except that You may alter any license notices to the
-     extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-     You may choose to offer, and to charge a fee for, warranty, support,
-     indemnity or liability obligations to one or more recipients of Covered
-     Software. However, You may do so only on Your own behalf, and not on
-     behalf of any Contributor. You must make it absolutely clear that any
-     such warranty, support, indemnity, or liability obligation is offered by
-     You alone, and You hereby agree to indemnify every Contributor for any
-     liability incurred by such Contributor as a result of warranty, support,
-     indemnity or liability terms You offer. You may include additional
-     disclaimers of warranty and limitations of liability specific to any
-     jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
-   If it is impossible for You to comply with any of the terms of this License
-   with respect to some or all of the Covered Software due to statute,
-   judicial order, or regulation then You must: (a) comply with the terms of
-   this License to the maximum extent possible; and (b) describe the
-   limitations and the code they affect. Such description must be placed in a
-   text file included with all distributions of the Covered Software under
-   this License. Except to the extent prohibited by statute or regulation,
-   such description must be sufficiently detailed for a recipient of ordinary
-   skill to be able to understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
-     fail to comply with any of its terms. However, if You become compliant,
-     then the rights granted under this License from a particular Contributor
-     are reinstated (a) provisionally, unless and until such Contributor
-     explicitly and finally terminates Your grants, and (b) on an ongoing
-     basis, if such Contributor fails to notify You of the non-compliance by
-     some reasonable means prior to 60 days after You have come back into
-     compliance. Moreover, Your grants from a particular Contributor are
-     reinstated on an ongoing basis if such Contributor notifies You of the
-     non-compliance by some reasonable means, this is the first time You have
-     received notice of non-compliance with this License from such
-     Contributor, and You become compliant prior to 30 days after Your receipt
-     of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-     infringement claim (excluding declaratory judgment actions,
-     counter-claims, and cross-claims) alleging that a Contributor Version
-     directly or indirectly infringes any patent, then the rights granted to
-     You by any and all Contributors for the Covered Software under Section
-     2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
-     license agreements (excluding distributors and resellers) which have been
-     validly granted by You or Your distributors under this License prior to
-     termination shall survive termination.
-
-6. Disclaimer of Warranty
-
-   Covered Software is provided under this License on an "as is" basis,
-   without warranty of any kind, either expressed, implied, or statutory,
-   including, without limitation, warranties that the Covered Software is free
-   of defects, merchantable, fit for a particular purpose or non-infringing.
-   The entire risk as to the quality and performance of the Covered Software
-   is with You. Should any Covered Software prove defective in any respect,
-   You (not any Contributor) assume the cost of any necessary servicing,
-   repair, or correction. This disclaimer of warranty constitutes an essential
-   part of this License. No use of  any Covered Software is authorized under
-   this License except under this disclaimer.
-
-7. Limitation of Liability
-
-   Under no circumstances and under no legal theory, whether tort (including
-   negligence), contract, or otherwise, shall any Contributor, or anyone who
-   distributes Covered Software as permitted above, be liable to You for any
-   direct, indirect, special, incidental, or consequential damages of any
-   character including, without limitation, damages for lost profits, loss of
-   goodwill, work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses, even if such party shall have been
-   informed of the possibility of such damages. This limitation of liability
-   shall not apply to liability for death or personal injury resulting from
-   such party's negligence to the extent applicable law prohibits such
-   limitation. Some jurisdictions do not allow the exclusion or limitation of
-   incidental or consequential damages, so this exclusion and limitation may
-   not apply to You.
-
-8. Litigation
-
-   Any litigation relating to this License may be brought only in the courts
-   of a jurisdiction where the defendant maintains its principal place of
-   business and such litigation shall be governed by laws of that
-   jurisdiction, without reference to its conflict-of-law provisions. Nothing
-   in this Section shall prevent a party's ability to bring cross-claims or
-   counter-claims.
-
-9. Miscellaneous
-
-   This License represents the complete agreement concerning the subject
-   matter hereof. If any provision of this License is held to be
-   unenforceable, such provision shall be reformed only to the extent
-   necessary to make it enforceable. Any law or regulation which provides that
-   the language of a contract shall be construed against the drafter shall not
-   be used to construe this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
-      Mozilla Foundation is the license steward. Except as provided in Section
-      10.3, no one other than the license steward has the right to modify or
-      publish new versions of this License. Each version will be given a
-      distinguishing version number.
-
-10.2. Effect of New Versions
-
-      You may distribute the Covered Software under the terms of the version
-      of the License under which You originally received the Covered Software,
-      or under the terms of any subsequent version published by the license
-      steward.
-
-10.3. Modified Versions
-
-      If you create software not governed by this License, and you want to
-      create a new license for such software, you may create and use a
-      modified version of this License if you rename the license and remove
-      any references to the name of the license steward (except to note that
-      such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-      Licenses If You choose to distribute Source Code Form that is
-      Incompatible With Secondary Licenses under the terms of this version of
-      the License, the notice described in Exhibit B of this License must be
-      attached.
-
-Exhibit A - Source Code Form License Notice
-
-      This Source Code Form is subject to the
-      terms of the Mozilla Public License, v.
-      2.0. If a copy of the MPL was not
-      distributed with this file, You can
-      obtain one at
-      http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file,
-then You may include the notice in a location (such as a LICENSE file in a
-relevant directory) where a recipient would be likely to look for such a
-notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
-
-      This Source Code Form is "Incompatible
-      With Secondary Licenses", as defined by
-      the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/go-rootcerts/Makefile b/vendor/github.com/hashicorp/go-rootcerts/Makefile
deleted file mode 100644
index c3989e7..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/Makefile
+++ /dev/null
@@ -1,8 +0,0 @@
-TEST?=./...
-
-test:
-	go test $(TEST) $(TESTARGS) -timeout=3s -parallel=4
-	go vet $(TEST)
-	go test $(TEST) -race
-
-.PHONY: test
diff --git a/vendor/github.com/hashicorp/go-rootcerts/README.md b/vendor/github.com/hashicorp/go-rootcerts/README.md
deleted file mode 100644
index f5abffc..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# rootcerts
-
-Functions for loading root certificates for TLS connections.
-
------
-
-Go's standard library `crypto/tls` provides a common mechanism for configuring
-TLS connections in `tls.Config`. The `RootCAs` field on this struct is a pool
-of certificates for the client to use as a trust store when verifying server
-certificates.
-
-This library contains utility functions for loading certificates destined for
-that field, as well as one other important thing:
-
-When the `RootCAs` field is `nil`, the standard library attempts to load the
-host's root CA set.  This behavior is OS-specific, and the Darwin
-implementation contains [a bug that prevents trusted certificates from the
-System and Login keychains from being loaded][1]. This library contains
-Darwin-specific behavior that works around that bug.
-
-[1]: https://github.com/golang/go/issues/14514
-
-## Example Usage
-
-Here's a snippet demonstrating how this library is meant to be used:
-
-```go
-func httpClient() (*http.Client, error) {
-	tlsConfig := &tls.Config{}
-	err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
-		CAFile: os.Getenv("MYAPP_CAFILE"),
-		CAPath: os.Getenv("MYAPP_CAPATH"),
-	})
-	if err != nil {
-		return nil, err
-	}
-	c := cleanhttp.DefaultClient()
-	t := cleanhttp.DefaultTransport()
-	t.TLSClientConfig = tlsConfig
-	c.Transport = t
-	return c, nil
-}
-```
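
The README snippet builds an *http.Client, while voltctl speaks gRPC, so the closer analogue is passing a rootcerts-configured tls.Config to grpc.Dial. The sketch below is a hedged illustration only: the endpoint address and the VOLTCTL_CACERT environment variable are assumptions, not the tool's actual flags or configuration keys:

```go
package main

import (
	"crypto/tls"
	"log"
	"os"

	"github.com/hashicorp/go-rootcerts"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	tlsConfig := &tls.Config{}
	if err := rootcerts.ConfigureTLS(tlsConfig, &rootcerts.Config{
		CAFile: os.Getenv("VOLTCTL_CACERT"), // hypothetical variable name
	}); err != nil {
		log.Fatal(err)
	}

	// Dial a gRPC endpoint using the trust store assembled above.
	conn, err := grpc.Dial("voltha-api.example.org:55555", // illustrative endpoint
		grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```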
diff --git a/vendor/github.com/hashicorp/go-rootcerts/doc.go b/vendor/github.com/hashicorp/go-rootcerts/doc.go
deleted file mode 100644
index b55cc62..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/doc.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Package rootcerts contains functions to aid in loading CA certificates for
-// TLS connections.
-//
-// In addition, its default behavior on Darwin works around an open issue [1]
-// in Go's crypto/x509 that prevents certificates from being loaded from the
-// System or Login keychains.
-//
-// [1] https://github.com/golang/go/issues/14514
-package rootcerts
diff --git a/vendor/github.com/hashicorp/go-rootcerts/go.mod b/vendor/github.com/hashicorp/go-rootcerts/go.mod
deleted file mode 100644
index e2dd024..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/go.mod
+++ /dev/null
@@ -1,5 +0,0 @@
-module github.com/hashicorp/go-rootcerts
-
-go 1.12
-
-require github.com/mitchellh/go-homedir v1.1.0
diff --git a/vendor/github.com/hashicorp/go-rootcerts/go.sum b/vendor/github.com/hashicorp/go-rootcerts/go.sum
deleted file mode 100644
index ae38d14..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/go.sum
+++ /dev/null
@@ -1,2 +0,0 @@
-github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
-github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
deleted file mode 100644
index aeb30ec..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package rootcerts
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-)
-
-// Config determines where LoadCACerts will load certificates from. When both
-// CAFile and CAPath are blank, this library's functions will either load
-// system roots explicitly and return them, or set the CertPool to nil to allow
-// Go's standard library to load system certs.
-type Config struct {
-	// CAFile is a path to a PEM-encoded certificate file or bundle. Takes
-	// precedence over CAPath.
-	CAFile string
-
-	// CAPath is a path to a directory populated with PEM-encoded certificates.
-	CAPath string
-}
-
-// ConfigureTLS sets up the RootCAs on the provided tls.Config based on the
-// Config specified.
-func ConfigureTLS(t *tls.Config, c *Config) error {
-	if t == nil {
-		return nil
-	}
-	pool, err := LoadCACerts(c)
-	if err != nil {
-		return err
-	}
-	t.RootCAs = pool
-	return nil
-}
-
-// LoadCACerts loads a CertPool based on the Config specified.
-func LoadCACerts(c *Config) (*x509.CertPool, error) {
-	if c == nil {
-		c = &Config{}
-	}
-	if c.CAFile != "" {
-		return LoadCAFile(c.CAFile)
-	}
-	if c.CAPath != "" {
-		return LoadCAPath(c.CAPath)
-	}
-
-	return LoadSystemCAs()
-}
-
-// LoadCAFile loads a single PEM-encoded file from the path specified.
-func LoadCAFile(caFile string) (*x509.CertPool, error) {
-	pool := x509.NewCertPool()
-
-	pem, err := ioutil.ReadFile(caFile)
-	if err != nil {
-		return nil, fmt.Errorf("Error loading CA File: %s", err)
-	}
-
-	ok := pool.AppendCertsFromPEM(pem)
-	if !ok {
-		return nil, fmt.Errorf("Error loading CA File: Couldn't parse PEM in: %s", caFile)
-	}
-
-	return pool, nil
-}
-
-// LoadCAPath walks the provided path and loads all certificates encountered into
-// a pool.
-func LoadCAPath(caPath string) (*x509.CertPool, error) {
-	pool := x509.NewCertPool()
-	walkFn := func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-
-		if info.IsDir() {
-			return nil
-		}
-
-		pem, err := ioutil.ReadFile(path)
-		if err != nil {
-			return fmt.Errorf("Error loading file from CAPath: %s", err)
-		}
-
-		ok := pool.AppendCertsFromPEM(pem)
-		if !ok {
-			return fmt.Errorf("Error loading CA Path: Couldn't parse PEM in: %s", path)
-		}
-
-		return nil
-	}
-
-	err := filepath.Walk(caPath, walkFn)
-	if err != nil {
-		return nil, err
-	}
-
-	return pool, nil
-}
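
A minimal sketch of calling LoadCACerts directly to assemble a tls.Config; the CA directory path and helper name are hypothetical, and the fallback behaviour noted in the comments comes from the package documentation above:

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/hashicorp/go-rootcerts"
)

// newTLSConfig loads a CA pool from a directory of PEM files. With an empty
// path, LoadCACerts falls through to LoadSystemCAs instead.
func newTLSConfig(caPath string) (*tls.Config, error) {
	pool, err := rootcerts.LoadCACerts(&rootcerts.Config{CAPath: caPath})
	if err != nil {
		return nil, err
	}
	// pool may be nil on non-Darwin systems, which tells crypto/tls to use
	// the host's default trust store.
	return &tls.Config{RootCAs: pool}, nil
}

func main() {
	cfg, err := newTLSConfig("/etc/voltctl/certs") // illustrative path
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("RootCAs configured:", cfg.RootCAs != nil)
}
```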
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
deleted file mode 100644
index 66b1472..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_base.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !darwin
-
-package rootcerts
-
-import "crypto/x509"
-
-// LoadSystemCAs does nothing on non-Darwin systems. We return nil so that
-// default behavior of standard TLS config libraries is triggered, which is to
-// load system certs.
-func LoadSystemCAs() (*x509.CertPool, error) {
-	return nil, nil
-}
diff --git a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go b/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
deleted file mode 100644
index a9a0406..0000000
--- a/vendor/github.com/hashicorp/go-rootcerts/rootcerts_darwin.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package rootcerts
-
-import (
-	"crypto/x509"
-	"os/exec"
-	"path"
-
-	"github.com/mitchellh/go-homedir"
-)
-
-// LoadSystemCAs has special behavior on Darwin systems to work around the keychain-loading issue in crypto/x509 described in doc.go.
-func LoadSystemCAs() (*x509.CertPool, error) {
-	pool := x509.NewCertPool()
-
-	for _, keychain := range certKeychains() {
-		err := addCertsFromKeychain(pool, keychain)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	return pool, nil
-}
-
-func addCertsFromKeychain(pool *x509.CertPool, keychain string) error {
-	cmd := exec.Command("/usr/bin/security", "find-certificate", "-a", "-p", keychain)
-	data, err := cmd.Output()
-	if err != nil {
-		return err
-	}
-
-	pool.AppendCertsFromPEM(data)
-
-	return nil
-}
-
-func certKeychains() []string {
-	keychains := []string{
-		"/System/Library/Keychains/SystemRootCertificates.keychain",
-		"/Library/Keychains/System.keychain",
-	}
-	home, err := homedir.Dir()
-	if err == nil {
-		loginKeychain := path.Join(home, "Library", "Keychains", "login.keychain")
-		keychains = append(keychains, loginKeychain)
-	}
-	return keychains
-}
diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE
deleted file mode 100644
index be2cc4d..0000000
--- a/vendor/github.com/hashicorp/golang-lru/LICENSE
+++ /dev/null
@@ -1,362 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. "Contributor"
-
-     means each individual or legal entity that creates, contributes to the
-     creation of, or owns Covered Software.
-
-1.2. "Contributor Version"
-
-     means the combination of the Contributions of others (if any) used by a
-     Contributor and that particular Contributor's Contribution.
-
-1.3. "Contribution"
-
-     means Covered Software of a particular Contributor.
-
-1.4. "Covered Software"
-
-     means Source Code Form to which the initial Contributor has attached the
-     notice in Exhibit A, the Executable Form of such Source Code Form, and
-     Modifications of such Source Code Form, in each case including portions
-     thereof.
-
-1.5. "Incompatible With Secondary Licenses"
-     means
-
-     a. that the initial Contributor has attached the notice described in
-        Exhibit B to the Covered Software; or
-
-     b. that the Covered Software was made available under the terms of
-        version 1.1 or earlier of the License, but not also under the terms of
-        a Secondary License.
-
-1.6. "Executable Form"
-
-     means any form of the work other than Source Code Form.
-
-1.7. "Larger Work"
-
-     means a work that combines Covered Software with other material, in a
-     separate file or files, that is not Covered Software.
-
-1.8. "License"
-
-     means this document.
-
-1.9. "Licensable"
-
-     means having the right to grant, to the maximum extent possible, whether
-     at the time of the initial grant or subsequently, any and all of the
-     rights conveyed by this License.
-
-1.10. "Modifications"
-
-     means any of the following:
-
-     a. any file in Source Code Form that results from an addition to,
-        deletion from, or modification of the contents of Covered Software; or
-
-     b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. "Patent Claims" of a Contributor
-
-      means any patent claim(s), including without limitation, method,
-      process, and apparatus claims, in any patent Licensable by such
-      Contributor that would be infringed, but for the grant of the License,
-      by the making, using, selling, offering for sale, having made, import,
-      or transfer of either its Contributions or its Contributor Version.
-
-1.12. "Secondary License"
-
-      means either the GNU General Public License, Version 2.0, the GNU Lesser
-      General Public License, Version 2.1, the GNU Affero General Public
-      License, Version 3.0, or any later versions of those licenses.
-
-1.13. "Source Code Form"
-
-      means the form of the work preferred for making modifications.
-
-1.14. "You" (or "Your")
-
-      means an individual or a legal entity exercising rights under this
-      License. For legal entities, "You" includes any entity that controls, is
-      controlled by, or is under common control with You. For purposes of this
-      definition, "control" means (a) the power, direct or indirect, to cause
-      the direction or management of such entity, whether by contract or
-      otherwise, or (b) ownership of more than fifty percent (50%) of the
-      outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
-     Each Contributor hereby grants You a world-wide, royalty-free,
-     non-exclusive license:
-
-     a. under intellectual property rights (other than patent or trademark)
-        Licensable by such Contributor to use, reproduce, make available,
-        modify, display, perform, distribute, and otherwise exploit its
-        Contributions, either on an unmodified basis, with Modifications, or
-        as part of a Larger Work; and
-
-     b. under Patent Claims of such Contributor to make, use, sell, offer for
-        sale, have made, import, and otherwise transfer either its
-        Contributions or its Contributor Version.
-
-2.2. Effective Date
-
-     The licenses granted in Section 2.1 with respect to any Contribution
-     become effective for each Contribution on the date the Contributor first
-     distributes such Contribution.
-
-2.3. Limitations on Grant Scope
-
-     The licenses granted in this Section 2 are the only rights granted under
-     this License. No additional rights or licenses will be implied from the
-     distribution or licensing of Covered Software under this License.
-     Notwithstanding Section 2.1(b) above, no patent license is granted by a
-     Contributor:
-
-     a. for any code that a Contributor has removed from Covered Software; or
-
-     b. for infringements caused by: (i) Your and any other third party's
-        modifications of Covered Software, or (ii) the combination of its
-        Contributions with other software (except as part of its Contributor
-        Version); or
-
-     c. under Patent Claims infringed by Covered Software in the absence of
-        its Contributions.
-
-     This License does not grant any rights in the trademarks, service marks,
-     or logos of any Contributor (except as may be necessary to comply with
-     the notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-     No Contributor makes additional grants as a result of Your choice to
-     distribute the Covered Software under a subsequent version of this
-     License (see Section 10.2) or under the terms of a Secondary License (if
-     permitted under the terms of Section 3.3).
-
-2.5. Representation
-
-     Each Contributor represents that the Contributor believes its
-     Contributions are its original creation(s) or it has sufficient rights to
-     grant the rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-     This License is not intended to limit any rights You have under
-     applicable copyright doctrines of fair use, fair dealing, or other
-     equivalents.
-
-2.7. Conditions
-
-     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
-     Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
-     All distribution of Covered Software in Source Code Form, including any
-     Modifications that You create or to which You contribute, must be under
-     the terms of this License. You must inform recipients that the Source
-     Code Form of the Covered Software is governed by the terms of this
-     License, and how they can obtain a copy of this License. You may not
-     attempt to alter or restrict the recipients' rights in the Source Code
-     Form.
-
-3.2. Distribution of Executable Form
-
-     If You distribute Covered Software in Executable Form then:
-
-     a. such Covered Software must also be made available in Source Code Form,
-        as described in Section 3.1, and You must inform recipients of the
-        Executable Form how they can obtain a copy of such Source Code Form by
-        reasonable means in a timely manner, at a charge no more than the cost
-        of distribution to the recipient; and
-
-     b. You may distribute such Executable Form under the terms of this
-        License, or sublicense it under different terms, provided that the
-        license for the Executable Form does not attempt to limit or alter the
-        recipients' rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-     You may create and distribute a Larger Work under terms of Your choice,
-     provided that You also comply with the requirements of this License for
-     the Covered Software. If the Larger Work is a combination of Covered
-     Software with a work governed by one or more Secondary Licenses, and the
-     Covered Software is not Incompatible With Secondary Licenses, this
-     License permits You to additionally distribute such Covered Software
-     under the terms of such Secondary License(s), so that the recipient of
-     the Larger Work may, at their option, further distribute the Covered
-     Software under the terms of either this License or such Secondary
-     License(s).
-
-3.4. Notices
-
-     You may not remove or alter the substance of any license notices
-     (including copyright notices, patent notices, disclaimers of warranty, or
-     limitations of liability) contained within the Source Code Form of the
-     Covered Software, except that You may alter any license notices to the
-     extent required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-     You may choose to offer, and to charge a fee for, warranty, support,
-     indemnity or liability obligations to one or more recipients of Covered
-     Software. However, You may do so only on Your own behalf, and not on
-     behalf of any Contributor. You must make it absolutely clear that any
-     such warranty, support, indemnity, or liability obligation is offered by
-     You alone, and You hereby agree to indemnify every Contributor for any
-     liability incurred by such Contributor as a result of warranty, support,
-     indemnity or liability terms You offer. You may include additional
-     disclaimers of warranty and limitations of liability specific to any
-     jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
-   If it is impossible for You to comply with any of the terms of this License
-   with respect to some or all of the Covered Software due to statute,
-   judicial order, or regulation then You must: (a) comply with the terms of
-   this License to the maximum extent possible; and (b) describe the
-   limitations and the code they affect. Such description must be placed in a
-   text file included with all distributions of the Covered Software under
-   this License. Except to the extent prohibited by statute or regulation,
-   such description must be sufficiently detailed for a recipient of ordinary
-   skill to be able to understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
-     fail to comply with any of its terms. However, if You become compliant,
-     then the rights granted under this License from a particular Contributor
-     are reinstated (a) provisionally, unless and until such Contributor
-     explicitly and finally terminates Your grants, and (b) on an ongoing
-     basis, if such Contributor fails to notify You of the non-compliance by
-     some reasonable means prior to 60 days after You have come back into
-     compliance. Moreover, Your grants from a particular Contributor are
-     reinstated on an ongoing basis if such Contributor notifies You of the
-     non-compliance by some reasonable means, this is the first time You have
-     received notice of non-compliance with this License from such
-     Contributor, and You become compliant prior to 30 days after Your receipt
-     of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-     infringement claim (excluding declaratory judgment actions,
-     counter-claims, and cross-claims) alleging that a Contributor Version
-     directly or indirectly infringes any patent, then the rights granted to
-     You by any and all Contributors for the Covered Software under Section
-     2.1 of this License shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
-     license agreements (excluding distributors and resellers) which have been
-     validly granted by You or Your distributors under this License prior to
-     termination shall survive termination.
-
-6. Disclaimer of Warranty
-
-   Covered Software is provided under this License on an "as is" basis,
-   without warranty of any kind, either expressed, implied, or statutory,
-   including, without limitation, warranties that the Covered Software is free
-   of defects, merchantable, fit for a particular purpose or non-infringing.
-   The entire risk as to the quality and performance of the Covered Software
-   is with You. Should any Covered Software prove defective in any respect,
-   You (not any Contributor) assume the cost of any necessary servicing,
-   repair, or correction. This disclaimer of warranty constitutes an essential
-   part of this License. No use of  any Covered Software is authorized under
-   this License except under this disclaimer.
-
-7. Limitation of Liability
-
-   Under no circumstances and under no legal theory, whether tort (including
-   negligence), contract, or otherwise, shall any Contributor, or anyone who
-   distributes Covered Software as permitted above, be liable to You for any
-   direct, indirect, special, incidental, or consequential damages of any
-   character including, without limitation, damages for lost profits, loss of
-   goodwill, work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses, even if such party shall have been
-   informed of the possibility of such damages. This limitation of liability
-   shall not apply to liability for death or personal injury resulting from
-   such party's negligence to the extent applicable law prohibits such
-   limitation. Some jurisdictions do not allow the exclusion or limitation of
-   incidental or consequential damages, so this exclusion and limitation may
-   not apply to You.
-
-8. Litigation
-
-   Any litigation relating to this License may be brought only in the courts
-   of a jurisdiction where the defendant maintains its principal place of
-   business and such litigation shall be governed by laws of that
-   jurisdiction, without reference to its conflict-of-law provisions. Nothing
-   in this Section shall prevent a party's ability to bring cross-claims or
-   counter-claims.
-
-9. Miscellaneous
-
-   This License represents the complete agreement concerning the subject
-   matter hereof. If any provision of this License is held to be
-   unenforceable, such provision shall be reformed only to the extent
-   necessary to make it enforceable. Any law or regulation which provides that
-   the language of a contract shall be construed against the drafter shall not
-   be used to construe this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
-      Mozilla Foundation is the license steward. Except as provided in Section
-      10.3, no one other than the license steward has the right to modify or
-      publish new versions of this License. Each version will be given a
-      distinguishing version number.
-
-10.2. Effect of New Versions
-
-      You may distribute the Covered Software under the terms of the version
-      of the License under which You originally received the Covered Software,
-      or under the terms of any subsequent version published by the license
-      steward.
-
-10.3. Modified Versions
-
-      If you create software not governed by this License, and you want to
-      create a new license for such software, you may create and use a
-      modified version of this License if you rename the license and remove
-      any references to the name of the license steward (except to note that
-      such modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary
-      Licenses If You choose to distribute Source Code Form that is
-      Incompatible With Secondary Licenses under the terms of this version of
-      the License, the notice described in Exhibit B of this License must be
-      attached.
-
-Exhibit A - Source Code Form License Notice
-
-      This Source Code Form is subject to the
-      terms of the Mozilla Public License, v.
-      2.0. If a copy of the MPL was not
-      distributed with this file, You can
-      obtain one at
-      http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file,
-then You may include the notice in a location (such as a LICENSE file in a
-relevant directory) where a recipient would be likely to look for such a
-notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - "Incompatible With Secondary Licenses" Notice
-
-      This Source Code Form is "Incompatible
-      With Secondary Licenses", as defined by
-      the Mozilla Public License, v. 2.0.
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
deleted file mode 100644
index a86c853..0000000
--- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package simplelru
-
-import (
-	"container/list"
-	"errors"
-)
-
-// EvictCallback is used to get a callback when a cache entry is evicted
-type EvictCallback func(key interface{}, value interface{})
-
-// LRU implements a non-thread safe fixed size LRU cache
-type LRU struct {
-	size      int
-	evictList *list.List
-	items     map[interface{}]*list.Element
-	onEvict   EvictCallback
-}
-
-// entry is used to hold a value in the evictList
-type entry struct {
-	key   interface{}
-	value interface{}
-}
-
-// NewLRU constructs an LRU of the given size
-func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
-	if size <= 0 {
-		return nil, errors.New("Must provide a positive size")
-	}
-	c := &LRU{
-		size:      size,
-		evictList: list.New(),
-		items:     make(map[interface{}]*list.Element),
-		onEvict:   onEvict,
-	}
-	return c, nil
-}
-
-// Purge is used to completely clear the cache.
-func (c *LRU) Purge() {
-	for k, v := range c.items {
-		if c.onEvict != nil {
-			c.onEvict(k, v.Value.(*entry).value)
-		}
-		delete(c.items, k)
-	}
-	c.evictList.Init()
-}
-
-// Add adds a value to the cache.  Returns true if an eviction occurred.
-func (c *LRU) Add(key, value interface{}) (evicted bool) {
-	// Check for existing item
-	if ent, ok := c.items[key]; ok {
-		c.evictList.MoveToFront(ent)
-		ent.Value.(*entry).value = value
-		return false
-	}
-
-	// Add new item
-	ent := &entry{key, value}
-	entry := c.evictList.PushFront(ent)
-	c.items[key] = entry
-
-	evict := c.evictList.Len() > c.size
-	// Verify size not exceeded
-	if evict {
-		c.removeOldest()
-	}
-	return evict
-}
-
-// Get looks up a key's value from the cache.
-func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
-	if ent, ok := c.items[key]; ok {
-		c.evictList.MoveToFront(ent)
-		if ent.Value.(*entry) == nil {
-			return nil, false
-		}
-		return ent.Value.(*entry).value, true
-	}
-	return
-}
-
-// Contains checks if a key is in the cache, without updating the recent-ness
-// or deleting it for being stale.
-func (c *LRU) Contains(key interface{}) (ok bool) {
-	_, ok = c.items[key]
-	return ok
-}
-
-// Peek returns the key value (or undefined if not found) without updating
-// the "recently used"-ness of the key.
-func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
-	var ent *list.Element
-	if ent, ok = c.items[key]; ok {
-		return ent.Value.(*entry).value, true
-	}
-	return nil, ok
-}
-
-// Remove removes the provided key from the cache, returning if the
-// key was contained.
-func (c *LRU) Remove(key interface{}) (present bool) {
-	if ent, ok := c.items[key]; ok {
-		c.removeElement(ent)
-		return true
-	}
-	return false
-}
-
-// RemoveOldest removes the oldest item from the cache.
-func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
-	ent := c.evictList.Back()
-	if ent != nil {
-		c.removeElement(ent)
-		kv := ent.Value.(*entry)
-		return kv.key, kv.value, true
-	}
-	return nil, nil, false
-}
-
-// GetOldest returns the oldest entry
-func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
-	ent := c.evictList.Back()
-	if ent != nil {
-		kv := ent.Value.(*entry)
-		return kv.key, kv.value, true
-	}
-	return nil, nil, false
-}
-
-// Keys returns a slice of the keys in the cache, from oldest to newest.
-func (c *LRU) Keys() []interface{} {
-	keys := make([]interface{}, len(c.items))
-	i := 0
-	for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
-		keys[i] = ent.Value.(*entry).key
-		i++
-	}
-	return keys
-}
-
-// Len returns the number of items in the cache.
-func (c *LRU) Len() int {
-	return c.evictList.Len()
-}
-
-// Resize changes the cache size.
-func (c *LRU) Resize(size int) (evicted int) {
-	diff := c.Len() - size
-	if diff < 0 {
-		diff = 0
-	}
-	for i := 0; i < diff; i++ {
-		c.removeOldest()
-	}
-	c.size = size
-	return diff
-}
-
-// removeOldest removes the oldest item from the cache.
-func (c *LRU) removeOldest() {
-	ent := c.evictList.Back()
-	if ent != nil {
-		c.removeElement(ent)
-	}
-}
-
-// removeElement is used to remove a given list element from the cache
-func (c *LRU) removeElement(e *list.Element) {
-	c.evictList.Remove(e)
-	kv := e.Value.(*entry)
-	delete(c.items, kv.key)
-	if c.onEvict != nil {
-		c.onEvict(kv.key, kv.value)
-	}
-}
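
A short usage sketch of this simplelru cache; the keys, values, and capacity are illustrative and the eviction callback is optional (nil disables it):

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// Evictions invoke the callback with the displaced key/value pair.
	onEvict := func(k interface{}, v interface{}) {
		fmt.Printf("evicted %v=%v\n", k, v)
	}

	cache, err := simplelru.NewLRU(2, onEvict)
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // capacity is 2, so "a" (the oldest) is evicted here

	if v, ok := cache.Get("b"); ok {
		fmt.Println("b =", v)
	}
	fmt.Println("len =", cache.Len())
}
```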
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
deleted file mode 100644
index 92d7093..0000000
--- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package simplelru
-
-// LRUCache is the interface for simple LRU cache.
-type LRUCache interface {
-	// Adds a value to the cache, returns true if an eviction occurred and
-	// updates the "recently used"-ness of the key.
-	Add(key, value interface{}) bool
-
-	// Returns key's value from the cache and
-	// updates the "recently used"-ness of the key. #value, isFound
-	Get(key interface{}) (value interface{}, ok bool)
-
-	// Checks if a key exists in cache without updating the recent-ness.
-	Contains(key interface{}) (ok bool)
-
-	// Returns key's value without updating the "recently used"-ness of the key.
-	Peek(key interface{}) (value interface{}, ok bool)
-
-	// Removes a key from the cache.
-	Remove(key interface{}) bool
-
-	// Removes the oldest entry from cache.
-	RemoveOldest() (interface{}, interface{}, bool)
-
-	// Returns the oldest entry from the cache. #key, value, isFound
-	GetOldest() (interface{}, interface{}, bool)
-
-	// Returns a slice of the keys in the cache, from oldest to newest.
-	Keys() []interface{}
-
-	// Returns the number of items in the cache.
-	Len() int
-
-	// Clears all cache entries.
-	Purge()
-
-	// Resizes cache, returning number evicted
-	Resize(int) int
-}
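
Because *LRU implements the LRUCache interface deleted above, callers can be written against the interface instead of the concrete type. A small, hypothetical helper, reusing the simplelru import from the previous sketch:

    // warm pre-populates any cache that satisfies simplelru.LRUCache.
    func warm(c simplelru.LRUCache, seed map[interface{}]interface{}) {
        for k, v := range seed {
            c.Add(k, v)
        }
    }

    // Usage:
    //   cache, _ := simplelru.NewLRU(128, nil)
    //   warm(cache, map[interface{}]interface{}{"x": 1, "y": 2})
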
diff --git a/vendor/github.com/hashicorp/serf/LICENSE b/vendor/github.com/hashicorp/serf/LICENSE
deleted file mode 100644
index c33dcc7..0000000
--- a/vendor/github.com/hashicorp/serf/LICENSE
+++ /dev/null
@@ -1,354 +0,0 @@
-Mozilla Public License, version 2.0
-
-1. Definitions
-
-1.1. “Contributor”
-
-     means each individual or legal entity that creates, contributes to the
-     creation of, or owns Covered Software.
-
-1.2. “Contributor Version”
-
-     means the combination of the Contributions of others (if any) used by a
-     Contributor and that particular Contributor’s Contribution.
-
-1.3. “Contribution”
-
-     means Covered Software of a particular Contributor.
-
-1.4. “Covered Software”
-
-     means Source Code Form to which the initial Contributor has attached the
-     notice in Exhibit A, the Executable Form of such Source Code Form, and
-     Modifications of such Source Code Form, in each case including portions
-     thereof.
-
-1.5. “Incompatible With Secondary Licenses”
-     means
-
-     a. that the initial Contributor has attached the notice described in
-        Exhibit B to the Covered Software; or
-
-     b. that the Covered Software was made available under the terms of version
-        1.1 or earlier of the License, but not also under the terms of a
-        Secondary License.
-
-1.6. “Executable Form”
-
-     means any form of the work other than Source Code Form.
-
-1.7. “Larger Work”
-
-     means a work that combines Covered Software with other material, in a separate
-     file or files, that is not Covered Software.
-
-1.8. “License”
-
-     means this document.
-
-1.9. “Licensable”
-
-     means having the right to grant, to the maximum extent possible, whether at the
-     time of the initial grant or subsequently, any and all of the rights conveyed by
-     this License.
-
-1.10. “Modifications”
-
-     means any of the following:
-
-     a. any file in Source Code Form that results from an addition to, deletion
-        from, or modification of the contents of Covered Software; or
-
-     b. any new file in Source Code Form that contains any Covered Software.
-
-1.11. “Patent Claims” of a Contributor
-
-      means any patent claim(s), including without limitation, method, process,
-      and apparatus claims, in any patent Licensable by such Contributor that
-      would be infringed, but for the grant of the License, by the making,
-      using, selling, offering for sale, having made, import, or transfer of
-      either its Contributions or its Contributor Version.
-
-1.12. “Secondary License”
-
-      means either the GNU General Public License, Version 2.0, the GNU Lesser
-      General Public License, Version 2.1, the GNU Affero General Public
-      License, Version 3.0, or any later versions of those licenses.
-
-1.13. “Source Code Form”
-
-      means the form of the work preferred for making modifications.
-
-1.14. “You” (or “Your”)
-
-      means an individual or a legal entity exercising rights under this
-      License. For legal entities, “You” includes any entity that controls, is
-      controlled by, or is under common control with You. For purposes of this
-      definition, “control” means (a) the power, direct or indirect, to cause
-      the direction or management of such entity, whether by contract or
-      otherwise, or (b) ownership of more than fifty percent (50%) of the
-      outstanding shares or beneficial ownership of such entity.
-
-
-2. License Grants and Conditions
-
-2.1. Grants
-
-     Each Contributor hereby grants You a world-wide, royalty-free,
-     non-exclusive license:
-
-     a. under intellectual property rights (other than patent or trademark)
-        Licensable by such Contributor to use, reproduce, make available,
-        modify, display, perform, distribute, and otherwise exploit its
-        Contributions, either on an unmodified basis, with Modifications, or as
-        part of a Larger Work; and
-
-     b. under Patent Claims of such Contributor to make, use, sell, offer for
-        sale, have made, import, and otherwise transfer either its Contributions
-        or its Contributor Version.
-
-2.2. Effective Date
-
-     The licenses granted in Section 2.1 with respect to any Contribution become
-     effective for each Contribution on the date the Contributor first distributes
-     such Contribution.
-
-2.3. Limitations on Grant Scope
-
-     The licenses granted in this Section 2 are the only rights granted under this
-     License. No additional rights or licenses will be implied from the distribution
-     or licensing of Covered Software under this License. Notwithstanding Section
-     2.1(b) above, no patent license is granted by a Contributor:
-
-     a. for any code that a Contributor has removed from Covered Software; or
-
-     b. for infringements caused by: (i) Your and any other third party’s
-        modifications of Covered Software, or (ii) the combination of its
-        Contributions with other software (except as part of its Contributor
-        Version); or
-
-     c. under Patent Claims infringed by Covered Software in the absence of its
-        Contributions.
-
-     This License does not grant any rights in the trademarks, service marks, or
-     logos of any Contributor (except as may be necessary to comply with the
-     notice requirements in Section 3.4).
-
-2.4. Subsequent Licenses
-
-     No Contributor makes additional grants as a result of Your choice to
-     distribute the Covered Software under a subsequent version of this License
-     (see Section 10.2) or under the terms of a Secondary License (if permitted
-     under the terms of Section 3.3).
-
-2.5. Representation
-
-     Each Contributor represents that the Contributor believes its Contributions
-     are its original creation(s) or it has sufficient rights to grant the
-     rights to its Contributions conveyed by this License.
-
-2.6. Fair Use
-
-     This License is not intended to limit any rights You have under applicable
-     copyright doctrines of fair use, fair dealing, or other equivalents.
-
-2.7. Conditions
-
-     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
-     Section 2.1.
-
-
-3. Responsibilities
-
-3.1. Distribution of Source Form
-
-     All distribution of Covered Software in Source Code Form, including any
-     Modifications that You create or to which You contribute, must be under the
-     terms of this License. You must inform recipients that the Source Code Form
-     of the Covered Software is governed by the terms of this License, and how
-     they can obtain a copy of this License. You may not attempt to alter or
-     restrict the recipients’ rights in the Source Code Form.
-
-3.2. Distribution of Executable Form
-
-     If You distribute Covered Software in Executable Form then:
-
-     a. such Covered Software must also be made available in Source Code Form,
-        as described in Section 3.1, and You must inform recipients of the
-        Executable Form how they can obtain a copy of such Source Code Form by
-        reasonable means in a timely manner, at a charge no more than the cost
-        of distribution to the recipient; and
-
-     b. You may distribute such Executable Form under the terms of this License,
-        or sublicense it under different terms, provided that the license for
-        the Executable Form does not attempt to limit or alter the recipients’
-        rights in the Source Code Form under this License.
-
-3.3. Distribution of a Larger Work
-
-     You may create and distribute a Larger Work under terms of Your choice,
-     provided that You also comply with the requirements of this License for the
-     Covered Software. If the Larger Work is a combination of Covered Software
-     with a work governed by one or more Secondary Licenses, and the Covered
-     Software is not Incompatible With Secondary Licenses, this License permits
-     You to additionally distribute such Covered Software under the terms of
-     such Secondary License(s), so that the recipient of the Larger Work may, at
-     their option, further distribute the Covered Software under the terms of
-     either this License or such Secondary License(s).
-
-3.4. Notices
-
-     You may not remove or alter the substance of any license notices (including
-     copyright notices, patent notices, disclaimers of warranty, or limitations
-     of liability) contained within the Source Code Form of the Covered
-     Software, except that You may alter any license notices to the extent
-     required to remedy known factual inaccuracies.
-
-3.5. Application of Additional Terms
-
-     You may choose to offer, and to charge a fee for, warranty, support,
-     indemnity or liability obligations to one or more recipients of Covered
-     Software. However, You may do so only on Your own behalf, and not on behalf
-     of any Contributor. You must make it absolutely clear that any such
-     warranty, support, indemnity, or liability obligation is offered by You
-     alone, and You hereby agree to indemnify every Contributor for any
-     liability incurred by such Contributor as a result of warranty, support,
-     indemnity or liability terms You offer. You may include additional
-     disclaimers of warranty and limitations of liability specific to any
-     jurisdiction.
-
-4. Inability to Comply Due to Statute or Regulation
-
-   If it is impossible for You to comply with any of the terms of this License
-   with respect to some or all of the Covered Software due to statute, judicial
-   order, or regulation then You must: (a) comply with the terms of this License
-   to the maximum extent possible; and (b) describe the limitations and the code
-   they affect. Such description must be placed in a text file included with all
-   distributions of the Covered Software under this License. Except to the
-   extent prohibited by statute or regulation, such description must be
-   sufficiently detailed for a recipient of ordinary skill to be able to
-   understand it.
-
-5. Termination
-
-5.1. The rights granted under this License will terminate automatically if You
-     fail to comply with any of its terms. However, if You become compliant,
-     then the rights granted under this License from a particular Contributor
-     are reinstated (a) provisionally, unless and until such Contributor
-     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
-     if such Contributor fails to notify You of the non-compliance by some
-     reasonable means prior to 60 days after You have come back into compliance.
-     Moreover, Your grants from a particular Contributor are reinstated on an
-     ongoing basis if such Contributor notifies You of the non-compliance by
-     some reasonable means, this is the first time You have received notice of
-     non-compliance with this License from such Contributor, and You become
-     compliant prior to 30 days after Your receipt of the notice.
-
-5.2. If You initiate litigation against any entity by asserting a patent
-     infringement claim (excluding declaratory judgment actions, counter-claims,
-     and cross-claims) alleging that a Contributor Version directly or
-     indirectly infringes any patent, then the rights granted to You by any and
-     all Contributors for the Covered Software under Section 2.1 of this License
-     shall terminate.
-
-5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
-     license agreements (excluding distributors and resellers) which have been
-     validly granted by You or Your distributors under this License prior to
-     termination shall survive termination.
-
-6. Disclaimer of Warranty
-
-   Covered Software is provided under this License on an “as is” basis, without
-   warranty of any kind, either expressed, implied, or statutory, including,
-   without limitation, warranties that the Covered Software is free of defects,
-   merchantable, fit for a particular purpose or non-infringing. The entire
-   risk as to the quality and performance of the Covered Software is with You.
-   Should any Covered Software prove defective in any respect, You (not any
-   Contributor) assume the cost of any necessary servicing, repair, or
-   correction. This disclaimer of warranty constitutes an essential part of this
-   License. No use of  any Covered Software is authorized under this License
-   except under this disclaimer.
-
-7. Limitation of Liability
-
-   Under no circumstances and under no legal theory, whether tort (including
-   negligence), contract, or otherwise, shall any Contributor, or anyone who
-   distributes Covered Software as permitted above, be liable to You for any
-   direct, indirect, special, incidental, or consequential damages of any
-   character including, without limitation, damages for lost profits, loss of
-   goodwill, work stoppage, computer failure or malfunction, or any and all
-   other commercial damages or losses, even if such party shall have been
-   informed of the possibility of such damages. This limitation of liability
-   shall not apply to liability for death or personal injury resulting from such
-   party’s negligence to the extent applicable law prohibits such limitation.
-   Some jurisdictions do not allow the exclusion or limitation of incidental or
-   consequential damages, so this exclusion and limitation may not apply to You.
-
-8. Litigation
-
-   Any litigation relating to this License may be brought only in the courts of
-   a jurisdiction where the defendant maintains its principal place of business
-   and such litigation shall be governed by laws of that jurisdiction, without
-   reference to its conflict-of-law provisions. Nothing in this Section shall
-   prevent a party’s ability to bring cross-claims or counter-claims.
-
-9. Miscellaneous
-
-   This License represents the complete agreement concerning the subject matter
-   hereof. If any provision of this License is held to be unenforceable, such
-   provision shall be reformed only to the extent necessary to make it
-   enforceable. Any law or regulation which provides that the language of a
-   contract shall be construed against the drafter shall not be used to construe
-   this License against a Contributor.
-
-
-10. Versions of the License
-
-10.1. New Versions
-
-      Mozilla Foundation is the license steward. Except as provided in Section
-      10.3, no one other than the license steward has the right to modify or
-      publish new versions of this License. Each version will be given a
-      distinguishing version number.
-
-10.2. Effect of New Versions
-
-      You may distribute the Covered Software under the terms of the version of
-      the License under which You originally received the Covered Software, or
-      under the terms of any subsequent version published by the license
-      steward.
-
-10.3. Modified Versions
-
-      If you create software not governed by this License, and you want to
-      create a new license for such software, you may create and use a modified
-      version of this License if you rename the license and remove any
-      references to the name of the license steward (except to note that such
-      modified license differs from this License).
-
-10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
-      If You choose to distribute Source Code Form that is Incompatible With
-      Secondary Licenses under the terms of this version of the License, the
-      notice described in Exhibit B of this License must be attached.
-
-Exhibit A - Source Code Form License Notice
-
-      This Source Code Form is subject to the
-      terms of the Mozilla Public License, v.
-      2.0. If a copy of the MPL was not
-      distributed with this file, You can
-      obtain one at
-      http://mozilla.org/MPL/2.0/.
-
-If it is not possible or desirable to put the notice in a particular file, then
-You may include the notice in a location (such as a LICENSE file in a relevant
-directory) where a recipient would be likely to look for such a notice.
-
-You may add additional accurate notices of copyright ownership.
-
-Exhibit B - “Incompatible With Secondary Licenses” Notice
-
-      This Source Code Form is “Incompatible
-      With Secondary Licenses”, as defined by
-      the Mozilla Public License, v. 2.0.
-
diff --git a/vendor/github.com/hashicorp/serf/coordinate/client.go b/vendor/github.com/hashicorp/serf/coordinate/client.go
deleted file mode 100644
index 3582ee4..0000000
--- a/vendor/github.com/hashicorp/serf/coordinate/client.go
+++ /dev/null
@@ -1,243 +0,0 @@
-package coordinate
-
-import (
-	"fmt"
-	"math"
-	"sort"
-	"sync"
-	"time"
-
-	"github.com/armon/go-metrics"
-)
-
-// Client manages the estimated network coordinate for a given node, and adjusts
-// it as the node observes round trip times and estimated coordinates from other
-// nodes. The core algorithm is based on Vivaldi, see the documentation for Config
-// for more details.
-type Client struct {
-	// coord is the current estimate of the client's network coordinate.
-	coord *Coordinate
-
-	// origin is a coordinate sitting at the origin.
-	origin *Coordinate
-
-	// config contains the tuning parameters that govern the performance of
-	// the algorithm.
-	config *Config
-
-	// adjustmentIndex is the current index into the adjustmentSamples slice.
-	adjustmentIndex uint
-
-	// adjustment is used to store samples for the adjustment calculation.
-	adjustmentSamples []float64
-
-	// latencyFilterSamples is used to store the last several RTT samples,
-	// keyed by node name. We will use the config's LatencyFilterSamples
-	// value to determine how many samples we keep, per node.
-	latencyFilterSamples map[string][]float64
-
-	// stats is used to record events that occur when updating coordinates.
-	stats ClientStats
-
-	// mutex enables safe concurrent access to the client.
-	mutex sync.RWMutex
-}
-
-// ClientStats is used to record events that occur when updating coordinates.
-type ClientStats struct {
-	// Resets is incremented any time we reset our local coordinate because
-	// our calculations have resulted in an invalid state.
-	Resets int
-}
-
-// NewClient creates a new Client and verifies the configuration is valid.
-func NewClient(config *Config) (*Client, error) {
-	if !(config.Dimensionality > 0) {
-		return nil, fmt.Errorf("dimensionality must be >0")
-	}
-
-	return &Client{
-		coord:                NewCoordinate(config),
-		origin:               NewCoordinate(config),
-		config:               config,
-		adjustmentIndex:      0,
-		adjustmentSamples:    make([]float64, config.AdjustmentWindowSize),
-		latencyFilterSamples: make(map[string][]float64),
-	}, nil
-}
-
-// GetCoordinate returns a copy of the coordinate for this client.
-func (c *Client) GetCoordinate() *Coordinate {
-	c.mutex.RLock()
-	defer c.mutex.RUnlock()
-
-	return c.coord.Clone()
-}
-
-// SetCoordinate forces the client's coordinate to a known state.
-func (c *Client) SetCoordinate(coord *Coordinate) error {
-	c.mutex.Lock()
-	defer c.mutex.Unlock()
-
-	if err := c.checkCoordinate(coord); err != nil {
-		return err
-	}
-
-	c.coord = coord.Clone()
-	return nil
-}
-
-// ForgetNode removes any client state for the given node.
-func (c *Client) ForgetNode(node string) {
-	c.mutex.Lock()
-	defer c.mutex.Unlock()
-
-	delete(c.latencyFilterSamples, node)
-}
-
-// Stats returns a copy of stats for the client.
-func (c *Client) Stats() ClientStats {
-	c.mutex.Lock()
-	defer c.mutex.Unlock()
-
-	return c.stats
-}
-
-// checkCoordinate returns an error if the coordinate isn't compatible with
-// this client, or if the coordinate itself isn't valid. This assumes the mutex
-// has been locked already.
-func (c *Client) checkCoordinate(coord *Coordinate) error {
-	if !c.coord.IsCompatibleWith(coord) {
-		return fmt.Errorf("dimensions aren't compatible")
-	}
-
-	if !coord.IsValid() {
-		return fmt.Errorf("coordinate is invalid")
-	}
-
-	return nil
-}
-
-// latencyFilter applies a simple moving median filter with a new sample for
-// a node. This assumes that the mutex has been locked already.
-func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
-	samples, ok := c.latencyFilterSamples[node]
-	if !ok {
-		samples = make([]float64, 0, c.config.LatencyFilterSize)
-	}
-
-	// Add the new sample and trim the list, if needed.
-	samples = append(samples, rttSeconds)
-	if len(samples) > int(c.config.LatencyFilterSize) {
-		samples = samples[1:]
-	}
-	c.latencyFilterSamples[node] = samples
-
-	// Sort a copy of the samples and return the median.
-	sorted := make([]float64, len(samples))
-	copy(sorted, samples)
-	sort.Float64s(sorted)
-	return sorted[len(sorted)/2]
-}
-
-// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
-// assumes that the mutex has been locked already.
-func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) {
-	const zeroThreshold = 1.0e-6
-
-	dist := c.coord.DistanceTo(other).Seconds()
-	if rttSeconds < zeroThreshold {
-		rttSeconds = zeroThreshold
-	}
-	wrongness := math.Abs(dist-rttSeconds) / rttSeconds
-
-	totalError := c.coord.Error + other.Error
-	if totalError < zeroThreshold {
-		totalError = zeroThreshold
-	}
-	weight := c.coord.Error / totalError
-
-	c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight)
-	if c.coord.Error > c.config.VivaldiErrorMax {
-		c.coord.Error = c.config.VivaldiErrorMax
-	}
-
-	delta := c.config.VivaldiCC * weight
-	force := delta * (rttSeconds - dist)
-	c.coord = c.coord.ApplyForce(c.config, force, other)
-}
-
-// updateAdjustment updates the adjustment portion of the client's coordinate, if
-// the feature is enabled. This assumes that the mutex has been locked already.
-func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) {
-	if c.config.AdjustmentWindowSize == 0 {
-		return
-	}
-
-	// Note that the existing adjustment factors don't figure in to this
-	// calculation so we use the raw distance here.
-	dist := c.coord.rawDistanceTo(other)
-	c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist
-	c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize
-
-	sum := 0.0
-	for _, sample := range c.adjustmentSamples {
-		sum += sample
-	}
-	c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize))
-}
-
-// updateGravity applies a small amount of gravity to pull coordinates towards
-// the center of the coordinate system to combat drift. This assumes that the
-// mutex is locked already.
-func (c *Client) updateGravity() {
-	dist := c.origin.DistanceTo(c.coord).Seconds()
-	force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0)
-	c.coord = c.coord.ApplyForce(c.config, force, c.origin)
-}
-
-// Update takes other, a coordinate for another node, and rtt, a round trip
-// time observation for a ping to that node, and updates the estimated position of
-// the client's coordinate. Returns the updated coordinate.
-func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) (*Coordinate, error) {
-	c.mutex.Lock()
-	defer c.mutex.Unlock()
-
-	if err := c.checkCoordinate(other); err != nil {
-		return nil, err
-	}
-
-	// The code down below can handle zero RTTs, which we have seen in
-	// https://github.com/hashicorp/consul/issues/3789, presumably in
-	// environments with coarse-grained monotonic clocks (we are still
-	// trying to pin this down). In any event, this is ok from a code PoV
-	// so we don't need to alert operators with spammy messages. We did
-	// add a counter so this is still observable, though.
-	const maxRTT = 10 * time.Second
-	if rtt < 0 || rtt > maxRTT {
-		return nil, fmt.Errorf("round trip time not in valid range, duration %v is not a positive value less than %v ", rtt, maxRTT)
-	}
-	if rtt == 0 {
-		metrics.IncrCounter([]string{"serf", "coordinate", "zero-rtt"}, 1)
-	}
-
-	rttSeconds := c.latencyFilter(node, rtt.Seconds())
-	c.updateVivaldi(other, rttSeconds)
-	c.updateAdjustment(other, rttSeconds)
-	c.updateGravity()
-	if !c.coord.IsValid() {
-		c.stats.Resets++
-		c.coord = NewCoordinate(c.config)
-	}
-
-	return c.coord.Clone(), nil
-}
-
-// DistanceTo returns the estimated RTT from the client's coordinate to other, the
-// coordinate for another node.
-func (c *Client) DistanceTo(other *Coordinate) time.Duration {
-	c.mutex.RLock()
-	defer c.mutex.RUnlock()
-
-	return c.coord.DistanceTo(other)
-}
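
This hunk drops the vendored Vivaldi client. As background on the API being removed, a minimal sketch of how a Client is typically driven; the node name and RTT value are illustrative, and DefaultConfig comes from the config.go hunk that follows.

    package main

    import (
        "fmt"
        "time"

        "github.com/hashicorp/serf/coordinate"
    )

    func main() {
        me, err := coordinate.NewClient(coordinate.DefaultConfig())
        if err != nil {
            panic(err)
        }
        peer, _ := coordinate.NewClient(coordinate.DefaultConfig())

        // Feed an observed round-trip time to the peer's current coordinate;
        // Update returns our adjusted coordinate estimate.
        if _, err := me.Update("peer-1", peer.GetCoordinate(), 35*time.Millisecond); err != nil {
            panic(err)
        }

        // After one or more observations, DistanceTo estimates the RTT.
        fmt.Println("estimated RTT:", me.DistanceTo(peer.GetCoordinate()))
    }
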
diff --git a/vendor/github.com/hashicorp/serf/coordinate/config.go b/vendor/github.com/hashicorp/serf/coordinate/config.go
deleted file mode 100644
index b85a8ab..0000000
--- a/vendor/github.com/hashicorp/serf/coordinate/config.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package coordinate
-
-// Config is used to set the parameters of the Vivaldi-based coordinate mapping
-// algorithm.
-//
-// The following references are called out at various points in the documentation
-// here:
-//
-// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system."
-//     ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004.
-// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates
-//     in the Wild." NSDI. Vol. 7. 2007.
-// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for
-//     host-based network coordinate systems." Networking, IEEE/ACM Transactions
-//     on 18.1 (2010): 27-40.
-type Config struct {
-	// The dimensionality of the coordinate system. As discussed in [2], more
-	// dimensions improves the accuracy of the estimates up to a point. Per [2]
-	// we chose 8 dimensions plus a non-Euclidean height.
-	Dimensionality uint
-
-	// VivaldiErrorMax is the default error value when a node hasn't yet made
-	// any observations. It also serves as an upper limit on the error value in
-	// case observations cause the error value to increase without bound.
-	VivaldiErrorMax float64
-
-	// VivaldiCE is a tuning factor that controls the maximum impact an
-	// observation can have on a node's confidence. See [1] for more details.
-	VivaldiCE float64
-
-	// VivaldiCC is a tuning factor that controls the maximum impact an
-	// observation can have on a node's coordinate. See [1] for more details.
-	VivaldiCC float64
-
-	// AdjustmentWindowSize is a tuning factor that determines how many samples
-	// we retain to calculate the adjustment factor as discussed in [3]. Setting
-	// this to zero disables this feature.
-	AdjustmentWindowSize uint
-
-	// HeightMin is the minimum value of the height parameter. Since this
-	// always must be positive, it will introduce a small amount error, so
-	// the chosen value should be relatively small compared to "normal"
-	// coordinates.
-	HeightMin float64
-
-	// LatencyFilterSize is the maximum number of samples that are retained
-	// per node, in order to compute a median. The intent is to ride out blips
-	// but still keep the delay low, since our time to probe any given node is
-	// pretty infrequent. See [2] for more details.
-	LatencyFilterSize uint
-
-	// GravityRho is a tuning factor that sets how much gravity has an effect
-	// to try to re-center coordinates. See [2] for more details.
-	GravityRho float64
-}
-
-// DefaultConfig returns a Config that has some default values suitable for
-// basic testing of the algorithm, but not tuned to any particular type of cluster.
-func DefaultConfig() *Config {
-	return &Config{
-		Dimensionality:       8,
-		VivaldiErrorMax:      1.5,
-		VivaldiCE:            0.25,
-		VivaldiCC:            0.25,
-		AdjustmentWindowSize: 20,
-		HeightMin:            10.0e-6,
-		LatencyFilterSize:    3,
-		GravityRho:           150.0,
-	}
-}
diff --git a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
deleted file mode 100644
index fbe792c..0000000
--- a/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package coordinate
-
-import (
-	"math"
-	"math/rand"
-	"time"
-)
-
-// Coordinate is a specialized structure for holding network coordinates for the
-// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
-// to enable this to be serialized. All values in here are in units of seconds.
-type Coordinate struct {
-	// Vec is the Euclidean portion of the coordinate. This is used along
-	// with the other fields to provide an overall distance estimate. The
-	// units here are seconds.
-	Vec []float64
-
-	// Err reflects the confidence in the given coordinate and is updated
-	// dynamically by the Vivaldi Client. This is dimensionless.
-	Error float64
-
-	// Adjustment is a distance offset computed based on a calculation over
-	// observations from all other nodes over a fixed window and is updated
-	// dynamically by the Vivaldi Client. The units here are seconds.
-	Adjustment float64
-
-	// Height is a distance offset that accounts for non-Euclidean effects
-	// which model the access links from nodes to the core Internet. The access
-	// links are usually set by bandwidth and congestion, and the core links
-	// usually follow distance based on geography.
-	Height float64
-}
-
-const (
-	// secondsToNanoseconds is used to convert float seconds to nanoseconds.
-	secondsToNanoseconds = 1.0e9
-
-	// zeroThreshold is used to decide if two coordinates are on top of each
-	// other.
-	zeroThreshold = 1.0e-6
-)
-
-// DimensionalityConflictError is used as a panic value when you try to perform
-// operations with incompatible dimensions.
-type DimensionalityConflictError struct{}
-
-// Error implements the error interface.
-func (e DimensionalityConflictError) Error() string {
-	return "coordinate dimensionality does not match"
-}
-
-// NewCoordinate creates a new coordinate at the origin, using the given config
-// to supply key initial values.
-func NewCoordinate(config *Config) *Coordinate {
-	return &Coordinate{
-		Vec:        make([]float64, config.Dimensionality),
-		Error:      config.VivaldiErrorMax,
-		Adjustment: 0.0,
-		Height:     config.HeightMin,
-	}
-}
-
-// Clone creates an independent copy of this coordinate.
-func (c *Coordinate) Clone() *Coordinate {
-	vec := make([]float64, len(c.Vec))
-	copy(vec, c.Vec)
-	return &Coordinate{
-		Vec:        vec,
-		Error:      c.Error,
-		Adjustment: c.Adjustment,
-		Height:     c.Height,
-	}
-}
-
-// componentIsValid returns false if a floating point value is a NaN or an
-// infinity.
-func componentIsValid(f float64) bool {
-	return !math.IsInf(f, 0) && !math.IsNaN(f)
-}
-
-// IsValid returns false if any component of a coordinate isn't valid, per the
-// componentIsValid() helper above.
-func (c *Coordinate) IsValid() bool {
-	for i := range c.Vec {
-		if !componentIsValid(c.Vec[i]) {
-			return false
-		}
-	}
-
-	return componentIsValid(c.Error) &&
-		componentIsValid(c.Adjustment) &&
-		componentIsValid(c.Height)
-}
-
-// IsCompatibleWith checks to see if the two coordinates are compatible
-// dimensionally. If this returns true then you are guaranteed to not get
-// any runtime errors operating on them.
-func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool {
-	return len(c.Vec) == len(other.Vec)
-}
-
-// ApplyForce returns the result of applying the force from the direction of the
-// other coordinate.
-func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate {
-	if !c.IsCompatibleWith(other) {
-		panic(DimensionalityConflictError{})
-	}
-
-	ret := c.Clone()
-	unit, mag := unitVectorAt(c.Vec, other.Vec)
-	ret.Vec = add(ret.Vec, mul(unit, force))
-	if mag > zeroThreshold {
-		ret.Height = (ret.Height+other.Height)*force/mag + ret.Height
-		ret.Height = math.Max(ret.Height, config.HeightMin)
-	}
-	return ret
-}
-
-// DistanceTo returns the distance between this coordinate and the other
-// coordinate, including adjustments.
-func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration {
-	if !c.IsCompatibleWith(other) {
-		panic(DimensionalityConflictError{})
-	}
-
-	dist := c.rawDistanceTo(other)
-	adjustedDist := dist + c.Adjustment + other.Adjustment
-	if adjustedDist > 0.0 {
-		dist = adjustedDist
-	}
-	return time.Duration(dist * secondsToNanoseconds)
-}
-
-// rawDistanceTo returns the Vivaldi distance between this coordinate and the
-// other coordinate in seconds, not including adjustments. This assumes the
-// dimensions have already been checked to be compatible.
-func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 {
-	return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height
-}
-
-// add returns the sum of vec1 and vec2. This assumes the dimensions have
-// already been checked to be compatible.
-func add(vec1 []float64, vec2 []float64) []float64 {
-	ret := make([]float64, len(vec1))
-	for i := range ret {
-		ret[i] = vec1[i] + vec2[i]
-	}
-	return ret
-}
-
-// diff returns the difference between the vec1 and vec2. This assumes the
-// dimensions have already been checked to be compatible.
-func diff(vec1 []float64, vec2 []float64) []float64 {
-	ret := make([]float64, len(vec1))
-	for i := range ret {
-		ret[i] = vec1[i] - vec2[i]
-	}
-	return ret
-}
-
-// mul returns vec multiplied by a scalar factor.
-func mul(vec []float64, factor float64) []float64 {
-	ret := make([]float64, len(vec))
-	for i := range vec {
-		ret[i] = vec[i] * factor
-	}
-	return ret
-}
-
-// magnitude computes the magnitude of the vec.
-func magnitude(vec []float64) float64 {
-	sum := 0.0
-	for i := range vec {
-		sum += vec[i] * vec[i]
-	}
-	return math.Sqrt(sum)
-}
-
-// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two
-// positions are the same then a random unit vector is returned. We also return
-// the distance between the points for use in the later height calculation.
-func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) {
-	ret := diff(vec1, vec2)
-
-	// If the coordinates aren't on top of each other we can normalize.
-	if mag := magnitude(ret); mag > zeroThreshold {
-		return mul(ret, 1.0/mag), mag
-	}
-
-	// Otherwise, just return a random unit vector.
-	for i := range ret {
-		ret[i] = rand.Float64() - 0.5
-	}
-	if mag := magnitude(ret); mag > zeroThreshold {
-		return mul(ret, 1.0/mag), 0.0
-	}
-
-	// And finally just give up and make a unit vector along the first
-	// dimension. This should be exceedingly rare.
-	ret = make([]float64, len(ret))
-	ret[0] = 1.0
-	return ret, 0.0
-}
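
The lower-level Coordinate type removed above can also be exercised directly, although in practice the Client manages these calls. A tiny sketch with arbitrary values:

    package main

    import (
        "fmt"

        "github.com/hashicorp/serf/coordinate"
    )

    func main() {
        config := coordinate.DefaultConfig()
        c1 := coordinate.NewCoordinate(config)
        c2 := coordinate.NewCoordinate(config)

        // Push c1 away from c2 with a 10ms force; since both start at the
        // origin, a random unit vector chooses the direction.
        c1 = c1.ApplyForce(config, 0.010, c2)
        fmt.Println("estimated distance:", c1.DistanceTo(c2))
    }
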
diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
deleted file mode 100644
index 6fb033c..0000000
--- a/vendor/github.com/hashicorp/serf/coordinate/phantom.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package coordinate
-
-import (
-	"fmt"
-	"math"
-	"math/rand"
-	"time"
-)
-
-// GenerateClients returns a slice with nodes number of clients, all with the
-// given config.
-func GenerateClients(nodes int, config *Config) ([]*Client, error) {
-	clients := make([]*Client, nodes)
-	for i := range clients {
-		client, err := NewClient(config)
-		if err != nil {
-			return nil, err
-		}
-
-		clients[i] = client
-	}
-	return clients, nil
-}
-
-// GenerateLine returns a truth matrix as if all the nodes are in a straight line
-// with the given spacing between them.
-func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
-	truth := make([][]time.Duration, nodes)
-	for i := range truth {
-		truth[i] = make([]time.Duration, nodes)
-	}
-
-	for i := 0; i < nodes; i++ {
-		for j := i + 1; j < nodes; j++ {
-			rtt := time.Duration(j-i) * spacing
-			truth[i][j], truth[j][i] = rtt, rtt
-		}
-	}
-	return truth
-}
-
-// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
-// grid with the given spacing between them.
-func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
-	truth := make([][]time.Duration, nodes)
-	for i := range truth {
-		truth[i] = make([]time.Duration, nodes)
-	}
-
-	n := int(math.Sqrt(float64(nodes)))
-	for i := 0; i < nodes; i++ {
-		for j := i + 1; j < nodes; j++ {
-			x1, y1 := float64(i%n), float64(i/n)
-			x2, y2 := float64(j%n), float64(j/n)
-			dx, dy := x2-x1, y2-y1
-			dist := math.Sqrt(dx*dx + dy*dy)
-			rtt := time.Duration(dist * float64(spacing))
-			truth[i][j], truth[j][i] = rtt, rtt
-		}
-	}
-	return truth
-}
-
-// GenerateSplit returns a truth matrix as if half the nodes are close together in
-// one location and half the nodes are close together in another. The lan factor
-// is used to separate the nodes locally and the wan factor represents the split
-// between the two sides.
-func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
-	truth := make([][]time.Duration, nodes)
-	for i := range truth {
-		truth[i] = make([]time.Duration, nodes)
-	}
-
-	split := nodes / 2
-	for i := 0; i < nodes; i++ {
-		for j := i + 1; j < nodes; j++ {
-			rtt := lan
-			if (i <= split && j > split) || (i > split && j <= split) {
-				rtt += wan
-			}
-			truth[i][j], truth[j][i] = rtt, rtt
-		}
-	}
-	return truth
-}
-
-// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
-// around a circle with the given radius. The first node is at the "center" of the
-// circle because it's equidistant from all the other nodes, but we place it at
-// double the radius, so it should show up above all the other nodes in height.
-func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration {
-	truth := make([][]time.Duration, nodes)
-	for i := range truth {
-		truth[i] = make([]time.Duration, nodes)
-	}
-
-	for i := 0; i < nodes; i++ {
-		for j := i + 1; j < nodes; j++ {
-			var rtt time.Duration
-			if i == 0 {
-				rtt = 2 * radius
-			} else {
-				t1 := 2.0 * math.Pi * float64(i) / float64(nodes)
-				x1, y1 := math.Cos(t1), math.Sin(t1)
-				t2 := 2.0 * math.Pi * float64(j) / float64(nodes)
-				x2, y2 := math.Cos(t2), math.Sin(t2)
-				dx, dy := x2-x1, y2-y1
-				dist := math.Sqrt(dx*dx + dy*dy)
-				rtt = time.Duration(dist * float64(radius))
-			}
-			truth[i][j], truth[j][i] = rtt, rtt
-		}
-	}
-	return truth
-}
-
-// GenerateRandom returns a truth matrix for a set of nodes with normally
-// distributed delays, with the given mean and deviation. The RNG is re-seeded
-// so you always get the same matrix for a given size.
-func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration {
-	rand.Seed(1)
-
-	truth := make([][]time.Duration, nodes)
-	for i := range truth {
-		truth[i] = make([]time.Duration, nodes)
-	}
-
-	for i := 0; i < nodes; i++ {
-		for j := i + 1; j < nodes; j++ {
-			rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds()
-			rtt := time.Duration(rttSeconds * secondsToNanoseconds)
-			truth[i][j], truth[j][i] = rtt, rtt
-		}
-	}
-	return truth
-}
-
-// Simulate runs the given number of cycles using the given list of clients and
-// truth matrix. On each cycle, each client will pick a random node and observe
-// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for
-// each simulation run to get deterministic results (for this algorithm and the
-// underlying algorithm which will use random numbers for position vectors when
-// starting out with everything at the origin).
-func Simulate(clients []*Client, truth [][]time.Duration, cycles int) {
-	rand.Seed(1)
-
-	nodes := len(clients)
-	for cycle := 0; cycle < cycles; cycle++ {
-		for i := range clients {
-			if j := rand.Intn(nodes); j != i {
-				c := clients[j].GetCoordinate()
-				rtt := truth[i][j]
-				node := fmt.Sprintf("node_%d", j)
-				clients[i].Update(node, c, rtt)
-			}
-		}
-	}
-}
-
-// Stats is returned from the Evaluate function with a summary of the algorithm
-// performance.
-type Stats struct {
-	ErrorMax float64
-	ErrorAvg float64
-}
-
-// Evaluate uses the coordinates of the given clients to calculate estimated
-// distances and compares them with the given truth matrix, returning summary
-// stats.
-func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) {
-	nodes := len(clients)
-	count := 0
-	for i := 0; i < nodes; i++ {
-		for j := i + 1; j < nodes; j++ {
-			est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds()
-			actual := truth[i][j].Seconds()
-			error := math.Abs(est-actual) / actual
-			stats.ErrorMax = math.Max(stats.ErrorMax, error)
-			stats.ErrorAvg += error
-			count += 1
-		}
-	}
-
-	stats.ErrorAvg /= float64(count)
-	fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax)
-	return
-}
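
The file above supplies the package's simulation and evaluation helpers. A short sketch of how they fit together; node count, spacing, and cycle count are arbitrary:

    package main

    import (
        "fmt"
        "time"

        "github.com/hashicorp/serf/coordinate"
    )

    func main() {
        config := coordinate.DefaultConfig()

        // Five nodes spaced 10ms apart along a line.
        truth := coordinate.GenerateLine(5, 10*time.Millisecond)
        clients, err := coordinate.GenerateClients(5, config)
        if err != nil {
            panic(err)
        }

        // Let every client observe random peers, then compare the estimated
        // distances against the truth matrix.
        coordinate.Simulate(clients, truth, 1000)
        stats := coordinate.Evaluate(clients, truth)
        fmt.Printf("avg error %.3f, max error %.3f\n", stats.ErrorAvg, stats.ErrorMax)
    }
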
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
deleted file mode 100644
index f9c841a..0000000
--- a/vendor/github.com/mitchellh/go-homedir/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Mitchell Hashimoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
deleted file mode 100644
index d70706d..0000000
--- a/vendor/github.com/mitchellh/go-homedir/README.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# go-homedir
-
-This is a Go library for detecting the user's home directory without
-the use of cgo, so the library can be used in cross-compilation environments.
-
-Usage is incredibly simple, just call `homedir.Dir()` to get the home directory
-for a user, and `homedir.Expand()` to expand the `~` in a path to the home
-directory.
-
-**Why not just use `os/user`?** The built-in `os/user` package requires
-cgo on Darwin systems. This means that any Go code that uses that package
-cannot cross compile. But 99% of the time the use for `os/user` is just to
-retrieve the home directory, which we can do for the current user without
-cgo. This library does that, enabling cross-compilation.
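
For reference, the two entry points the README describes look like this in use; the ~/.volt/config argument is only an illustrative path:

    package main

    import (
        "fmt"

        "github.com/mitchellh/go-homedir"
    )

    func main() {
        home, err := homedir.Dir()
        if err != nil {
            panic(err)
        }
        fmt.Println("home:", home)

        // Expand replaces a leading "~" with the detected home directory.
        cfg, err := homedir.Expand("~/.volt/config")
        if err != nil {
            panic(err)
        }
        fmt.Println("config path:", cfg)
    }
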
diff --git a/vendor/github.com/mitchellh/go-homedir/go.mod b/vendor/github.com/mitchellh/go-homedir/go.mod
deleted file mode 100644
index 7efa09a..0000000
--- a/vendor/github.com/mitchellh/go-homedir/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/mitchellh/go-homedir
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
deleted file mode 100644
index 2537853..0000000
--- a/vendor/github.com/mitchellh/go-homedir/homedir.go
+++ /dev/null
@@ -1,167 +0,0 @@
-package homedir
-
-import (
-	"bytes"
-	"errors"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"runtime"
-	"strconv"
-	"strings"
-	"sync"
-)
-
-// DisableCache will disable caching of the home directory. Caching is enabled
-// by default.
-var DisableCache bool
-
-var homedirCache string
-var cacheLock sync.RWMutex
-
-// Dir returns the home directory for the executing user.
-//
-// This uses an OS-specific method for discovering the home directory.
-// An error is returned if a home directory cannot be detected.
-func Dir() (string, error) {
-	if !DisableCache {
-		cacheLock.RLock()
-		cached := homedirCache
-		cacheLock.RUnlock()
-		if cached != "" {
-			return cached, nil
-		}
-	}
-
-	cacheLock.Lock()
-	defer cacheLock.Unlock()
-
-	var result string
-	var err error
-	if runtime.GOOS == "windows" {
-		result, err = dirWindows()
-	} else {
-		// Unix-like system, so just assume Unix
-		result, err = dirUnix()
-	}
-
-	if err != nil {
-		return "", err
-	}
-	homedirCache = result
-	return result, nil
-}
-
-// Expand expands the path to include the home directory if the path
-// is prefixed with `~`. If it isn't prefixed with `~`, the path is
-// returned as-is.
-func Expand(path string) (string, error) {
-	if len(path) == 0 {
-		return path, nil
-	}
-
-	if path[0] != '~' {
-		return path, nil
-	}
-
-	if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
-		return "", errors.New("cannot expand user-specific home dir")
-	}
-
-	dir, err := Dir()
-	if err != nil {
-		return "", err
-	}
-
-	return filepath.Join(dir, path[1:]), nil
-}
-
-// Reset clears the cache, forcing the next call to Dir to re-detect
-// the home directory. This generally never has to be called, but can be
-// useful in tests if you're modifying the home directory via the HOME
-// env var or something.
-func Reset() {
-	cacheLock.Lock()
-	defer cacheLock.Unlock()
-	homedirCache = ""
-}
-
-func dirUnix() (string, error) {
-	homeEnv := "HOME"
-	if runtime.GOOS == "plan9" {
-		// On plan9, env vars are lowercase.
-		homeEnv = "home"
-	}
-
-	// First prefer the HOME environmental variable
-	if home := os.Getenv(homeEnv); home != "" {
-		return home, nil
-	}
-
-	var stdout bytes.Buffer
-
-	// If that fails, try OS specific commands
-	if runtime.GOOS == "darwin" {
-		cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`)
-		cmd.Stdout = &stdout
-		if err := cmd.Run(); err == nil {
-			result := strings.TrimSpace(stdout.String())
-			if result != "" {
-				return result, nil
-			}
-		}
-	} else {
-		cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
-		cmd.Stdout = &stdout
-		if err := cmd.Run(); err != nil {
-			// If the error is ErrNotFound, we ignore it. Otherwise, return it.
-			if err != exec.ErrNotFound {
-				return "", err
-			}
-		} else {
-			if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
-				// username:password:uid:gid:gecos:home:shell
-				passwdParts := strings.SplitN(passwd, ":", 7)
-				if len(passwdParts) > 5 {
-					return passwdParts[5], nil
-				}
-			}
-		}
-	}
-
-	// If all else fails, try the shell
-	stdout.Reset()
-	cmd := exec.Command("sh", "-c", "cd && pwd")
-	cmd.Stdout = &stdout
-	if err := cmd.Run(); err != nil {
-		return "", err
-	}
-
-	result := strings.TrimSpace(stdout.String())
-	if result == "" {
-		return "", errors.New("blank output when reading home directory")
-	}
-
-	return result, nil
-}
-
-func dirWindows() (string, error) {
-	// First prefer the HOME environmental variable
-	if home := os.Getenv("HOME"); home != "" {
-		return home, nil
-	}
-
-	// Prefer standard environment variable USERPROFILE
-	if home := os.Getenv("USERPROFILE"); home != "" {
-		return home, nil
-	}
-
-	drive := os.Getenv("HOMEDRIVE")
-	path := os.Getenv("HOMEPATH")
-	home := drive + path
-	if drive == "" || path == "" {
-		return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank")
-	}
-
-	return home, nil
-}
diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml
deleted file mode 100644
index 1689c7d..0000000
--- a/vendor/github.com/mitchellh/mapstructure/.travis.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-language: go
-
-go:
-  - "1.11.x"
-  - tip
-
-script:
-  - go test
diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
deleted file mode 100644
index 3b3cb72..0000000
--- a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
+++ /dev/null
@@ -1,21 +0,0 @@
-## 1.1.2
-
-* Fix error when decode hook decodes interface implementation into interface
-  type. [GH-140]
-
-## 1.1.1
-
-* Fix panic that can happen in `decodePtr`
-
-## 1.1.0
-
-* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133]
-* Support struct to struct decoding [GH-137]
-* If source map value is nil, then destination map value is nil (instead of empty)
-* If source slice value is nil, then destination slice value is nil (instead of empty)
-* If source pointer is nil, then destination pointer is set to nil (instead of
-  allocated zero value of type)
-
-## 1.0.0
-
-* Initial tagged stable release.
diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE
deleted file mode 100644
index f9c841a..0000000
--- a/vendor/github.com/mitchellh/mapstructure/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 Mitchell Hashimoto
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md
deleted file mode 100644
index 0018dc7..0000000
--- a/vendor/github.com/mitchellh/mapstructure/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
-
-mapstructure is a Go library for decoding generic map values to structures
-and vice versa, while providing helpful error handling.
-
-This library is most useful when decoding values from some data stream (JSON,
-Gob, etc.) where you don't _quite_ know the structure of the underlying data
-until you read a part of it. You can therefore read a `map[string]interface{}`
-and use this library to decode it into the proper underlying native Go
-structure.
-
-## Installation
-
-Standard `go get`:
-
-```
-$ go get github.com/mitchellh/mapstructure
-```
-
-## Usage & Example
-
-For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
-
-The `Decode` function has examples associated with it there.
-
-## But Why?!
-
-Go offers fantastic standard libraries for decoding formats such as JSON.
-The standard method is to have a struct pre-created, and populate that struct
-from the bytes of the encoded format. This is great, but the problem is if
-you have configuration or an encoding that changes slightly depending on
-specific fields. For example, consider this JSON:
-
-```json
-{
-  "type": "person",
-  "name": "Mitchell"
-}
-```
-
-Perhaps we can't populate a specific structure without first reading
-the "type" field from the JSON. We could always do two passes over the
-decoding of the JSON (reading the "type" first, and the rest later).
-However, it is much simpler to just decode this into a `map[string]interface{}`
-structure, read the "type" key, then use something like this library
-to decode it into the proper structure.
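
A minimal sketch of the decode the README describes, using the same type/name shape; the Person struct is illustrative:

    package main

    import (
        "fmt"

        "github.com/mitchellh/mapstructure"
    )

    type Person struct {
        Type string
        Name string
    }

    func main() {
        // Data as it might arrive from a generic JSON decode.
        input := map[string]interface{}{
            "type": "person",
            "name": "Mitchell",
        }

        var p Person
        if err := mapstructure.Decode(input, &p); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", p) // {Type:person Name:Mitchell}
    }
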
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
deleted file mode 100644
index 1f0abc6..0000000
--- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
+++ /dev/null
@@ -1,217 +0,0 @@
-package mapstructure
-
-import (
-	"errors"
-	"fmt"
-	"net"
-	"reflect"
-	"strconv"
-	"strings"
-	"time"
-)
-
-// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
-// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
-func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
-	// Create variables here so we can reference them with the reflect pkg
-	var f1 DecodeHookFuncType
-	var f2 DecodeHookFuncKind
-
-	// Fill in the variables into this interface and the rest is done
-	// automatically using the reflect package.
-	potential := []interface{}{f1, f2}
-
-	v := reflect.ValueOf(h)
-	vt := v.Type()
-	for _, raw := range potential {
-		pt := reflect.ValueOf(raw).Type()
-		if vt.ConvertibleTo(pt) {
-			return v.Convert(pt).Interface()
-		}
-	}
-
-	return nil
-}
-
-// DecodeHookExec executes the given decode hook. This should be used
-// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
-// that took reflect.Kind instead of reflect.Type.
-func DecodeHookExec(
-	raw DecodeHookFunc,
-	from reflect.Type, to reflect.Type,
-	data interface{}) (interface{}, error) {
-	switch f := typedDecodeHook(raw).(type) {
-	case DecodeHookFuncType:
-		return f(from, to, data)
-	case DecodeHookFuncKind:
-		return f(from.Kind(), to.Kind(), data)
-	default:
-		return nil, errors.New("invalid decode hook signature")
-	}
-}
-
-// ComposeDecodeHookFunc creates a single DecodeHookFunc that
-// automatically composes multiple DecodeHookFuncs.
-//
-// The composed funcs are called in order, with the result of the
-// previous transformation.
-func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{}) (interface{}, error) {
-		var err error
-		for _, f1 := range fs {
-			data, err = DecodeHookExec(f1, f, t, data)
-			if err != nil {
-				return nil, err
-			}
-
-			// Modify the from kind to be correct with the new data
-			f = nil
-			if val := reflect.ValueOf(data); val.IsValid() {
-				f = val.Type()
-			}
-		}
-
-		return data, nil
-	}
-}
-
-// StringToSliceHookFunc returns a DecodeHookFunc that converts
-// string to []string by splitting on the given sep.
-func StringToSliceHookFunc(sep string) DecodeHookFunc {
-	return func(
-		f reflect.Kind,
-		t reflect.Kind,
-		data interface{}) (interface{}, error) {
-		if f != reflect.String || t != reflect.Slice {
-			return data, nil
-		}
-
-		raw := data.(string)
-		if raw == "" {
-			return []string{}, nil
-		}
-
-		return strings.Split(raw, sep), nil
-	}
-}
-
-// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
-// strings to time.Duration.
-func StringToTimeDurationHookFunc() DecodeHookFunc {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{}) (interface{}, error) {
-		if f.Kind() != reflect.String {
-			return data, nil
-		}
-		if t != reflect.TypeOf(time.Duration(5)) {
-			return data, nil
-		}
-
-		// Convert it by parsing
-		return time.ParseDuration(data.(string))
-	}
-}
-
-// StringToIPHookFunc returns a DecodeHookFunc that converts
-// strings to net.IP
-func StringToIPHookFunc() DecodeHookFunc {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{}) (interface{}, error) {
-		if f.Kind() != reflect.String {
-			return data, nil
-		}
-		if t != reflect.TypeOf(net.IP{}) {
-			return data, nil
-		}
-
-		// Convert it by parsing
-		ip := net.ParseIP(data.(string))
-		if ip == nil {
-			return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
-		}
-
-		return ip, nil
-	}
-}
-
-// StringToIPNetHookFunc returns a DecodeHookFunc that converts
-// strings to net.IPNet
-func StringToIPNetHookFunc() DecodeHookFunc {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{}) (interface{}, error) {
-		if f.Kind() != reflect.String {
-			return data, nil
-		}
-		if t != reflect.TypeOf(net.IPNet{}) {
-			return data, nil
-		}
-
-		// Convert it by parsing
-		_, net, err := net.ParseCIDR(data.(string))
-		return net, err
-	}
-}
-
-// StringToTimeHookFunc returns a DecodeHookFunc that converts
-// strings to time.Time.
-func StringToTimeHookFunc(layout string) DecodeHookFunc {
-	return func(
-		f reflect.Type,
-		t reflect.Type,
-		data interface{}) (interface{}, error) {
-		if f.Kind() != reflect.String {
-			return data, nil
-		}
-		if t != reflect.TypeOf(time.Time{}) {
-			return data, nil
-		}
-
-		// Convert it by parsing
-		return time.Parse(layout, data.(string))
-	}
-}
-
-// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
-// the decoder.
-//
-// Note that this is significantly different from the WeaklyTypedInput option
-// of the DecoderConfig.
-func WeaklyTypedHook(
-	f reflect.Kind,
-	t reflect.Kind,
-	data interface{}) (interface{}, error) {
-	dataVal := reflect.ValueOf(data)
-	switch t {
-	case reflect.String:
-		switch f {
-		case reflect.Bool:
-			if dataVal.Bool() {
-				return "1", nil
-			}
-			return "0", nil
-		case reflect.Float32:
-			return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
-		case reflect.Int:
-			return strconv.FormatInt(dataVal.Int(), 10), nil
-		case reflect.Slice:
-			dataType := dataVal.Type()
-			elemKind := dataType.Elem().Kind()
-			if elemKind == reflect.Uint8 {
-				return string(dataVal.Interface().([]uint8)), nil
-			}
-		case reflect.Uint:
-			return strconv.FormatUint(dataVal.Uint(), 10), nil
-		}
-	}
-
-	return data, nil
-}
diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go
deleted file mode 100644
index 47a99e5..0000000
--- a/vendor/github.com/mitchellh/mapstructure/error.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package mapstructure
-
-import (
-	"errors"
-	"fmt"
-	"sort"
-	"strings"
-)
-
-// Error implements the error interface and can represents multiple
-// errors that occur in the course of a single decode.
-type Error struct {
-	Errors []string
-}
-
-func (e *Error) Error() string {
-	points := make([]string, len(e.Errors))
-	for i, err := range e.Errors {
-		points[i] = fmt.Sprintf("* %s", err)
-	}
-
-	sort.Strings(points)
-	return fmt.Sprintf(
-		"%d error(s) decoding:\n\n%s",
-		len(e.Errors), strings.Join(points, "\n"))
-}
-
-// WrappedErrors implements the errwrap.Wrapper interface to make this
-// return value more useful with the errwrap and go-multierror libraries.
-func (e *Error) WrappedErrors() []error {
-	if e == nil {
-		return nil
-	}
-
-	result := make([]error, len(e.Errors))
-	for i, e := range e.Errors {
-		result[i] = errors.New(e)
-	}
-
-	return result
-}
-
-func appendErrors(errors []string, err error) []string {
-	switch e := err.(type) {
-	case *Error:
-		return append(errors, e.Errors...)
-	default:
-		return append(errors, e.Error())
-	}
-}
diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod
deleted file mode 100644
index d2a7125..0000000
--- a/vendor/github.com/mitchellh/mapstructure/go.mod
+++ /dev/null
@@ -1 +0,0 @@
-module github.com/mitchellh/mapstructure
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
deleted file mode 100644
index 256ee63..0000000
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ /dev/null
@@ -1,1149 +0,0 @@
-// Package mapstructure exposes functionality to convert an arbitrary
-// map[string]interface{} into a native Go structure.
-//
-// The Go structure can be arbitrarily complex, containing slices,
-// other structs, etc. and the decoder will properly decode nested
-// maps and so on into the proper structures in the native Go struct.
-// See the examples to see what the decoder is capable of.
-package mapstructure
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"reflect"
-	"sort"
-	"strconv"
-	"strings"
-)
-
-// DecodeHookFunc is the callback function that can be used for
-// data transformations. See "DecodeHook" in the DecoderConfig
-// struct.
-//
-// The type should be DecodeHookFuncType or DecodeHookFuncKind.
-// Either is accepted. Types are a superset of Kinds (Types can return
-// Kinds) and are generally a richer thing to use, but Kinds are simpler
-// if you only need those.
-//
-// The reason DecodeHookFunc is multi-typed is for backwards compatibility:
-// we started with Kinds and then realized Types were the better solution,
-// but have a promise to not break backwards compat so we now support
-// both.
-type DecodeHookFunc interface{}
-
-// DecodeHookFuncType is a DecodeHookFunc which has complete information about
-// the source and target types.
-type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
-
-// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
-// source and target types.
-type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
-
-// DecoderConfig is the configuration that is used to create a new decoder
-// and allows customization of various aspects of decoding.
-type DecoderConfig struct {
-	// DecodeHook, if set, will be called before any decoding and any
-	// type conversion (if WeaklyTypedInput is on). This lets you modify
-	// the values before they're set down onto the resulting struct.
-	//
-	// If an error is returned, the entire decode will fail with that
-	// error.
-	DecodeHook DecodeHookFunc
-
-	// If ErrorUnused is true, then it is an error for there to exist
-	// keys in the original map that were unused in the decoding process
-	// (extra keys).
-	ErrorUnused bool
-
-	// ZeroFields, if set to true, will zero fields before writing them.
-	// For example, a map will be emptied before decoded values are put in
-	// it. If this is false, a map will be merged.
-	ZeroFields bool
-
-	// If WeaklyTypedInput is true, the decoder will make the following
-	// "weak" conversions:
-	//
-	//   - bools to string (true = "1", false = "0")
-	//   - numbers to string (base 10)
-	//   - bools to int/uint (true = 1, false = 0)
-	//   - strings to int/uint (base implied by prefix)
-	//   - int to bool (true if value != 0)
-	//   - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F,
-	//     FALSE, false, False. Anything else is an error)
-	//   - empty array = empty map and vice versa
-	//   - negative numbers to overflowed uint values (base 10)
-	//   - slice of maps to a merged map
-	//   - single values are converted to slices if required. Each
-	//     element is weakly decoded. For example: "4" can become []int{4}
-	//     if the target type is an int slice.
-	//
-	WeaklyTypedInput bool
-
-	// Metadata is the struct that will contain extra metadata about
-	// the decoding. If this is nil, then no metadata will be tracked.
-	Metadata *Metadata
-
-	// Result is a pointer to the struct that will contain the decoded
-	// value.
-	Result interface{}
-
-	// The tag name that mapstructure reads for field names. This
-	// defaults to "mapstructure"
-	TagName string
-}
-
-// A Decoder takes a raw interface value and turns it into structured
-// data, keeping track of rich error information along the way in case
-// anything goes wrong. Unlike the basic top-level Decode method, you can
-// more finely control how the Decoder behaves using the DecoderConfig
-// structure. The top-level Decode method is just a convenience that sets
-// up the most basic Decoder.
-type Decoder struct {
-	config *DecoderConfig
-}
-
-// Metadata contains information about decoding a structure that
-// is tedious or difficult to get otherwise.
-type Metadata struct {
-	// Keys are the keys of the structure which were successfully decoded
-	Keys []string
-
-	// Unused is a slice of keys that were found in the raw value but
-	// weren't decoded since there was no matching field in the result interface
-	Unused []string
-}
-
-// Decode takes an input structure and uses reflection to translate it to
-// the output structure. output must be a pointer to a map or struct.
-func Decode(input interface{}, output interface{}) error {
-	config := &DecoderConfig{
-		Metadata: nil,
-		Result:   output,
-	}
-
-	decoder, err := NewDecoder(config)
-	if err != nil {
-		return err
-	}
-
-	return decoder.Decode(input)
-}
-
-// WeakDecode is the same as Decode but is shorthand to enable
-// WeaklyTypedInput. See DecoderConfig for more info.
-func WeakDecode(input, output interface{}) error {
-	config := &DecoderConfig{
-		Metadata:         nil,
-		Result:           output,
-		WeaklyTypedInput: true,
-	}
-
-	decoder, err := NewDecoder(config)
-	if err != nil {
-		return err
-	}
-
-	return decoder.Decode(input)
-}
-
-// DecodeMetadata is the same as Decode, but is shorthand to
-// enable metadata collection. See DecoderConfig for more info.
-func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
-	config := &DecoderConfig{
-		Metadata: metadata,
-		Result:   output,
-	}
-
-	decoder, err := NewDecoder(config)
-	if err != nil {
-		return err
-	}
-
-	return decoder.Decode(input)
-}
-
-// WeakDecodeMetadata is the same as Decode, but is shorthand to
-// enable both WeaklyTypedInput and metadata collection. See
-// DecoderConfig for more info.
-func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error {
-	config := &DecoderConfig{
-		Metadata:         metadata,
-		Result:           output,
-		WeaklyTypedInput: true,
-	}
-
-	decoder, err := NewDecoder(config)
-	if err != nil {
-		return err
-	}
-
-	return decoder.Decode(input)
-}
-
-// NewDecoder returns a new decoder for the given configuration. Once
-// a decoder has been returned, the same configuration must not be used
-// again.
-func NewDecoder(config *DecoderConfig) (*Decoder, error) {
-	val := reflect.ValueOf(config.Result)
-	if val.Kind() != reflect.Ptr {
-		return nil, errors.New("result must be a pointer")
-	}
-
-	val = val.Elem()
-	if !val.CanAddr() {
-		return nil, errors.New("result must be addressable (a pointer)")
-	}
-
-	if config.Metadata != nil {
-		if config.Metadata.Keys == nil {
-			config.Metadata.Keys = make([]string, 0)
-		}
-
-		if config.Metadata.Unused == nil {
-			config.Metadata.Unused = make([]string, 0)
-		}
-	}
-
-	if config.TagName == "" {
-		config.TagName = "mapstructure"
-	}
-
-	result := &Decoder{
-		config: config,
-	}
-
-	return result, nil
-}
-
-// Decode decodes the given raw interface to the target pointer specified
-// by the configuration.
-func (d *Decoder) Decode(input interface{}) error {
-	return d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
-}
-
-// Decodes an unknown data type into a specific reflection value.
-func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error {
-	var inputVal reflect.Value
-	if input != nil {
-		inputVal = reflect.ValueOf(input)
-
-		// We need to check here if input is a typed nil. Typed nils won't
-		// match the "input == nil" below so we check that here.
-		if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() {
-			input = nil
-		}
-	}
-
-	if input == nil {
-		// If the data is nil, then we don't set anything, unless ZeroFields is set
-		// to true.
-		if d.config.ZeroFields {
-			outVal.Set(reflect.Zero(outVal.Type()))
-
-			if d.config.Metadata != nil && name != "" {
-				d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
-			}
-		}
-		return nil
-	}
-
-	if !inputVal.IsValid() {
-		// If the input value is invalid, then we just set the value
-		// to be the zero value.
-		outVal.Set(reflect.Zero(outVal.Type()))
-		if d.config.Metadata != nil && name != "" {
-			d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
-		}
-		return nil
-	}
-
-	if d.config.DecodeHook != nil {
-		// We have a DecodeHook, so let's pre-process the input.
-		var err error
-		input, err = DecodeHookExec(
-			d.config.DecodeHook,
-			inputVal.Type(), outVal.Type(), input)
-		if err != nil {
-			return fmt.Errorf("error decoding '%s': %s", name, err)
-		}
-	}
-
-	var err error
-	outputKind := getKind(outVal)
-	switch outputKind {
-	case reflect.Bool:
-		err = d.decodeBool(name, input, outVal)
-	case reflect.Interface:
-		err = d.decodeBasic(name, input, outVal)
-	case reflect.String:
-		err = d.decodeString(name, input, outVal)
-	case reflect.Int:
-		err = d.decodeInt(name, input, outVal)
-	case reflect.Uint:
-		err = d.decodeUint(name, input, outVal)
-	case reflect.Float32:
-		err = d.decodeFloat(name, input, outVal)
-	case reflect.Struct:
-		err = d.decodeStruct(name, input, outVal)
-	case reflect.Map:
-		err = d.decodeMap(name, input, outVal)
-	case reflect.Ptr:
-		err = d.decodePtr(name, input, outVal)
-	case reflect.Slice:
-		err = d.decodeSlice(name, input, outVal)
-	case reflect.Array:
-		err = d.decodeArray(name, input, outVal)
-	case reflect.Func:
-		err = d.decodeFunc(name, input, outVal)
-	default:
-		// If we reached this point then we weren't able to decode it
-		return fmt.Errorf("%s: unsupported type: %s", name, outputKind)
-	}
-
-	// If we reached here, then we successfully decoded SOMETHING, so
-	// mark the key as used if we're tracking metainput.
-	if d.config.Metadata != nil && name != "" {
-		d.config.Metadata.Keys = append(d.config.Metadata.Keys, name)
-	}
-
-	return err
-}
-
-// This decodes a basic type (bool, int, string, etc.) and sets the
-// value to "data" of that type.
-func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error {
-	if val.IsValid() && val.Elem().IsValid() {
-		return d.decode(name, data, val.Elem())
-	}
-
-	dataVal := reflect.ValueOf(data)
-
-	// If the input data is a pointer, and the assigned type is the dereference
-	// of that exact pointer, then indirect it so that we can assign it.
-	// Example: *string to string
-	if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() {
-		dataVal = reflect.Indirect(dataVal)
-	}
-
-	if !dataVal.IsValid() {
-		dataVal = reflect.Zero(val.Type())
-	}
-
-	dataValType := dataVal.Type()
-	if !dataValType.AssignableTo(val.Type()) {
-		return fmt.Errorf(
-			"'%s' expected type '%s', got '%s'",
-			name, val.Type(), dataValType)
-	}
-
-	val.Set(dataVal)
-	return nil
-}
-
-func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataKind := getKind(dataVal)
-
-	converted := true
-	switch {
-	case dataKind == reflect.String:
-		val.SetString(dataVal.String())
-	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
-		if dataVal.Bool() {
-			val.SetString("1")
-		} else {
-			val.SetString("0")
-		}
-	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
-		val.SetString(strconv.FormatInt(dataVal.Int(), 10))
-	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
-		val.SetString(strconv.FormatUint(dataVal.Uint(), 10))
-	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
-		val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64))
-	case dataKind == reflect.Slice && d.config.WeaklyTypedInput,
-		dataKind == reflect.Array && d.config.WeaklyTypedInput:
-		dataType := dataVal.Type()
-		elemKind := dataType.Elem().Kind()
-		switch elemKind {
-		case reflect.Uint8:
-			var uints []uint8
-			if dataKind == reflect.Array {
-				uints = make([]uint8, dataVal.Len(), dataVal.Len())
-				for i := range uints {
-					uints[i] = dataVal.Index(i).Interface().(uint8)
-				}
-			} else {
-				uints = dataVal.Interface().([]uint8)
-			}
-			val.SetString(string(uints))
-		default:
-			converted = false
-		}
-	default:
-		converted = false
-	}
-
-	if !converted {
-		return fmt.Errorf(
-			"'%s' expected type '%s', got unconvertible type '%s'",
-			name, val.Type(), dataVal.Type())
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataKind := getKind(dataVal)
-	dataType := dataVal.Type()
-
-	switch {
-	case dataKind == reflect.Int:
-		val.SetInt(dataVal.Int())
-	case dataKind == reflect.Uint:
-		val.SetInt(int64(dataVal.Uint()))
-	case dataKind == reflect.Float32:
-		val.SetInt(int64(dataVal.Float()))
-	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
-		if dataVal.Bool() {
-			val.SetInt(1)
-		} else {
-			val.SetInt(0)
-		}
-	case dataKind == reflect.String && d.config.WeaklyTypedInput:
-		i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits())
-		if err == nil {
-			val.SetInt(i)
-		} else {
-			return fmt.Errorf("cannot parse '%s' as int: %s", name, err)
-		}
-	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
-		jn := data.(json.Number)
-		i, err := jn.Int64()
-		if err != nil {
-			return fmt.Errorf(
-				"error decoding json.Number into %s: %s", name, err)
-		}
-		val.SetInt(i)
-	default:
-		return fmt.Errorf(
-			"'%s' expected type '%s', got unconvertible type '%s'",
-			name, val.Type(), dataVal.Type())
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataKind := getKind(dataVal)
-
-	switch {
-	case dataKind == reflect.Int:
-		i := dataVal.Int()
-		if i < 0 && !d.config.WeaklyTypedInput {
-			return fmt.Errorf("cannot parse '%s', %d overflows uint",
-				name, i)
-		}
-		val.SetUint(uint64(i))
-	case dataKind == reflect.Uint:
-		val.SetUint(dataVal.Uint())
-	case dataKind == reflect.Float32:
-		f := dataVal.Float()
-		if f < 0 && !d.config.WeaklyTypedInput {
-			return fmt.Errorf("cannot parse '%s', %f overflows uint",
-				name, f)
-		}
-		val.SetUint(uint64(f))
-	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
-		if dataVal.Bool() {
-			val.SetUint(1)
-		} else {
-			val.SetUint(0)
-		}
-	case dataKind == reflect.String && d.config.WeaklyTypedInput:
-		i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits())
-		if err == nil {
-			val.SetUint(i)
-		} else {
-			return fmt.Errorf("cannot parse '%s' as uint: %s", name, err)
-		}
-	default:
-		return fmt.Errorf(
-			"'%s' expected type '%s', got unconvertible type '%s'",
-			name, val.Type(), dataVal.Type())
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataKind := getKind(dataVal)
-
-	switch {
-	case dataKind == reflect.Bool:
-		val.SetBool(dataVal.Bool())
-	case dataKind == reflect.Int && d.config.WeaklyTypedInput:
-		val.SetBool(dataVal.Int() != 0)
-	case dataKind == reflect.Uint && d.config.WeaklyTypedInput:
-		val.SetBool(dataVal.Uint() != 0)
-	case dataKind == reflect.Float32 && d.config.WeaklyTypedInput:
-		val.SetBool(dataVal.Float() != 0)
-	case dataKind == reflect.String && d.config.WeaklyTypedInput:
-		b, err := strconv.ParseBool(dataVal.String())
-		if err == nil {
-			val.SetBool(b)
-		} else if dataVal.String() == "" {
-			val.SetBool(false)
-		} else {
-			return fmt.Errorf("cannot parse '%s' as bool: %s", name, err)
-		}
-	default:
-		return fmt.Errorf(
-			"'%s' expected type '%s', got unconvertible type '%s'",
-			name, val.Type(), dataVal.Type())
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataKind := getKind(dataVal)
-	dataType := dataVal.Type()
-
-	switch {
-	case dataKind == reflect.Int:
-		val.SetFloat(float64(dataVal.Int()))
-	case dataKind == reflect.Uint:
-		val.SetFloat(float64(dataVal.Uint()))
-	case dataKind == reflect.Float32:
-		val.SetFloat(dataVal.Float())
-	case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
-		if dataVal.Bool() {
-			val.SetFloat(1)
-		} else {
-			val.SetFloat(0)
-		}
-	case dataKind == reflect.String && d.config.WeaklyTypedInput:
-		f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits())
-		if err == nil {
-			val.SetFloat(f)
-		} else {
-			return fmt.Errorf("cannot parse '%s' as float: %s", name, err)
-		}
-	case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
-		jn := data.(json.Number)
-		i, err := jn.Float64()
-		if err != nil {
-			return fmt.Errorf(
-				"error decoding json.Number into %s: %s", name, err)
-		}
-		val.SetFloat(i)
-	default:
-		return fmt.Errorf(
-			"'%s' expected type '%s', got unconvertible type '%s'",
-			name, val.Type(), dataVal.Type())
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
-	valType := val.Type()
-	valKeyType := valType.Key()
-	valElemType := valType.Elem()
-
-	// By default we overwrite keys in the current map
-	valMap := val
-
-	// If the map is nil or we're purposely zeroing fields, make a new map
-	if valMap.IsNil() || d.config.ZeroFields {
-		// Make a new map to hold our result
-		mapType := reflect.MapOf(valKeyType, valElemType)
-		valMap = reflect.MakeMap(mapType)
-	}
-
-	// Check input type and based on the input type jump to the proper func
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	switch dataVal.Kind() {
-	case reflect.Map:
-		return d.decodeMapFromMap(name, dataVal, val, valMap)
-
-	case reflect.Struct:
-		return d.decodeMapFromStruct(name, dataVal, val, valMap)
-
-	case reflect.Array, reflect.Slice:
-		if d.config.WeaklyTypedInput {
-			return d.decodeMapFromSlice(name, dataVal, val, valMap)
-		}
-
-		fallthrough
-
-	default:
-		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
-	}
-}
-
-func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
-	// Special case for BC reasons (covered by tests)
-	if dataVal.Len() == 0 {
-		val.Set(valMap)
-		return nil
-	}
-
-	for i := 0; i < dataVal.Len(); i++ {
-		err := d.decode(
-			fmt.Sprintf("%s[%d]", name, i),
-			dataVal.Index(i).Interface(), val)
-		if err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
-	valType := val.Type()
-	valKeyType := valType.Key()
-	valElemType := valType.Elem()
-
-	// Accumulate errors
-	errors := make([]string, 0)
-
-	// If the input data is empty, then we just match what the input data is.
-	if dataVal.Len() == 0 {
-		if dataVal.IsNil() {
-			if !val.IsNil() {
-				val.Set(dataVal)
-			}
-		} else {
-			// Set to empty allocated value
-			val.Set(valMap)
-		}
-
-		return nil
-	}
-
-	for _, k := range dataVal.MapKeys() {
-		fieldName := fmt.Sprintf("%s[%s]", name, k)
-
-		// First decode the key into the proper type
-		currentKey := reflect.Indirect(reflect.New(valKeyType))
-		if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
-			errors = appendErrors(errors, err)
-			continue
-		}
-
-		// Next decode the data into the proper type
-		v := dataVal.MapIndex(k).Interface()
-		currentVal := reflect.Indirect(reflect.New(valElemType))
-		if err := d.decode(fieldName, v, currentVal); err != nil {
-			errors = appendErrors(errors, err)
-			continue
-		}
-
-		valMap.SetMapIndex(currentKey, currentVal)
-	}
-
-	// Set the built up map to the value
-	val.Set(valMap)
-
-	// If we had errors, return those
-	if len(errors) > 0 {
-		return &Error{errors}
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
-	typ := dataVal.Type()
-	for i := 0; i < typ.NumField(); i++ {
-		// Get the StructField first since this is a cheap operation. If the
-		// field is unexported, then ignore it.
-		f := typ.Field(i)
-		if f.PkgPath != "" {
-			continue
-		}
-
-		// Next get the actual value of this field and verify it is assignable
-		// to the map value.
-		v := dataVal.Field(i)
-		if !v.Type().AssignableTo(valMap.Type().Elem()) {
-			return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem())
-		}
-
-		tagValue := f.Tag.Get(d.config.TagName)
-		tagParts := strings.Split(tagValue, ",")
-
-		// Determine the name of the key in the map
-		keyName := f.Name
-		if tagParts[0] != "" {
-			if tagParts[0] == "-" {
-				continue
-			}
-			keyName = tagParts[0]
-		}
-
-		// If "squash" is specified in the tag, we squash the field down.
-		squash := false
-		for _, tag := range tagParts[1:] {
-			if tag == "squash" {
-				squash = true
-				break
-			}
-		}
-		if squash && v.Kind() != reflect.Struct {
-			return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
-		}
-
-		switch v.Kind() {
-		// this is an embedded struct, so handle it differently
-		case reflect.Struct:
-			x := reflect.New(v.Type())
-			x.Elem().Set(v)
-
-			vType := valMap.Type()
-			vKeyType := vType.Key()
-			vElemType := vType.Elem()
-			mType := reflect.MapOf(vKeyType, vElemType)
-			vMap := reflect.MakeMap(mType)
-
-			err := d.decode(keyName, x.Interface(), vMap)
-			if err != nil {
-				return err
-			}
-
-			if squash {
-				for _, k := range vMap.MapKeys() {
-					valMap.SetMapIndex(k, vMap.MapIndex(k))
-				}
-			} else {
-				valMap.SetMapIndex(reflect.ValueOf(keyName), vMap)
-			}
-
-		default:
-			valMap.SetMapIndex(reflect.ValueOf(keyName), v)
-		}
-	}
-
-	if val.CanAddr() {
-		val.Set(valMap)
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error {
-	// If the input data is nil, then we want to just set the output
-	// pointer to be nil as well.
-	isNil := data == nil
-	if !isNil {
-		switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() {
-		case reflect.Chan,
-			reflect.Func,
-			reflect.Interface,
-			reflect.Map,
-			reflect.Ptr,
-			reflect.Slice:
-			isNil = v.IsNil()
-		}
-	}
-	if isNil {
-		if !val.IsNil() && val.CanSet() {
-			nilValue := reflect.New(val.Type()).Elem()
-			val.Set(nilValue)
-		}
-
-		return nil
-	}
-
-	// Create an element of the concrete (non pointer) type and decode
-	// into that. Then set the value of the pointer to this type.
-	valType := val.Type()
-	valElemType := valType.Elem()
-	if val.CanSet() {
-		realVal := val
-		if realVal.IsNil() || d.config.ZeroFields {
-			realVal = reflect.New(valElemType)
-		}
-
-		if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil {
-			return err
-		}
-
-		val.Set(realVal)
-	} else {
-		if err := d.decode(name, data, reflect.Indirect(val)); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error {
-	// Create an element of the concrete (non pointer) type and decode
-	// into that. Then set the value of the pointer to this type.
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	if val.Type() != dataVal.Type() {
-		return fmt.Errorf(
-			"'%s' expected type '%s', got unconvertible type '%s'",
-			name, val.Type(), dataVal.Type())
-	}
-	val.Set(dataVal)
-	return nil
-}
-
-func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataValKind := dataVal.Kind()
-	valType := val.Type()
-	valElemType := valType.Elem()
-	sliceType := reflect.SliceOf(valElemType)
-
-	valSlice := val
-	if valSlice.IsNil() || d.config.ZeroFields {
-		if d.config.WeaklyTypedInput {
-			switch {
-			// Slice and array we use the normal logic
-			case dataValKind == reflect.Slice, dataValKind == reflect.Array:
-				break
-
-			// Empty maps turn into empty slices
-			case dataValKind == reflect.Map:
-				if dataVal.Len() == 0 {
-					val.Set(reflect.MakeSlice(sliceType, 0, 0))
-					return nil
-				}
-				// Create slice of maps of other sizes
-				return d.decodeSlice(name, []interface{}{data}, val)
-
-			case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8:
-				return d.decodeSlice(name, []byte(dataVal.String()), val)
-
-			// All other types we try to convert to the slice type
-			// and "lift" it into it. i.e. a string becomes a string slice.
-			default:
-				// Just re-try this function with data as a slice.
-				return d.decodeSlice(name, []interface{}{data}, val)
-			}
-		}
-
-		// Check input type
-		if dataValKind != reflect.Array && dataValKind != reflect.Slice {
-			return fmt.Errorf(
-				"'%s': source data must be an array or slice, got %s", name, dataValKind)
-
-		}
-
-		// If the input value is empty, then don't allocate since non-nil != nil
-		if dataVal.Len() == 0 {
-			return nil
-		}
-
-		// Make a new slice to hold our result, same size as the original data.
-		valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
-	}
-
-	// Accumulate any errors
-	errors := make([]string, 0)
-
-	for i := 0; i < dataVal.Len(); i++ {
-		currentData := dataVal.Index(i).Interface()
-		for valSlice.Len() <= i {
-			valSlice = reflect.Append(valSlice, reflect.Zero(valElemType))
-		}
-		currentField := valSlice.Index(i)
-
-		fieldName := fmt.Sprintf("%s[%d]", name, i)
-		if err := d.decode(fieldName, currentData, currentField); err != nil {
-			errors = appendErrors(errors, err)
-		}
-	}
-
-	// Finally, set the value to the slice we built up
-	val.Set(valSlice)
-
-	// If there were errors, we return those
-	if len(errors) > 0 {
-		return &Error{errors}
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-	dataValKind := dataVal.Kind()
-	valType := val.Type()
-	valElemType := valType.Elem()
-	arrayType := reflect.ArrayOf(valType.Len(), valElemType)
-
-	valArray := val
-
-	if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
-		// Check input type
-		if dataValKind != reflect.Array && dataValKind != reflect.Slice {
-			if d.config.WeaklyTypedInput {
-				switch {
-				// Empty maps turn into empty arrays
-				case dataValKind == reflect.Map:
-					if dataVal.Len() == 0 {
-						val.Set(reflect.Zero(arrayType))
-						return nil
-					}
-
-				// All other types we try to convert to the array type
-				// and "lift" it into it. i.e. a string becomes a string array.
-				default:
-					// Just re-try this function with data as a slice.
-					return d.decodeArray(name, []interface{}{data}, val)
-				}
-			}
-
-			return fmt.Errorf(
-				"'%s': source data must be an array or slice, got %s", name, dataValKind)
-
-		}
-		if dataVal.Len() > arrayType.Len() {
-			return fmt.Errorf(
-				"'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
-
-		}
-
-		// Make a new array to hold our result, same size as the original data.
-		valArray = reflect.New(arrayType).Elem()
-	}
-
-	// Accumulate any errors
-	errors := make([]string, 0)
-
-	for i := 0; i < dataVal.Len(); i++ {
-		currentData := dataVal.Index(i).Interface()
-		currentField := valArray.Index(i)
-
-		fieldName := fmt.Sprintf("%s[%d]", name, i)
-		if err := d.decode(fieldName, currentData, currentField); err != nil {
-			errors = appendErrors(errors, err)
-		}
-	}
-
-	// Finally, set the value to the array we built up
-	val.Set(valArray)
-
-	// If there were errors, we return those
-	if len(errors) > 0 {
-		return &Error{errors}
-	}
-
-	return nil
-}
-
-func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
-	dataVal := reflect.Indirect(reflect.ValueOf(data))
-
-	// If the type of the value to write to and the data match directly,
-	// then we just set it directly instead of recursing into the structure.
-	if dataVal.Type() == val.Type() {
-		val.Set(dataVal)
-		return nil
-	}
-
-	dataValKind := dataVal.Kind()
-	switch dataValKind {
-	case reflect.Map:
-		return d.decodeStructFromMap(name, dataVal, val)
-
-	case reflect.Struct:
-		// Not the most efficient way to do this but we can optimize later if
-		// we want to. To convert from struct to struct we go to map first
-		// as an intermediary.
-		m := make(map[string]interface{})
-		mval := reflect.Indirect(reflect.ValueOf(&m))
-		if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil {
-			return err
-		}
-
-		result := d.decodeStructFromMap(name, mval, val)
-		return result
-
-	default:
-		return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind())
-	}
-}
-
-func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error {
-	dataValType := dataVal.Type()
-	if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface {
-		return fmt.Errorf(
-			"'%s' needs a map with string keys, has '%s' keys",
-			name, dataValType.Key().Kind())
-	}
-
-	dataValKeys := make(map[reflect.Value]struct{})
-	dataValKeysUnused := make(map[interface{}]struct{})
-	for _, dataValKey := range dataVal.MapKeys() {
-		dataValKeys[dataValKey] = struct{}{}
-		dataValKeysUnused[dataValKey.Interface()] = struct{}{}
-	}
-
-	errors := make([]string, 0)
-
-	// This slice will keep track of all the structs we'll be decoding.
-	// There can be more than one struct if there are embedded structs
-	// that are squashed.
-	structs := make([]reflect.Value, 1, 5)
-	structs[0] = val
-
-	// Compile the list of all the fields that we're going to be decoding
-	// from all the structs.
-	type field struct {
-		field reflect.StructField
-		val   reflect.Value
-	}
-	fields := []field{}
-	for len(structs) > 0 {
-		structVal := structs[0]
-		structs = structs[1:]
-
-		structType := structVal.Type()
-
-		for i := 0; i < structType.NumField(); i++ {
-			fieldType := structType.Field(i)
-			fieldKind := fieldType.Type.Kind()
-
-			// If "squash" is specified in the tag, we squash the field down.
-			squash := false
-			tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",")
-			for _, tag := range tagParts[1:] {
-				if tag == "squash" {
-					squash = true
-					break
-				}
-			}
-
-			if squash {
-				if fieldKind != reflect.Struct {
-					errors = appendErrors(errors,
-						fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind))
-				} else {
-					structs = append(structs, structVal.FieldByName(fieldType.Name))
-				}
-				continue
-			}
-
-			// Normal struct field, store it away
-			fields = append(fields, field{fieldType, structVal.Field(i)})
-		}
-	}
-
-	// for fieldType, field := range fields {
-	for _, f := range fields {
-		field, fieldValue := f.field, f.val
-		fieldName := field.Name
-
-		tagValue := field.Tag.Get(d.config.TagName)
-		tagValue = strings.SplitN(tagValue, ",", 2)[0]
-		if tagValue != "" {
-			fieldName = tagValue
-		}
-
-		rawMapKey := reflect.ValueOf(fieldName)
-		rawMapVal := dataVal.MapIndex(rawMapKey)
-		if !rawMapVal.IsValid() {
-			// Do a slower search by iterating over each key and
-			// doing case-insensitive search.
-			for dataValKey := range dataValKeys {
-				mK, ok := dataValKey.Interface().(string)
-				if !ok {
-					// Not a string key
-					continue
-				}
-
-				if strings.EqualFold(mK, fieldName) {
-					rawMapKey = dataValKey
-					rawMapVal = dataVal.MapIndex(dataValKey)
-					break
-				}
-			}
-
-			if !rawMapVal.IsValid() {
-				// There was no matching key in the map for the value in
-				// the struct. Just ignore.
-				continue
-			}
-		}
-
-		// Delete the key we're using from the unused map so we stop tracking
-		delete(dataValKeysUnused, rawMapKey.Interface())
-
-		if !fieldValue.IsValid() {
-			// This should never happen
-			panic("field is not valid")
-		}
-
-		// If we can't set the field, then it is unexported or something,
-		// and we just continue onwards.
-		if !fieldValue.CanSet() {
-			continue
-		}
-
-		// If the name is empty string, then we're at the root, and we
-		// don't dot-join the fields.
-		if name != "" {
-			fieldName = fmt.Sprintf("%s.%s", name, fieldName)
-		}
-
-		if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
-			errors = appendErrors(errors, err)
-		}
-	}
-
-	if d.config.ErrorUnused && len(dataValKeysUnused) > 0 {
-		keys := make([]string, 0, len(dataValKeysUnused))
-		for rawKey := range dataValKeysUnused {
-			keys = append(keys, rawKey.(string))
-		}
-		sort.Strings(keys)
-
-		err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
-		errors = appendErrors(errors, err)
-	}
-
-	if len(errors) > 0 {
-		return &Error{errors}
-	}
-
-	// Add the unused keys to the list of unused keys if we're tracking metadata
-	if d.config.Metadata != nil {
-		for rawKey := range dataValKeysUnused {
-			key := rawKey.(string)
-			if name != "" {
-				key = fmt.Sprintf("%s.%s", name, key)
-			}
-
-			d.config.Metadata.Unused = append(d.config.Metadata.Unused, key)
-		}
-	}
-
-	return nil
-}
-
-func getKind(val reflect.Value) reflect.Kind {
-	kind := val.Kind()
-
-	switch {
-	case kind >= reflect.Int && kind <= reflect.Int64:
-		return reflect.Int
-	case kind >= reflect.Uint && kind <= reflect.Uint64:
-		return reflect.Uint
-	case kind >= reflect.Float32 && kind <= reflect.Float64:
-		return reflect.Float32
-	default:
-		return kind
-	}
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/configmanager.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/configmanager.go
index 4b1c841..8350225 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/configmanager.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/config/configmanager.go
@@ -29,7 +29,7 @@
 
 const (
 	defaultkvStoreConfigPath     = "config"
-	defaultkvStoreDataPathPrefix = "service/voltha"
+	defaultkvStoreDataPathPrefix = "service/voltha_voltha"
 	kvStorePathSeparator         = "/"
 )
 
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/backend.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/backend.go
index d6867a5..bf30a48 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/backend.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/backend.go
@@ -75,8 +75,6 @@
 
 func (b *Backend) newClient(ctx context.Context, address string, timeout time.Duration) (kvstore.Client, error) {
 	switch b.StoreType {
-	case "consul":
-		return kvstore.NewConsulClient(ctx, address, timeout)
 	case "etcd":
 		return kvstore.NewEtcdClient(ctx, address, timeout, log.WarnLevel)
 	}
@@ -170,9 +168,6 @@
 			case codes.DataLoss:
 				alive = false
 			}
-
-			//} else {
-			// TODO: Implement for consul backend; would it be needed ever?
 		}
 	}
 
@@ -239,6 +234,21 @@
 	return err
 }
 
+// DeleteWithPrefix removes all items whose keys begin with the given prefix
+func (b *Backend) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
+	span, ctx := log.CreateChildSpan(ctx, "etcd-delete-with-prefix")
+	defer span.Finish()
+
+	formattedPath := b.makePath(ctx, prefixKey)
+	logger.Debugw(ctx, "deleting-prefix-key", log.Fields{"key": prefixKey, "path": formattedPath})
+
+	err := b.Client.DeleteWithPrefix(ctx, formattedPath)
+
+	b.updateLiveness(ctx, b.isErrorIndicatingAliveKvstore(ctx, err))
+
+	return err
+}
+
 // CreateWatch starts watching events for the specified key
 func (b *Backend) CreateWatch(ctx context.Context, key string, withPrefix bool) chan *kvstore.Event {
 	span, ctx := log.CreateChildSpan(ctx, "etcd-create-watch")
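
DeleteWithPrefix on the Backend formats the path and delegates to the client's new prefix delete, so a caller can clear a whole subtree without listing keys first. A minimal sketch of driving the same operation at the kvstore client level, assuming a local etcd endpoint and an illustrative key prefix:

package main

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v4/pkg/log"
)

func main() {
	ctx := context.Background()

	// Illustrative endpoint and timeout; not taken from this change.
	client, err := kvstore.NewEtcdClient(ctx, "127.0.0.1:2379", 5*time.Second, log.WarnLevel)
	if err != nil {
		panic(err)
	}
	defer client.Close(ctx)

	// Remove every key stored under the prefix in a single call; the
	// "/devices" suffix is purely illustrative.
	if err := client.DeleteWithPrefix(ctx, "service/voltha_voltha/devices"); err != nil {
		panic(err)
	}
}
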
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/client.go
index 480d476..b35f1f3 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/client.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/client.go
@@ -21,8 +21,6 @@
 )
 
 const (
-	// Default timeout in seconds when making a kvstore request
-	defaultKVGetTimeout = 5 * time.Second
 	// Maximum channel buffer between publisher/subscriber goroutines
 	maxClientChannelBufferSize = 10
 )
@@ -80,6 +78,7 @@
 	Get(ctx context.Context, key string) (*KVPair, error)
 	Put(ctx context.Context, key string, value interface{}) error
 	Delete(ctx context.Context, key string) error
+	DeleteWithPrefix(ctx context.Context, prefixKey string) error
 	Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error)
 	ReleaseReservation(ctx context.Context, key string) error
 	ReleaseAllReservations(ctx context.Context) error
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/consulclient.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/consulclient.go
deleted file mode 100644
index 2593608..0000000
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/consulclient.go
+++ /dev/null
@@ -1,512 +0,0 @@
-/*
- * Copyright 2018-present Open Networking Foundation
-
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
-
- * http://www.apache.org/licenses/LICENSE-2.0
-
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package kvstore
-
-import (
-	"bytes"
-	"context"
-	"errors"
-	log "github.com/opencord/voltha-lib-go/v4/pkg/log"
-	"sync"
-	"time"
-	//log "ciena.com/coordinator/common"
-	consulapi "github.com/hashicorp/consul/api"
-)
-
-type channelContextMap struct {
-	ctx     context.Context
-	channel chan *Event
-	cancel  context.CancelFunc
-}
-
-// ConsulClient represents the consul KV store client
-type ConsulClient struct {
-	session                *consulapi.Session
-	sessionID              string
-	consul                 *consulapi.Client
-	doneCh                 *chan int
-	keyReservations        map[string]interface{}
-	watchedChannelsContext map[string][]*channelContextMap
-	writeLock              sync.Mutex
-}
-
-// NewConsulClient returns a new client for the Consul KV store
-func NewConsulClient(ctx context.Context, addr string, timeout time.Duration) (*ConsulClient, error) {
-	config := consulapi.DefaultConfig()
-	config.Address = addr
-	config.WaitTime = timeout
-	consul, err := consulapi.NewClient(config)
-	if err != nil {
-		logger.Error(ctx, err)
-		return nil, err
-	}
-
-	doneCh := make(chan int, 1)
-	wChannelsContext := make(map[string][]*channelContextMap)
-	reservations := make(map[string]interface{})
-	return &ConsulClient{consul: consul, doneCh: &doneCh, watchedChannelsContext: wChannelsContext, keyReservations: reservations}, nil
-}
-
-// IsConnectionUp returns whether the connection to the Consul KV store is up
-func (c *ConsulClient) IsConnectionUp(ctx context.Context) bool {
-	logger.Error(ctx, "Unimplemented function")
-	return false
-}
-
-// List returns an array of key-value pairs with key as a prefix.  Timeout defines how long the function will
-// wait for a response
-func (c *ConsulClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
-
-	deadline, _ := ctx.Deadline()
-	kv := c.consul.KV()
-	var queryOptions consulapi.QueryOptions
-	// Substract current time from deadline to get the waitTime duration
-	queryOptions.WaitTime = time.Until(deadline)
-
-	// For now we ignore meta data
-	kvps, _, err := kv.List(key, &queryOptions)
-	if err != nil {
-		logger.Error(ctx, err)
-		return nil, err
-	}
-	m := make(map[string]*KVPair)
-	for _, kvp := range kvps {
-		m[string(kvp.Key)] = NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0, -1)
-	}
-	return m, nil
-}
-
-// Get returns a key-value pair for a given key. Timeout defines how long the function will
-// wait for a response
-func (c *ConsulClient) Get(ctx context.Context, key string) (*KVPair, error) {
-
-	deadline, _ := ctx.Deadline()
-	kv := c.consul.KV()
-	var queryOptions consulapi.QueryOptions
-	// Substract current time from deadline to get the waitTime duration
-	queryOptions.WaitTime = time.Until(deadline)
-
-	// For now we ignore meta data
-	kvp, _, err := kv.Get(key, &queryOptions)
-	if err != nil {
-		logger.Error(ctx, err)
-		return nil, err
-	}
-	if kvp != nil {
-		return NewKVPair(string(kvp.Key), kvp.Value, string(kvp.Session), 0, -1), nil
-	}
-
-	return nil, nil
-}
-
-// Put writes a key-value pair to the KV store.  Value can only be a string or []byte since the consul API
-// accepts only a []byte as a value for a put operation. Timeout defines how long the function will
-// wait for a response
-func (c *ConsulClient) Put(ctx context.Context, key string, value interface{}) error {
-
-	// Validate that we can create a byte array from the value as consul API expects a byte array
-	var val []byte
-	var er error
-	if val, er = ToByte(value); er != nil {
-		logger.Error(ctx, er)
-		return er
-	}
-
-	// Create a key value pair
-	kvp := consulapi.KVPair{Key: key, Value: val}
-	kv := c.consul.KV()
-	var writeOptions consulapi.WriteOptions
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
-	_, err := kv.Put(&kvp, &writeOptions)
-	if err != nil {
-		logger.Error(ctx, err)
-		return err
-	}
-	return nil
-}
-
-// Delete removes a key from the KV store. Timeout defines how long the function will
-// wait for a response
-func (c *ConsulClient) Delete(ctx context.Context, key string) error {
-	kv := c.consul.KV()
-	var writeOptions consulapi.WriteOptions
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
-	_, err := kv.Delete(key, &writeOptions)
-	if err != nil {
-		logger.Error(ctx, err)
-		return err
-	}
-	return nil
-}
-
-func (c *ConsulClient) deleteSession(ctx context.Context) {
-	if c.sessionID != "" {
-		logger.Debug(ctx, "cleaning-up-session")
-		session := c.consul.Session()
-		_, err := session.Destroy(c.sessionID, nil)
-		if err != nil {
-			logger.Errorw(ctx, "error-cleaning-session", log.Fields{"session": c.sessionID, "error": err})
-		}
-	}
-	c.sessionID = ""
-	c.session = nil
-}
-
-func (c *ConsulClient) createSession(ctx context.Context, ttl time.Duration, retries int) (*consulapi.Session, string, error) {
-	session := c.consul.Session()
-	entry := &consulapi.SessionEntry{
-		Behavior: consulapi.SessionBehaviorDelete,
-		TTL:      ttl.String(),
-	}
-
-	for {
-		id, meta, err := session.Create(entry, nil)
-		if err != nil {
-			logger.Errorw(ctx, "create-session-error", log.Fields{"error": err})
-			if retries == 0 {
-				return nil, "", err
-			}
-		} else if meta.RequestTime == 0 {
-			logger.Errorw(ctx, "create-session-bad-meta-data", log.Fields{"meta-data": meta})
-			if retries == 0 {
-				return nil, "", errors.New("bad-meta-data")
-			}
-		} else if id == "" {
-			logger.Error(ctx, "create-session-nil-id")
-			if retries == 0 {
-				return nil, "", errors.New("ID-nil")
-			}
-		} else {
-			return session, id, nil
-		}
-		// If retry param is -1 we will retry indefinitely
-		if retries > 0 {
-			retries--
-		}
-		logger.Debug(ctx, "retrying-session-create-after-a-second-delay")
-		time.Sleep(time.Duration(1) * time.Second)
-	}
-}
-
-// Helper function to verify mostly whether the content of two interface types are the same.  Focus is []byte and
-// string types
-func isEqual(val1 interface{}, val2 interface{}) bool {
-	b1, err := ToByte(val1)
-	b2, er := ToByte(val2)
-	if err == nil && er == nil {
-		return bytes.Equal(b1, b2)
-	}
-	return val1 == val2
-}
-
-// Reserve is invoked to acquire a key and set it to a given value. Value can only be a string or []byte since
-// the consul API accepts only a []byte.  Timeout defines how long the function will wait for a response.  TTL
-// defines how long that reservation is valid.  When TTL expires the key is unreserved by the KV store itself.
-// If the key is acquired then the value returned will be the value passed in.  If the key is already acquired
-// then the value assigned to that key will be returned.
-func (c *ConsulClient) Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error) {
-
-	// Validate that we can create a byte array from the value as consul API expects a byte array
-	var val []byte
-	var er error
-	if val, er = ToByte(value); er != nil {
-		logger.Error(ctx, er)
-		return nil, er
-	}
-
-	// Cleanup any existing session and recreate new ones.  A key is reserved against a session
-	if c.sessionID != "" {
-		c.deleteSession(ctx)
-	}
-
-	// Clear session if reservation is not successful
-	reservationSuccessful := false
-	defer func() {
-		if !reservationSuccessful {
-			logger.Debug(ctx, "deleting-session")
-			c.deleteSession(ctx)
-		}
-	}()
-
-	session, sessionID, err := c.createSession(ctx, ttl, -1)
-	if err != nil {
-		logger.Errorw(ctx, "no-session-created", log.Fields{"error": err})
-		return "", errors.New("no-session-created")
-	}
-	logger.Debugw(ctx, "session-created", log.Fields{"session-id": sessionID})
-	c.sessionID = sessionID
-	c.session = session
-
-	// Try to grap the Key using the session
-	kv := c.consul.KV()
-	kvp := consulapi.KVPair{Key: key, Value: val, Session: c.sessionID}
-	result, _, err := kv.Acquire(&kvp, nil)
-	if err != nil {
-		logger.Errorw(ctx, "error-acquiring-keys", log.Fields{"error": err})
-		return nil, err
-	}
-
-	logger.Debugw(ctx, "key-acquired", log.Fields{"key": key, "status": result})
-
-	// Irrespective whether we were successful in acquiring the key, let's read it back and see if it's us.
-	m, err := c.Get(ctx, key)
-	if err != nil {
-		return nil, err
-	}
-	if m != nil {
-		logger.Debugw(ctx, "response-received", log.Fields{"key": m.Key, "m.value": string(m.Value.([]byte)), "value": value})
-		if m.Key == key && isEqual(m.Value, value) {
-			// My reservation is successful - register it.  For now, support is only for 1 reservation per key
-			// per session.
-			reservationSuccessful = true
-			c.writeLock.Lock()
-			c.keyReservations[key] = m.Value
-			c.writeLock.Unlock()
-			return m.Value, nil
-		}
-		// My reservation has failed.  Return the owner of that key
-		return m.Value, nil
-	}
-	return nil, nil
-}
-
-// ReleaseAllReservations releases all key reservations previously made (using Reserve API)
-func (c *ConsulClient) ReleaseAllReservations(ctx context.Context) error {
-	kv := c.consul.KV()
-	var kvp consulapi.KVPair
-	var result bool
-	var err error
-
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
-
-	for key, value := range c.keyReservations {
-		kvp = consulapi.KVPair{Key: key, Value: value.([]byte), Session: c.sessionID}
-		result, _, err = kv.Release(&kvp, nil)
-		if err != nil {
-			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key, "error": err})
-			return err
-		}
-		if !result {
-			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key})
-		}
-		delete(c.keyReservations, key)
-	}
-	return nil
-}
-
-// ReleaseReservation releases reservation for a specific key.
-func (c *ConsulClient) ReleaseReservation(ctx context.Context, key string) error {
-	var ok bool
-	var reservedValue interface{}
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
-	if reservedValue, ok = c.keyReservations[key]; !ok {
-		return errors.New("key-not-reserved:" + key)
-	}
-	// Release the reservation
-	kv := c.consul.KV()
-	kvp := consulapi.KVPair{Key: key, Value: reservedValue.([]byte), Session: c.sessionID}
-
-	result, _, er := kv.Release(&kvp, nil)
-	if er != nil {
-		return er
-	}
-	// Remove that key entry on success
-	if result {
-		delete(c.keyReservations, key)
-		return nil
-	}
-	return errors.New("key-cannot-be-unreserved")
-}
-
-// RenewReservation renews a reservation.  A reservation will go stale after the specified TTL (Time To Live)
-// period specified when reserving the key
-func (c *ConsulClient) RenewReservation(ctx context.Context, key string) error {
-	// In the case of Consul, renew reservation of a reserve key only require renewing the client session.
-
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
-
-	// Verify the key was reserved
-	if _, ok := c.keyReservations[key]; !ok {
-		return errors.New("key-not-reserved")
-	}
-
-	if c.session == nil {
-		return errors.New("no-session-exist")
-	}
-
-	var writeOptions consulapi.WriteOptions
-	if _, _, err := c.session.Renew(c.sessionID, &writeOptions); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Watch provides the watch capability on a given key.  It returns a channel onto which the callee needs to
-// listen to receive Events.
-func (c *ConsulClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
-
-	// Create a new channel
-	ch := make(chan *Event, maxClientChannelBufferSize)
-
-	// Create a context to track this request
-	watchContext, cFunc := context.WithCancel(context.Background())
-
-	// Save the channel and context reference for later
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
-	ccm := channelContextMap{channel: ch, ctx: watchContext, cancel: cFunc}
-	c.watchedChannelsContext[key] = append(c.watchedChannelsContext[key], &ccm)
-
-	// Launch a go routine to listen for updates
-	go c.listenForKeyChange(watchContext, key, ch)
-
-	return ch
-}
-
-// CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
-// may be multiple listeners on the same key.  The previously created channel serves as a key
-func (c *ConsulClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
-	// First close the context
-	var ok bool
-	var watchedChannelsContexts []*channelContextMap
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
-	if watchedChannelsContexts, ok = c.watchedChannelsContext[key]; !ok {
-		logger.Errorw(ctx, "key-has-no-watched-context-or-channel", log.Fields{"key": key})
-		return
-	}
-	// Look for the channels
-	var pos = -1
-	for i, chCtxMap := range watchedChannelsContexts {
-		if chCtxMap.channel == ch {
-			logger.Debug(ctx, "channel-found")
-			chCtxMap.cancel()
-			//close the channel
-			close(ch)
-			pos = i
-			break
-		}
-	}
-	// Remove that entry if present
-	if pos >= 0 {
-		c.watchedChannelsContext[key] = append(c.watchedChannelsContext[key][:pos], c.watchedChannelsContext[key][pos+1:]...)
-	}
-	logger.Debugw(ctx, "watched-channel-exiting", log.Fields{"key": key, "channel": c.watchedChannelsContext[key]})
-}
-
-func (c *ConsulClient) isKVEqual(kv1 *consulapi.KVPair, kv2 *consulapi.KVPair) bool {
-	if (kv1 == nil) && (kv2 == nil) {
-		return true
-	} else if (kv1 == nil) || (kv2 == nil) {
-		return false
-	}
-	// Both the KV should be non-null here
-	if kv1.Key != kv2.Key ||
-		!bytes.Equal(kv1.Value, kv2.Value) ||
-		kv1.Session != kv2.Session ||
-		kv1.LockIndex != kv2.LockIndex ||
-		kv1.ModifyIndex != kv2.ModifyIndex {
-		return false
-	}
-	return true
-}
-
-func (c *ConsulClient) listenForKeyChange(ctx context.Context, key string, ch chan *Event) {
-	logger.Debugw(ctx, "start-watching-channel", log.Fields{"key": key, "channel": ch})
-
-	defer c.CloseWatch(ctx, key, ch)
-	kv := c.consul.KV()
-	var queryOptions consulapi.QueryOptions
-	queryOptions.WaitTime = defaultKVGetTimeout
-
-	// Get the existing value, if any
-	previousKVPair, meta, err := kv.Get(key, &queryOptions)
-	if err != nil {
-		logger.Debug(ctx, err)
-	}
-	lastIndex := meta.LastIndex
-
-	// Wait for change.  Push any change onto the channel and keep waiting for new update
-	//var waitOptions consulapi.QueryOptions
-	var pair *consulapi.KVPair
-	//watchContext, _ := context.WithCancel(context.Background())
-	waitOptions := queryOptions.WithContext(ctx)
-	for {
-		//waitOptions = consulapi.QueryOptions{WaitIndex: lastIndex}
-		waitOptions.WaitIndex = lastIndex
-		pair, meta, err = kv.Get(key, waitOptions)
-		select {
-		case <-ctx.Done():
-			logger.Debug(ctx, "done-event-received-exiting")
-			return
-		default:
-			if err != nil {
-				logger.Warnw(ctx, "error-from-watch", log.Fields{"error": err})
-				ch <- NewEvent(CONNECTIONDOWN, key, []byte(""), -1)
-			} else {
-				logger.Debugw(ctx, "index-state", log.Fields{"lastindex": lastIndex, "newindex": meta.LastIndex, "key": key})
-			}
-		}
-		if err != nil {
-			logger.Debug(ctx, err)
-			// On error, block for 10 milliseconds to prevent endless loop
-			time.Sleep(10 * time.Millisecond)
-		} else if meta.LastIndex <= lastIndex {
-			logger.Info(ctx, "no-index-change-or-negative")
-		} else {
-			logger.Debugw(ctx, "update-received", log.Fields{"pair": pair})
-			if pair == nil {
-				ch <- NewEvent(DELETE, key, []byte(""), -1)
-			} else if !c.isKVEqual(pair, previousKVPair) {
-				// Push the change onto the channel if the data has changed
-				// For now just assume it's a PUT change
-				logger.Debugw(ctx, "pair-details", log.Fields{"session": pair.Session, "key": pair.Key, "value": pair.Value})
-				ch <- NewEvent(PUT, pair.Key, pair.Value, -1)
-			}
-			previousKVPair = pair
-			lastIndex = meta.LastIndex
-		}
-	}
-}
-
-// Close closes the KV store client
-func (c *ConsulClient) Close(ctx context.Context) {
-	var writeOptions consulapi.WriteOptions
-	// Inform any goroutine it's time to say goodbye.
-	c.writeLock.Lock()
-	defer c.writeLock.Unlock()
-	if c.doneCh != nil {
-		close(*c.doneCh)
-	}
-
-	// Clear the sessionID
-	if _, err := c.consul.Session().Destroy(c.sessionID, &writeOptions); err != nil {
-		logger.Errorw(ctx, "error-closing-client", log.Fields{"error": err})
-	}
-}
-
-func (c *ConsulClient) AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error {
-	return nil
-}
-
-func (c *ConsulClient) ReleaseLock(lockName string) error {
-	return nil
-}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/etcdclient.go
index aa5adbf..98f0559 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/etcdclient.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/etcdclient.go
@@ -24,6 +24,7 @@
 
 	"github.com/opencord/voltha-lib-go/v4/pkg/log"
 	v3Client "go.etcd.io/etcd/clientv3"
+
 	v3Concurrency "go.etcd.io/etcd/clientv3/concurrency"
 	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
 )
@@ -39,15 +40,10 @@
 	lockToMutexLock     sync.Mutex
 }
 
-// NewEtcdClient returns a new client for the Etcd KV store
-func NewEtcdClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
-	logconfig := log.ConstructZapConfig(log.JSON, level, log.Fields{})
-
-	c, err := v3Client.New(v3Client.Config{
-		Endpoints:   []string{addr},
-		DialTimeout: timeout,
-		LogConfig:   &logconfig,
-	})
+// NewEtcdCustomClient returns a new client for the Etcd KV store, allowing
+// the caller to specify the etcd client configuration
+func NewEtcdCustomClient(ctx context.Context, config *v3Client.Config) (*EtcdClient, error) {
+	c, err := v3Client.New(*config)
 	if err != nil {
 		logger.Error(ctx, err)
 		return nil, err
@@ -61,6 +57,18 @@
 		lockToSessionMap: lockSessionMap}, nil
 }
 
+// NewEtcdClient returns a new client for the Etcd KV store
+func NewEtcdClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
+	logconfig := log.ConstructZapConfig(log.JSON, level, log.Fields{})
+
+	return NewEtcdCustomClient(
+		ctx,
+		&v3Client.Config{
+			Endpoints:   []string{addr},
+			DialTimeout: timeout,
+			LogConfig:   &logconfig})
+}
+
 // IsConnectionUp returns whether the connection to the Etcd KV store is up.  If a timeout occurs then
 // it is assumed the connection is down or unreachable.
 func (c *EtcdClient) IsConnectionUp(ctx context.Context) bool {
@@ -157,6 +165,17 @@
 	return nil
 }
 
+func (c *EtcdClient) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
+
+	// delete all keys under the given prefix
+	if _, err := c.ectdAPI.Delete(ctx, prefixKey, v3Client.WithPrefix()); err != nil {
+		logger.Errorw(ctx, "failed-to-delete-prefix-key", log.Fields{"key": prefixKey, "error": err})
+		return err
+	}
+	logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": prefixKey})
+	return nil
+}
+
 // Reserve is invoked to acquire a key and set it to a given value. Value can only be a string or []byte since
 // the etcd API accepts only a string.  Timeout defines how long the function will wait for a response.  TTL
 // defines how long that reservation is valid.  When TTL expires the key is unreserved by the KV store itself.
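
The split above leaves NewEtcdClient with its original signature while the new NewEtcdCustomClient accepts a complete clientv3 Config, which is the hook that lets a caller such as voltctl supply TLS credentials to etcd. A minimal sketch of building a TLS-enabled client through the new constructor; the certificate paths, endpoint, and log level are placeholder assumptions, not part of this change:

package kvexample

import (
	"context"
	"crypto/tls"
	"time"

	"github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v4/pkg/log"
	v3Client "go.etcd.io/etcd/clientv3"
)

// newTLSEtcdClient builds an etcd client with client-side TLS through the new
// NewEtcdCustomClient constructor. The certificate paths and endpoint below are
// illustrative; in voltctl they would come from the tls: section of the config.
func newTLSEtcdClient(ctx context.Context) (*kvstore.EtcdClient, error) {
	cert, err := tls.LoadX509KeyPair("voltha.crt", "voltha.key")
	if err != nil {
		return nil, err
	}

	logconfig := log.ConstructZapConfig(log.JSON, log.ErrorLevel, log.Fields{})

	return kvstore.NewEtcdCustomClient(ctx, &v3Client.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
		LogConfig:   &logconfig,
		TLS:         &tls.Config{Certificates: []tls.Certificate{cert}},
	})
}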
diff --git a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/kvutils.go b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/kvutils.go
index 64e7d30..70bd977 100644
--- a/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/kvutils.go
+++ b/vendor/github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore/kvutils.go
@@ -15,7 +15,10 @@
  */
 package kvstore
 
-import "fmt"
+import (
+	"bytes"
+	"fmt"
+)
 
 // ToString converts an interface value to a string.  The interface should either be of
 // a string type or []byte.  Otherwise, an error is returned.
@@ -42,3 +45,14 @@
 		return nil, fmt.Errorf("unexpected-type-%T", t)
 	}
 }
+
+// isEqual is a helper that checks whether the contents of two interface values are equal, focusing on the
+// []byte and string types
+func isEqual(val1 interface{}, val2 interface{}) bool {
+	b1, err := ToByte(val1)
+	b2, er := ToByte(val2)
+	if err == nil && er == nil {
+		return bytes.Equal(b1, b2)
+	}
+	return val1 == val2
+}
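
isEqual is unexported, so it is reachable only from inside the kvstore package, where values handed around as interface{} may arrive as either string or []byte. A table-test sketch of its intended semantics, inferred from ToByte's behaviour rather than taken from an existing test in this change:

package kvstore

import "testing"

// TestIsEqualSketch documents the comparison rules of the new isEqual helper:
// string and []byte operands compare by content, anything else falls back to ==.
func TestIsEqualSketch(t *testing.T) {
	if !isEqual([]byte("voltha"), "voltha") {
		t.Error("same content as []byte and string should compare equal")
	}
	if isEqual("tls", "no-tls") {
		t.Error("different strings should not compare equal")
	}
	if !isEqual(42, 42) {
		t.Error("non-byte, non-string values should fall back to ==")
	}
}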
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/common/common.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/common/common.pb.go
index 0956330..a370497 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/common/common.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v4/go/common/common.pb.go
@@ -99,6 +99,8 @@
 	OperStatus_ACTIVE OperStatus_Types = 4
 	// The device has failed and cannot fulfill its intended role
 	OperStatus_FAILED OperStatus_Types = 5
+	// The device is reconciling
+	OperStatus_RECONCILING OperStatus_Types = 6
 )
 
 var OperStatus_Types_name = map[int32]string{
@@ -108,15 +110,17 @@
 	3: "TESTING",
 	4: "ACTIVE",
 	5: "FAILED",
+	6: "RECONCILING",
 }
 
 var OperStatus_Types_value = map[string]int32{
-	"UNKNOWN":    0,
-	"DISCOVERED": 1,
-	"ACTIVATING": 2,
-	"TESTING":    3,
-	"ACTIVE":     4,
-	"FAILED":     5,
+	"UNKNOWN":     0,
+	"DISCOVERED":  1,
+	"ACTIVATING":  2,
+	"TESTING":     3,
+	"ACTIVE":      4,
+	"FAILED":      5,
+	"RECONCILING": 6,
 }
 
 func (x OperStatus_Types) String() string {
@@ -165,18 +169,21 @@
 	OperationResp_OPERATION_SUCCESS     OperationResp_OperationReturnCode = 0
 	OperationResp_OPERATION_FAILURE     OperationResp_OperationReturnCode = 1
 	OperationResp_OPERATION_UNSUPPORTED OperationResp_OperationReturnCode = 2
+	OperationResp_OPERATION_IN_PROGRESS OperationResp_OperationReturnCode = 3
 )
 
 var OperationResp_OperationReturnCode_name = map[int32]string{
 	0: "OPERATION_SUCCESS",
 	1: "OPERATION_FAILURE",
 	2: "OPERATION_UNSUPPORTED",
+	3: "OPERATION_IN_PROGRESS",
 }
 
 var OperationResp_OperationReturnCode_value = map[string]int32{
 	"OPERATION_SUCCESS":     0,
 	"OPERATION_FAILURE":     1,
 	"OPERATION_UNSUPPORTED": 2,
+	"OPERATION_IN_PROGRESS": 3,
 }
 
 func (x OperationResp_OperationReturnCode) String() string {
@@ -596,43 +603,44 @@
 func init() { proto.RegisterFile("voltha_protos/common.proto", fileDescriptor_c2e3fd231961e826) }
 
 var fileDescriptor_c2e3fd231961e826 = []byte{
-	// 598 bytes of a gzipped FileDescriptorProto
+	// 619 bytes of a gzipped FileDescriptorProto
 	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x5f, 0x4f, 0xdb, 0x3e,
-	0x14, 0x6d, 0xd2, 0x96, 0x1f, 0xbd, 0xa5, 0x21, 0x3f, 0x03, 0x53, 0x87, 0x26, 0xad, 0xca, 0x0b,
-	0x6c, 0x62, 0xad, 0xc4, 0x78, 0xdd, 0x43, 0x48, 0x3c, 0x66, 0x01, 0x4e, 0xe5, 0x24, 0x45, 0xf0,
-	0xb0, 0x2a, 0x34, 0xa6, 0x44, 0xa2, 0x71, 0x94, 0xb8, 0x48, 0x7c, 0xd2, 0x7d, 0x9d, 0xc9, 0x4e,
-	0xf9, 0x37, 0xf5, 0x25, 0xf1, 0xb9, 0xe7, 0xe4, 0x1e, 0xdf, 0xe3, 0x18, 0xf6, 0x1f, 0xc5, 0x83,
-	0xbc, 0x4f, 0xa6, 0x45, 0x29, 0xa4, 0xa8, 0x46, 0x33, 0xb1, 0x58, 0x88, 0x7c, 0xa8, 0x11, 0xda,
-	0xa8, 0x91, 0xb3, 0x0b, 0x26, 0xf1, 0x91, 0x05, 0x66, 0x96, 0xf6, 0x8d, 0x81, 0x71, 0xd8, 0x61,
-	0x66, 0x96, 0x3a, 0x07, 0xd0, 0x24, 0x7e, 0x85, 0x06, 0xd0, 0xce, 0x24, 0x5f, 0x54, 0x7d, 0x63,
-	0xd0, 0x3c, 0xec, 0x1e, 0xc3, 0x70, 0xd5, 0x82, 0xf8, 0xac, 0x26, 0x9c, 0x7b, 0x00, 0x37, 0x5d,
-	0x64, 0x79, 0x28, 0x13, 0xc9, 0x9d, 0x1b, 0x68, 0x47, 0x4f, 0x05, 0xaf, 0x50, 0x17, 0xfe, 0x8b,
-	0xe9, 0x39, 0x0d, 0xae, 0xa8, 0xdd, 0x40, 0x08, 0xac, 0x31, 0xc3, 0x63, 0x16, 0x4c, 0x48, 0x48,
-	0x02, 0x8a, 0x7d, 0xdb, 0x50, 0x02, 0x4c, 0xdd, 0xd3, 0x0b, 0xec, 0xdb, 0x26, 0xda, 0x82, 0x4d,
-	0x9f, 0x84, 0x35, 0x6a, 0xa2, 0x3d, 0xf8, 0xdf, 0x0f, 0xae, 0xe8, 0x45, 0xe0, 0xfa, 0x84, 0x9e,
-	0x4d, 0xc9, 0xa5, 0x7b, 0x86, 0xed, 0x96, 0x33, 0x07, 0x08, 0x0a, 0x5e, 0x2a, 0xa3, 0x65, 0xe5,
-	0x5c, 0xaf, 0x75, 0xb2, 0x00, 0x7c, 0x12, 0x7a, 0xc1, 0x04, 0x33, 0xed, 0x62, 0x01, 0xb8, 0x5e,
-	0x44, 0x26, 0x6e, 0x44, 0xe8, 0x99, 0x6d, 0x2a, 0x71, 0x84, 0x43, 0x0d, 0x9a, 0x08, 0x60, 0x43,
-	0x93, 0xd8, 0x6e, 0xa9, 0xf5, 0x4f, 0x97, 0x28, 0xff, 0xb6, 0x83, 0xa1, 0xe7, 0x89, 0x3c, 0xe7,
-	0x33, 0xb9, 0xf2, 0x3a, 0x59, 0xeb, 0xb5, 0x0d, 0xdd, 0x98, 0x32, 0xec, 0x7a, 0xbf, 0xd4, 0xc6,
-	0x6d, 0x03, 0xf5, 0xa0, 0xf3, 0x0a, 0x4d, 0xe7, 0x8f, 0x01, 0x3d, 0xb5, 0xe1, 0x44, 0x66, 0x22,
-	0x67, 0xbc, 0x2a, 0xd0, 0x0f, 0x68, 0xcd, 0x44, 0xca, 0x75, 0xcc, 0xd6, 0xf1, 0x97, 0xe7, 0x30,
-	0xdf, 0x89, 0xde, 0x22, 0xb9, 0x2c, 0x73, 0x4f, 0xa4, 0x9c, 0xe9, 0xcf, 0xd0, 0x01, 0x6c, 0x27,
-	0x69, 0x9a, 0x29, 0x2e, 0x79, 0x98, 0x66, 0xf9, 0x9d, 0xe8, 0x9b, 0xfa, 0xc0, 0xac, 0xd7, 0x32,
-	0xc9, 0xef, 0x84, 0xf3, 0x1b, 0x76, 0xd6, 0x74, 0x51, 0xb9, 0x06, 0x63, 0xcc, 0xdc, 0x88, 0x04,
-	0x74, 0x1a, 0xc6, 0x9e, 0x87, 0xc3, 0xd0, 0x6e, 0xbc, 0x2f, 0xab, 0x10, 0x62, 0xa6, 0xa6, 0xf9,
-	0x08, 0x7b, 0xaf, 0xe5, 0x98, 0x86, 0xf1, 0x78, 0x1c, 0xb0, 0x48, 0x1d, 0x97, 0x73, 0x04, 0x9d,
-	0x49, 0xf2, 0xb0, 0xe4, 0x2a, 0x14, 0xe7, 0x33, 0xb4, 0xd4, 0x1b, 0x75, 0xa0, 0x8d, 0x2f, 0xc7,
-	0xd1, 0xb5, 0xdd, 0x58, 0x1d, 0x67, 0xe4, 0x52, 0x0f, 0xdb, 0x86, 0x43, 0xc1, 0xd2, 0xea, 0xb0,
-	0xe0, 0xb3, 0xec, 0x2e, 0xe3, 0xe5, 0xbf, 0x3f, 0x1b, 0x3a, 0x82, 0xf6, 0xa3, 0x52, 0xe8, 0x71,
-	0xac, 0xe3, 0x0f, 0xcf, 0xc1, 0xbc, 0x98, 0x0c, 0xd5, 0x83, 0xd5, 0x22, 0x47, 0xc2, 0x56, 0x3d,
-	0x94, 0xa6, 0x2b, 0x64, 0x43, 0x33, 0xe4, 0x52, 0xb7, 0xeb, 0x31, 0xb5, 0x44, 0x03, 0xe8, 0xc6,
-	0x79, 0xb5, 0x2c, 0x0a, 0x51, 0x4a, 0x9e, 0xea, 0xae, 0x3d, 0xf6, 0xb6, 0x84, 0x76, 0xa1, 0x8d,
-	0xcb, 0x52, 0x94, 0xfd, 0xa6, 0xe6, 0x6a, 0x80, 0xf6, 0x61, 0xd3, 0xcf, 0x2a, 0x99, 0xe4, 0x33,
-	0xde, 0x6f, 0x69, 0xe2, 0x05, 0x7f, 0xfd, 0x04, 0x5b, 0x11, 0xaf, 0xe4, 0xa5, 0x48, 0xf9, 0x39,
-	0x7f, 0xaa, 0xd4, 0x8c, 0x49, 0x91, 0x4d, 0x25, 0xaf, 0xa4, 0xdd, 0x38, 0xc5, 0xb0, 0x23, 0xca,
-	0xf9, 0x50, 0x14, 0x3c, 0x9f, 0x89, 0x32, 0x1d, 0xd6, 0xf7, 0xee, 0x66, 0x38, 0xcf, 0xe4, 0xfd,
-	0xf2, 0x56, 0xcd, 0x33, 0x7a, 0xe6, 0x46, 0x35, 0xf7, 0x6d, 0x75, 0x27, 0x1f, 0x4f, 0x46, 0x73,
-	0xb1, 0xba, 0x99, 0xb7, 0x1b, 0xba, 0xf8, 0xfd, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x32,
-	0x70, 0x38, 0xb8, 0x03, 0x00, 0x00,
+	0x14, 0x6d, 0x9b, 0xb6, 0x3f, 0x7a, 0x4b, 0x43, 0x7e, 0x06, 0xa6, 0x0e, 0x4d, 0x5a, 0x95, 0x17,
+	0xd8, 0xc4, 0x5a, 0x89, 0xf1, 0xba, 0x87, 0x90, 0x78, 0x9d, 0x05, 0x38, 0x91, 0x93, 0x14, 0x8d,
+	0x97, 0x2a, 0x34, 0x06, 0x32, 0xd1, 0x38, 0x4a, 0x5c, 0x34, 0xbe, 0xf6, 0x3e, 0xc1, 0x64, 0xa7,
+	0xfc, 0x9b, 0x78, 0x49, 0x7c, 0xee, 0x39, 0xb9, 0x47, 0xe7, 0x3a, 0x17, 0xf6, 0xee, 0xc5, 0x9d,
+	0xbc, 0x4d, 0xe6, 0x45, 0x29, 0xa4, 0xa8, 0x26, 0x0b, 0xb1, 0x5c, 0x8a, 0x7c, 0xac, 0x11, 0xea,
+	0xd6, 0xc8, 0xde, 0x81, 0x16, 0xf1, 0x90, 0x09, 0xad, 0x2c, 0x1d, 0x36, 0x47, 0xcd, 0x83, 0x1e,
+	0x6b, 0x65, 0xa9, 0xbd, 0x0f, 0x06, 0xf1, 0x2a, 0x34, 0x82, 0x4e, 0x26, 0xf9, 0xb2, 0x1a, 0x36,
+	0x47, 0xc6, 0x41, 0xff, 0x08, 0xc6, 0xeb, 0x16, 0xc4, 0x63, 0x35, 0x61, 0xdf, 0x02, 0x38, 0xe9,
+	0x32, 0xcb, 0x43, 0x99, 0x48, 0x6e, 0x5f, 0x42, 0x27, 0x7a, 0x28, 0x78, 0x85, 0xfa, 0xf0, 0x5f,
+	0x4c, 0x4f, 0xa9, 0x7f, 0x41, 0xad, 0x06, 0x42, 0x60, 0x06, 0x0c, 0x07, 0xcc, 0x9f, 0x91, 0x90,
+	0xf8, 0x14, 0x7b, 0x56, 0x53, 0x09, 0x30, 0x75, 0x4e, 0xce, 0xb0, 0x67, 0xb5, 0xd0, 0x26, 0x6c,
+	0x78, 0x24, 0xac, 0x91, 0x81, 0x76, 0xe1, 0x7f, 0xcf, 0xbf, 0xa0, 0x67, 0xbe, 0xe3, 0x11, 0x3a,
+	0x9d, 0x93, 0x73, 0x67, 0x8a, 0xad, 0xb6, 0xfd, 0x1b, 0xc0, 0x2f, 0x78, 0xa9, 0x8c, 0x56, 0x95,
+	0xfd, 0xeb, 0x4d, 0x27, 0x13, 0xc0, 0x23, 0xa1, 0xeb, 0xcf, 0x30, 0xd3, 0x2e, 0x26, 0x80, 0xe3,
+	0x46, 0x64, 0xe6, 0x44, 0x84, 0x4e, 0xad, 0x96, 0x12, 0x47, 0x38, 0xd4, 0xc0, 0x40, 0x00, 0x5d,
+	0x4d, 0x62, 0xab, 0xad, 0xce, 0xdf, 0x1d, 0xa2, 0xfc, 0x3b, 0x68, 0x0b, 0xfa, 0x0c, 0xbb, 0x3e,
+	0x75, 0xc9, 0x99, 0x12, 0x76, 0x6d, 0x0c, 0x03, 0x57, 0xe4, 0x39, 0x5f, 0xc8, 0xb5, 0xf9, 0xf1,
+	0x9b, 0xe6, 0x5b, 0xd0, 0x8f, 0x29, 0xc3, 0x8e, 0xfb, 0x43, 0x25, 0xb1, 0x9a, 0x68, 0x00, 0xbd,
+	0x67, 0xd8, 0xb2, 0xff, 0x34, 0x61, 0xa0, 0x12, 0x24, 0x32, 0x13, 0x39, 0xe3, 0x55, 0x81, 0xbe,
+	0x41, 0x7b, 0x21, 0x52, 0xae, 0xe7, 0x6e, 0x1e, 0x7d, 0x7a, 0x9c, 0xee, 0x2b, 0xd1, 0x4b, 0x24,
+	0x57, 0x65, 0xee, 0x8a, 0x94, 0x33, 0xfd, 0x19, 0xda, 0x87, 0xad, 0x24, 0x4d, 0x33, 0xc5, 0x25,
+	0x77, 0xf3, 0x2c, 0xbf, 0x16, 0xc3, 0x96, 0xbe, 0x41, 0xf3, 0xb9, 0x4c, 0xf2, 0x6b, 0x61, 0x3f,
+	0xc0, 0xf6, 0x1b, 0x5d, 0xd4, 0xa0, 0xfd, 0x00, 0x33, 0x27, 0x22, 0x3e, 0x9d, 0x87, 0xb1, 0xeb,
+	0xe2, 0x30, 0xb4, 0x1a, 0xaf, 0xcb, 0x6a, 0x2a, 0x31, 0x53, 0x69, 0xde, 0xc3, 0xee, 0x73, 0x39,
+	0xa6, 0x61, 0x1c, 0x04, 0x3e, 0x8b, 0xf4, 0xfd, 0xbd, 0xa2, 0x08, 0x9d, 0x07, 0xcc, 0x9f, 0x32,
+	0xd5, 0xcc, 0xb0, 0x0f, 0xa1, 0x37, 0x4b, 0xee, 0x56, 0x5c, 0xcd, 0xcb, 0xfe, 0x08, 0x6d, 0xf5,
+	0x46, 0x3d, 0xe8, 0xe0, 0xf3, 0x20, 0xfa, 0x69, 0x35, 0xd6, 0x57, 0x1f, 0x39, 0xd4, 0xc5, 0x56,
+	0xd3, 0xa6, 0x60, 0x6a, 0x75, 0x58, 0xf0, 0x45, 0x76, 0x9d, 0xf1, 0xf2, 0xdf, 0x1f, 0x13, 0x1d,
+	0x42, 0xe7, 0x5e, 0x29, 0x74, 0x52, 0xf3, 0xe8, 0xdd, 0xe3, 0xcc, 0x9e, 0x4c, 0xc6, 0xea, 0xc1,
+	0x6a, 0x91, 0x2d, 0x61, 0xb3, 0xce, 0xab, 0xe9, 0x0a, 0x59, 0x60, 0x84, 0x5c, 0xea, 0x76, 0x03,
+	0xa6, 0x8e, 0x68, 0x04, 0xfd, 0x38, 0xaf, 0x56, 0x45, 0x21, 0x4a, 0xc9, 0x53, 0xdd, 0x75, 0xc0,
+	0x5e, 0x96, 0xd0, 0x0e, 0x74, 0x70, 0x59, 0x8a, 0x72, 0x68, 0x68, 0xae, 0x06, 0x68, 0x0f, 0x36,
+	0xbc, 0xac, 0x92, 0x49, 0xbe, 0xe0, 0xc3, 0xb6, 0x26, 0x9e, 0xf0, 0xe7, 0x0f, 0xb0, 0x19, 0xf1,
+	0x4a, 0x9e, 0x8b, 0x94, 0x9f, 0xf2, 0x87, 0x4a, 0x65, 0x4c, 0x8a, 0x6c, 0x2e, 0x79, 0x25, 0xad,
+	0xc6, 0x09, 0x86, 0x6d, 0x51, 0xde, 0x8c, 0x45, 0xc1, 0xf3, 0x85, 0x28, 0xd3, 0x71, 0xbd, 0xa3,
+	0x97, 0xe3, 0x9b, 0x4c, 0xde, 0xae, 0xae, 0x54, 0x9e, 0xc9, 0x23, 0x37, 0xa9, 0xb9, 0x2f, 0xeb,
+	0xfd, 0xbd, 0x3f, 0x9e, 0xdc, 0x88, 0xf5, 0x16, 0x5f, 0x75, 0x75, 0xf1, 0xeb, 0xdf, 0x00, 0x00,
+	0x00, 0xff, 0xff, 0x4d, 0x6f, 0x2b, 0x79, 0xe4, 0x03, 0x00, 0x00,
 }
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go
index 752eecb..a0957f1 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v4/go/inter_container/inter_container.pb.go
@@ -84,6 +84,7 @@
 const OperStatus_TESTING = OperStatus_Types(common.OperStatus_TESTING)
 const OperStatus_ACTIVE = OperStatus_Types(common.OperStatus_ACTIVE)
 const OperStatus_FAILED = OperStatus_Types(common.OperStatus_FAILED)
+const OperStatus_RECONCILING = OperStatus_Types(common.OperStatus_RECONCILING)
 
 // ConnectStatus_Types from public import voltha_protos/common.proto
 type ConnectStatus_Types = common.ConnectStatus_Types
@@ -104,6 +105,7 @@
 const OperationResp_OPERATION_SUCCESS = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_SUCCESS)
 const OperationResp_OPERATION_FAILURE = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_FAILURE)
 const OperationResp_OPERATION_UNSUPPORTED = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_UNSUPPORTED)
+const OperationResp_OPERATION_IN_PROGRESS = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_IN_PROGRESS)
 
 // ValueType_Type from public import voltha_protos/common.proto
 type ValueType_Type = common.ValueType_Type
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/events.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/voltha/events.pb.go
index f51a7b7..2dc1826 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/events.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v4/go/voltha/events.pb.go
@@ -116,11 +116,12 @@
 type EventSubCategory_Types int32
 
 const (
-	EventSubCategory_PON EventSubCategory_Types = 0
-	EventSubCategory_OLT EventSubCategory_Types = 1
-	EventSubCategory_ONT EventSubCategory_Types = 2
-	EventSubCategory_ONU EventSubCategory_Types = 3
-	EventSubCategory_NNI EventSubCategory_Types = 4
+	EventSubCategory_PON  EventSubCategory_Types = 0
+	EventSubCategory_OLT  EventSubCategory_Types = 1
+	EventSubCategory_ONT  EventSubCategory_Types = 2
+	EventSubCategory_ONU  EventSubCategory_Types = 3
+	EventSubCategory_NNI  EventSubCategory_Types = 4
+	EventSubCategory_NONE EventSubCategory_Types = 5
 )
 
 var EventSubCategory_Types_name = map[int32]string{
@@ -129,14 +130,16 @@
 	2: "ONT",
 	3: "ONU",
 	4: "NNI",
+	5: "NONE",
 }
 
 var EventSubCategory_Types_value = map[string]int32{
-	"PON": 0,
-	"OLT": 1,
-	"ONT": 2,
-	"ONU": 3,
-	"NNI": 4,
+	"PON":  0,
+	"OLT":  1,
+	"ONT":  2,
+	"ONU":  3,
+	"NNI":  4,
+	"NONE": 5,
 }
 
 func (x EventSubCategory_Types) String() string {
@@ -1149,85 +1152,86 @@
 func init() { proto.RegisterFile("voltha_protos/events.proto", fileDescriptor_e63e6c07044fd2c4) }
 
 var fileDescriptor_e63e6c07044fd2c4 = []byte{
-	// 1274 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdf, 0x6e, 0xdb, 0xb6,
-	0x17, 0xb6, 0xe4, 0xff, 0x47, 0x4e, 0xa2, 0xb0, 0xbf, 0xdf, 0xe6, 0xba, 0x5b, 0x9b, 0x7a, 0xd8,
-	0x10, 0xb4, 0xa8, 0x8c, 0x69, 0x05, 0x1a, 0xa4, 0x18, 0xb6, 0xd6, 0xf5, 0x1a, 0xa1, 0x8b, 0xed,
-	0x29, 0x4e, 0x80, 0xee, 0xc6, 0x60, 0x24, 0xc6, 0x11, 0x62, 0x5b, 0x02, 0x49, 0x1b, 0xcd, 0x03,
-	0xec, 0x7a, 0x0f, 0xb2, 0xe7, 0xd8, 0xdd, 0xde, 0x60, 0x18, 0xf6, 0x12, 0x7b, 0x80, 0x81, 0x7f,
-	0x64, 0x4b, 0x6e, 0x8a, 0x5e, 0x04, 0xbb, 0x12, 0x79, 0x78, 0x3e, 0x9e, 0xef, 0x7c, 0xe2, 0x39,
-	0x24, 0xb4, 0x96, 0xf1, 0x94, 0x5f, 0xe2, 0x71, 0x42, 0x63, 0x1e, 0xb3, 0x0e, 0x59, 0x92, 0x39,
-	0x67, 0x8e, 0x9c, 0xa1, 0x8a, 0x5a, 0x6b, 0x35, 0xf3, 0x3e, 0x33, 0xc2, 0xb1, 0xf2, 0x68, 0x7d,
-	0x36, 0x89, 0xe3, 0xc9, 0x94, 0x74, 0x70, 0x12, 0x75, 0xf0, 0x7c, 0x1e, 0x73, 0xcc, 0xa3, 0x78,
-	0xae, 0xf1, 0xad, 0x07, 0x7a, 0x55, 0xce, 0xce, 0x17, 0x17, 0x1d, 0x1e, 0xcd, 0x08, 0xe3, 0x78,
-	0x96, 0x68, 0x87, 0x8d, 0xe0, 0x41, 0x3c, 0x9b, 0xc5, 0x73, 0xb5, 0xd6, 0x7e, 0x0e, 0x3b, 0xdd,
-	0x78, 0x7e, 0x11, 0x4d, 0x7a, 0x82, 0xd2, 0xe8, 0x3a, 0x21, 0xed, 0x7d, 0x28, 0x8b, 0x2f, 0x43,
-	0x55, 0x28, 0xe2, 0x30, 0xb4, 0x0b, 0x08, 0xa0, 0x42, 0xc9, 0x2c, 0x5e, 0x12, 0xdb, 0x10, 0xe3,
-	0x45, 0x12, 0x62, 0x4e, 0x6c, 0xb3, 0x7d, 0x09, 0x56, 0x06, 0x8c, 0xbe, 0x86, 0x12, 0xbf, 0x4e,
-	0x48, 0xd3, 0xd8, 0x33, 0xf6, 0xb7, 0xdd, 0xcf, 0x1d, 0x15, 0xd6, 0xd9, 0xd8, 0xdf, 0x91, 0x9b,
-	0xfb, 0xd2, 0x15, 0x21, 0x28, 0x5d, 0x62, 0x76, 0xd9, 0x34, 0xf7, 0x8c, 0xfd, 0xba, 0x2f, 0xc7,
-	0xc2, 0x16, 0x62, 0x8e, 0x9b, 0x45, 0x65, 0x13, 0xe3, 0xf6, 0x23, 0x68, 0xbc, 0x49, 0xa2, 0x35,
-	0xc7, 0x56, 0xca, 0xb1, 0x0e, 0x65, 0x36, 0x8d, 0x02, 0x62, 0x17, 0x50, 0x05, 0x4c, 0xce, 0x6c,
-	0xa3, 0xfd, 0x9b, 0x09, 0xdb, 0xc7, 0x84, 0xd3, 0x28, 0x38, 0x26, 0x1c, 0xbf, 0xc2, 0x1c, 0xa3,
-	0xff, 0x41, 0x99, 0x47, 0x7c, 0xaa, 0xa8, 0xd5, 0x7d, 0x35, 0x41, 0xdb, 0x02, 0x20, 0x43, 0x1b,
-	0xbe, 0xc9, 0x19, 0x7a, 0x04, 0xbb, 0xd3, 0x78, 0x12, 0x05, 0x78, 0x3a, 0x0e, 0xc9, 0x32, 0x0a,
-	0xc8, 0x38, 0x0a, 0x35, 0x8b, 0x1d, 0xbd, 0xf0, 0x4a, 0xda, 0xbd, 0x10, 0xdd, 0x83, 0x3a, 0x23,
-	0x34, 0xc2, 0xd3, 0xf1, 0x3c, 0x6e, 0x96, 0xa4, 0x4f, 0x4d, 0x19, 0xfa, 0xb1, 0x58, 0x5c, 0x6f,
-	0x50, 0x56, 0x8b, 0x61, 0x8a, 0xfc, 0x16, 0xaa, 0x41, 0x3c, 0xe7, 0xe4, 0x1d, 0x6f, 0x56, 0xf6,
-	0x8a, 0xfb, 0x96, 0xfb, 0x45, 0x2a, 0x54, 0x9e, 0xb4, 0xd0, 0x4d, 0x78, 0xf5, 0xe6, 0x9c, 0x5e,
-	0xfb, 0x29, 0x46, 0xa8, 0xb3, 0x58, 0x44, 0x61, 0xb3, 0xaa, 0xd4, 0x11, 0xe3, 0xd6, 0x21, 0x34,
-	0xb2, 0xce, 0xc8, 0x86, 0xe2, 0x15, 0xb9, 0xd6, 0xc9, 0x8a, 0xa1, 0x10, 0x60, 0x89, 0xa7, 0x0b,
-	0xa2, 0x85, 0x56, 0x93, 0x43, 0xf3, 0xc0, 0x68, 0xff, 0x6a, 0x80, 0xad, 0x02, 0x9f, 0x09, 0xdb,
-	0x10, 0x47, 0x94, 0xa1, 0xef, 0xa0, 0x3a, 0x93, 0x36, 0xd6, 0x34, 0x24, 0xc7, 0x2f, 0xf3, 0x1c,
-	0xd7, 0xae, 0xda, 0xc0, 0x34, 0x4b, 0x8d, 0x12, 0x8c, 0xb2, 0x0b, 0x1f, 0x63, 0x64, 0x66, 0x19,
-	0xfd, 0x6e, 0xc0, 0xae, 0x02, 0x7b, 0xf3, 0x8b, 0x98, 0xce, 0xe4, 0x61, 0x47, 0x2e, 0xd4, 0x44,
-	0x45, 0xc8, 0x93, 0x21, 0xb6, 0xb1, 0xdc, 0x4f, 0x6e, 0xd6, 0xcd, 0x5f, 0xf9, 0xa1, 0xef, 0xd7,
-	0x69, 0x98, 0x32, 0x8d, 0xaf, 0xf2, 0x90, 0xcc, 0xfe, 0xff, 0x41, 0x1e, 0x7f, 0x19, 0x50, 0x4b,
-	0x0f, 0x2d, 0x72, 0x72, 0xb5, 0xd1, 0x4a, 0x79, 0x64, 0x0f, 0x75, 0xae, 0x30, 0xd6, 0x67, 0xd3,
-	0x94, 0x67, 0xf3, 0x10, 0x6a, 0x09, 0x25, 0x17, 0xd1, 0x3b, 0xc2, 0x9a, 0x45, 0x99, 0xcb, 0xfd,
-	0xcd, 0x3d, 0x9c, 0xa1, 0x76, 0x50, 0x39, 0xac, 0xfc, 0x5b, 0xa7, 0xb0, 0x95, 0x5b, 0xba, 0x21,
-	0x0b, 0x27, 0x9b, 0x85, 0xe5, 0x36, 0x3f, 0xf4, 0xbb, 0xb3, 0xf9, 0xfd, 0x62, 0x40, 0x3d, 0x8d,
-	0xed, 0xde, 0x22, 0x41, 0x55, 0x7c, 0x07, 0x00, 0xb2, 0x90, 0xc7, 0xba, 0xf6, 0x45, 0x8a, 0x77,
-	0x3f, 0xf8, 0xbb, 0xfc, 0xba, 0x74, 0x16, 0xff, 0xbb, 0xfd, 0x8f, 0x01, 0x96, 0xaa, 0x4b, 0x25,
-	0xf5, 0x03, 0xb0, 0x28, 0x61, 0xf1, 0x82, 0xaa, 0xfa, 0x53, 0x59, 0x42, 0x6a, 0xf2, 0x42, 0x51,
-	0xe7, 0xba, 0x3c, 0x65, 0x1f, 0x1e, 0xcf, 0xf1, 0x2c, 0x2d, 0x8c, 0x9d, 0x70, 0xbd, 0x51, 0x1f,
-	0xcf, 0x08, 0xda, 0x03, 0x2b, 0x24, 0x2c, 0xa0, 0x51, 0x22, 0xc2, 0xea, 0x6e, 0x90, 0x35, 0xa1,
-	0xc3, 0x75, 0x3d, 0x97, 0x24, 0xeb, 0xbd, 0x94, 0x75, 0x86, 0xd4, 0xcd, 0xc5, 0x7c, 0xab, 0xc2,
-	0xfd, 0xd3, 0x84, 0x9a, 0x3f, 0xec, 0xaa, 0x9c, 0x6d, 0x28, 0xd2, 0x24, 0x48, 0x81, 0x34, 0x09,
-	0xd0, 0x43, 0x68, 0xc4, 0x09, 0xa1, 0x52, 0x2d, 0x21, 0x83, 0xc2, 0x5b, 0x2b, 0x9b, 0x17, 0xa2,
-	0x26, 0x54, 0x19, 0xa1, 0x82, 0xa3, 0xce, 0x2b, 0x9d, 0xa2, 0xbb, 0x50, 0x63, 0x1c, 0x07, 0x57,
-	0x02, 0x58, 0xd2, 0x4b, 0x62, 0xee, 0x85, 0x9b, 0xea, 0x96, 0xdf, 0x53, 0x77, 0x43, 0xb1, 0xca,
-	0xfb, 0x8a, 0x3d, 0x5b, 0x2b, 0x56, 0x95, 0x8a, 0xad, 0xae, 0x8a, 0x34, 0x9f, 0x0f, 0xf4, 0xbe,
-	0x27, 0x50, 0x61, 0x1c, 0xf3, 0x05, 0x6b, 0xd6, 0xe4, 0x31, 0xfd, 0xbf, 0xa3, 0xef, 0xb2, 0x41,
-	0x9a, 0x95, 0x4f, 0x58, 0xe2, 0x6b, 0xa7, 0x5b, 0xa9, 0xbb, 0x84, 0x2d, 0xc9, 0xa4, 0x8b, 0x39,
-	0x99, 0xc4, 0xf4, 0xba, 0x4d, 0xd2, 0x1b, 0x67, 0x17, 0xb6, 0xba, 0x83, 0xe3, 0xe3, 0xd3, 0xbe,
-	0xd7, 0x7d, 0x31, 0xf2, 0x06, 0x7d, 0xbb, 0x80, 0x76, 0xc0, 0xea, 0xf5, 0xcf, 0x3c, 0x7f, 0xd0,
-	0x3f, 0xee, 0xf5, 0x47, 0xb6, 0x81, 0xb6, 0xa0, 0xde, 0xfb, 0xe9, 0xd4, 0x1b, 0xca, 0xa9, 0x89,
-	0x2c, 0xa8, 0x9e, 0xf4, 0xfc, 0x33, 0xaf, 0xdb, 0xb3, 0x8b, 0x68, 0x1b, 0x60, 0xe8, 0x0f, 0xba,
-	0xbd, 0x93, 0x13, 0xaf, 0xff, 0xda, 0x2e, 0xa1, 0x06, 0xd4, 0x4e, 0x7a, 0xdd, 0x53, 0xdf, 0x1b,
-	0xbd, 0xb5, 0xcb, 0xed, 0x23, 0xb0, 0x65, 0xdc, 0x93, 0xc5, 0xf9, 0x2a, 0xf4, 0xd3, 0xcc, 0x85,
-	0x3c, 0x94, 0x01, 0xab, 0x50, 0x1c, 0xfc, 0x28, 0x02, 0x89, 0x81, 0x0c, 0x21, 0x07, 0xa7, 0x76,
-	0x51, 0x0c, 0xfa, 0x7d, 0xcf, 0x2e, 0xb5, 0x2f, 0xa0, 0xbe, 0xbe, 0x2f, 0xdf, 0xa6, 0x5b, 0xd8,
-	0xd0, 0xe8, 0x0e, 0xfa, 0x3f, 0x78, 0xaf, 0xc7, 0xbd, 0x33, 0x41, 0xae, 0x20, 0xb8, 0xbe, 0x19,
-	0x7a, 0x7a, 0x6a, 0x08, 0x7a, 0xab, 0xa9, 0x6b, 0x9b, 0x02, 0xf0, 0xaa, 0x27, 0xa8, 0x6b, 0x8f,
-	0xa2, 0x00, 0xf8, 0xc3, 0xae, 0x9e, 0x96, 0xda, 0x7f, 0x9b, 0x60, 0xc9, 0x40, 0x47, 0x04, 0x87,
-	0x84, 0x8a, 0xc2, 0x5e, 0x55, 0x9d, 0x19, 0x85, 0xe8, 0x19, 0xd4, 0x02, 0x9d, 0x89, 0x94, 0x79,
-	0xdb, 0xbd, 0x97, 0xfe, 0xee, 0x9c, 0xc2, 0xba, 0x3b, 0xac, 0x9c, 0xd1, 0x0b, 0x68, 0xb0, 0xc5,
-	0xf9, 0x78, 0x05, 0x2e, 0x4a, 0xf0, 0xfd, 0x1c, 0x38, 0x23, 0x93, 0xc6, 0x5b, 0x6c, 0x6d, 0x42,
-	0x8f, 0x75, 0x53, 0x2a, 0x49, 0xe8, 0xa7, 0x39, 0xe8, 0x7b, 0x1d, 0xe9, 0x21, 0x34, 0xc4, 0x77,
-	0xbc, 0x24, 0x94, 0x89, 0x93, 0xab, 0x8e, 0xb6, 0x25, 0x6c, 0x67, 0xca, 0x84, 0x9e, 0x41, 0x9d,
-	0xe2, 0x88, 0x91, 0x70, 0xcc, 0x99, 0x3c, 0xd9, 0x96, 0xdb, 0x72, 0xd4, 0xf3, 0xcb, 0x49, 0x9f,
-	0x5f, 0xce, 0x28, 0x7d, 0x7e, 0xf9, 0x35, 0xe5, 0x3c, 0x62, 0xe8, 0xb9, 0xa8, 0x9a, 0x24, 0xa6,
-	0x5c, 0x41, 0xab, 0x1f, 0x85, 0x42, 0xea, 0x3e, 0x62, 0xed, 0x3f, 0x4c, 0x28, 0xab, 0x32, 0x7f,
-	0x0c, 0x95, 0x4b, 0xa9, 0xb2, 0xbe, 0x02, 0xef, 0xe4, 0x32, 0x52, 0x3f, 0xc0, 0xd7, 0x2e, 0xe8,
-	0x00, 0x1a, 0x81, 0x7c, 0x7a, 0xa9, 0x36, 0xa7, 0x5b, 0xfb, 0x9d, 0x1b, 0x9e, 0x65, 0x47, 0x05,
-	0xdf, 0x0a, 0x32, 0x0f, 0xb9, 0x0e, 0xd4, 0xaf, 0x92, 0x48, 0xc3, 0x8a, 0x12, 0x66, 0x6f, 0x36,
-	0xf4, 0xa3, 0x82, 0x5f, 0xbb, 0x4a, 0x6f, 0x37, 0x17, 0x60, 0x05, 0x70, 0xa5, 0xda, 0x96, 0xbb,
-	0xbb, 0x89, 0x70, 0x8f, 0x0a, 0x7e, 0xfd, 0x6a, 0x75, 0x61, 0x1c, 0x40, 0x23, 0xdb, 0x85, 0xa5,
-	0xdc, 0x19, 0x7a, 0x99, 0xe6, 0x29, 0xe8, 0x65, 0xfa, 0xb2, 0xa0, 0x47, 0x93, 0x40, 0xc3, 0x2a,
-	0x79, 0x7a, 0x69, 0x07, 0x11, 0xf4, 0x68, 0x12, 0xc8, 0xf1, 0xcb, 0x06, 0x80, 0xea, 0xf4, 0xe2,
-	0x5f, 0xbe, 0xec, 0xc1, 0x9d, 0x98, 0x4e, 0x9c, 0x38, 0x21, 0xf3, 0x20, 0xa6, 0xa1, 0x46, 0xfe,
-	0xec, 0x4c, 0x22, 0x7e, 0xb9, 0x38, 0x17, 0x2d, 0xa5, 0x93, 0xae, 0x75, 0xd4, 0xda, 0x13, 0xfd,
-	0x72, 0x5e, 0x3e, 0xed, 0x4c, 0x62, 0x6d, 0x3b, 0xaf, 0x48, 0xe3, 0x37, 0xff, 0x06, 0x00, 0x00,
-	0xff, 0xff, 0x7b, 0x2c, 0xa9, 0x18, 0xdb, 0x0b, 0x00, 0x00,
+	// 1282 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xdb, 0x6e, 0xdb, 0x46,
+	0x13, 0x16, 0xa9, 0xf3, 0x50, 0xb6, 0xe9, 0xcd, 0xff, 0xb7, 0x8a, 0xd2, 0x26, 0x8e, 0x8a, 0x16,
+	0x46, 0x82, 0x50, 0x28, 0x5b, 0x20, 0x86, 0x83, 0x1e, 0x12, 0x85, 0x8d, 0x89, 0xd4, 0x94, 0x4a,
+	0xcb, 0x06, 0xd2, 0x1b, 0x61, 0x4d, 0xae, 0x65, 0xc2, 0x92, 0x48, 0x70, 0x57, 0x42, 0xfc, 0x00,
+	0xbd, 0xee, 0x83, 0xf4, 0x39, 0x7a, 0xd7, 0x37, 0x28, 0x8a, 0xbe, 0x44, 0x1f, 0xa0, 0xd8, 0x03,
+	0x25, 0x52, 0x71, 0x90, 0x0b, 0xa3, 0x57, 0xdc, 0x9d, 0x9d, 0x6f, 0xe7, 0x9b, 0x8f, 0x3b, 0xb3,
+	0x0b, 0x9d, 0x65, 0x3c, 0x65, 0x97, 0x78, 0x9c, 0xa4, 0x31, 0x8b, 0x69, 0x8f, 0x2c, 0xc9, 0x9c,
+	0x51, 0x4b, 0xcc, 0x50, 0x4d, 0xae, 0x75, 0xda, 0x45, 0x9f, 0x19, 0x61, 0x58, 0x7a, 0x74, 0x3e,
+	0x99, 0xc4, 0xf1, 0x64, 0x4a, 0x7a, 0x38, 0x89, 0x7a, 0x78, 0x3e, 0x8f, 0x19, 0x66, 0x51, 0x3c,
+	0x57, 0xf8, 0xce, 0x03, 0xb5, 0x2a, 0x66, 0xe7, 0x8b, 0x8b, 0x1e, 0x8b, 0x66, 0x84, 0x32, 0x3c,
+	0x4b, 0x94, 0xc3, 0x46, 0xf0, 0x20, 0x9e, 0xcd, 0xe2, 0xb9, 0x5c, 0xeb, 0x3e, 0x83, 0x9d, 0x7e,
+	0x3c, 0xbf, 0x88, 0x26, 0x0e, 0xa7, 0x34, 0xba, 0x4e, 0x48, 0x77, 0x1f, 0xaa, 0xfc, 0x4b, 0x51,
+	0x1d, 0xca, 0x38, 0x0c, 0xcd, 0x12, 0x02, 0xa8, 0xa5, 0x64, 0x16, 0x2f, 0x89, 0xa9, 0xf1, 0xf1,
+	0x22, 0x09, 0x31, 0x23, 0xa6, 0xde, 0xbd, 0x04, 0x23, 0x07, 0x46, 0x5f, 0x42, 0x85, 0x5d, 0x27,
+	0xa4, 0xad, 0xed, 0x69, 0xfb, 0xdb, 0xf6, 0xa7, 0x96, 0x0c, 0x6b, 0x6d, 0xec, 0x6f, 0x89, 0xcd,
+	0x7d, 0xe1, 0x8a, 0x10, 0x54, 0x2e, 0x31, 0xbd, 0x6c, 0xeb, 0x7b, 0xda, 0x7e, 0xd3, 0x17, 0x63,
+	0x6e, 0x0b, 0x31, 0xc3, 0xed, 0xb2, 0xb4, 0xf1, 0x71, 0xf7, 0x11, 0xb4, 0x5e, 0x27, 0xd1, 0x9a,
+	0x63, 0x27, 0xe3, 0xd8, 0x84, 0x2a, 0x9d, 0x46, 0x01, 0x31, 0x4b, 0xa8, 0x06, 0x3a, 0xa3, 0xa6,
+	0xd6, 0xfd, 0x4d, 0x87, 0xed, 0x63, 0xc2, 0xd2, 0x28, 0x38, 0x26, 0x0c, 0xbf, 0xc4, 0x0c, 0xa3,
+	0xff, 0x41, 0x95, 0x45, 0x6c, 0x2a, 0xa9, 0x35, 0x7d, 0x39, 0x41, 0xdb, 0x1c, 0x20, 0x42, 0x6b,
+	0xbe, 0xce, 0x28, 0x7a, 0x04, 0xbb, 0xd3, 0x78, 0x12, 0x05, 0x78, 0x3a, 0x0e, 0xc9, 0x32, 0x0a,
+	0xc8, 0x38, 0x0a, 0x15, 0x8b, 0x1d, 0xb5, 0xf0, 0x52, 0xd8, 0xdd, 0x10, 0xdd, 0x83, 0x26, 0x25,
+	0x69, 0x84, 0xa7, 0xe3, 0x79, 0xdc, 0xae, 0x08, 0x9f, 0x86, 0x34, 0x78, 0x31, 0x5f, 0x5c, 0x6f,
+	0x50, 0x95, 0x8b, 0x61, 0x86, 0xfc, 0x06, 0xea, 0x41, 0x3c, 0x67, 0xe4, 0x2d, 0x6b, 0xd7, 0xf6,
+	0xca, 0xfb, 0x86, 0xfd, 0x59, 0x26, 0x54, 0x91, 0x34, 0xd7, 0x8d, 0x7b, 0x39, 0x73, 0x96, 0x5e,
+	0xfb, 0x19, 0x86, 0xab, 0xb3, 0x58, 0x44, 0x61, 0xbb, 0x2e, 0xd5, 0xe1, 0xe3, 0xce, 0x21, 0xb4,
+	0xf2, 0xce, 0xc8, 0x84, 0xf2, 0x15, 0xb9, 0x56, 0xc9, 0xf2, 0x21, 0x17, 0x60, 0x89, 0xa7, 0x0b,
+	0xa2, 0x84, 0x96, 0x93, 0x43, 0xfd, 0x40, 0xeb, 0xfe, 0xaa, 0x81, 0x29, 0x03, 0x9f, 0x71, 0xdb,
+	0x10, 0x47, 0x29, 0x45, 0xdf, 0x41, 0x7d, 0x26, 0x6c, 0xb4, 0xad, 0x09, 0x8e, 0x9f, 0x17, 0x39,
+	0xae, 0x5d, 0x95, 0x81, 0x2a, 0x96, 0x0a, 0xc5, 0x19, 0xe5, 0x17, 0x3e, 0xc4, 0x48, 0xcf, 0x33,
+	0xfa, 0x5d, 0x83, 0x5d, 0x09, 0x76, 0xe7, 0x17, 0x71, 0x3a, 0x13, 0x87, 0x1d, 0xd9, 0xd0, 0xe0,
+	0x15, 0x21, 0x4e, 0x06, 0xdf, 0xc6, 0xb0, 0x3f, 0xba, 0x59, 0x37, 0x7f, 0xe5, 0x87, 0xbe, 0x5f,
+	0xa7, 0xa1, 0x8b, 0x34, 0xbe, 0x28, 0x42, 0x72, 0xfb, 0xff, 0x07, 0x79, 0xfc, 0xa5, 0x41, 0x23,
+	0x3b, 0xb4, 0xc8, 0x2a, 0xd4, 0x46, 0x27, 0xe3, 0x91, 0x3f, 0xd4, 0x85, 0xc2, 0x58, 0x9f, 0x4d,
+	0x5d, 0x9c, 0xcd, 0x43, 0x68, 0x24, 0x29, 0xb9, 0x88, 0xde, 0x12, 0xda, 0x2e, 0x8b, 0x5c, 0xee,
+	0x6f, 0xee, 0x61, 0x0d, 0x95, 0x83, 0xcc, 0x61, 0xe5, 0xdf, 0x39, 0x85, 0xad, 0xc2, 0xd2, 0x0d,
+	0x59, 0x58, 0xf9, 0x2c, 0x0c, 0xbb, 0xfd, 0xbe, 0xdf, 0x9d, 0xcf, 0xef, 0x17, 0x0d, 0x9a, 0x59,
+	0x6c, 0xfb, 0x16, 0x09, 0xca, 0xe2, 0x3b, 0x00, 0x10, 0x85, 0x3c, 0x56, 0xb5, 0xcf, 0x53, 0xbc,
+	0xfb, 0xde, 0xdf, 0xe5, 0x37, 0x85, 0x33, 0xff, 0xdf, 0xdd, 0x7f, 0x34, 0x30, 0x64, 0x5d, 0x4a,
+	0xa9, 0x1f, 0x80, 0x91, 0x12, 0x1a, 0x2f, 0x52, 0x59, 0x7f, 0x32, 0x4b, 0xc8, 0x4c, 0x6e, 0xc8,
+	0xeb, 0x5c, 0x95, 0xa7, 0xe8, 0xc3, 0xe3, 0x39, 0x9e, 0x65, 0x85, 0xb1, 0x13, 0xae, 0x37, 0xf2,
+	0xf0, 0x8c, 0xa0, 0x3d, 0x30, 0x42, 0x42, 0x83, 0x34, 0x4a, 0x78, 0x58, 0xd5, 0x0d, 0xf2, 0x26,
+	0x74, 0xb8, 0xae, 0xe7, 0x8a, 0x60, 0xbd, 0x97, 0xb1, 0xce, 0x91, 0xba, 0xb9, 0x98, 0x6f, 0x55,
+	0xb8, 0x7f, 0xea, 0xd0, 0xf0, 0x87, 0x7d, 0x99, 0xb3, 0x09, 0xe5, 0x34, 0x09, 0x32, 0x60, 0x9a,
+	0x04, 0xe8, 0x21, 0xb4, 0xe2, 0x84, 0xa4, 0x42, 0x2d, 0x2e, 0x83, 0xc4, 0x1b, 0x2b, 0x9b, 0x1b,
+	0xa2, 0x36, 0xd4, 0x29, 0x49, 0x39, 0x47, 0x95, 0x57, 0x36, 0x45, 0x77, 0xa1, 0x41, 0x19, 0x0e,
+	0xae, 0x38, 0xb0, 0xa2, 0x96, 0xf8, 0xdc, 0x0d, 0x37, 0xd5, 0xad, 0xbe, 0xa3, 0xee, 0x86, 0x62,
+	0xb5, 0x77, 0x15, 0x7b, 0xba, 0x56, 0xac, 0x2e, 0x14, 0x5b, 0x5d, 0x15, 0x59, 0x3e, 0xef, 0xe9,
+	0x7d, 0x4f, 0xa0, 0x46, 0x19, 0x66, 0x0b, 0xda, 0x6e, 0x88, 0x63, 0xfa, 0x7f, 0x4b, 0xdd, 0x65,
+	0x83, 0x2c, 0x2b, 0x9f, 0xd0, 0xc4, 0x57, 0x4e, 0xb7, 0x52, 0x77, 0x09, 0x5b, 0x82, 0x49, 0x1f,
+	0x33, 0x32, 0x89, 0xd3, 0xeb, 0x2e, 0xc9, 0x6e, 0x9c, 0x5d, 0xd8, 0xea, 0x0f, 0x8e, 0x8f, 0x4f,
+	0x3d, 0xb7, 0xff, 0x7c, 0xe4, 0x0e, 0x3c, 0xb3, 0x84, 0x76, 0xc0, 0x70, 0xbc, 0x33, 0xd7, 0x1f,
+	0x78, 0xc7, 0x8e, 0x37, 0x32, 0x35, 0xb4, 0x05, 0x4d, 0xe7, 0xa7, 0x53, 0x77, 0x28, 0xa6, 0x3a,
+	0x32, 0xa0, 0x7e, 0xe2, 0xf8, 0x67, 0x6e, 0xdf, 0x31, 0xcb, 0x68, 0x1b, 0x60, 0xe8, 0x0f, 0xfa,
+	0xce, 0xc9, 0x89, 0xeb, 0xbd, 0x32, 0x2b, 0xa8, 0x05, 0x8d, 0x13, 0xa7, 0x7f, 0xea, 0xbb, 0xa3,
+	0x37, 0x66, 0xb5, 0xeb, 0x83, 0x29, 0xe2, 0x9e, 0x2c, 0xce, 0x57, 0xa1, 0xbf, 0xcd, 0x5d, 0xc8,
+	0x43, 0x11, 0xb0, 0x0e, 0xe5, 0xc1, 0x8f, 0x3c, 0x10, 0x1f, 0x88, 0x10, 0x62, 0x70, 0x6a, 0x96,
+	0xf9, 0xc0, 0xf3, 0x5c, 0xb3, 0x82, 0x1a, 0x50, 0xf1, 0x06, 0x9e, 0x63, 0x56, 0xbb, 0x17, 0xd0,
+	0x5c, 0xdf, 0x9c, 0x6f, 0xb2, 0xcd, 0x4c, 0x68, 0xf5, 0x07, 0xde, 0x0f, 0xee, 0xab, 0xb1, 0x73,
+	0xc6, 0x69, 0x96, 0x38, 0xeb, 0xd7, 0x43, 0x57, 0x4d, 0x35, 0x4e, 0x74, 0x35, 0xb5, 0x4d, 0x9d,
+	0x03, 0x5e, 0x3a, 0x3c, 0x09, 0xe5, 0x51, 0xe6, 0x00, 0x7f, 0xd8, 0x57, 0xd3, 0x4a, 0xf7, 0x6f,
+	0x1d, 0x0c, 0x11, 0xe8, 0x88, 0xe0, 0x90, 0xa4, 0xbc, 0xc4, 0x57, 0xf5, 0xa7, 0x47, 0x21, 0x7a,
+	0x0a, 0x8d, 0x40, 0xe5, 0x24, 0x04, 0xdf, 0xb6, 0xef, 0x65, 0x3f, 0xbe, 0xa0, 0xb5, 0xea, 0x13,
+	0x2b, 0x67, 0xf4, 0x1c, 0x5a, 0x74, 0x71, 0x3e, 0x5e, 0x81, 0xcb, 0x02, 0x7c, 0xbf, 0x00, 0xce,
+	0x09, 0xa6, 0xf0, 0x06, 0x5d, 0x9b, 0xd0, 0x63, 0xd5, 0x9e, 0x2a, 0x02, 0xfa, 0x71, 0x01, 0xfa,
+	0x4e, 0x6f, 0x7a, 0x08, 0x2d, 0xfe, 0x1d, 0x2f, 0x49, 0x4a, 0xf9, 0x19, 0x96, 0x87, 0xdc, 0xe0,
+	0xb6, 0x33, 0x69, 0x42, 0x4f, 0xa1, 0x99, 0xe2, 0x88, 0x92, 0x70, 0xcc, 0xa8, 0x38, 0xe3, 0x86,
+	0xdd, 0xb1, 0xe4, 0x43, 0xcc, 0xca, 0x1e, 0x62, 0xd6, 0x28, 0x7b, 0x88, 0xf9, 0x0d, 0xe9, 0x3c,
+	0xa2, 0xe8, 0x19, 0xaf, 0x9f, 0x24, 0x4e, 0x99, 0x84, 0xd6, 0x3f, 0x08, 0x85, 0xcc, 0x7d, 0x44,
+	0xbb, 0x7f, 0xe8, 0x50, 0x95, 0x05, 0xff, 0x18, 0x6a, 0x97, 0x42, 0x65, 0x75, 0x19, 0xde, 0x29,
+	0x64, 0x24, 0x7f, 0x80, 0xaf, 0x5c, 0xd0, 0x01, 0xb4, 0x02, 0xf1, 0x08, 0x93, 0x0d, 0x4f, 0x35,
+	0xf9, 0x3b, 0x37, 0x3c, 0xd0, 0x8e, 0x4a, 0xbe, 0x11, 0xe4, 0x9e, 0x74, 0x3d, 0x68, 0x5e, 0x25,
+	0x91, 0x82, 0x95, 0x05, 0xcc, 0xdc, 0x6c, 0xed, 0x47, 0x25, 0xbf, 0x71, 0x95, 0xdd, 0x73, 0x36,
+	0xc0, 0x0a, 0x60, 0x0b, 0xb5, 0x0d, 0x7b, 0x77, 0x13, 0x61, 0x1f, 0x95, 0xfc, 0xe6, 0xd5, 0xea,
+	0xea, 0x38, 0x80, 0x56, 0xbe, 0x1f, 0x0b, 0xb9, 0x73, 0xf4, 0x72, 0x6d, 0x94, 0xd3, 0xcb, 0x75,
+	0x68, 0x4e, 0x2f, 0x4d, 0x02, 0x05, 0xab, 0x15, 0xe9, 0x65, 0xbd, 0x84, 0xd3, 0x4b, 0x93, 0x40,
+	0x8c, 0x5f, 0xb4, 0x00, 0x64, 0xcf, 0xe7, 0xff, 0xf2, 0x85, 0x03, 0x77, 0xe2, 0x74, 0x62, 0xc5,
+	0x09, 0x99, 0x07, 0x71, 0x1a, 0x2a, 0xe4, 0xcf, 0xd6, 0x24, 0x62, 0x97, 0x8b, 0x73, 0xde, 0x5c,
+	0x7a, 0xd9, 0x5a, 0x4f, 0xae, 0x3d, 0x51, 0x6f, 0xe8, 0xe5, 0xd7, 0xbd, 0x49, 0xac, 0x6c, 0xe7,
+	0x35, 0x61, 0xfc, 0xea, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4e, 0xd5, 0xe2, 0x3b, 0xe5, 0x0b,
+	0x00, 0x00,
 }
diff --git a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/voltha.pb.go b/vendor/github.com/opencord/voltha-protos/v4/go/voltha/voltha.pb.go
index 2ec3452..49b2a5e 100644
--- a/vendor/github.com/opencord/voltha-protos/v4/go/voltha/voltha.pb.go
+++ b/vendor/github.com/opencord/voltha-protos/v4/go/voltha/voltha.pb.go
@@ -104,6 +104,7 @@
 const OperStatus_TESTING = OperStatus_Types(common.OperStatus_TESTING)
 const OperStatus_ACTIVE = OperStatus_Types(common.OperStatus_ACTIVE)
 const OperStatus_FAILED = OperStatus_Types(common.OperStatus_FAILED)
+const OperStatus_RECONCILING = OperStatus_Types(common.OperStatus_RECONCILING)
 
 // ConnectStatus_Types from public import voltha_protos/common.proto
 type ConnectStatus_Types = common.ConnectStatus_Types
@@ -124,6 +125,7 @@
 const OperationResp_OPERATION_SUCCESS = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_SUCCESS)
 const OperationResp_OPERATION_FAILURE = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_FAILURE)
 const OperationResp_OPERATION_UNSUPPORTED = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_UNSUPPORTED)
+const OperationResp_OPERATION_IN_PROGRESS = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_IN_PROGRESS)
 
 // ValueType_Type from public import voltha_protos/common.proto
 type ValueType_Type = common.ValueType_Type
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 92a3a0a..8f439c4 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,7 +1,5 @@
 # github.com/Shopify/sarama v1.25.0
 github.com/Shopify/sarama
-# github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878
-github.com/armon/go-metrics
 # github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a
 github.com/coreos/go-systemd/journal
 # github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea
@@ -40,20 +38,8 @@
 github.com/googleapis/gnostic/OpenAPIv2
 github.com/googleapis/gnostic/compiler
 github.com/googleapis/gnostic/extensions
-# github.com/hashicorp/consul/api v1.2.0
-github.com/hashicorp/consul/api
-# github.com/hashicorp/go-cleanhttp v0.5.1
-github.com/hashicorp/go-cleanhttp
-# github.com/hashicorp/go-immutable-radix v1.1.0
-github.com/hashicorp/go-immutable-radix
-# github.com/hashicorp/go-rootcerts v1.0.1
-github.com/hashicorp/go-rootcerts
 # github.com/hashicorp/go-uuid v1.0.1
 github.com/hashicorp/go-uuid
-# github.com/hashicorp/golang-lru v0.5.3
-github.com/hashicorp/golang-lru/simplelru
-# github.com/hashicorp/serf v0.8.4
-github.com/hashicorp/serf/coordinate
 # github.com/imdario/mergo v0.3.7
 github.com/imdario/mergo
 # github.com/jcmturner/gofork v1.0.0
@@ -75,20 +61,16 @@
 github.com/klauspost/compress/snappy
 github.com/klauspost/compress/zstd
 github.com/klauspost/compress/zstd/internal/xxhash
-# github.com/mitchellh/go-homedir v1.1.0
-github.com/mitchellh/go-homedir
-# github.com/mitchellh/mapstructure v1.1.2
-github.com/mitchellh/mapstructure
 # github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
 github.com/modern-go/concurrent
 # github.com/modern-go/reflect2 v1.0.1
 github.com/modern-go/reflect2
-# github.com/opencord/voltha-lib-go/v4 v4.0.6
+# github.com/opencord/voltha-lib-go/v4 v4.2.1
 github.com/opencord/voltha-lib-go/v4/pkg/config
 github.com/opencord/voltha-lib-go/v4/pkg/db
 github.com/opencord/voltha-lib-go/v4/pkg/db/kvstore
 github.com/opencord/voltha-lib-go/v4/pkg/log
-# github.com/opencord/voltha-protos/v4 v4.0.11
+# github.com/opencord/voltha-protos/v4 v4.0.16
 github.com/opencord/voltha-protos/v4/go/common
 github.com/opencord/voltha-protos/v4/go/ext/config
 github.com/opencord/voltha-protos/v4/go/extension
diff --git a/voltctl.v1.config b/voltctl.v1.config
new file mode 100644
index 0000000..94307b8
--- /dev/null
+++ b/voltctl.v1.config
@@ -0,0 +1,31 @@
+
+# Copyright 2019-present Ciena Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+server: localhost:55555
+kafka: localhost:9092
+kvstore: localhost:2379
+tls:
+  useTls: false
+  caCert: ""
+  cert: ""
+  key: ""
+  verify: ""
+grpc:
+  timeout: 5m0s
+  maxCallRecvMsgSize: 4M
+kvstoreconfig:
+  timeout: 5s
+
diff --git a/voltctl.v2.config b/voltctl.v2.config
new file mode 100644
index 0000000..0cd740b
--- /dev/null
+++ b/voltctl.v2.config
@@ -0,0 +1,30 @@
+# Copyright 2021-present Ciena Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v2
+server: localhost:55555
+kafka: localhost:9092
+kvstore: localhost:2379
+tls:
+  useTls: false
+  caCert: ""
+  cert: ""
+  key: ""
+  verify: false
+grpc:
+  connectTimeout: 5s
+  timeout: 5m0s
+  maxCallRecvMsgSize: 4M
+kvstoreconfig:
+  timeout: 5s
+
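
Taken together, the two sample configs illustrate the breaking change: voltctl.v1.config still quotes verify as a string, while voltctl.v2.config declares apiVersion: v2, adds grpc.connectTimeout, and types verify as a bool. A sketch of how such a tls: block could be mapped onto a Go tls.Config; the field mapping, including reading verify: false as skipping server-certificate verification, is an assumption here rather than anything defined by this change:

package tlsconfig

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
)

// TLSConfigSpec mirrors the tls: section of the v2 config file.
type TLSConfigSpec struct {
	UseTLS bool   `yaml:"useTls"`
	CACert string `yaml:"caCert"`
	Cert   string `yaml:"cert"`
	Key    string `yaml:"key"`
	Verify bool   `yaml:"verify"` // bool in v2; the v1 config carried this as a string
}

// Build turns the spec into a *tls.Config, or nil when TLS is disabled.
func (s TLSConfigSpec) Build() (*tls.Config, error) {
	if !s.UseTLS {
		return nil, nil
	}
	// Assumed semantics: verify=false skips server-certificate verification.
	cfg := &tls.Config{InsecureSkipVerify: !s.Verify}
	if s.CACert != "" {
		pem, err := ioutil.ReadFile(s.CACert)
		if err != nil {
			return nil, err
		}
		pool := x509.NewCertPool()
		pool.AppendCertsFromPEM(pem)
		cfg.RootCAs = pool
	}
	if s.Cert != "" && s.Key != "" {
		pair, err := tls.LoadX509KeyPair(s.Cert, s.Key)
		if err != nil {
			return nil, err
		}
		cfg.Certificates = []tls.Certificate{pair}
	}
	return cfg, nil
}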