cord-776 create build / runtime containers for automation uservices

Change-Id: I246973192adef56a250ffe93a5f65fff488840c1
diff --git a/automation/Dockerfile b/automation/Dockerfile
index eaf8abe..32f1513 100644
--- a/automation/Dockerfile
+++ b/automation/Dockerfile
@@ -1,4 +1,4 @@
-## Copyright 2016 Open Networking Laboratory
+## Copyright 2017 Open Networking Laboratory
 ##
 ## Licensed under the Apache License, Version 2.0 (the "License");
 ## you may not use this file except in compliance with the License.
@@ -11,23 +11,13 @@
 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ## See the License for the specific language governing permissions and
 ## limitations under the License.
-FROM golang:1.6-alpine
+FROM golang:1.7-alpine
 MAINTAINER Open Networking Laboratory <info@onlab.us>
 
-RUN apk --update add openssh-client git
-
+RUN mkdir /service
 WORKDIR /go
-RUN go get github.com/tools/godep
 ADD . /go/src/gerrit.opencord.org/maas/cord-maas-automation
-
-WORKDIR /go/src/gerrit.opencord.org/maas/cord-maas-automation
-RUN /go/bin/godep restore || true
-
-WORKDIR /go
-RUN go install gerrit.opencord.org/maas/cord-maas-automation
-
-RUN mkdir -p /root/.ssh
-COPY ssh-config /root/.ssh/config
+RUN go build -o /service/entry-point gerrit.opencord.org/maas/cord-maas-automation
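+# The Makefile "package" target copies /service/entry-point out of this build
+# image and bakes it into the slim runtime image defined in Dockerfile.release.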
 
 LABEL org.label-schema.name="automation" \
       org.label-schema.description="Provides automation of the compute node deployment and provisioning process" \
@@ -35,4 +25,5 @@
       org.label-schema.vendor="Open Networking Laboratory" \
       org.label-schema.schema-version="1.0"
 
-ENTRYPOINT ["/go/bin/cord-maas-automation"]
+WORKDIR /service
+ENTRYPOINT ["/service/entry-point"]
diff --git a/automation/Dockerfile.release b/automation/Dockerfile.release
new file mode 100644
index 0000000..6e24b05
--- /dev/null
+++ b/automation/Dockerfile.release
@@ -0,0 +1,26 @@
+## Copyright 2017 Open Networking Laboratory
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+FROM alpine:3.5
+MAINTAINER Open Networking Laboratory <info@onlab.us>
+
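+# entry-point is the automation binary extracted from the build image by the
+# Makefile "package" target.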
+ADD entry-point /service/entry-point
+
+LABEL org.label-schema.name="automation" \
+      org.label-schema.description="Provides automation of the compute node deployment and provisioning process" \
+      org.label-schema.vcs-url="https://gerrit.opencord.org/maas" \
+      org.label-schema.vendor="Open Networking Laboratory" \
+      org.label-schema.schema-version="1.0"
+
+WORKDIR /service
+ENTRYPOINT ["/service/entry-point"]
diff --git a/automation/Godeps/Godeps.json b/automation/Godeps/Godeps.json
deleted file mode 100644
index c601bc6..0000000
--- a/automation/Godeps/Godeps.json
+++ /dev/null
@@ -1,33 +0,0 @@
-{
-	"ImportPath": "_/Users/dbainbri/src/develop/mf",
-	"GoVersion": "go1.6",
-	"GodepVersion": "v72",
-	"Packages": [
-		"github.com/ciena/maas-flow"
-	],
-	"Deps": [
-		{
-			"ImportPath": "github.com/juju/gomaasapi",
-			"Rev": "e173bc8d8d3304ff11b0ded5f6d4eea0cb560a40"
-		},
-		{
-			"ImportPath": "gopkg.in/mgo.v2/bson",
-			"Comment": "r2015.12.06-2-g03c9f3e",
-			"Rev": "03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64"
-		},
-		{
-			"ImportPath": "github.com/hashicorp/consul",
-			"Comment": "v0.6.4-341-g22938ab",
-			"Rev": "22938ab8b3ca069e490a4897a8ec13308ac61605"
-		},
-                {
-                        "ImportPath": "github.com/kelseyhightower/envconfig",
-                        "Comment": "1.1.0-17-g91921eb",
-                        "Rev": "91921eb4cf999321cdbeebdba5a03555800d493b"
-                },
-                {
-                        "ImportPath": "github.com/Sirupsen/logrus",
-                        "Rev": "f3cfb454f4c209e6668c95216c4744b8fddb2356"
-                }
-	]
-}
diff --git a/automation/Makefile b/automation/Makefile
index d3aa83a..275f611 100644
--- a/automation/Makefile
+++ b/automation/Makefile
@@ -1,17 +1,38 @@
+IMAGE_NAME=cord-maas-automation
+BINARY=entry-point
+BUILD_TAG=build
+PACKAGE_TAG=candidate
+
 .PHONY: help
 help:
-	@echo "image     - create docker image for the MAAS deploy flow utility"
-	@echo "save      - save the docker image for the MAAS deployment flow utility to a local tar file"
-	@echo "clean     - remove any generated files"
+	@echo "build     - create the binary"
+	@echo "package   - package the binary into a docker container"
+	@echo "clean     - remove tempory files and build artifacts"
 	@echo "help      - this message"
 
-.PHONY: image
-image:
-	docker build -t cord/maas-automation:0.1-prerelease .
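+# Version metadata baked into the image labels: build date, git commit, and a
+# branch-derived version that gains a "[modified]" suffix when the repo tool
+# reports local branches or uncommitted changes.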
+BUILD_DATE=$(shell date -u +%Y-%m-%dT%TZ)
+VCS_REF=$(shell git log --pretty=format:%H -n 1)
+VCS_REF_DATE=$(shell git log --pretty=format:%cd --date=format:%FT%T%z -n 1)
+BRANCHES=$(shell repo --color=never --no-pager branches 2>/dev/null | wc -l)
+STATUS=$(shell repo --color=never --no-pager status . | tail -n +2 | wc -l)
+MODIFIED=$(shell test $(BRANCHES) -eq 0 && test $(STATUS) -eq 0 || echo "[modified]")
+BRANCH=$(shell repo --color=never --no-pager info -l -o | grep 'Manifest branch:' | awk '{print $$NF}')
+VERSION=$(BRANCH)$(MODIFIED)
 
-save: image
-	docker save -o cord_maas-automation.1-prerelease.tar cord/maas-automation:0.1-prerelease
+.PHONY: build
+build:
+	docker build -t $(IMAGE_NAME):$(BUILD_TAG) .
 
-.PHONT: clean
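+# package: create a container from the build image, copy the binary out of it,
+# and build the runtime image from Dockerfile.release in a temp directory.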
+.PHONY: package
+package:
+	$(eval BUILD_ID := $(shell docker create $(IMAGE_NAME):$(BUILD_TAG)))
+	$(eval BINDIR := $(shell mktemp -d))
+	docker cp $(BUILD_ID):/service/$(BINARY) $(BINDIR)/$(BINARY)
+	cp Dockerfile.release $(BINDIR)
+	docker build -f $(BINDIR)/Dockerfile.release -t $(IMAGE_NAME):$(PACKAGE_TAG) --label org.label-schema.build-date=$(BUILD_DATE) --label org.label-schema.vcs-ref=$(VCS_REF) --label org.label-schema.vcs-ref-date=$(VCS_REF_DATE) --label org.label-schema.version=$(VERSION) $(BINDIR)
+	docker rm -f $(BUILD_ID)
+	rm -r $(BINDIR)
+
+.PHONY: clean
 clean:
-	rm -f cord_maas-automation.1-prerelease.tar
+	@echo ""
diff --git a/automation/maas-flow.go b/automation/maas-flow.go
index 5d09dc4..e4dc44a 100644
--- a/automation/maas-flow.go
+++ b/automation/maas-flow.go
@@ -27,15 +27,17 @@
 	maas "github.com/juju/gomaasapi"
 )
 
+const appName = "AUTOMATION"
+
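+// Config is populated from the environment via envconfig using the app-name
+// prefix, e.g. AUTOMATION_POWER_HELPER_USER (envconfig also falls back to the
+// bare tag name, e.g. POWER_HELPER_USER).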
 type Config struct {
-	PowerHelperUser   string        `default:"cord" envconfig:"POWER_HELPER_USER"`
-	PowerHelperHost   string        `default:"127.0.0.1" envconfig:"POWER_HELPER_HOST"`
-	PowerHelperScript string        `default:"" envconfig:"POWER_HELPER_SCRIPT"`
-	ProvisionUrl      string        `default:"" envconfig:"PROVISION_URL"`
-	ProvisionTtl      string        `default:"1h" envconfig:"PROVISION_TTL"`
-	LogLevel          string        `default:"warning" envconfig:"LOG_LEVEL"`
-	LogFormat         string        `default:"text" envconfig:"LOG_FORMAT"`
-	ApiKey            string        `envconfig:"MAAS_API_KEY" desc:"API key to access MAAS server"`
+	PowerHelperUser   string        `default:"cord" envconfig:"POWER_HELPER_USER" desc:"user when integrating with virtual box power mgmt"`
+	PowerHelperHost   string        `default:"127.0.0.1" envconfig:"POWER_HELPER_HOST" desc:"virtual box host"`
+	PowerHelperScript string        `default:"" envconfig:"POWER_HELPER_SCRIPT" desc:"script for virtual box power mgmt support"`
+	ProvisionUrl      string        `default:"" envconfig:"PROVISION_URL" desc:"connection string to connect to provisioner uservice"`
+	ProvisionTtl      string        `default:"1h" envconfig:"PROVISION_TTL" desc:"duration to wait for a provisioning request to complete, before considered a failure"`
+	LogLevel          string        `default:"warning" envconfig:"LOG_LEVEL" desc:"detail level for logging"`
+	LogFormat         string        `default:"text" envconfig:"LOG_FORMAT" desc:"log output format, text or json"`
+	ApiKey            string        `envconfig:"MAAS_API_KEY" required:"true" desc:"API key to access MAAS server"`
 	ApiKeyFile        string        `default:"/secrets/maas_api_key" envconfig:"MAAS_API_KEY_FILE" desc:"file to hold the secret"`
 	ShowApiKey        bool          `default:"false" envconfig:"MAAS_SHOW_API_KEY" desc:"Show API in clear text in logs"`
 	MaasUrl           string        `default:"http://localhost/MAAS" envconfig:"MAAS_URL" desc:"URL to access MAAS server"`
@@ -89,12 +91,24 @@
 }
 
 var log = logrus.New()
+var appFlags = flag.NewFlagSet("", flag.ContinueOnError)
 
 func main() {
 
-	flag.Parse()
 	config := Config{}
-	err := envconfig.Process("AUTOMATION", &config)
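+	// Usage output is delegated to envconfig so that "--help" prints the
+	// supported environment variables rather than flag defaults.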
+	appFlags.Usage = func() {
+		envconfig.Usage(appName, &config)
+	}
+	if err := appFlags.Parse(os.Args[1:]); err != nil {
+		if err != flag.ErrHelp {
+			os.Exit(1)
+		} else {
+			return
+		}
+	}
+
+	err := envconfig.Process(appName, &config)
+
 	if err != nil {
 		log.Fatalf("Unable to parse configuration options : %s", err)
 	}
diff --git a/automation/ssh-config b/automation/ssh-config
deleted file mode 100644
index 990a43d..0000000
--- a/automation/ssh-config
+++ /dev/null
@@ -1,3 +0,0 @@
-Host *
-   StrictHostKeyChecking no
-   UserKnownHostsFile=/dev/null
diff --git a/automation/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/automation/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..f2c2bc2
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,66 @@
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/automation/vendor/github.com/Sirupsen/logrus/LICENSE b/automation/vendor/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/automation/vendor/github.com/Sirupsen/logrus/README.md b/automation/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000..206c746
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,433 @@
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
+
+**Seeing weird case-sensitive problems?** See [this
+issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
+This change has been reverted. I apologize for causing this. I greatly
+underestimated the impact this would have. Logrus strives for stability and
+backwards compatibility and failed to provide that.
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is the package-level exported logger:
+
+```go
+package main
+
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+  }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+  "os"
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // Log as JSON instead of the default ASCII formatter.
+  log.SetFormatter(&log.JSONFormatter{})
+
+  // Output to stdout instead of the default stderr, could also be a file.
+  log.SetOutput(os.Stdout)
+
+  // Only log the warning severity or above.
+  log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 122,
+  }).Warn("The group's number increased tremendously!")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 100,
+  }).Fatal("The ice breaks!")
+
+  // A common pattern is to re-use fields between logging statements by re-using
+  // the logrus.Entry returned from WithFields()
+  contextLogger := log.WithFields(log.Fields{
+    "common": "this is a common field",
+    "other": "I also should be logged always",
+  })
+
+  contextLogger.Info("I'll be logged with common and other field")
+  contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+  // The API for setting attributes is a little different than the package level
+  // exported logger. See Godoc.
+  log.Out = os.Stderr
+
+  log.WithFields(logrus.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint that you should add a field. However, you can still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an
+exception tracking service on `Error`, `Fatal` and `Panic`, send info to
+StatsD, or log to multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
+Note: the syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
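+
+A minimal sketch of the local-socket variant mentioned in the note above (same
+imports as the previous snippet; per the syslog hook README, empty network and
+address arguments select the local syslog socket):
+
+```go
+// Connect to the local syslog socket (e.g. "/dev/log") by passing empty
+// strings for the network and address arguments.
+hook, err := logrus_syslog.NewSyslogHook("", "", syslog.LOG_INFO, "")
+if err == nil {
+  log.AddHook(hook)
+}
+```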
+
+| Hook  | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
+
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force no colored output even if there is a TTY, set the
+    `DisableColors` field to `true`.
+* `logrus.JSONFormatter`. Logs fields as JSON.
+
+Third party logging formatters:
+
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+    // create a stdlib log.Logger that writes to
+    // logrus.Logger.
+    ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers. You can set a logger's level, hook and formatter from a config file, so loggers are generated with different configs in different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus to wrap with spf13/Viper to load configuration with fangs! It also simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+logger, hook := NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to gracefully shut down. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default, the Logger is protected by a mutex for concurrent writes; this mutex is invoked when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+
+Situations when locking is not needed include:
+
+* You have no hooks registered, or hooks calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+     (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
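+
+A minimal sketch of the lock-free setup described above (assuming `writer` is
+already safe for concurrent writes):
+
+```go
+logger := logrus.New()
+logger.Out = writer // e.g. an O_APPEND file or a lock-protected writer
+logger.SetNoLock()  // skip logrus's internal mutex
+```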
diff --git a/automation/vendor/github.com/Sirupsen/logrus/alt_exit.go b/automation/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..b4c9e84
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://bitbucket.org/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to gracefully shut down. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/doc.go b/automation/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 0000000..dddd5f8
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is the package-level exported logger:
+
+  package main
+
+  import (
+    log "github.com/Sirupsen/logrus"
+  )
+
+  func main() {
+    log.WithFields(log.Fields{
+      "animal": "walrus",
+      "number": 1,
+      "size":   10,
+    }).Info("A walrus appears")
+  }
+
+Output:
+  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/automation/vendor/github.com/Sirupsen/logrus/entry.go b/automation/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000..4edbe7a
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,275 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"sync"
+	"time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+	Logger *Logger
+
+	// Contains all the fields set by the user.
+	Data Fields
+
+	// Time at which the log entry was created
+	Time time.Time
+
+	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+	Level Level
+
+	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
+	Message string
+
+	// When the formatter is called in entry.log(), a Buffer may be set on the entry
+	Buffer *bytes.Buffer
+}
+
+func NewEntry(logger *Logger) *Entry {
+	return &Entry{
+		Logger: logger,
+		// Default is three fields, give a little extra room
+		Data: make(Fields, 5),
+	}
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		return "", err
+	}
+	str := string(serialized)
+	return str, nil
+}
+
+// Add an error as a single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+	return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+	data := make(Fields, len(entry.Data)+len(fields))
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	for k, v := range fields {
+		data[k] = v
+	}
+	return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+	var buffer *bytes.Buffer
+	entry.Time = time.Now()
+	entry.Level = level
+	entry.Message = msg
+
+	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+		entry.Logger.mu.Unlock()
+	}
+	buffer = bufferPool.Get().(*bytes.Buffer)
+	buffer.Reset()
+	defer bufferPool.Put(buffer)
+	entry.Buffer = buffer
+	serialized, err := entry.Logger.Formatter.Format(&entry)
+	entry.Buffer = nil
+	if err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+		entry.Logger.mu.Unlock()
+	} else {
+		entry.Logger.mu.Lock()
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+		entry.Logger.mu.Unlock()
+	}
+
+	// To avoid Entry#log() returning a value that only would make sense for
+	// panic() to use in Entry#Panic(), we avoid the allocation by checking
+	// directly here.
+	if level <= PanicLevel {
+		panic(&entry)
+	}
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.log(DebugLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+	entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.log(InfoLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.log(WarnLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+	entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.log(ErrorLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.log(FatalLevel, fmt.Sprint(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.log(PanicLevel, fmt.Sprint(args...))
+	}
+	panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.Debug(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.Info(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+	entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.Warn(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+	entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.Error(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.Fatal(fmt.Sprintf(format, args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.Panic(fmt.Sprintf(format, args...))
+	}
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.Debug(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.Info(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+	entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.Warn(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+	entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.Error(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.Fatal(entry.sprintlnn(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.Panic(entry.sprintlnn(args...))
+	}
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+	msg := fmt.Sprintln(args...)
+	return msg[:len(msg)-1]
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/exported.go b/automation/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000..9a0120a
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+	"io"
+)
+
+var (
+	// std is the name of the standard logger in stdlib `log`
+	std = New()
+)
+
+func StandardLogger() *Logger {
+	return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+	return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+	return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+	return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+	std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+	std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+	std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+	std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+	std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+	std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+	std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+	std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+	std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+	std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+	std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+	std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+	std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+	std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+	std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+	std.Fatalln(args...)
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/formatter.go b/automation/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..b5fbe93
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code weren't there, doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+	if t, ok := data["time"]; ok {
+		data["fields.time"] = t
+	}
+
+	if m, ok := data["msg"]; ok {
+		data["fields.msg"] = m
+	}
+
+	if l, ok := data["level"]; ok {
+		data["fields.level"] = l
+	}
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/hooks.go b/automation/vendor/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers; you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+	Levels() []Level
+	Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+	for _, level := range hook.Levels() {
+		hooks[level] = append(hooks[level], hook)
+	}
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+	for _, hook := range hooks[level] {
+		if err := hook.Fire(entry); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/json_formatter.go b/automation/vendor/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..266554e
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,74 @@
+package logrus
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+type fieldKey string
+type FieldMap map[fieldKey]string
+
+const (
+	FieldKeyMsg   = "msg"
+	FieldKeyLevel = "level"
+	FieldKeyTime  = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// FieldMap allows users to customize the names of keys for various fields.
+	// As an example:
+	// formatter := &JSONFormatter{
+	//   	FieldMap: FieldMap{
+	// 		 FieldKeyTime: "@timestamp",
+	// 		 FieldKeyLevel: "@level",
+	// 		 FieldKeyLevel: "@message",
+	//    },
+	// }
+	FieldMap FieldMap
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields, len(entry.Data)+3)
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/Sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+	prefixFieldClashes(data)
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+
+	serialized, err := json.Marshal(data)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+	}
+	return append(serialized, '\n'), nil
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/logger.go b/automation/vendor/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000..b769f3d
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,308 @@
+package logrus
+
+import (
+	"io"
+	"os"
+	"sync"
+)
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it default which is `os.Stderr`. You can also set this to
+	// something more adventurous, such as logging to Kafka.
+	Out io.Writer
+	// Hooks for the logger instance. These allow firing events based on logging
+	// levels and log entries. For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before logged to Out. The
+	// included formatters are `TextFormatter` and `JSONFormatter` for which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but to a file it wouldn't. You can easily implement your
+	// own that implements the `Formatter` interface, see the `README` or included
+	// formatters for examples.
+	Formatter Formatter
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged. `logrus.Debug` is useful in a debug or verbose environment.
+	Level Level
+	// Used to sync writing to the log. Locking is enabled by default.
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+}
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//    var log = &Logger{
+//      Out: os.Stderr,
+//      Formatter: new(JSONFormatter),
+//      Hooks: make(LevelHooks),
+//      Level: logrus.DebugLevel,
+//    }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:       os.Stderr,
+		Formatter: new(TextFormatter),
+		Hooks:     make(LevelHooks),
+		Level:     InfoLevel,
+	}
+}
+
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry; note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
+}
+
+// Adds a map of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
+}
+
+// Add an error as a single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infof(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalf(format, args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debug(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Info(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Info(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Error(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatal(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panic(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infoln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalln(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+// When a file is opened in append mode, it is safe to write to it
+// concurrently (for messages under 4k on Linux). In such cases the user can
+// choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
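Taken together, the pieces above give the usual usage pattern: construct a Logger with New(), raise Level to open the corresponding gates, and attach fields before emitting. A minimal sketch against the vendored package (field names, values and messages are illustrative only, not part of this patch):

    package main

    import "github.com/Sirupsen/logrus"

    func main() {
        log := logrus.New()           // os.Stderr, TextFormatter, InfoLevel
        log.Level = logrus.DebugLevel // Debugf now passes the >= DebugLevel check
        log.WithFields(logrus.Fields{
            "node": "compute-1", // hypothetical field values
            "step": "provision",
        }).Info("deploy started")
        log.Debugf("retrying in %d seconds", 5)
    }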
diff --git a/automation/vendor/github.com/Sirupsen/logrus/logrus.go b/automation/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..e596691
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+	switch level {
+	case DebugLevel:
+		return "debug"
+	case InfoLevel:
+		return "info"
+	case WarnLevel:
+		return "warning"
+	case ErrorLevel:
+		return "error"
+	case FatalLevel:
+		return "fatal"
+	case PanicLevel:
+		return "panic"
+	}
+
+	return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+	switch strings.ToLower(lvl) {
+	case "panic":
+		return PanicLevel, nil
+	case "fatal":
+		return FatalLevel, nil
+	case "error":
+		return ErrorLevel, nil
+	case "warn", "warning":
+		return WarnLevel, nil
+	case "info":
+		return InfoLevel, nil
+	case "debug":
+		return DebugLevel, nil
+	}
+
+	var l Level
+	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+	PanicLevel,
+	FatalLevel,
+	ErrorLevel,
+	WarnLevel,
+	InfoLevel,
+	DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+	// PanicLevel level, highest level of severity. Logs and then calls panic with the
+	// message passed to Debug, Info, ...
+	PanicLevel Level = iota
+	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+	// logging level is set to Panic.
+	FatalLevel
+	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way it'll
+// accept a stdlib logger and a logrus logger. There's no standard interface;
+// this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+}
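ParseLevel is the natural bridge from a command-line flag or config value to the Level field gated on above; a sketch (the --log-level flag name is hypothetical):

    package main

    import (
        "flag"

        "github.com/Sirupsen/logrus"
    )

    func main() {
        levelFlag := flag.String("log-level", "info", "panic, fatal, error, warn, info or debug")
        flag.Parse()

        log := logrus.New()
        lvl, err := logrus.ParseLevel(*levelFlag)
        if err != nil {
            log.Fatalf("invalid log level %q: %v", *levelFlag, err)
        }
        log.Level = lvl
        log.Infof("logging at %s", lvl) // uses Level.String(), e.g. "debug"
    }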
diff --git a/automation/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/automation/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
new file mode 100644
index 0000000..1960169
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package logrus
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	return true
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/automation/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000..5f6be4d
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/automation/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/automation/vendor/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000..308160c
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/automation/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/automation/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..329038f
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,22 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var termios Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/automation/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 0000000..a3c6f6e
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris,!appengine
+
+package logrus
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+	_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+	return err == nil
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/automation/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..3727e8a
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,!appengine
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/text_formatter.go b/automation/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..076de5d
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,166 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"sort"
+	"strings"
+	"time"
+)
+
+const (
+	nocolor = 0
+	red     = 31
+	green   = 32
+	yellow  = 33
+	blue    = 34
+	gray    = 37
+)
+
+var (
+	baseTimestamp time.Time
+	isTerminal    bool
+)
+
+func init() {
+	baseTimestamp = time.Now()
+	isTerminal = IsTerminal()
+}
+
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time passed since the beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+	DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	var b *bytes.Buffer
+	keys := make([]string, 0, len(entry.Data))
+	for k := range entry.Data {
+		keys = append(keys, k)
+	}
+
+	if !f.DisableSorting {
+		sort.Strings(keys)
+	}
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	prefixFieldClashes(entry.Data)
+
+	isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+	isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+	if isColored {
+		f.printColored(b, entry, keys, timestampFormat)
+	} else {
+		if !f.DisableTimestamp {
+			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+		}
+		f.appendKeyValue(b, "level", entry.Level.String())
+		if entry.Message != "" {
+			f.appendKeyValue(b, "msg", entry.Message)
+		}
+		for _, key := range keys {
+			f.appendKeyValue(b, key, entry.Data[key])
+		}
+	}
+
+	b.WriteByte('\n')
+	return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+	var levelColor int
+	switch entry.Level {
+	case DebugLevel:
+		levelColor = gray
+	case WarnLevel:
+		levelColor = yellow
+	case ErrorLevel, FatalLevel, PanicLevel:
+		levelColor = red
+	default:
+		levelColor = blue
+	}
+
+	levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+	if f.DisableTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+	} else if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+	} else {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+	}
+	for _, k := range keys {
+		v := entry.Data[k]
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+		f.appendValue(b, v)
+	}
+}
+
+func needsQuoting(text string) bool {
+	for _, ch := range text {
+		if !((ch >= 'a' && ch <= 'z') ||
+			(ch >= 'A' && ch <= 'Z') ||
+			(ch >= '0' && ch <= '9') ||
+			ch == '-' || ch == '.') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+
+	b.WriteString(key)
+	b.WriteByte('=')
+	f.appendValue(b, value)
+	b.WriteByte(' ')
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+	switch value := value.(type) {
+	case string:
+		if !needsQuoting(value) {
+			b.WriteString(value)
+		} else {
+			fmt.Fprintf(b, "%q", value)
+		}
+	case error:
+		errmsg := value.Error()
+		if !needsQuoting(errmsg) {
+			b.WriteString(errmsg)
+		} else {
+			fmt.Fprintf(b, "%q", errmsg)
+		}
+	default:
+		fmt.Fprint(b, value)
+	}
+}
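The formatter fields above are plain struct members, so tuning the text output is just assignment; a sketch (the chosen timestamp format is an example, not a project convention):

    package main

    import "github.com/Sirupsen/logrus"

    func main() {
        log := logrus.New()
        log.Formatter = &logrus.TextFormatter{
            FullTimestamp:   true,                  // wall-clock time, not seconds since start
            TimestampFormat: "2006-01-02 15:04:05", // overrides DefaultTimestampFormat
            DisableColors:   true,                  // key=value form even on a TTY
        }
        log.WithField("service", "automation").Warn("uncolored, fully timestamped")
    }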
diff --git a/automation/vendor/github.com/Sirupsen/logrus/writer.go b/automation/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000..f74d2aa
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,53 @@
+package logrus
+
+import (
+	"bufio"
+	"io"
+	"runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+	return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+	reader, writer := io.Pipe()
+
+	var printFunc func(args ...interface{})
+	switch level {
+	case DebugLevel:
+		printFunc = logger.Debug
+	case InfoLevel:
+		printFunc = logger.Info
+	case WarnLevel:
+		printFunc = logger.Warn
+	case ErrorLevel:
+		printFunc = logger.Error
+	case FatalLevel:
+		printFunc = logger.Fatal
+	case PanicLevel:
+		printFunc = logger.Panic
+	default:
+		printFunc = logger.Print
+	}
+
+	go logger.writerScanner(reader, printFunc)
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		printFunc(scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		logger.Errorf("Error while reading from Writer: %s", err)
+	}
+	reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
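WriterLevel makes the logger usable anywhere an io.Writer is expected, for example to fold a subprocess's output into the log line by line; a sketch (the command is hypothetical):

    package main

    import (
        "os/exec"

        "github.com/Sirupsen/logrus"
    )

    func main() {
        log := logrus.New()
        w := log.WriterLevel(logrus.WarnLevel) // each scanned line becomes a Warn entry
        defer w.Close()

        cmd := exec.Command("echo", "disk nearly full") // hypothetical command
        cmd.Stdout = w
        cmd.Stderr = w
        if err := cmd.Run(); err != nil {
            log.WithError(err).Error("command failed")
        }
    }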
diff --git a/automation/vendor/github.com/juju/ansiterm/LICENSE b/automation/vendor/github.com/juju/ansiterm/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/ansiterm/Makefile b/automation/vendor/github.com/juju/ansiterm/Makefile
new file mode 100644
index 0000000..212fdcb
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/Makefile
@@ -0,0 +1,14 @@
+# Copyright 2016 Canonical Ltd.
+# Licensed under the LGPLv3, see LICENCE file for details.
+
+default: check
+
+check:
+	go test
+
+docs:
+	godoc2md github.com/juju/ansiterm > README.md
+	sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/ansiterm?status.svg)](https://godoc.org/github.com/juju/ansiterm)|' README.md 
+
+
+.PHONY: default check docs
diff --git a/automation/vendor/github.com/juju/ansiterm/README.md b/automation/vendor/github.com/juju/ansiterm/README.md
new file mode 100644
index 0000000..5674387
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/README.md
@@ -0,0 +1,323 @@
+
+# ansiterm
+    import "github.com/juju/ansiterm"
+
+Package ansiterm provides a Writer that writes out the ANSI escape
+codes for color and styles.
+
+
+
+
+
+
+
+## type Color
+``` go
+type Color int
+```
+Color represents one of the standard 16 ANSI colors.
+
+
+
+``` go
+const (
+    Default Color
+    Black
+    Red
+    Green
+    Yellow
+    Blue
+    Magenta
+    Cyan
+    Gray
+    DarkGray
+    BrightRed
+    BrightGreen
+    BrightYellow
+    BrightBlue
+    BrightMagenta
+    BrightCyan
+    White
+)
+```
+
+
+
+
+
+
+
+
+### func (Color) String
+``` go
+func (c Color) String() string
+```
+String returns the name of the color.
+
+
+
+## type Context
+``` go
+type Context struct {
+    Foreground Color
+    Background Color
+    Styles     []Style
+}
+```
+Context provides a way to specify both foreground and background colors
+along with other styles and write text to a Writer with those colors and
+styles.
+
+
+
+
+
+
+
+
+
+### func Background
+``` go
+func Background(color Color) *Context
+```
+Background is a convenience function that creates a Context with the
+specified color as the background color.
+
+
+### func Foreground
+``` go
+func Foreground(color Color) *Context
+```
+Foreground is a convenience function that creates a Context with the
+specified color as the foreground color.
+
+
+### func Styles
+``` go
+func Styles(styles ...Style) *Context
+```
+Styles is a convenience function that creates a Context with the
+specified styles set.
+
+
+
+
+### func (\*Context) Fprint
+``` go
+func (c *Context) Fprint(w sgrWriter, args ...interface{})
+```
+Fprint will set the sgr values of the writer to the specified foreground,
+background and styles, then formats using the default formats for its
+operands and writes to w. Spaces are added between operands when neither is
+a string. It returns the number of bytes written and any write error
+encountered.
+
+
+
+### func (\*Context) Fprintf
+``` go
+func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{})
+```
+Fprintf will set the sgr values of the writer to the specified
+foreground, background and styles, then write the formatted string,
+then reset the writer.
+
+
+
+### func (\*Context) SetBackground
+``` go
+func (c *Context) SetBackground(color Color) *Context
+```
+SetBackground sets the background to the specified color.
+
+
+
+### func (\*Context) SetForeground
+``` go
+func (c *Context) SetForeground(color Color) *Context
+```
+SetForeground sets the foreground to the specified color.
+
+
+
+### func (\*Context) SetStyle
+``` go
+func (c *Context) SetStyle(styles ...Style) *Context
+```
+SetStyle replaces the styles with the new values.
+
+
+
+## type Style
+``` go
+type Style int
+```
+
+
+``` go
+const (
+    Bold Style
+    Faint
+    Italic
+    Underline
+    Blink
+    Reverse
+    Strikethrough
+    Conceal
+)
+```
+
+
+
+
+
+
+
+
+### func (Style) String
+``` go
+func (s Style) String() string
+```
+
+
+## type TabWriter
+``` go
+type TabWriter struct {
+    Writer
+    // contains filtered or unexported fields
+}
+```
+TabWriter is a filter that inserts padding around tab-delimited
+columns in its input to align them in the output.
+
+It also allows setting of colors and styles over and above the standard
+tabwriter package.
+
+
+
+
+
+
+
+
+
+### func NewTabWriter
+``` go
+func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter
+```
+NewTabWriter returns a writer that is able to set colors and styles.
+The ansi escape codes are stripped for width calculations.
+
+
+
+
+### func (\*TabWriter) Flush
+``` go
+func (t *TabWriter) Flush() error
+```
+Flush should be called after the last call to Write to ensure
+that any data buffered in the Writer is written to output. Any
+incomplete escape sequence at the end is considered
+complete for formatting purposes.
+
+
+
+### func (\*TabWriter) Init
+``` go
+func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter
+```
+A Writer must be initialized with a call to Init. The first parameter (output)
+specifies the filter output. The remaining parameters control the formatting:
+
+
+	minwidth	minimal cell width including any padding
+	tabwidth	width of tab characters (equivalent number of spaces)
+	padding		padding added to a cell before computing its width
+	padchar		ASCII char used for padding
+			if padchar == '\t', the Writer will assume that the
+			width of a '\t' in the formatted output is tabwidth,
+			and cells are left-aligned independent of align_left
+			(for correct-looking results, tabwidth must correspond
+			to the tab width in the viewer displaying the result)
+	flags		formatting control
+
+
+
+## type Writer
+``` go
+type Writer struct {
+    io.Writer
+    // contains filtered or unexported fields
+}
+```
+Writer allows colors and styles to be specified. If the io.Writer
+is not a terminal capable of color, all attempts to set colors or
+styles are no-ops.
+
+
+
+
+
+
+
+
+
+### func NewWriter
+``` go
+func NewWriter(w io.Writer) *Writer
+```
+NewWriter returns a Writer that allows the caller to specify colors and
+styles. If the io.Writer is not a terminal capable of color, all attempts
+to set colors or styles are no-ops.
+
+
+
+
+### func (\*Writer) ClearStyle
+``` go
+func (w *Writer) ClearStyle(s Style)
+```
+ClearStyle clears the text style.
+
+
+
+### func (\*Writer) Reset
+``` go
+func (w *Writer) Reset()
+```
+Reset returns the default foreground and background colors with no styles.
+
+
+
+### func (\*Writer) SetBackground
+``` go
+func (w *Writer) SetBackground(c Color)
+```
+SetBackground sets the background color.
+
+
+
+### func (\*Writer) SetForeground
+``` go
+func (w *Writer) SetForeground(c Color)
+```
+SetForeground sets the foreground color.
+
+
+
+### func (\*Writer) SetStyle
+``` go
+func (w *Writer) SetStyle(s Style)
+```
+SetStyle sets the text style.
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
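As the README above describes, a Context carries the colors and styles and a Writer applies them only when the destination can render them. A sketch of the combination, assuming (as the README implies) that the Writer returned by NewWriter satisfies the unexported sgrWriter interface taken by Fprint/Fprintf; the message text is illustrative:

    package main

    import (
        "os"

        "github.com/juju/ansiterm"
    )

    func main() {
        w := ansiterm.NewWriter(os.Stdout) // colors become no-ops if stdout can't render them
        ansiterm.Foreground(ansiterm.BrightRed).Fprintf(w, "error: %s\n", "node unreachable")
        ansiterm.Styles(ansiterm.Bold, ansiterm.Underline).Fprint(w, "summary\n")
    }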
diff --git a/automation/vendor/github.com/juju/ansiterm/attribute.go b/automation/vendor/github.com/juju/ansiterm/attribute.go
new file mode 100644
index 0000000..f2daa48
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/attribute.go
@@ -0,0 +1,50 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+type attribute int
+
+const (
+	unknownAttribute attribute = -1
+	reset            attribute = 0
+)
+
+// sgr returns the escape sequence for the Select Graphic Rendition
+// for the attribute.
+func (a attribute) sgr() string {
+	if a < 0 {
+		return ""
+	}
+	return fmt.Sprintf("\x1b[%dm", a)
+}
+
+type attributes []attribute
+
+func (a attributes) Len() int           { return len(a) }
+func (a attributes) Less(i, j int) bool { return a[i] < a[j] }
+func (a attributes) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+// sgr returns the combined escape sequence for the Select Graphic Rendition
+// for the sequence of attributes.
+func (a attributes) sgr() string {
+	switch len(a) {
+	case 0:
+		return ""
+	case 1:
+		return a[0].sgr()
+	default:
+		sort.Sort(a)
+		var values []string
+		for _, attr := range a {
+			values = append(values, fmt.Sprint(attr))
+		}
+		return fmt.Sprintf("\x1b[%sm", strings.Join(values, ";"))
+	}
+}
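For a concrete trace of the combined form: Bold enables attribute 1 and a red foreground is attribute 31, so the merged sequence sorts the values and joins them. This snippet is assumed to run inside package ansiterm, since the types are unexported:

    // inside package ansiterm
    seq := attributes{31, 1}.sgr()
    // seq == "\x1b[1;31m": values sorted ascending, joined with ';'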
diff --git a/automation/vendor/github.com/juju/ansiterm/color.go b/automation/vendor/github.com/juju/ansiterm/color.go
new file mode 100644
index 0000000..0a97de3
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/color.go
@@ -0,0 +1,119 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+const (
+	_ Color = iota
+	Default
+	Black
+	Red
+	Green
+	Yellow
+	Blue
+	Magenta
+	Cyan
+	Gray
+	DarkGray
+	BrightRed
+	BrightGreen
+	BrightYellow
+	BrightBlue
+	BrightMagenta
+	BrightCyan
+	White
+)
+
+// Color represents one of the standard 16 ANSI colors.
+type Color int
+
+// String returns the name of the color.
+func (c Color) String() string {
+	switch c {
+	case Default:
+		return "default"
+	case Black:
+		return "black"
+	case Red:
+		return "red"
+	case Green:
+		return "green"
+	case Yellow:
+		return "yellow"
+	case Blue:
+		return "blue"
+	case Magenta:
+		return "magenta"
+	case Cyan:
+		return "cyan"
+	case Gray:
+		return "gray"
+	case DarkGray:
+		return "darkgray"
+	case BrightRed:
+		return "brightred"
+	case BrightGreen:
+		return "brightgreen"
+	case BrightYellow:
+		return "brightyellow"
+	case BrightBlue:
+		return "brightblue"
+	case BrightMagenta:
+		return "brightmagenta"
+	case BrightCyan:
+		return "brightcyan"
+	case White:
+		return "white"
+	default:
+		return ""
+	}
+}
+
+func (c Color) foreground() attribute {
+	switch c {
+	case Default:
+		return 39
+	case Black:
+		return 30
+	case Red:
+		return 31
+	case Green:
+		return 32
+	case Yellow:
+		return 33
+	case Blue:
+		return 34
+	case Magenta:
+		return 35
+	case Cyan:
+		return 36
+	case Gray:
+		return 37
+	case DarkGray:
+		return 90
+	case BrightRed:
+		return 91
+	case BrightGreen:
+		return 92
+	case BrightYellow:
+		return 93
+	case BrightBlue:
+		return 94
+	case BrightMagenta:
+		return 95
+	case BrightCyan:
+		return 96
+	case White:
+		return 97
+	default:
+		return unknownAttribute
+	}
+}
+
+func (c Color) background() attribute {
+	value := c.foreground()
+	if value != unknownAttribute {
+		return value + 10
+	}
+	return value
+}
diff --git a/automation/vendor/github.com/juju/ansiterm/context.go b/automation/vendor/github.com/juju/ansiterm/context.go
new file mode 100644
index 0000000..e61a867
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/context.go
@@ -0,0 +1,95 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"fmt"
+	"io"
+)
+
+// Context provides a way to specify both foreground and background colors
+// along with other styles and write text to a Writer with those colors and
+// styles.
+type Context struct {
+	Foreground Color
+	Background Color
+	Styles     []Style
+}
+
+// Foreground is a convenience function that creates a Context with the
+// specified color as the foreground color.
+func Foreground(color Color) *Context {
+	return &Context{Foreground: color}
+}
+
+// Background is a convenience function that creates a Context with the
+// specified color as the background color.
+func Background(color Color) *Context {
+	return &Context{Background: color}
+}
+
+// Styles is a convenience function that creates a Context with the
+// specified styles set.
+func Styles(styles ...Style) *Context {
+	return &Context{Styles: styles}
+}
+
+// SetForeground sets the foreground to the specified color.
+func (c *Context) SetForeground(color Color) *Context {
+	c.Foreground = color
+	return c
+}
+
+// SetBackground sets the background to the specified color.
+func (c *Context) SetBackground(color Color) *Context {
+	c.Background = color
+	return c
+}
+
+// SetStyle replaces the styles with the new values.
+func (c *Context) SetStyle(styles ...Style) *Context {
+	c.Styles = styles
+	return c
+}
+
+type sgrWriter interface {
+	io.Writer
+	writeSGR(value sgr)
+}
+
+// Fprintf will set the sgr values of the writer to the specified
+// foreground, background and styles, then write the formatted string,
+// then reset the writer.
+func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{}) {
+	w.writeSGR(c)
+	fmt.Fprintf(w, format, args...)
+	w.writeSGR(reset)
+}
+
+// Fprint will set the sgr values of the writer to the specified foreground,
+// background and styles, then formats using the default formats for its
+// operands and writes to w. Spaces are added between operands when neither is
+// a string. It returns the number of bytes written and any write error
+// encountered.
+func (c *Context) Fprint(w sgrWriter, args ...interface{}) {
+	w.writeSGR(c)
+	fmt.Fprint(w, args...)
+	w.writeSGR(reset)
+}
+
+func (c *Context) sgr() string {
+	var values attributes
+	if foreground := c.Foreground.foreground(); foreground != unknownAttribute {
+		values = append(values, foreground)
+	}
+	if background := c.Background.background(); background != unknownAttribute {
+		values = append(values, background)
+	}
+	for _, style := range c.Styles {
+		if value := style.enable(); value != unknownAttribute {
+			values = append(values, value)
+		}
+	}
+	return values.sgr()
+}
diff --git a/automation/vendor/github.com/juju/ansiterm/doc.go b/automation/vendor/github.com/juju/ansiterm/doc.go
new file mode 100644
index 0000000..7827007
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// Package ansiterm provides a Writer that writes out the ANSI escape
+// codes for color and styles.
+package ansiterm
diff --git a/automation/vendor/github.com/juju/ansiterm/style.go b/automation/vendor/github.com/juju/ansiterm/style.go
new file mode 100644
index 0000000..0be42da
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/style.go
@@ -0,0 +1,72 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+const (
+	_ Style = iota
+	Bold
+	Faint
+	Italic
+	Underline
+	Blink
+	Reverse
+	Strikethrough
+	Conceal
+)
+
+type Style int
+
+func (s Style) String() string {
+	switch s {
+	case Bold:
+		return "bold"
+	case Faint:
+		return "faint"
+	case Italic:
+		return "italic"
+	case Underline:
+		return "underline"
+	case Blink:
+		return "blink"
+	case Reverse:
+		return "reverse"
+	case Strikethrough:
+		return "strikethrough"
+	case Conceal:
+		return "conceal"
+	default:
+		return ""
+	}
+}
+
+func (s Style) enable() attribute {
+	switch s {
+	case Bold:
+		return 1
+	case Faint:
+		return 2
+	case Italic:
+		return 3
+	case Underline:
+		return 4
+	case Blink:
+		return 5
+	case Reverse:
+		return 7
+	case Conceal:
+		return 8
+	case Strikethrough:
+		return 9
+	default:
+		return unknownAttribute
+	}
+}
+
+func (s Style) disable() attribute {
+	value := s.enable()
+	if value != unknownAttribute {
+		return value + 20
+	}
+	return value
+}
diff --git a/automation/vendor/github.com/juju/ansiterm/tabwriter.go b/automation/vendor/github.com/juju/ansiterm/tabwriter.go
new file mode 100644
index 0000000..1ff6faa
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/tabwriter.go
@@ -0,0 +1,64 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"io"
+
+	"github.com/juju/ansiterm/tabwriter"
+)
+
+// NewTabWriter returns a writer that is able to set colors and styles.
+// The ansi escape codes are stripped for width calculations.
+func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter {
+	return new(TabWriter).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
+
+// TabWriter is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// It also allows setting of colors and styles over and above the standard
+// tabwriter package.
+type TabWriter struct {
+	Writer
+	tw tabwriter.Writer
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is considered
+// complete for formatting purposes.
+//
+func (t *TabWriter) Flush() error {
+	return t.tw.Flush()
+}
+
+// SetColumnAlignRight will mark a particular column as align right.
+// This is reset on the next flush.
+func (t *TabWriter) SetColumnAlignRight(column int) {
+	t.tw.SetColumnAlignRight(column)
+}
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+//	minwidth	minimal cell width including any padding
+//	tabwidth	width of tab characters (equivalent number of spaces)
+//	padding		padding added to a cell before computing its width
+//	padchar		ASCII char used for padding
+//			if padchar == '\t', the Writer will assume that the
+//			width of a '\t' in the formatted output is tabwidth,
+//			and cells are left-aligned independent of align_left
+//			(for correct-looking results, tabwidth must correspond
+//			to the tab width in the viewer displaying the result)
+//	flags		formatting control
+//
+func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter {
+	writer, colorCapable := colorEnabledWriter(output)
+	t.Writer = Writer{
+		Writer:  t.tw.Init(writer, minwidth, tabwidth, padding, padchar, flags),
+		noColor: !colorCapable,
+	}
+	return t
+}
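Usage mirrors the standard library's text/tabwriter, with the right-alignment extension layered on top; a sketch (the table contents are illustrative, and note that cells are tab-terminated, hence the trailing tabs):

    package main

    import (
        "fmt"
        "os"

        "github.com/juju/ansiterm"
    )

    func main() {
        tw := ansiterm.NewTabWriter(os.Stdout, 0, 1, 2, ' ', 0)
        fmt.Fprintln(tw, "NODE\tSTATUS\t")
        fmt.Fprintln(tw, "compute-1\tDeployed\t")
        fmt.Fprintln(tw, "compute-2\tProvisioning\t")
        tw.SetColumnAlignRight(1) // right-align the STATUS column; reset after Flush
        tw.Flush()
    }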
diff --git a/automation/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go b/automation/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go
new file mode 100644
index 0000000..98949d0
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go
@@ -0,0 +1,587 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is mostly a copy of the go standard library text/tabwriter. With
+// the additional stripping of ansi control characters for width calculations.
+
+// Package tabwriter implements a write filter (tabwriter.Writer) that
+// translates tabbed columns in input into properly aligned text.
+//
+// The package is using the Elastic Tabstops algorithm described at
+// http://nickgravgaard.com/elastictabstops/index.html.
+//
+package tabwriter
+
+import (
+	"bytes"
+	"io"
+	"unicode/utf8"
+
+	"github.com/lunixbochs/vtclean"
+)
+
+// ----------------------------------------------------------------------------
+// Filter implementation
+
+// A cell represents a segment of text terminated by tabs or line breaks.
+// The text itself is stored in a separate buffer; cell only describes the
+// segment's size in bytes, its width in runes, and whether it's an htab
+// ('\t') terminated cell.
+//
+type cell struct {
+	size  int  // cell size in bytes
+	width int  // cell width in runes
+	htab  bool // true if the cell is terminated by an htab ('\t')
+}
+
+// A Writer is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// The Writer treats incoming bytes as UTF-8 encoded text consisting
+// of cells terminated by (horizontal or vertical) tabs or line
+// breaks (newline or formfeed characters). Cells in adjacent lines
+// constitute a column. The Writer inserts padding as needed to
+// make all cells in a column have the same width, effectively
+// aligning the columns. It assumes that all characters have the
+// same width except for tabs for which a tabwidth must be specified.
+// Note that cells are tab-terminated, not tab-separated: trailing
+// non-tab text at the end of a line does not form a column cell.
+//
+// The Writer assumes that all Unicode code points have the same width;
+// this may not be true in some fonts.
+//
+// If DiscardEmptyColumns is set, empty columns that are terminated
+// entirely by vertical (or "soft") tabs are discarded. Columns
+// terminated by horizontal (or "hard") tabs are not affected by
+// this flag.
+//
+// If a Writer is configured to filter HTML, HTML tags and entities
+// are passed through. The widths of tags and entities are
+// assumed to be zero (tags) and one (entities) for formatting purposes.
+//
+// A segment of text may be escaped by bracketing it with Escape
+// characters. The tabwriter passes escaped text segments through
+// unchanged. In particular, it does not interpret any tabs or line
+// breaks within the segment. If the StripEscape flag is set, the
+// Escape characters are stripped from the output; otherwise they
+// are passed through as well. For the purpose of formatting, the
+// width of the escaped text is always computed excluding the Escape
+// characters.
+//
+// The formfeed character ('\f') acts like a newline but it also
+// terminates all columns in the current line (effectively calling
+// Flush). Cells in the next line start new columns. Unless found
+// inside an HTML tag or inside an escaped text segment, formfeed
+// characters appear as newlines in the output.
+//
+// The Writer must buffer input internally, because proper spacing
+// of one line may depend on the cells in future lines. Clients must
+// call Flush when done calling Write.
+//
+type Writer struct {
+	// configuration
+	output   io.Writer
+	minwidth int
+	tabwidth int
+	padding  int
+	padbytes [8]byte
+	flags    uint
+
+	// current state
+	buf       bytes.Buffer // collected text excluding tabs or line breaks
+	pos       int          // buffer position up to which cell.width of incomplete cell has been computed
+	cell      cell         // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
+	endChar   byte         // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
+	lines     [][]cell     // list of lines; each line is a list of cells
+	widths    []int        // list of column widths in runes - re-used during formatting
+	alignment map[int]uint // column alignment
+}
+
+func (b *Writer) addLine() { b.lines = append(b.lines, []cell{}) }
+
+// Reset the current state.
+func (b *Writer) reset() {
+	b.buf.Reset()
+	b.pos = 0
+	b.cell = cell{}
+	b.endChar = 0
+	b.lines = b.lines[0:0]
+	b.widths = b.widths[0:0]
+	b.alignment = make(map[int]uint)
+	b.addLine()
+}
+
+// Internal representation (current state):
+//
+// - all text written is appended to buf; tabs and line breaks are stripped away
+// - at any given time there is a (possibly empty) incomplete cell at the end
+//   (the cell starts after a tab or line break)
+// - cell.size is the number of bytes belonging to the cell so far
+// - cell.width is text width in runes of that cell from the start of the cell to
+//   position pos; html tags and entities are excluded from this width if html
+//   filtering is enabled
+// - the sizes and widths of processed text are kept in the lines list
+//   which contains a list of cells for each line
+// - the widths list is a temporary list with current widths used during
+//   formatting; it is kept in Writer because it's re-used
+//
+//                    |<---------- size ---------->|
+//                    |                            |
+//                    |<- width ->|<- ignored ->|  |
+//                    |           |             |  |
+// [---processed---tab------------<tag>...</tag>...]
+// ^                  ^                         ^
+// |                  |                         |
+// buf                start of incomplete cell  pos
+
+// Formatting can be controlled with these flags.
+const (
+	// Ignore html tags and treat entities (starting with '&'
+	// and ending in ';') as single characters (width = 1).
+	FilterHTML uint = 1 << iota
+
+	// Strip Escape characters bracketing escaped text segments
+	// instead of passing them through unchanged with the text.
+	StripEscape
+
+	// Force right-alignment of cell content.
+	// Default is left-alignment.
+	AlignRight
+
+	// Handle empty columns as if they were not present in
+	// the input in the first place.
+	DiscardEmptyColumns
+
+	// Always use tabs for indentation columns (i.e., padding of
+	// leading empty cells on the left) independent of padchar.
+	TabIndent
+
+	// Print a vertical bar ('|') between columns (after formatting).
+	// Discarded columns appear as zero-width columns ("||").
+	Debug
+)
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+//	minwidth	minimal cell width including any padding
+//	tabwidth	width of tab characters (equivalent number of spaces)
+//	padding		padding added to a cell before computing its width
+//	padchar		ASCII char used for padding
+//			if padchar == '\t', the Writer will assume that the
+//			width of a '\t' in the formatted output is tabwidth,
+//			and cells are left-aligned independent of align_left
+//			(for correct-looking results, tabwidth must correspond
+//			to the tab width in the viewer displaying the result)
+//	flags		formatting control
+//
+func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+	if minwidth < 0 || tabwidth < 0 || padding < 0 {
+		panic("negative minwidth, tabwidth, or padding")
+	}
+	b.output = output
+	b.minwidth = minwidth
+	b.tabwidth = tabwidth
+	b.padding = padding
+	for i := range b.padbytes {
+		b.padbytes[i] = padchar
+	}
+	if padchar == '\t' {
+		// tab padding enforces left-alignment
+		flags &^= AlignRight
+	}
+	b.flags = flags
+
+	b.reset()
+
+	return b
+}
+
+// debugging support (keep code around)
+func (b *Writer) dump() {
+	pos := 0
+	for i, line := range b.lines {
+		print("(", i, ") ")
+		for _, c := range line {
+			print("[", string(b.buf.Bytes()[pos:pos+c.size]), "]")
+			pos += c.size
+		}
+		print("\n")
+	}
+	print("\n")
+}
+
+// local error wrapper so we can distinguish errors we want to return
+// as errors from genuine panics (which we don't want to return as errors)
+type osError struct {
+	err error
+}
+
+func (b *Writer) write0(buf []byte) {
+	n, err := b.output.Write(buf)
+	if n != len(buf) && err == nil {
+		err = io.ErrShortWrite
+	}
+	if err != nil {
+		panic(osError{err})
+	}
+}
+
+func (b *Writer) writeN(src []byte, n int) {
+	for n > len(src) {
+		b.write0(src)
+		n -= len(src)
+	}
+	b.write0(src[0:n])
+}
+
+var (
+	newline = []byte{'\n'}
+	tabs    = []byte("\t\t\t\t\t\t\t\t")
+)
+
+func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
+	if b.padbytes[0] == '\t' || useTabs {
+		// padding is done with tabs
+		if b.tabwidth == 0 {
+			return // tabs have no width - can't do any padding
+		}
+		// make cellw the smallest multiple of b.tabwidth
+		cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
+		n := cellw - textw // amount of padding
+		if n < 0 {
+			panic("internal error")
+		}
+		b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
+		return
+	}
+
+	// padding is done with non-tab characters
+	b.writeN(b.padbytes[0:], cellw-textw)
+}
+
+var vbar = []byte{'|'}
+
+func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
+	pos = pos0
+	for i := line0; i < line1; i++ {
+		line := b.lines[i]
+
+		// if TabIndent is set, use tabs to pad leading empty cells
+		useTabs := b.flags&TabIndent != 0
+
+		for j, c := range line {
+			if j > 0 && b.flags&Debug != 0 {
+				// indicate column break
+				b.write0(vbar)
+			}
+
+			if c.size == 0 {
+				// empty cell
+				if j < len(b.widths) {
+					b.writePadding(c.width, b.widths[j], useTabs)
+				}
+			} else {
+				// non-empty cell
+				useTabs = false
+				alignColumnRight := b.alignment[j] == AlignRight
+				if (b.flags&AlignRight == 0) && !alignColumnRight { // align left
+					b.write0(b.buf.Bytes()[pos : pos+c.size])
+					pos += c.size
+					if j < len(b.widths) {
+						b.writePadding(c.width, b.widths[j], false)
+					}
+				} else if alignColumnRight && j < len(b.widths) {
+					// just this column
+					internalSize := b.widths[j] - b.padding
+					if j < len(b.widths) {
+						b.writePadding(c.width, internalSize, false)
+					}
+					b.write0(b.buf.Bytes()[pos : pos+c.size])
+					if b.padding > 0 {
+						b.writePadding(0, b.padding, false)
+					}
+					pos += c.size
+				} else { // align right
+					if j < len(b.widths) {
+						b.writePadding(c.width, b.widths[j], false)
+					}
+					b.write0(b.buf.Bytes()[pos : pos+c.size])
+					pos += c.size
+				}
+			}
+		}
+
+		if i+1 == len(b.lines) {
+			// last buffered line - we don't have a newline, so just write
+			// any outstanding buffered data
+			b.write0(b.buf.Bytes()[pos : pos+b.cell.size])
+			pos += b.cell.size
+		} else {
+			// not the last line - write newline
+			b.write0(newline)
+		}
+	}
+	return
+}
+
+// Format the text between line0 and line1 (excluding line1); pos
+// is the buffer position corresponding to the beginning of line0.
+// Returns the buffer position corresponding to the beginning of
+// line1 and an error, if any.
+//
+func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
+	pos = pos0
+	column := len(b.widths)
+	for this := line0; this < line1; this++ {
+		line := b.lines[this]
+
+		if column < len(line)-1 {
+			// cell exists in this column => this line
+			// has more cells than the previous line
+			// (the last cell per line is ignored because cells are
+			// tab-terminated; the last cell per line describes the
+			// text before the newline/formfeed and does not belong
+			// to a column)
+
+			// print unprinted lines until beginning of block
+			pos = b.writeLines(pos, line0, this)
+			line0 = this
+
+			// column block begin
+			width := b.minwidth // minimal column width
+			discardable := true // true if all cells in this column are empty and "soft"
+			for ; this < line1; this++ {
+				line = b.lines[this]
+				if column < len(line)-1 {
+					// cell exists in this column
+					c := line[column]
+					// update width
+					if w := c.width + b.padding; w > width {
+						width = w
+					}
+					// update discardable
+					if c.width > 0 || c.htab {
+						discardable = false
+					}
+				} else {
+					break
+				}
+			}
+			// column block end
+
+			// discard empty columns if necessary
+			if discardable && b.flags&DiscardEmptyColumns != 0 {
+				width = 0
+			}
+
+			// format and print all columns to the right of this column
+			// (we know the widths of this column and all columns to the left)
+			b.widths = append(b.widths, width) // push width
+			pos = b.format(pos, line0, this)
+			b.widths = b.widths[0 : len(b.widths)-1] // pop width
+			line0 = this
+		}
+	}
+
+	// print unprinted lines until end
+	return b.writeLines(pos, line0, line1)
+}
+
+// Append text to current cell.
+func (b *Writer) append(text []byte) {
+	b.buf.Write(text)
+	b.cell.size += len(text)
+}
+
+// Update the cell width.
+func (b *Writer) updateWidth() {
+	// ---- Changes here -----
+	newChars := b.buf.Bytes()[b.pos:b.buf.Len()]
+	cleaned := vtclean.Clean(string(newChars), false) // false to strip colors
+	b.cell.width += utf8.RuneCount([]byte(cleaned))
+	// --- end of changes ----
+	b.pos = b.buf.Len()
+}
+
+// To escape a text segment, bracket it with Escape characters.
+// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
+// does not terminate a cell and constitutes a single character of
+// width one for formatting purposes.
+//
+// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
+//
+const Escape = '\xff'
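+
+// Usage sketch (illustrative; assumes fmt and a *Writer w): bracketing
+// cell text with Escape makes an embedded tab part of the cell:
+//
+//	fmt.Fprintf(w, "%c%s%c\tok\n", Escape, "a\tb", Escape)
+//
+// Here "a\tb" is treated as a single cell of width three for formatting
+// purposes, rather than being split into two cells.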
+
+// Start escaped mode.
+func (b *Writer) startEscape(ch byte) {
+	switch ch {
+	case Escape:
+		b.endChar = Escape
+	case '<':
+		b.endChar = '>'
+	case '&':
+		b.endChar = ';'
+	}
+}
+
+// Terminate escaped mode. If the escaped text was an HTML tag, its width
+// is assumed to be zero for formatting purposes; if it was an HTML entity,
+// its width is assumed to be one. In all other cases, the width is the
+// unicode width of the text.
+//
+func (b *Writer) endEscape() {
+	switch b.endChar {
+	case Escape:
+		b.updateWidth()
+		if b.flags&StripEscape == 0 {
+			b.cell.width -= 2 // don't count the Escape chars
+		}
+	case '>': // tag of zero width
+	case ';':
+		b.cell.width++ // entity, count as one rune
+	}
+	b.pos = b.buf.Len()
+	b.endChar = 0
+}
+
+// Terminate the current cell by adding it to the list of cells of the
+// current line. Returns the number of cells in that line.
+//
+func (b *Writer) terminateCell(htab bool) int {
+	b.cell.htab = htab
+	line := &b.lines[len(b.lines)-1]
+	*line = append(*line, b.cell)
+	b.cell = cell{}
+	return len(*line)
+}
+
+func handlePanic(err *error, op string) {
+	if e := recover(); e != nil {
+		if nerr, ok := e.(osError); ok {
+			*err = nerr.err
+			return
+		}
+		panic("tabwriter: panic during " + op)
+	}
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is considered
+// complete for formatting purposes.
+//
+func (b *Writer) Flush() (err error) {
+	defer b.reset() // even in the presence of errors
+	defer handlePanic(&err, "Flush")
+
+	// add current cell if not empty
+	if b.cell.size > 0 {
+		if b.endChar != 0 {
+			// inside escape - terminate it even if incomplete
+			b.endEscape()
+		}
+		b.terminateCell(false)
+	}
+
+	// format contents of buffer
+	b.format(0, 0, len(b.lines))
+
+	return
+}
+
+var hbar = []byte("---\n")
+
+// SetColumnAlignRight marks a particular column as right-aligned.
+// The alignment is reset on the next flush.
+func (b *Writer) SetColumnAlignRight(column int) {
+	b.alignment[column] = AlignRight
+}
+
+// Write writes buf to the writer b.
+// The only errors returned are ones encountered
+// while writing to the underlying output stream.
+//
+func (b *Writer) Write(buf []byte) (n int, err error) {
+	defer handlePanic(&err, "Write")
+
+	// split text into cells
+	n = 0
+	for i, ch := range buf {
+		if b.endChar == 0 {
+			// outside escape
+			switch ch {
+			case '\t', '\v', '\n', '\f':
+				// end of cell
+				b.append(buf[n:i])
+				b.updateWidth()
+				n = i + 1 // ch consumed
+				ncells := b.terminateCell(ch == '\t')
+				if ch == '\n' || ch == '\f' {
+					// terminate line
+					b.addLine()
+					if ch == '\f' || ncells == 1 {
+					// A '\f' always forces a flush. Otherwise, if the previous
+					// line has only one cell, it has no impact on the
+					// formatting of the following lines (the last cell per
+					// line is ignored by format()), so we can flush the
+					// Writer contents.
+						if err = b.Flush(); err != nil {
+							return
+						}
+						if ch == '\f' && b.flags&Debug != 0 {
+							// indicate section break
+							b.write0(hbar)
+						}
+					}
+				}
+
+			case Escape:
+				// start of escaped sequence
+				b.append(buf[n:i])
+				b.updateWidth()
+				n = i
+				if b.flags&StripEscape != 0 {
+					n++ // strip Escape
+				}
+				b.startEscape(Escape)
+
+			case '<', '&':
+				// possibly an html tag/entity
+				if b.flags&FilterHTML != 0 {
+					// begin of tag/entity
+					b.append(buf[n:i])
+					b.updateWidth()
+					n = i
+					b.startEscape(ch)
+				}
+			}
+
+		} else {
+			// inside escape
+			if ch == b.endChar {
+				// end of tag/entity
+				j := i + 1
+				if ch == Escape && b.flags&StripEscape != 0 {
+					j = i // strip Escape
+				}
+				b.append(buf[n:j])
+				n = i + 1 // ch consumed
+				b.endEscape()
+			}
+		}
+	}
+
+	// append leftover text
+	b.append(buf[n:])
+	n = len(buf)
+	return
+}
+
+// NewWriter allocates and initializes a new tabwriter.Writer.
+// The parameters are the same as for the Init function.
+//
+func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+	return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
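+
+// Usage sketch (illustrative; assumes this package is imported as
+// "tabwriter" alongside fmt and os):
+//
+//	w := tabwriter.NewWriter(os.Stdout, 0, 8, 1, ' ', 0)
+//	fmt.Fprintln(w, "name\tcount")
+//	fmt.Fprintln(w, "alpha\t1")
+//	w.Flush() // both rows come out with aligned, space-padded columns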
diff --git a/automation/vendor/github.com/juju/ansiterm/terminal.go b/automation/vendor/github.com/juju/ansiterm/terminal.go
new file mode 100644
index 0000000..96fd11c
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/terminal.go
@@ -0,0 +1,32 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"io"
+	"os"
+
+	"github.com/mattn/go-colorable"
+	"github.com/mattn/go-isatty"
+)
+
+// colorEnabledWriter returns a writer that can handle the ansi color codes
+// and true if the writer passed in is a terminal capable of color. If the
+// TERM environment variable is set to "dumb", the terminal is not considered
+// color capable.
+func colorEnabledWriter(w io.Writer) (io.Writer, bool) {
+	f, ok := w.(*os.File)
+	if !ok {
+		return w, false
+	}
+	// Check the TERM environment variable specifically
+	// to check for "dumb" terminals.
+	if os.Getenv("TERM") == "dumb" {
+		return w, false
+	}
+	if !isatty.IsTerminal(f.Fd()) {
+		return w, false
+	}
+	return colorable.NewColorable(f), true
+}
diff --git a/automation/vendor/github.com/juju/ansiterm/writer.go b/automation/vendor/github.com/juju/ansiterm/writer.go
new file mode 100644
index 0000000..32437bb
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/writer.go
@@ -0,0 +1,74 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"fmt"
+	"io"
+)
+
+// Writer allows colors and styles to be specified. If the io.Writer
+// is not a terminal capable of color, all attempts to set colors or
+// styles are no-ops.
+type Writer struct {
+	io.Writer
+
+	noColor bool
+}
+
+// NewWriter returns a Writer that allows the caller to specify colors and
+// styles. If the io.Writer is not a terminal capable of color, all attempts
+// to set colors or styles are no-ops.
+func NewWriter(w io.Writer) *Writer {
+	writer, colorCapable := colorEnabledWriter(w)
+	return &Writer{
+		Writer:  writer,
+		noColor: !colorCapable,
+	}
+}
+
+// SetColorCapable forces the writer either to write ANSI color escape
+// codes if capable is true, or to suppress them if capable is false.
+func (w *Writer) SetColorCapable(capable bool) {
+	w.noColor = !capable
+}
+
+// SetForeground sets the foreground color.
+func (w *Writer) SetForeground(c Color) {
+	w.writeSGR(c.foreground())
+}
+
+// SetBackground sets the background color.
+func (w *Writer) SetBackground(c Color) {
+	w.writeSGR(c.background())
+}
+
+// SetStyle sets the text style.
+func (w *Writer) SetStyle(s Style) {
+	w.writeSGR(s.enable())
+}
+
+// ClearStyle clears the text style.
+func (w *Writer) ClearStyle(s Style) {
+	w.writeSGR(s.disable())
+}
+
+// Reset restores the default foreground and background colors with no styles.
+func (w *Writer) Reset() {
+	w.writeSGR(reset)
+}
+
+type sgr interface {
+	// sgr returns the combined escape sequence for the Select Graphic Rendition.
+	sgr() string
+}
+
+// writeSGR takes the appropriate integer SGR parameters
+// and writes out the ANSI escape code.
+func (w *Writer) writeSGR(value sgr) {
+	if w.noColor {
+		return
+	}
+	fmt.Fprint(w, value.sgr())
+}
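+
+// Usage sketch (illustrative; assumes a Red color constant defined
+// elsewhere in this package, plus fmt and os):
+//
+//	w := NewWriter(os.Stdout)
+//	w.SetForeground(Red)
+//	fmt.Fprint(w, "failed")
+//	w.Reset() // back to the terminal defaults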
diff --git a/automation/vendor/github.com/juju/errors/LICENSE b/automation/vendor/github.com/juju/errors/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/errors/Makefile b/automation/vendor/github.com/juju/errors/Makefile
new file mode 100644
index 0000000..ab7c2e6
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/Makefile
@@ -0,0 +1,11 @@
+default: check
+
+check:
+	go test && go test -compiler gccgo
+
+docs:
+	godoc2md github.com/juju/errors > README.md
+	sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/errors?status.svg)](https://godoc.org/github.com/juju/errors)|' README.md 
+
+
+.PHONY: default check docs
diff --git a/automation/vendor/github.com/juju/errors/README.md b/automation/vendor/github.com/juju/errors/README.md
new file mode 100644
index 0000000..ee24891
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/README.md
@@ -0,0 +1,536 @@
+
+# errors
+    import "github.com/juju/errors"
+
+[![GoDoc](https://godoc.org/github.com/juju/errors?status.svg)](https://godoc.org/github.com/juju/errors)
+
+The juju/errors package provides an easy way to annotate errors without losing
+the original error context.
+
+The exported `New` and `Errorf` functions are designed to replace the
+`errors.New` and `fmt.Errorf` functions respectively. The same underlying
+error is there, but the package also records the location at which the error
+was created.
+
+A primary use case for this library is to add extra context any time an
+error is returned from a function.
+
+
+	    if err := SomeFunc(); err != nil {
+		    return err
+		}
+
+This instead becomes:
+
+
+	    if err := SomeFunc(); err != nil {
+		    return errors.Trace(err)
+		}
+
+which just records the file and line number of the Trace call, or
+
+
+	    if err := SomeFunc(); err != nil {
+		    return errors.Annotate(err, "more context")
+		}
+
+which also adds an annotation to the error.
+
+When you want to check to see if an error is of a particular type, a helper
+function is normally exported by the package that returned the error, like the
+`os` package does.  The underlying cause of the error is available using the
+`Cause` function.
+
+
+	os.IsNotExist(errors.Cause(err))
+
+The result of the `Error()` call on an annotated error is the annotations joined
+with colons, then the result of the `Error()` method for the underlying error
+that was the cause.
+
+
+	err := errors.Errorf("original")
+	err = errors.Annotatef(err, "context")
+	err = errors.Annotatef(err, "more context")
+	err.Error() -> "more context: context: original"
+
+Obviously recording the file, line, and function is not very useful if you
+cannot get them back out again.
+
+
+	errors.ErrorStack(err)
+
+will return something like:
+
+
+	first error
+	github.com/juju/errors/annotation_test.go:193:
+	github.com/juju/errors/annotation_test.go:194: annotation
+	github.com/juju/errors/annotation_test.go:195:
+	github.com/juju/errors/annotation_test.go:196: more context
+	github.com/juju/errors/annotation_test.go:197:
+
+The first error was generated by an external system, so there was no location
+associated. The second, fourth, and last lines were generated with Trace calls,
+and the other two through Annotate.
+
+Sometimes when responding to an error you want to return a more specific error
+for the situation.
+
+
+	    if err := FindField(field); err != nil {
+		    return errors.Wrap(err, errors.NotFoundf(field))
+		}
+
+This returns an error where the complete error stack is still available, and
+`errors.Cause()` will return the `NotFound` error.
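+
+Putting these together, an illustrative sketch (not from the package docs)
+of raising, tracing, and then testing a typed error:
+
+``` go
+err := errors.Trace(errors.NotFoundf("config %q", name))
+if errors.IsNotFound(err) { // true: IsNotFound checks errors.Cause(err)
+    // handle the missing config
+}
+```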
+
+## func AlreadyExistsf
+``` go
+func AlreadyExistsf(format string, args ...interface{}) error
+```
+AlreadyExistsf returns an error which satisfies IsAlreadyExists().
+
+
+## func Annotate
+``` go
+func Annotate(other error, message string) error
+```
+Annotate is used to add extra context to an existing error. The location of
+the Annotate call is recorded with the annotations. The file, line and
+function are also recorded.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    return errors.Annotate(err, "failed to frombulate")
+	}
+
+
+## func Annotatef
+``` go
+func Annotatef(other error, format string, args ...interface{}) error
+```
+Annotatef is used to add extra context to an existing error. The location of
+the Annotate call is recorded with the annotations. The file, line and
+function are also recorded.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    return errors.Annotatef(err, "failed to frombulate the %s", arg)
+	}
+
+
+## func Cause
+``` go
+func Cause(err error) error
+```
+Cause returns the cause of the given error.  This will be either the
+original error, or the result of a Wrap or Mask call.
+
+Cause is the usual way to diagnose errors that may have been wrapped by
+the other errors functions.
+
+
+## func DeferredAnnotatef
+``` go
+func DeferredAnnotatef(err *error, format string, args ...interface{})
+```
+DeferredAnnotatef annotates the given error (when it is not nil) with the given
+format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
+does nothing. This function is used in a defer statement in order to annotate any
+resulting error with the same message.
+
+For example:
+
+
+	defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
+
+
+## func Details
+``` go
+func Details(err error) string
+```
+Details returns information about the stack of errors wrapped by err, in
+the format:
+
+
+	[{filename:99: error one} {otherfile:55: cause of error one}]
+
+This is a terse alternative to ErrorStack as it returns a single line.
+
+
+## func ErrorStack
+``` go
+func ErrorStack(err error) string
+```
+ErrorStack returns a string representation of the annotated error. If the
+error passed as the parameter is not an annotated error, the result is
+simply the result of the Error() method on that error.
+
+If the error is an annotated error, a multi-line string is returned where
+each line represents one entry in the annotation stack. The full filename
+from the call stack is used in the output.
+
+
+	first error
+	github.com/juju/errors/annotation_test.go:193:
+	github.com/juju/errors/annotation_test.go:194: annotation
+	github.com/juju/errors/annotation_test.go:195:
+	github.com/juju/errors/annotation_test.go:196: more context
+	github.com/juju/errors/annotation_test.go:197:
+
+
+## func Errorf
+``` go
+func Errorf(format string, args ...interface{}) error
+```
+Errorf creates a new annotated error and records the location at which the
+error is created.  It is a drop-in replacement for fmt.Errorf.
+
+For example:
+
+
+	return errors.Errorf("validation failed: %s", message)
+
+
+## func IsAlreadyExists
+``` go
+func IsAlreadyExists(err error) bool
+```
+IsAlreadyExists reports whether the error was created with
+AlreadyExistsf() or NewAlreadyExists().
+
+
+## func IsNotFound
+``` go
+func IsNotFound(err error) bool
+```
+IsNotFound reports whether err was created with NotFoundf() or
+NewNotFound().
+
+
+## func IsNotImplemented
+``` go
+func IsNotImplemented(err error) bool
+```
+IsNotImplemented reports whether err was created with
+NotImplementedf() or NewNotImplemented().
+
+
+## func IsNotSupported
+``` go
+func IsNotSupported(err error) bool
+```
+IsNotSupported reports whether the error was created with
+NotSupportedf() or NewNotSupported().
+
+
+## func IsNotValid
+``` go
+func IsNotValid(err error) bool
+```
+IsNotValid reports whether the error was created with NotValidf() or
+NewNotValid().
+
+
+## func IsUnauthorized
+``` go
+func IsUnauthorized(err error) bool
+```
+IsUnauthorized reports whether err was created with Unauthorizedf() or
+NewUnauthorized().
+
+
+## func Mask
+``` go
+func Mask(other error) error
+```
+Mask hides the underlying error type, and records the location of the masking.
+
+
+## func Maskf
+``` go
+func Maskf(other error, format string, args ...interface{}) error
+```
+Maskf masks the given error with the given format string and arguments (like
+fmt.Sprintf), returning a new error that maintains the error stack, but
+hides the underlying error type.  The error string still contains the full
+annotations. If you want to hide the annotations, call Wrap.
+
+
+## func New
+``` go
+func New(message string) error
+```
+New is a drop-in replacement for the standard library errors package that
+records the location at which the error is created.
+
+For example:
+
+
+	return errors.New("validation failed")
+
+
+## func NewAlreadyExists
+``` go
+func NewAlreadyExists(err error, msg string) error
+```
+NewAlreadyExists returns an error which wraps err and satisfies
+IsAlreadyExists().
+
+
+## func NewNotFound
+``` go
+func NewNotFound(err error, msg string) error
+```
+NewNotFound returns an error which wraps err that satisfies
+IsNotFound().
+
+
+## func NewNotImplemented
+``` go
+func NewNotImplemented(err error, msg string) error
+```
+NewNotImplemented returns an error which wraps err and satisfies
+IsNotImplemented().
+
+
+## func NewNotSupported
+``` go
+func NewNotSupported(err error, msg string) error
+```
+NewNotSupported returns an error which wraps err and satisfies
+IsNotSupported().
+
+
+## func NewNotValid
+``` go
+func NewNotValid(err error, msg string) error
+```
+NewNotValid returns an error which wraps err and satisfies IsNotValid().
+
+
+## func NewUnauthorized
+``` go
+func NewUnauthorized(err error, msg string) error
+```
+NewUnauthorized returns an error which wraps err and satisfies
+IsUnauthorized().
+
+
+## func NotFoundf
+``` go
+func NotFoundf(format string, args ...interface{}) error
+```
+NotFoundf returns an error which satisfies IsNotFound().
+
+
+## func NotImplementedf
+``` go
+func NotImplementedf(format string, args ...interface{}) error
+```
+NotImplementedf returns an error which satisfies IsNotImplemented().
+
+
+## func NotSupportedf
+``` go
+func NotSupportedf(format string, args ...interface{}) error
+```
+NotSupportedf returns an error which satisfies IsNotSupported().
+
+
+## func NotValidf
+``` go
+func NotValidf(format string, args ...interface{}) error
+```
+NotValidf returns an error which satisfies IsNotValid().
+
+
+## func Trace
+``` go
+func Trace(other error) error
+```
+Trace adds the location of the Trace call to the stack.  The Cause of the
+resulting error is the same as the error parameter.  If the other error is
+nil, the result will be nil.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    return errors.Trace(err)
+	}
+
+
+## func Unauthorizedf
+``` go
+func Unauthorizedf(format string, args ...interface{}) error
+```
+Unauthorizedf returns an error which satisfies IsUnauthorized().
+
+
+## func Wrap
+``` go
+func Wrap(other, newDescriptive error) error
+```
+Wrap changes the Cause of the error. The location of the Wrap call is also
+stored in the error stack.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    newErr := &packageError{"more context", private_value}
+	    return errors.Wrap(err, newErr)
+	}
+
+
+## func Wrapf
+``` go
+func Wrapf(other, newDescriptive error, format string, args ...interface{}) error
+```
+Wrapf changes the Cause of the error, and adds an annotation. The location
+of the Wrap call is also stored in the error stack.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
+	}
+
+
+
+## type Err
+``` go
+type Err struct {
+    // contains filtered or unexported fields
+}
+```
+Err holds a description of an error along with information about
+where the error was created.
+
+It may be embedded in custom error types to add extra information that
+this errors package can understand.
+
+
+### func NewErr
+``` go
+func NewErr(format string, args ...interface{}) Err
+```
+NewErr is used to return an Err for the purpose of embedding in other
+structures.  The location is not specified, and needs to be set with a call
+to SetLocation.
+
+For example:
+
+
+	type FooError struct {
+	    errors.Err
+	    code int
+	}
+	
+	func NewFooError(code int) error {
+	    err := &FooError{errors.NewErr("foo"), code}
+	    err.SetLocation(1)
+	    return err
+	}
+
+
+
+
+### func (\*Err) Cause
+``` go
+func (e *Err) Cause() error
+```
+The Cause of an error is the most recent error in the error stack that
+meets one of these criteria: the original error that was raised; the new
+error that was passed into the Wrap function; the most recently masked
+error; or nil if the error itself is considered the Cause.  Normally this
+method is not invoked directly, but instead through the Cause stand alone
+function.
+
+
+
+### func (\*Err) Error
+``` go
+func (e *Err) Error() string
+```
+Error implements error.Error.
+
+
+
+### func (\*Err) Location
+``` go
+func (e *Err) Location() (filename string, line int)
+```
+Location is the file and line of where the error was most recently
+created or annotated.
+
+
+
+### func (\*Err) Message
+``` go
+func (e *Err) Message() string
+```
+Message returns the message stored with the most recent location. This is
+the empty string if the most recent call was Trace, or the message stored
+with Annotate or Mask.
+
+
+
+### func (\*Err) SetLocation
+``` go
+func (e *Err) SetLocation(callDepth int)
+```
+SetLocation records the source location of the error at callDepth stack
+frames above the call.
+
+
+
+### func (\*Err) StackTrace
+``` go
+func (e *Err) StackTrace() []string
+```
+StackTrace returns one string for each location recorded in the stack of
+errors. The first value is the originating error, with a line for each
+other annotation or tracing of the error.
+
+
+
+### func (\*Err) Underlying
+``` go
+func (e *Err) Underlying() error
+```
+Underlying returns the previous error in the error stack, if any. A client
+should not ever really call this method.  It is used to build the error
+stack and should not be introspected by client calls.  Or more
+specifically, clients should not depend on anything but the `Cause` of an
+error.
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
diff --git a/automation/vendor/github.com/juju/errors/doc.go b/automation/vendor/github.com/juju/errors/doc.go
new file mode 100644
index 0000000..35b119a
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/doc.go
@@ -0,0 +1,81 @@
+// Copyright 2013, 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+/*
+[godoc-link-here]
+
+The juju/errors package provides an easy way to annotate errors without losing
+the original error context.
+
+The exported `New` and `Errorf` functions are designed to replace the
+`errors.New` and `fmt.Errorf` functions respectively. The same underlying
+error is there, but the package also records the location at which the error
+was created.
+
+A primary use case for this library is to add extra context any time an
+error is returned from a function.
+
+    if err := SomeFunc(); err != nil {
+	    return err
+	}
+
+This instead becomes:
+
+    if err := SomeFunc(); err != nil {
+	    return errors.Trace(err)
+	}
+
+which just records the file and line number of the Trace call, or
+
+    if err := SomeFunc(); err != nil {
+	    return errors.Annotate(err, "more context")
+	}
+
+which also adds an annotation to the error.
+
+When you want to check to see if an error is of a particular type, a helper
+function is normally exported by the package that returned the error, like the
+`os` package does.  The underlying cause of the error is available using the
+`Cause` function.
+
+	os.IsNotExist(errors.Cause(err))
+
+The result of the `Error()` call on an annotated error is the annotations joined
+with colons, then the result of the `Error()` method for the underlying error
+that was the cause.
+
+	err := errors.Errorf("original")
+	err = errors.Annotatef(err, "context")
+	err = errors.Annotatef(err, "more context")
+	err.Error() -> "more context: context: original"
+
+Obviously recording the file, line, and function is not very useful if you
+cannot get them back out again.
+
+	errors.ErrorStack(err)
+
+will return something like:
+
+	first error
+	github.com/juju/errors/annotation_test.go:193:
+	github.com/juju/errors/annotation_test.go:194: annotation
+	github.com/juju/errors/annotation_test.go:195:
+	github.com/juju/errors/annotation_test.go:196: more context
+	github.com/juju/errors/annotation_test.go:197:
+
+The first error was generated by an external system, so there was no location
+associated. The second, fourth, and last lines were generated with Trace calls,
+and the other two through Annotate.
+
+Sometimes when responding to an error you want to return a more specific error
+for the situation.
+
+    if err := FindField(field); err != nil {
+	    return errors.Wrap(err, errors.NotFoundf(field))
+	}
+
+This returns an error where the complete error stack is still available, and
+`errors.Cause()` will return the `NotFound` error.
+
+*/
+package errors
diff --git a/automation/vendor/github.com/juju/errors/error.go b/automation/vendor/github.com/juju/errors/error.go
new file mode 100644
index 0000000..8c51c45
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/error.go
@@ -0,0 +1,145 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+)
+
+// Err holds a description of an error along with information about
+// where the error was created.
+//
+// It may be embedded in custom error types to add extra information that
+// this errors package can understand.
+type Err struct {
+	// message holds an annotation of the error.
+	message string
+
+	// cause holds the cause of the error as returned
+	// by the Cause method.
+	cause error
+
+	// previous holds the previous error in the error stack, if any.
+	previous error
+
+	// file and line hold the source code location where the error was
+	// created.
+	file string
+	line int
+}
+
+// NewErr is used to return an Err for the purpose of embedding in other
+// structures.  The location is not specified, and needs to be set with a call
+// to SetLocation.
+//
+// For example:
+//     type FooError struct {
+//         errors.Err
+//         code int
+//     }
+//
+//     func NewFooError(code int) error {
+//         err := &FooError{errors.NewErr("foo"), code}
+//         err.SetLocation(1)
+//         return err
+//     }
+func NewErr(format string, args ...interface{}) Err {
+	return Err{
+		message: fmt.Sprintf(format, args...),
+	}
+}
+
+// NewErrWithCause is used to return an Err whose cause is set from another error, for the purpose of embedding in other
+// structures. The location is not specified, and needs to be set with a call
+// to SetLocation.
+//
+// For example:
+//     type FooError struct {
+//         errors.Err
+//         code int
+//     }
+//
+//     func (e *FooError) Annotate(format string, args ...interface{}) error {
+//         err := &FooError{errors.NewErrWithCause(e.Err, format, args...), e.code}
+//         err.SetLocation(1)
+//         return err
+//     }
+func NewErrWithCause(other error, format string, args ...interface{}) Err {
+	return Err{
+		message:  fmt.Sprintf(format, args...),
+		cause:    Cause(other),
+		previous: other,
+	}
+}
+
+// Location is the file and line of where the error was most recently
+// created or annotated.
+func (e *Err) Location() (filename string, line int) {
+	return e.file, e.line
+}
+
+// Underlying returns the previous error in the error stack, if any. A client
+// should not ever really call this method.  It is used to build the error
+// stack and should not be introspected by client calls.  Or more
+// specifically, clients should not depend on anything but the `Cause` of an
+// error.
+func (e *Err) Underlying() error {
+	return e.previous
+}
+
+// The Cause of an error is the most recent error in the error stack that
+// meets one of these criteria: the original error that was raised; the new
+// error that was passed into the Wrap function; the most recently masked
+// error; or nil if the error itself is considered the Cause.  Normally this
+// method is not invoked directly, but instead through the Cause stand alone
+// function.
+func (e *Err) Cause() error {
+	return e.cause
+}
+
+// Message returns the message stored with the most recent location. This is
+// the empty string if the most recent call was Trace, or the message stored
+// with Annotate or Mask.
+func (e *Err) Message() string {
+	return e.message
+}
+
+// Error implements error.Error.
+func (e *Err) Error() string {
+	// We want to walk up the stack of errors showing the annotations
+	// as long as the cause is the same.
+	err := e.previous
+	if !sameError(Cause(err), e.cause) && e.cause != nil {
+		err = e.cause
+	}
+	switch {
+	case err == nil:
+		return e.message
+	case e.message == "":
+		return err.Error()
+	}
+	return fmt.Sprintf("%s: %v", e.message, err)
+}
+
+// SetLocation records the source location of the error at callDepth stack
+// frames above the call.
+func (e *Err) SetLocation(callDepth int) {
+	_, file, line, _ := runtime.Caller(callDepth + 1)
+	e.file = trimGoPath(file)
+	e.line = line
+}
+
+// StackTrace returns one string for each location recorded in the stack of
+// errors. The first value is the originating error, with a line for each
+// other annotation or tracing of the error.
+func (e *Err) StackTrace() []string {
+	return errorStack(e)
+}
+
+// Ideally we'd have a way to check identity, but deep equals will do.
+func sameError(e1, e2 error) bool {
+	return reflect.DeepEqual(e1, e2)
+}
diff --git a/automation/vendor/github.com/juju/errors/errortypes.go b/automation/vendor/github.com/juju/errors/errortypes.go
new file mode 100644
index 0000000..10b3b19
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/errortypes.go
@@ -0,0 +1,284 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+	"fmt"
+)
+
+// wrap is a helper to construct an Err. SetLocation(2) records the
+// location of the caller of the exported constructor, not wrap itself.
+func wrap(err error, format, suffix string, args ...interface{}) Err {
+	newErr := Err{
+		message:  fmt.Sprintf(format+suffix, args...),
+		previous: err,
+	}
+	newErr.SetLocation(2)
+	return newErr
+}
+
+// notFound represents an error when something has not been found.
+type notFound struct {
+	Err
+}
+
+// NotFoundf returns an error which satisfies IsNotFound().
+func NotFoundf(format string, args ...interface{}) error {
+	return &notFound{wrap(nil, format, " not found", args...)}
+}
+
+// NewNotFound returns an error which wraps err that satisfies
+// IsNotFound().
+func NewNotFound(err error, msg string) error {
+	return &notFound{wrap(err, msg, "")}
+}
+
+// IsNotFound reports whether err was created with NotFoundf() or
+// NewNotFound().
+func IsNotFound(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notFound)
+	return ok
+}
+
+// userNotFound represents an error when a nonexistent user is looked up.
+type userNotFound struct {
+	Err
+}
+
+// UserNotFoundf returns an error which satisfies IsUserNotFound().
+func UserNotFoundf(format string, args ...interface{}) error {
+	return &userNotFound{wrap(nil, format, " user not found", args...)}
+}
+
+// NewUserNotFound returns an error which wraps err and satisfies
+// IsUserNotFound().
+func NewUserNotFound(err error, msg string) error {
+	return &userNotFound{wrap(err, msg, "")}
+}
+
+// IsUserNotFound reports whether err was created with UserNotFoundf() or
+// NewUserNotFound().
+func IsUserNotFound(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*userNotFound)
+	return ok
+}
+
+// unauthorized represents an error when an operation is unauthorized.
+type unauthorized struct {
+	Err
+}
+
+// Unauthorizedf returns an error which satisfies IsUnauthorized().
+func Unauthorizedf(format string, args ...interface{}) error {
+	return &unauthorized{wrap(nil, format, "", args...)}
+}
+
+// NewUnauthorized returns an error which wraps err and satisfies
+// IsUnauthorized().
+func NewUnauthorized(err error, msg string) error {
+	return &unauthorized{wrap(err, msg, "")}
+}
+
+// IsUnauthorized reports whether err was created with Unauthorizedf() or
+// NewUnauthorized().
+func IsUnauthorized(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*unauthorized)
+	return ok
+}
+
+// notImplemented represents an error when something is not
+// implemented.
+type notImplemented struct {
+	Err
+}
+
+// NotImplementedf returns an error which satisfies IsNotImplemented().
+func NotImplementedf(format string, args ...interface{}) error {
+	return &notImplemented{wrap(nil, format, " not implemented", args...)}
+}
+
+// NewNotImplemented returns an error which wraps err and satisfies
+// IsNotImplemented().
+func NewNotImplemented(err error, msg string) error {
+	return &notImplemented{wrap(err, msg, "")}
+}
+
+// IsNotImplemented reports whether err was created with
+// NotImplementedf() or NewNotImplemented().
+func IsNotImplemented(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notImplemented)
+	return ok
+}
+
+// alreadyExists represents an error when something already exists.
+type alreadyExists struct {
+	Err
+}
+
+// AlreadyExistsf returns an error which satisfies IsAlreadyExists().
+func AlreadyExistsf(format string, args ...interface{}) error {
+	return &alreadyExists{wrap(nil, format, " already exists", args...)}
+}
+
+// NewAlreadyExists returns an error which wraps err and satisfies
+// IsAlreadyExists().
+func NewAlreadyExists(err error, msg string) error {
+	return &alreadyExists{wrap(err, msg, "")}
+}
+
+// IsAlreadyExists reports whether the error was created with
+// AlreadyExistsf() or NewAlreadyExists().
+func IsAlreadyExists(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*alreadyExists)
+	return ok
+}
+
+// notSupported represents an error when something is not supported.
+type notSupported struct {
+	Err
+}
+
+// NotSupportedf returns an error which satisfies IsNotSupported().
+func NotSupportedf(format string, args ...interface{}) error {
+	return &notSupported{wrap(nil, format, " not supported", args...)}
+}
+
+// NewNotSupported returns an error which wraps err and satisfies
+// IsNotSupported().
+func NewNotSupported(err error, msg string) error {
+	return &notSupported{wrap(err, msg, "")}
+}
+
+// IsNotSupported reports whether the error was created with
+// NotSupportedf() or NewNotSupported().
+func IsNotSupported(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notSupported)
+	return ok
+}
+
+// notValid represents an error when something is not valid.
+type notValid struct {
+	Err
+}
+
+// NotValidf returns an error which satisfies IsNotValid().
+func NotValidf(format string, args ...interface{}) error {
+	return &notValid{wrap(nil, format, " not valid", args...)}
+}
+
+// NewNotValid returns an error which wraps err and satisfies IsNotValid().
+func NewNotValid(err error, msg string) error {
+	return &notValid{wrap(err, msg, "")}
+}
+
+// IsNotValid reports whether the error was created with NotValidf() or
+// NewNotValid().
+func IsNotValid(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notValid)
+	return ok
+}
+
+// notProvisioned represents an error when something is not yet provisioned.
+type notProvisioned struct {
+	Err
+}
+
+// NotProvisionedf returns an error which satisfies IsNotProvisioned().
+func NotProvisionedf(format string, args ...interface{}) error {
+	return &notProvisioned{wrap(nil, format, " not provisioned", args...)}
+}
+
+// NewNotProvisioned returns an error which wraps err that satisfies
+// IsNotProvisioned().
+func NewNotProvisioned(err error, msg string) error {
+	return &notProvisioned{wrap(err, msg, "")}
+}
+
+// IsNotProvisioned reports whether err was created with NotProvisionedf() or
+// NewNotProvisioned().
+func IsNotProvisioned(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notProvisioned)
+	return ok
+}
+
+// notAssigned represents an error when something is not yet assigned to
+// something else.
+type notAssigned struct {
+	Err
+}
+
+// NotAssignedf returns an error which satisfies IsNotAssigned().
+func NotAssignedf(format string, args ...interface{}) error {
+	return &notAssigned{wrap(nil, format, " not assigned", args...)}
+}
+
+// NewNotAssigned returns an error which wraps err that satisfies
+// IsNotAssigned().
+func NewNotAssigned(err error, msg string) error {
+	return &notAssigned{wrap(err, msg, "")}
+}
+
+// IsNotAssigned reports whether err was created with NotAssignedf() or
+// NewNotAssigned().
+func IsNotAssigned(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notAssigned)
+	return ok
+}
+
+// badRequest represents an error when a request has bad parameters.
+type badRequest struct {
+	Err
+}
+
+// BadRequestf returns an error which satisfies IsBadRequest().
+func BadRequestf(format string, args ...interface{}) error {
+	return &badRequest{wrap(nil, format, "", args...)}
+}
+
+// NewBadRequest returns an error which wraps err that satisfies
+// IsBadRequest().
+func NewBadRequest(err error, msg string) error {
+	return &badRequest{wrap(err, msg, "")}
+}
+
+// IsBadRequest reports whether err was created with BadRequestf() or
+// NewBadRequest().
+func IsBadRequest(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*badRequest)
+	return ok
+}
+
+// methodNotAllowed represents an error when an HTTP request
+// is made with an inappropriate method.
+type methodNotAllowed struct {
+	Err
+}
+
+// MethodNotAllowedf returns an error which satisfies IsMethodNotAllowed().
+func MethodNotAllowedf(format string, args ...interface{}) error {
+	return &methodNotAllowed{wrap(nil, format, "", args...)}
+}
+
+// NewMethodNotAllowed returns an error which wraps err that satisfies
+// IsMethodNotAllowed().
+func NewMethodNotAllowed(err error, msg string) error {
+	return &methodNotAllowed{wrap(err, msg, "")}
+}
+
+// IsMethodNotAllowed reports whether err was created with MethodNotAllowedf() or
+// NewMethodNotAllowed().
+func IsMethodNotAllowed(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*methodNotAllowed)
+	return ok
+}
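+
+// Usage sketch (illustrative): each constructor/predicate pair above is
+// meant to be used together, and the predicates unwrap Trace/Annotate
+// layers via Cause:
+//
+//	err := Trace(NotFoundf("user %q", name))
+//	IsNotFound(err) // true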
diff --git a/automation/vendor/github.com/juju/errors/functions.go b/automation/vendor/github.com/juju/errors/functions.go
new file mode 100644
index 0000000..994208d
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/functions.go
@@ -0,0 +1,330 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+	"fmt"
+	"strings"
+)
+
+// New is a drop-in replacement for the standard library errors package that
+// records the location at which the error is created.
+//
+// For example:
+//    return errors.New("validation failed")
+//
+func New(message string) error {
+	err := &Err{message: message}
+	err.SetLocation(1)
+	return err
+}
+
+// Errorf creates a new annotated error and records the location at which
+// the error is created.  It is a drop-in replacement for fmt.Errorf.
+//
+// For example:
+//    return errors.Errorf("validation failed: %s", message)
+//
+func Errorf(format string, args ...interface{}) error {
+	err := &Err{message: fmt.Sprintf(format, args...)}
+	err.SetLocation(1)
+	return err
+}
+
+// Trace adds the location of the Trace call to the stack.  The Cause of the
+// resulting error is the same as the error parameter.  If the other error is
+// nil, the result will be nil.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       return errors.Trace(err)
+//   }
+//
+func Trace(other error) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{previous: other, cause: Cause(other)}
+	err.SetLocation(1)
+	return err
+}
+
+// Annotate is used to add extra context to an existing error. The location of
+// the Annotate call is recorded with the annotations. The file, line and
+// function are also recorded.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       return errors.Annotate(err, "failed to frombulate")
+//   }
+//
+func Annotate(other error, message string) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{
+		previous: other,
+		cause:    Cause(other),
+		message:  message,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Annotatef is used to add extra context to an existing error. The location of
+// the Annotate call is recorded with the annotations. The file, line and
+// function are also recorded.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       return errors.Annotatef(err, "failed to frombulate the %s", arg)
+//   }
+//
+func Annotatef(other error, format string, args ...interface{}) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{
+		previous: other,
+		cause:    Cause(other),
+		message:  fmt.Sprintf(format, args...),
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// DeferredAnnotatef annotates the given error (when it is not nil) with the given
+// format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
+// does nothing. This function is used in a defer statement in order to annotate any
+// resulting error with the same message.
+//
+// For example:
+//
+//    defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
+//
+func DeferredAnnotatef(err *error, format string, args ...interface{}) {
+	if *err == nil {
+		return
+	}
+	newErr := &Err{
+		message:  fmt.Sprintf(format, args...),
+		cause:    Cause(*err),
+		previous: *err,
+	}
+	newErr.SetLocation(1)
+	*err = newErr
+}
+
+// Wrap changes the Cause of the error. The location of the Wrap call is also
+// stored in the error stack.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       newErr := &packageError{"more context", private_value}
+//       return errors.Wrap(err, newErr)
+//   }
+//
+func Wrap(other, newDescriptive error) error {
+	err := &Err{
+		previous: other,
+		cause:    newDescriptive,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Wrapf changes the Cause of the error, and adds an annotation. The location
+// of the Wrap call is also stored in the error stack.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
+//   }
+//
+func Wrapf(other, newDescriptive error, format string, args ...interface{}) error {
+	err := &Err{
+		message:  fmt.Sprintf(format, args...),
+		previous: other,
+		cause:    newDescriptive,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Maskf masks the given error with the given format string and arguments (like
+// fmt.Sprintf), returning a new error that maintains the error stack, but
+// hides the underlying error type.  The error string still contains the full
+// annotations. If you want to hide the annotations, call Wrap.
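+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       return errors.Maskf(err, "transaction failed")
+//   }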
+func Maskf(other error, format string, args ...interface{}) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{
+		message:  fmt.Sprintf(format, args...),
+		previous: other,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Mask hides the underlying error type, and records the location of the masking.
+func Mask(other error) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{
+		previous: other,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Cause returns the cause of the given error.  This will be either the
+// original error, or the result of a Wrap or Mask call.
+//
+// Cause is the usual way to diagnose errors that may have been wrapped by
+// the other errors functions.
+func Cause(err error) error {
+	var diag error
+	if err, ok := err.(causer); ok {
+		diag = err.Cause()
+	}
+	if diag != nil {
+		return diag
+	}
+	return err
+}
+
+type causer interface {
+	Cause() error
+}
+
+type wrapper interface {
+	// Message returns the top level error message,
+	// not including the message from the Previous
+	// error.
+	Message() string
+
+	// Underlying returns the Previous error, or nil
+	// if there is none.
+	Underlying() error
+}
+
+type locationer interface {
+	Location() (string, int)
+}
+
+var (
+	_ wrapper    = (*Err)(nil)
+	_ locationer = (*Err)(nil)
+	_ causer     = (*Err)(nil)
+)
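+
+// Illustrative note: any custom error type that implements these
+// interfaces participates in Cause, Details, and ErrorStack. Embedding
+// Err is the easiest way to get all three:
+//
+//	type timeout struct{ Err } // *timeout satisfies causer, wrapper, locationer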
+
+// Details returns information about the stack of errors wrapped by err, in
+// the format:
+//
+// 	[{filename:99: error one} {otherfile:55: cause of error one}]
+//
+// This is a terse alternative to ErrorStack as it returns a single line.
+func Details(err error) string {
+	if err == nil {
+		return "[]"
+	}
+	var s []byte
+	s = append(s, '[')
+	for {
+		s = append(s, '{')
+		if err, ok := err.(locationer); ok {
+			file, line := err.Location()
+			if file != "" {
+				s = append(s, fmt.Sprintf("%s:%d", file, line)...)
+				s = append(s, ": "...)
+			}
+		}
+		if cerr, ok := err.(wrapper); ok {
+			s = append(s, cerr.Message()...)
+			err = cerr.Underlying()
+		} else {
+			s = append(s, err.Error()...)
+			err = nil
+		}
+		s = append(s, '}')
+		if err == nil {
+			break
+		}
+		s = append(s, ' ')
+	}
+	s = append(s, ']')
+	return string(s)
+}
+
+// ErrorStack returns a string representation of the annotated error. If the
+// error passed as the parameter is not an annotated error, the result is
+// simply the result of the Error() method on that error.
+//
+// If the error is an annotated error, a multi-line string is returned where
+// each line represents one entry in the annotation stack. The full filename
+// from the call stack is used in the output.
+//
+//     first error
+//     github.com/juju/errors/annotation_test.go:193:
+//     github.com/juju/errors/annotation_test.go:194: annotation
+//     github.com/juju/errors/annotation_test.go:195:
+//     github.com/juju/errors/annotation_test.go:196: more context
+//     github.com/juju/errors/annotation_test.go:197:
+func ErrorStack(err error) string {
+	return strings.Join(errorStack(err), "\n")
+}
+
+func errorStack(err error) []string {
+	if err == nil {
+		return nil
+	}
+
+	// We want the first error first
+	var lines []string
+	for {
+		var buff []byte
+		if err, ok := err.(locationer); ok {
+			file, line := err.Location()
+			// Strip off the leading GOPATH/src path elements.
+			file = trimGoPath(file)
+			if file != "" {
+				buff = append(buff, fmt.Sprintf("%s:%d", file, line)...)
+				buff = append(buff, ": "...)
+			}
+		}
+		if cerr, ok := err.(wrapper); ok {
+			message := cerr.Message()
+			buff = append(buff, message...)
+			// If there is a cause for this error, and it is different to the cause
+			// of the underlying error, then output the error string in the stack trace.
+			var cause error
+			if err1, ok := err.(causer); ok {
+				cause = err1.Cause()
+			}
+			err = cerr.Underlying()
+			if cause != nil && !sameError(Cause(err), cause) {
+				if message != "" {
+					buff = append(buff, ": "...)
+				}
+				buff = append(buff, cause.Error()...)
+			}
+		} else {
+			buff = append(buff, err.Error()...)
+			err = nil
+		}
+		lines = append(lines, string(buff))
+		if err == nil {
+			break
+		}
+	}
+	// reverse the lines to get the original error, which was at the end of
+	// the list, back to the start.
+	var result []string
+	for i := len(lines); i > 0; i-- {
+		result = append(result, lines[i-1])
+	}
+	return result
+}
diff --git a/automation/vendor/github.com/juju/errors/path.go b/automation/vendor/github.com/juju/errors/path.go
new file mode 100644
index 0000000..a7b726a
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/path.go
@@ -0,0 +1,38 @@
+// Copyright 2013, 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+	"runtime"
+	"strings"
+)
+
+// prefixSize is used internally to trim the user-specific path from the
+// front of the filenames returned from the runtime call stack.
+var prefixSize int
+
+// goPath is the deduced path based on the location of this file as compiled.
+var goPath string
+
+func init() {
+	_, file, _, ok := runtime.Caller(0)
+	if file == "?" {
+		return
+	}
+	if ok {
+		// We know that the end of the file should be:
+		// github.com/juju/errors/path.go
+		size := len(file)
+		suffix := len("github.com/juju/errors/path.go")
+		goPath = file[:size-suffix]
+		prefixSize = len(goPath)
+	}
+}
+
+func trimGoPath(filename string) string {
+	if strings.HasPrefix(filename, goPath) {
+		return filename[prefixSize:]
+	}
+	return filename
+}
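+
+// For example, assuming goPath was deduced as "/home/user/go/src/" (an
+// illustrative value), trimGoPath("/home/user/go/src/github.com/juju/errors/path.go")
+// returns "github.com/juju/errors/path.go".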
diff --git a/automation/vendor/github.com/juju/gomaasapi/LICENSE b/automation/vendor/github.com/juju/gomaasapi/LICENSE
new file mode 100644
index 0000000..d5836af
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2012-2016 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/gomaasapi/Makefile b/automation/vendor/github.com/juju/gomaasapi/Makefile
new file mode 100644
index 0000000..ea6cbb5
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/Makefile
@@ -0,0 +1,26 @@
+# Build, and run tests.
+check: examples
+	go test ./...
+
+example_source := $(wildcard example/*.go)
+example_binaries := $(patsubst %.go,%,$(example_source))
+
+# Clean up binaries.
+clean:
+	$(RM) $(example_binaries)
+
+# Reformat the source files to match our layout standards.
+format:
+	gofmt -w .
+
+# Invoke gofmt's "simplify" option to streamline the source code.
+simplify:
+	gofmt -w -s .
+
+# Build the examples (we have no tests for them).
+examples: $(example_binaries)
+
+%: %.go
+	go build -o $@ $<
+
+.PHONY: check clean format examples simplify
diff --git a/automation/vendor/github.com/juju/gomaasapi/README.rst b/automation/vendor/github.com/juju/gomaasapi/README.rst
new file mode 100644
index 0000000..c153cd3
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/README.rst
@@ -0,0 +1,12 @@
+.. -*- mode: rst -*-
+
+******************************
+MAAS API client library for Go
+******************************
+
+This library serves as a minimal client for communicating with the MAAS web
+API in Go programs.
+
+For more information see the `project homepage`_.
+
+.. _project homepage: https://github.com/juju/gomaasapi
diff --git a/automation/vendor/github.com/juju/gomaasapi/blockdevice.go b/automation/vendor/github.com/juju/gomaasapi/blockdevice.go
new file mode 100644
index 0000000..ad04f9d
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/blockdevice.go
@@ -0,0 +1,176 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type blockdevice struct {
+	resourceURI string
+
+	id      int
+	name    string
+	model   string
+	path    string
+	usedFor string
+	tags    []string
+
+	blockSize uint64
+	usedSize  uint64
+	size      uint64
+
+	partitions []*partition
+}
+
+// ID implements BlockDevice.
+func (b *blockdevice) ID() int {
+	return b.id
+}
+
+// Name implements BlockDevice.
+func (b *blockdevice) Name() string {
+	return b.name
+}
+
+// Model implements BlockDevice.
+func (b *blockdevice) Model() string {
+	return b.model
+}
+
+// Path implements BlockDevice.
+func (b *blockdevice) Path() string {
+	return b.path
+}
+
+// UsedFor implements BlockDevice.
+func (b *blockdevice) UsedFor() string {
+	return b.usedFor
+}
+
+// Tags implements BlockDevice.
+func (b *blockdevice) Tags() []string {
+	return b.tags
+}
+
+// BlockSize implements BlockDevice.
+func (b *blockdevice) BlockSize() uint64 {
+	return b.blockSize
+}
+
+// UsedSize implements BlockDevice.
+func (b *blockdevice) UsedSize() uint64 {
+	return b.usedSize
+}
+
+// Size implements BlockDevice.
+func (b *blockdevice) Size() uint64 {
+	return b.size
+}
+
+// Partitions implements BlockDevice.
+func (b *blockdevice) Partitions() []Partition {
+	result := make([]Partition, len(b.partitions))
+	for i, v := range b.partitions {
+		result[i] = v
+	}
+	return result
+}
+
+func readBlockDevices(controllerVersion version.Number, source interface{}) ([]*blockdevice, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "blockdevice base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
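+	// Pick the newest deserialisation function whose version does not exceed
+	// the controller's version.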
+	for v := range blockdeviceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no blockdevice read func for version %s", controllerVersion)
+	}
+	readFunc := blockdeviceDeserializationFuncs[deserialisationVersion]
+	return readBlockDeviceList(valid, readFunc)
+}
+
+// readBlockDeviceList expects the values of the sourceList to be string maps.
+func readBlockDeviceList(sourceList []interface{}, readFunc blockdeviceDeserializationFunc) ([]*blockdevice, error) {
+	result := make([]*blockdevice, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for blockdevice %d, %T", i, value)
+		}
+		blockdevice, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "blockdevice %d", i)
+		}
+		result = append(result, blockdevice)
+	}
+	return result, nil
+}
+
+type blockdeviceDeserializationFunc func(map[string]interface{}) (*blockdevice, error)
+
+var blockdeviceDeserializationFuncs = map[version.Number]blockdeviceDeserializationFunc{
+	twoDotOh: blockdevice_2_0,
+}
+
+func blockdevice_2_0(source map[string]interface{}) (*blockdevice, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"id":       schema.ForceInt(),
+		"name":     schema.String(),
+		"model":    schema.OneOf(schema.Nil(""), schema.String()),
+		"path":     schema.String(),
+		"used_for": schema.String(),
+		"tags":     schema.List(schema.String()),
+
+		"block_size": schema.ForceUint(),
+		"used_size":  schema.ForceUint(),
+		"size":       schema.ForceUint(),
+
+		"partitions": schema.List(schema.StringMap(schema.Any())),
+	}
+	checker := schema.FieldMap(fields, nil)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "blockdevice 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	partitions, err := readPartitionList(valid["partitions"].([]interface{}), partition_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	model, _ := valid["model"].(string)
+	result := &blockdevice{
+		resourceURI: valid["resource_uri"].(string),
+
+		id:      valid["id"].(int),
+		name:    valid["name"].(string),
+		model:   model,
+		path:    valid["path"].(string),
+		usedFor: valid["used_for"].(string),
+		tags:    convertToStringSlice(valid["tags"]),
+
+		blockSize: valid["block_size"].(uint64),
+		usedSize:  valid["used_size"].(uint64),
+		size:      valid["size"].(uint64),
+
+		partitions: partitions,
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/bootresource.go b/automation/vendor/github.com/juju/gomaasapi/bootresource.go
new file mode 100644
index 0000000..619a2a9
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/bootresource.go
@@ -0,0 +1,136 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/utils/set"
+	"github.com/juju/version"
+)
+
+type bootResource struct {
+	// Add the controller in when we need to do things with the bootResource.
+	// controller Controller
+
+	resourceURI string
+
+	id           int
+	name         string
+	type_        string
+	architecture string
+	subArches    string
+	kernelFlavor string
+}
+
+// ID implements BootResource.
+func (b *bootResource) ID() int {
+	return b.id
+}
+
+// Name implements BootResource.
+func (b *bootResource) Name() string {
+	return b.name
+}
+
+// Type implements BootResource.
+func (b *bootResource) Type() string {
+	return b.type_
+}
+
+// Architecture implements BootResource.
+func (b *bootResource) Architecture() string {
+	return b.architecture
+}
+
+// SubArchitectures implements BootResource.
+func (b *bootResource) SubArchitectures() set.Strings {
+	return set.NewStrings(strings.Split(b.subArches, ",")...)
+}
+
+// KernelFlavor implements BootResource.
+func (b *bootResource) KernelFlavor() string {
+	return b.kernelFlavor
+}
+
+func readBootResources(controllerVersion version.Number, source interface{}) ([]*bootResource, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "boot resource base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
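+	// Pick the newest deserialisation function whose version does not exceed
+	// the controller's version.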
+	for v := range bootResourceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no boot resource read func for version %s", controllerVersion)
+	}
+	readFunc := bootResourceDeserializationFuncs[deserialisationVersion]
+	return readBootResourceList(valid, readFunc)
+}
+
+// readBootResourceList expects the values of the sourceList to be string maps.
+func readBootResourceList(sourceList []interface{}, readFunc bootResourceDeserializationFunc) ([]*bootResource, error) {
+	result := make([]*bootResource, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for boot resource %d, %T", i, value)
+		}
+		bootResource, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "boot resource %d", i)
+		}
+		result = append(result, bootResource)
+	}
+	return result, nil
+}
+
+type bootResourceDeserializationFunc func(map[string]interface{}) (*bootResource, error)
+
+var bootResourceDeserializationFuncs = map[version.Number]bootResourceDeserializationFunc{
+	twoDotOh: bootResource_2_0,
+}
+
+func bootResource_2_0(source map[string]interface{}) (*bootResource, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+		"id":           schema.ForceInt(),
+		"name":         schema.String(),
+		"type":         schema.String(),
+		"architecture": schema.String(),
+		"subarches":    schema.String(),
+		"kflavor":      schema.String(),
+	}
+	defaults := schema.Defaults{
+		"subarches": "",
+		"kflavor":   "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "boot resource 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	result := &bootResource{
+		resourceURI:  valid["resource_uri"].(string),
+		id:           valid["id"].(int),
+		name:         valid["name"].(string),
+		type_:        valid["type"].(string),
+		architecture: valid["architecture"].(string),
+		subArches:    valid["subarches"].(string),
+		kernelFlavor: valid["kflavor"].(string),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/client.go b/automation/vendor/github.com/juju/gomaasapi/client.go
new file mode 100644
index 0000000..ef887e6
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/client.go
@@ -0,0 +1,314 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/juju/errors"
+)
+
+const (
+	// Number of retries performed when the server returns a 503
+	// response with a 'Retry-after' header.  A request will be issued
+	// at most NumberOfRetries + 1 times.
+	NumberOfRetries = 4
+
+	RetryAfterHeaderName = "Retry-After"
+)
+
+// Client represents a way of communicating with a MAAS API instance.
+// It is stateless, so it can have concurrent requests in progress.
+type Client struct {
+	APIURL *url.URL
+	Signer OAuthSigner
+}
+
+// ServerError is an http error (or at least, a non-2xx result) received from
+// the server.  It contains the numerical HTTP status code as well as an error
+// string and the response's headers.
+type ServerError struct {
+	error
+	StatusCode  int
+	Header      http.Header
+	BodyMessage string
+}
+
+// GetServerError returns the ServerError underlying the cause of the given
+// error, along with a bool indicating whether the cause was in fact a
+// ServerError.
+func GetServerError(err error) (ServerError, bool) {
+	svrErr, ok := errors.Cause(err).(ServerError)
+	return svrErr, ok
+}
+
+// readAndClose reads and closes the given ReadCloser.
+//
+// Trying to read from a nil stream simply returns nil with no error.
+func readAndClose(stream io.ReadCloser) ([]byte, error) {
+	if stream == nil {
+		return nil, nil
+	}
+	defer stream.Close()
+	return ioutil.ReadAll(stream)
+}
+
+// dispatchRequest sends a request to the server, and interprets the response.
+// Client-side errors will return an empty response and a non-nil error.  For
+// server-side errors however (i.e. responses with a non 2XX status code), the
+// returned error will be ServerError and the returned body will reflect the
+// server's response.  If the server returns a 503 response with a 'Retry-after'
+// header, the request will be transparently retried.
+func (client Client) dispatchRequest(request *http.Request) ([]byte, error) {
+	// First, store the request's body into a []byte so that it can be
+	// restored before each attempt.
+	bodyContent, err := readAndClose(request.Body)
+	if err != nil {
+		return nil, err
+	}
+	for retry := 0; retry < NumberOfRetries; retry++ {
+		// Restore body before issuing request.
+		newBody := ioutil.NopCloser(bytes.NewReader(bodyContent))
+		request.Body = newBody
+		body, err := client.dispatchSingleRequest(request)
+		// If this is a 503 response with a non-void "Retry-After" header: wait
+		// as instructed and retry the request.
+		if err != nil {
+			serverError, ok := errors.Cause(err).(ServerError)
+			if ok && serverError.StatusCode == http.StatusServiceUnavailable {
+				retryTime, errConv := strconv.Atoi(serverError.Header.Get(RetryAfterHeaderName))
+				if errConv == nil {
+					// Wait as instructed by the Retry-After header
+					// before retrying the request.
+					time.Sleep(time.Duration(retryTime) * time.Second)
+					continue
+				}
+			}
+		}
+		return body, err
+	}
+	// Restore body before issuing request.
+	newBody := ioutil.NopCloser(bytes.NewReader(bodyContent))
+	request.Body = newBody
+	return client.dispatchSingleRequest(request)
+}
+
+func (client Client) dispatchSingleRequest(request *http.Request) ([]byte, error) {
+	client.Signer.OAuthSign(request)
+	httpClient := http.Client{}
+	// See https://code.google.com/p/go/issues/detail?id=4677
+	// We need to force the connection to close each time so that we don't
+	// hit the above Go bug.
+	request.Close = true
+	response, err := httpClient.Do(request)
+	if err != nil {
+		return nil, err
+	}
+	body, err := readAndClose(response.Body)
+	if err != nil {
+		return nil, err
+	}
+	if response.StatusCode < 200 || response.StatusCode > 299 {
+		err := errors.Errorf("ServerError: %v (%s)", response.Status, body)
+		return body, errors.Trace(ServerError{error: err, StatusCode: response.StatusCode, Header: response.Header, BodyMessage: string(body)})
+	}
+	return body, nil
+}
+
+// GetURL returns the URL to a given resource on the API, based on its URI.
+// The resource URI may be absolute or relative; either way the result is a
+// full absolute URL including the network part.
+func (client Client) GetURL(uri *url.URL) *url.URL {
+	return client.APIURL.ResolveReference(uri)
+}
+
+// Get performs an HTTP "GET" to the API.  This may be either an API method
+// invocation (if you pass its name in "operation") or plain resource
+// retrieval (if you leave "operation" blank).
+func (client Client) Get(uri *url.URL, operation string, parameters url.Values) ([]byte, error) {
+	if parameters == nil {
+		parameters = make(url.Values)
+	}
+	opParameter := parameters.Get("op")
+	if opParameter != "" {
+		msg := errors.Errorf("reserved parameter 'op' passed (with value '%s')", opParameter)
+		return nil, msg
+	}
+	if operation != "" {
+		parameters.Set("op", operation)
+	}
+	queryUrl := client.GetURL(uri)
+	queryUrl.RawQuery = parameters.Encode()
+	request, err := http.NewRequest("GET", queryUrl.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	return client.dispatchRequest(request)
+}
+
+// writeMultiPartFiles writes the given files as parts of a multipart message
+// using the given writer.
+func writeMultiPartFiles(writer *multipart.Writer, files map[string][]byte) error {
+	for fileName, fileContent := range files {
+		fw, err := writer.CreateFormFile(fileName, fileName)
+		if err != nil {
+			return err
+		}
+		if _, err := io.Copy(fw, bytes.NewBuffer(fileContent)); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// writeMultiPartParams writes the given parameters as parts of a multipart
+// message using the given writer.
+func writeMultiPartParams(writer *multipart.Writer, parameters url.Values) error {
+	for key, values := range parameters {
+		for _, value := range values {
+			fw, err := writer.CreateFormField(key)
+			if err != nil {
+				return err
+			}
+			buffer := bytes.NewBufferString(value)
+			if _, err := io.Copy(fw, buffer); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// nonIdempotentRequestFiles implements the common functionality of PUT and
+// POST requests (but not GET or DELETE requests) when uploading files is
+// needed.
+func (client Client) nonIdempotentRequestFiles(method string, uri *url.URL, parameters url.Values, files map[string][]byte) ([]byte, error) {
+	buf := new(bytes.Buffer)
+	writer := multipart.NewWriter(buf)
+	err := writeMultiPartFiles(writer, files)
+	if err != nil {
+		return nil, err
+	}
+	err = writeMultiPartParams(writer, parameters)
+	if err != nil {
+		return nil, err
+	}
+	writer.Close()
+	url := client.GetURL(uri)
+	request, err := http.NewRequest(method, url.String(), buf)
+	if err != nil {
+		return nil, err
+	}
+	request.Header.Set("Content-Type", writer.FormDataContentType())
+	return client.dispatchRequest(request)
+}
+
+// nonIdempotentRequest implements the common functionality of PUT and POST
+// requests (but not GET or DELETE requests).
+func (client Client) nonIdempotentRequest(method string, uri *url.URL, parameters url.Values) ([]byte, error) {
+	url := client.GetURL(uri)
+	request, err := http.NewRequest(method, url.String(), strings.NewReader(string(parameters.Encode())))
+	if err != nil {
+		return nil, err
+	}
+	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	return client.dispatchRequest(request)
+}
+
+// Post performs an HTTP "POST" to the API.  This may be either an API method
+// invocation (if you pass its name in "operation") or plain resource
+// retrieval (if you leave "operation" blank).
+func (client Client) Post(uri *url.URL, operation string, parameters url.Values, files map[string][]byte) ([]byte, error) {
+	queryParams := url.Values{"op": {operation}}
+	uri.RawQuery = queryParams.Encode()
+	if files != nil {
+		return client.nonIdempotentRequestFiles("POST", uri, parameters, files)
+	}
+	return client.nonIdempotentRequest("POST", uri, parameters)
+}
+
+// Put updates an object on the API, using an HTTP "PUT" request.
+func (client Client) Put(uri *url.URL, parameters url.Values) ([]byte, error) {
+	return client.nonIdempotentRequest("PUT", uri, parameters)
+}
+
+// Delete deletes an object on the API, using an HTTP "DELETE" request.
+func (client Client) Delete(uri *url.URL) error {
+	url := client.GetURL(uri)
+	request, err := http.NewRequest("DELETE", url.String(), strings.NewReader(""))
+	if err != nil {
+		return err
+	}
+	_, err = client.dispatchRequest(request)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Anonymous "signature method" implementation.
+type anonSigner struct{}
+
+func (signer anonSigner) OAuthSign(request *http.Request) error {
+	return nil
+}
+
+// anonSigner implements the OAuthSigner interface.
+var _ OAuthSigner = anonSigner{}
+
+func composeAPIURL(BaseURL string, apiVersion string) (*url.URL, error) {
+	baseurl := EnsureTrailingSlash(BaseURL)
+	apiurl := fmt.Sprintf("%sapi/%s/", baseurl, apiVersion)
+	return url.Parse(apiurl)
+}
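+
+// For example (illustrative values), composeAPIURL("http://example.com/MAAS", "2.0")
+// returns "http://example.com/MAAS/api/2.0/".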
+
+// NewAnonymousClient creates a client that issues anonymous requests.
+// BaseURL should refer to the root of the MAAS server path, e.g.
+// http://my.maas.server.example.com/MAAS/
+// apiVersion should contain the version of the MAAS API that you want to use.
+func NewAnonymousClient(BaseURL string, apiVersion string) (*Client, error) {
+	parsedBaseURL, err := composeAPIURL(BaseURL, apiVersion)
+	if err != nil {
+		return nil, err
+	}
+	return &Client{Signer: &anonSigner{}, APIURL: parsedBaseURL}, nil
+}
+
+// NewAuthenticatedClient parses the given MAAS API key into the individual
+// OAuth tokens and creates a Client that will use these tokens to sign the
+// requests it issues.
+// BaseURL should refer to the root of the MAAS server path, e.g.
+// http://my.maas.server.example.com/MAAS/
+// apiVersion should contain the version of the MAAS API that you want to use.
+func NewAuthenticatedClient(BaseURL string, apiKey string, apiVersion string) (*Client, error) {
+	elements := strings.Split(apiKey, ":")
+	if len(elements) != 3 {
+		errString := fmt.Sprintf("invalid API key %q; expected \"<consumer secret>:<token key>:<token secret>\"", apiKey)
+		return nil, errors.NewNotValid(nil, errString)
+	}
+	token := &OAuthToken{
+		ConsumerKey: elements[0],
+		// The consumer secret is the empty string in MAAS' authentication.
+		ConsumerSecret: "",
+		TokenKey:       elements[1],
+		TokenSecret:    elements[2],
+	}
+	signer, err := NewPlainTestOAuthSigner(token, "MAAS API")
+	if err != nil {
+		return nil, err
+	}
+	parsedBaseURL, err := composeAPIURL(BaseURL, apiVersion)
+	if err != nil {
+		return nil, err
+	}
+	return &Client{Signer: signer, APIURL: parsedBaseURL}, nil
+}
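+
+// A hedged usage sketch (the URL and key are placeholders; the key must have
+// the three colon-separated parts validated above):
+//
+//   client, err := gomaasapi.NewAuthenticatedClient(
+//       "http://maas.example.com/MAAS/", "consumer:token:secret", "2.0")
+//   if err != nil {
+//       // handle the error
+//   }
+//   // use client.Get/Post/Put/Delete to talk to the MAAS API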
diff --git a/automation/vendor/github.com/juju/gomaasapi/controller.go b/automation/vendor/github.com/juju/gomaasapi/controller.go
new file mode 100644
index 0000000..3c729c2
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/controller.go
@@ -0,0 +1,890 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"path"
+	"strings"
+	"sync/atomic"
+
+	"github.com/juju/errors"
+	"github.com/juju/loggo"
+	"github.com/juju/schema"
+	"github.com/juju/utils/set"
+	"github.com/juju/version"
+)
+
+var (
+	logger = loggo.GetLogger("maas")
+
+	// The supported versions should be ordered from most desirable version to
+	// least as they will be tried in order.
+	supportedAPIVersions = []string{"2.0"}
+
+	// Each API version that changes the request or response structure for
+	// any given call should have a value defined here, to simplify defining
+	// the deserialization functions.
+	twoDotOh = version.Number{Major: 2, Minor: 0}
+
+	// Current request number. Informational only for logging.
+	requestNumber int64
+)
+
+// ControllerArgs is an argument struct for passing the required parameters
+// to the NewController method.
+type ControllerArgs struct {
+	BaseURL string
+	APIKey  string
+}
+
+// NewController creates an authenticated client to the MAAS API, and checks
+// the capabilities of the server.
+//
+// If the APIKey is not valid, a NotValid error is returned.
+// If the credentials are incorrect, a PermissionError is returned.
+func NewController(args ControllerArgs) (Controller, error) {
+	// For now we don't need to test multiple versions. It is expected that at
+	// some time in the future, we will try the most up-to-date version and then
+	// work our way backwards.
+	for _, apiVersion := range supportedAPIVersions {
+		major, minor, err := version.ParseMajorMinor(apiVersion)
+		// We should not get an error here. See the test.
+		if err != nil {
+			return nil, errors.Errorf("bad version defined in supported versions: %q", apiVersion)
+		}
+		client, err := NewAuthenticatedClient(args.BaseURL, args.APIKey, apiVersion)
+		if err != nil {
+			// If the credentials aren't valid, return now.
+			if errors.IsNotValid(err) {
+				return nil, errors.Trace(err)
+			}
+			// Any other error while creating the authenticated client is
+			// unexpected, so return now.
+			return nil, NewUnexpectedError(err)
+		}
+		controllerVersion := version.Number{
+			Major: major,
+			Minor: minor,
+		}
+		controller := &controller{client: client}
+		// The controllerVersion returned from the function will include any patch version.
+		controller.capabilities, controller.apiVersion, err = controller.readAPIVersion(controllerVersion)
+		if err != nil {
+			logger.Debugf("read version failed: %#v", err)
+			continue
+		}
+
+		if err := controller.checkCreds(); err != nil {
+			return nil, errors.Trace(err)
+		}
+		return controller, nil
+	}
+
+	return nil, NewUnsupportedVersionError("controller at %s does not support any of %s", args.BaseURL, supportedAPIVersions)
+}
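+
+// A minimal usage sketch, assuming a reachable MAAS endpoint (the URL and
+// key below are placeholders):
+//
+//   controller, err := gomaasapi.NewController(gomaasapi.ControllerArgs{
+//       BaseURL: "http://maas.example.com/MAAS/",
+//       APIKey:  "consumer:token:secret",
+//   })
+//   if err != nil {
+//       // handle the error
+//   }
+//   machines, err := controller.Machines(gomaasapi.MachinesArgs{})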
+
+type controller struct {
+	client       *Client
+	apiVersion   version.Number
+	capabilities set.Strings
+}
+
+// Capabilities implements Controller.
+func (c *controller) Capabilities() set.Strings {
+	return c.capabilities
+}
+
+// BootResources implements Controller.
+func (c *controller) BootResources() ([]BootResource, error) {
+	source, err := c.get("boot-resources")
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	resources, err := readBootResources(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []BootResource
+	for _, r := range resources {
+		result = append(result, r)
+	}
+	return result, nil
+}
+
+// Fabrics implements Controller.
+func (c *controller) Fabrics() ([]Fabric, error) {
+	source, err := c.get("fabrics")
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	fabrics, err := readFabrics(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Fabric
+	for _, f := range fabrics {
+		result = append(result, f)
+	}
+	return result, nil
+}
+
+// Spaces implements Controller.
+func (c *controller) Spaces() ([]Space, error) {
+	source, err := c.get("spaces")
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	spaces, err := readSpaces(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Space
+	for _, space := range spaces {
+		result = append(result, space)
+	}
+	return result, nil
+}
+
+// Zones implements Controller.
+func (c *controller) Zones() ([]Zone, error) {
+	source, err := c.get("zones")
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	zones, err := readZones(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Zone
+	for _, z := range zones {
+		result = append(result, z)
+	}
+	return result, nil
+}
+
+// DevicesArgs is an argument struct for selecting Devices.
+// Only devices that match the specified criteria are returned.
+type DevicesArgs struct {
+	Hostname     []string
+	MACAddresses []string
+	SystemIDs    []string
+	Domain       string
+	Zone         string
+	AgentName    string
+}
+
+// Devices implements Controller.
+func (c *controller) Devices(args DevicesArgs) ([]Device, error) {
+	params := NewURLParams()
+	params.MaybeAddMany("hostname", args.Hostname)
+	params.MaybeAddMany("mac_address", args.MACAddresses)
+	params.MaybeAddMany("id", args.SystemIDs)
+	params.MaybeAdd("domain", args.Domain)
+	params.MaybeAdd("zone", args.Zone)
+	params.MaybeAdd("agent_name", args.AgentName)
+	source, err := c.getQuery("devices", params.Values)
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	devices, err := readDevices(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Device
+	for _, d := range devices {
+		d.controller = c
+		result = append(result, d)
+	}
+	return result, nil
+}
+
+// CreateDeviceArgs is an argument struct for passing information into CreateDevice.
+type CreateDeviceArgs struct {
+	Hostname     string
+	MACAddresses []string
+	Domain       string
+	Parent       string
+}
+
+// CreateDevice implements Controller.
+func (c *controller) CreateDevice(args CreateDeviceArgs) (Device, error) {
+	// There must be at least one MAC address.
+	if len(args.MACAddresses) == 0 {
+		return nil, NewBadRequestError("at least one MAC address must be specified")
+	}
+	params := NewURLParams()
+	params.MaybeAdd("hostname", args.Hostname)
+	params.MaybeAdd("domain", args.Domain)
+	params.MaybeAddMany("mac_addresses", args.MACAddresses)
+	params.MaybeAdd("parent", args.Parent)
+	result, err := c.post("devices", "", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusBadRequest {
+				return nil, errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			}
+		}
+		// Translate http errors.
+		return nil, NewUnexpectedError(err)
+	}
+
+	device, err := readDevice(c.apiVersion, result)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	device.controller = c
+	return device, nil
+}
+
+// MachinesArgs is an argument struct for selecting Machines.
+// Only machines that match the specified criteria are returned.
+type MachinesArgs struct {
+	Hostnames    []string
+	MACAddresses []string
+	SystemIDs    []string
+	Domain       string
+	Zone         string
+	AgentName    string
+	OwnerData    map[string]string
+}
+
+// Machines implements Controller.
+func (c *controller) Machines(args MachinesArgs) ([]Machine, error) {
+	params := NewURLParams()
+	params.MaybeAddMany("hostname", args.Hostnames)
+	params.MaybeAddMany("mac_address", args.MACAddresses)
+	params.MaybeAddMany("id", args.SystemIDs)
+	params.MaybeAdd("domain", args.Domain)
+	params.MaybeAdd("zone", args.Zone)
+	params.MaybeAdd("agent_name", args.AgentName)
+	// At the moment the MAAS API doesn't support filtering by owner
+	// data so we do that ourselves below.
+	source, err := c.getQuery("machines", params.Values)
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	machines, err := readMachines(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Machine
+	for _, m := range machines {
+		m.controller = c
+		if ownerDataMatches(m.ownerData, args.OwnerData) {
+			result = append(result, m)
+		}
+	}
+	return result, nil
+}
+
+func ownerDataMatches(ownerData, filter map[string]string) bool {
+	for key, value := range filter {
+		if ownerData[key] != value {
+			return false
+		}
+	}
+	return true
+}
+
+// StorageSpec represents one element of the storage constraints that must
+// be satisfied to allocate a machine.
+type StorageSpec struct {
+	// Label is optional and an arbitrary string. Labels need to be unique
+	// across the StorageSpec elements specified in the AllocateMachineArgs.
+	Label string
+	// Size is required and specifies the minimum size in GB.
+	Size int
+	// Tags is zero or more tags associated with the disks.
+	Tags []string
+}
+
+// Validate ensures that the size is positive and that there are no empty
+// tag values.
+func (s *StorageSpec) Validate() error {
+	if s.Size <= 0 {
+		return errors.NotValidf("Size value %d", s.Size)
+	}
+	for _, v := range s.Tags {
+		if v == "" {
+			return errors.NotValidf("empty tag")
+		}
+	}
+	return nil
+}
+
+// String returns the string representation of the storage spec.
+func (s *StorageSpec) String() string {
+	label := s.Label
+	if label != "" {
+		label += ":"
+	}
+	tags := strings.Join(s.Tags, ",")
+	if tags != "" {
+		tags = "(" + tags + ")"
+	}
+	return fmt.Sprintf("%s%d%s", label, s.Size, tags)
+}
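+
+// For example (illustrative values), StorageSpec{Label: "root", Size: 32,
+// Tags: []string{"ssd"}}.String() renders as "root:32(ssd)".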
+
+// InterfaceSpec represents one element of the network-related constraints.
+type InterfaceSpec struct {
+	// Label is required and an arbitrary string. Labels need to be unique
+	// across the InterfaceSpec elements specified in the AllocateMachineArgs.
+	// The label is returned in the ConstraintMatches response from
+	// AllocateMachine.
+	Label string
+	Space string
+
+	// NOTE: there are other interface spec values that we are not exposing at
+	// this stage; they can be added on an as-needed basis. Other possible values are:
+	//     'fabric_class', 'not_fabric_class',
+	//     'subnet_cidr', 'not_subnet_cidr',
+	//     'vid', 'not_vid',
+	//     'fabric', 'not_fabric',
+	//     'subnet', 'not_subnet',
+	//     'mode'
+}
+
+// Validate ensures that a Label is specified and that a Space constraint
+// is set.
+func (a *InterfaceSpec) Validate() error {
+	if a.Label == "" {
+		return errors.NotValidf("missing Label")
+	}
+	// Perhaps at some stage in the future there will be other possible specs
+	// supported (like vid, subnet, etc), but until then Space is the only
+	// value to check.
+	if a.Space == "" {
+		return errors.NotValidf("empty Space constraint")
+	}
+	return nil
+}
+
+// String returns the interface spec as MAAS requires it.
+func (a *InterfaceSpec) String() string {
+	return fmt.Sprintf("%s:space=%s", a.Label, a.Space)
+}
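+
+// For example (illustrative values), InterfaceSpec{Label: "default",
+// Space: "admin"}.String() renders as "default:space=admin".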
+
+// AllocateMachineArgs is an argument struct for passing args into Machine.Allocate.
+type AllocateMachineArgs struct {
+	Hostname     string
+	Architecture string
+	MinCPUCount  int
+	// MinMemory represented in MB.
+	MinMemory int
+	Tags      []string
+	NotTags   []string
+	Zone      string
+	NotInZone []string
+	// Storage represents the required disks on the Machine. If any are specified
+	// the first value is used for the root disk.
+	Storage []StorageSpec
+	// Interfaces represents a number of required interfaces on the machine.
+	// Each InterfaceSpec relates to an individual network interface.
+	Interfaces []InterfaceSpec
+	// NotSpace is a machine level constraint, and applies to the entire machine
+	// rather than specific interfaces.
+	NotSpace  []string
+	AgentName string
+	Comment   string
+	DryRun    bool
+}
+
+// Validate makes sure that any labels specified in Storage or Interfaces
+// are unique, and that the required specifications are valid.
+func (a *AllocateMachineArgs) Validate() error {
+	storageLabels := set.NewStrings()
+	for _, spec := range a.Storage {
+		if err := spec.Validate(); err != nil {
+			return errors.Annotate(err, "Storage")
+		}
+		if spec.Label != "" {
+			if storageLabels.Contains(spec.Label) {
+				return errors.NotValidf("reusing storage label %q", spec.Label)
+			}
+			storageLabels.Add(spec.Label)
+		}
+	}
+	interfaceLabels := set.NewStrings()
+	for _, spec := range a.Interfaces {
+		if err := spec.Validate(); err != nil {
+			return errors.Annotate(err, "Interfaces")
+		}
+		if interfaceLabels.Contains(spec.Label) {
+			return errors.NotValidf("reusing interface label %q", spec.Label)
+		}
+		interfaceLabels.Add(spec.Label)
+	}
+	for _, v := range a.NotSpace {
+		if v == "" {
+			return errors.NotValidf("empty NotSpace constraint")
+		}
+	}
+	return nil
+}
+
+func (a *AllocateMachineArgs) storage() string {
+	var values []string
+	for _, spec := range a.Storage {
+		values = append(values, spec.String())
+	}
+	return strings.Join(values, ",")
+}
+
+func (a *AllocateMachineArgs) interfaces() string {
+	var values []string
+	for _, spec := range a.Interfaces {
+		values = append(values, spec.String())
+	}
+	return strings.Join(values, ";")
+}
+
+func (a *AllocateMachineArgs) notSubnets() []string {
+	var values []string
+	for _, v := range a.NotSpace {
+		values = append(values, "space:"+v)
+	}
+	return values
+}
+
+// ConstraintMatches provides a way for the caller of AllocateMachine to
+// determine how the allocated machine matched the storage and interface
+// constraints specified. The labels that were used in the constraints are
+// the keys in the maps.
+type ConstraintMatches struct {
+	// Interface is a mapping of the constraint label specified to the Interfaces
+	// that match that constraint.
+	Interfaces map[string][]Interface
+
+	// Storage is a mapping of the constraint label specified to the BlockDevices
+	// that match that constraint.
+	Storage map[string][]BlockDevice
+}
+
+// AllocateMachine implements Controller.
+//
+// Returns an error that satisfies IsNoMatchError if the requested
+// constraints cannot be met.
+func (c *controller) AllocateMachine(args AllocateMachineArgs) (Machine, ConstraintMatches, error) {
+	var matches ConstraintMatches
+	params := NewURLParams()
+	params.MaybeAdd("name", args.Hostname)
+	params.MaybeAdd("arch", args.Architecture)
+	params.MaybeAddInt("cpu_count", args.MinCPUCount)
+	params.MaybeAddInt("mem", args.MinMemory)
+	params.MaybeAddMany("tags", args.Tags)
+	params.MaybeAddMany("not_tags", args.NotTags)
+	params.MaybeAdd("storage", args.storage())
+	params.MaybeAdd("interfaces", args.interfaces())
+	params.MaybeAddMany("not_subnets", args.notSubnets())
+	params.MaybeAdd("zone", args.Zone)
+	params.MaybeAddMany("not_in_zone", args.NotInZone)
+	params.MaybeAdd("agent_name", args.AgentName)
+	params.MaybeAdd("comment", args.Comment)
+	params.MaybeAddBool("dry_run", args.DryRun)
+	result, err := c.post("machines", "allocate", params.Values)
+	if err != nil {
+		// A 409 Status code is "No Matching Machines"
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusConflict {
+				return nil, matches, errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			}
+		}
+		// Translate http errors.
+		return nil, matches, NewUnexpectedError(err)
+	}
+
+	machine, err := readMachine(c.apiVersion, result)
+	if err != nil {
+		return nil, matches, errors.Trace(err)
+	}
+	machine.controller = c
+
+	// Parse the constraint matches.
+	matches, err = parseAllocateConstraintsResponse(result, machine)
+	if err != nil {
+		return nil, matches, errors.Trace(err)
+	}
+
+	return machine, matches, nil
+}
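+
+// A hedged sketch of an allocation call (the constraint values are
+// placeholders):
+//
+//   machine, matches, err := controller.AllocateMachine(gomaasapi.AllocateMachineArgs{
+//       MinCPUCount: 2,
+//       MinMemory:   4096,
+//       Storage:     []gomaasapi.StorageSpec{{Label: "root", Size: 32}},
+//       Interfaces:  []gomaasapi.InterfaceSpec{{Label: "default", Space: "admin"}},
+//   })
+//
+// On success, matches.Storage["root"] and matches.Interfaces["default"] list
+// the devices that satisfied each labelled constraint.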
+
+// ReleaseMachinesArgs is an argument struct for passing the machine system IDs
+// and an optional comment into the ReleaseMachines method.
+type ReleaseMachinesArgs struct {
+	SystemIDs []string
+	Comment   string
+}
+
+// ReleaseMachines implements Controller.
+//
+// Release multiple machines at once. Returns
+//  - BadRequestError if any of the machines cannot be found
+//  - PermissionError if the user does not have permission to release any of the machines
+//  - CannotCompleteError if any of the machines could not be released due to their current state
+func (c *controller) ReleaseMachines(args ReleaseMachinesArgs) error {
+	params := NewURLParams()
+	params.MaybeAddMany("machines", args.SystemIDs)
+	params.MaybeAdd("comment", args.Comment)
+	_, err := c.post("machines", "release", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusBadRequest:
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			case http.StatusConflict:
+				return errors.Wrap(err, NewCannotCompleteError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	return nil
+}
+
+// Files implements Controller.
+func (c *controller) Files(prefix string) ([]File, error) {
+	params := NewURLParams()
+	params.MaybeAdd("prefix", prefix)
+	source, err := c.getQuery("files", params.Values)
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	files, err := readFiles(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []File
+	for _, f := range files {
+		f.controller = c
+		result = append(result, f)
+	}
+	return result, nil
+}
+
+// GetFile implements Controller.
+func (c *controller) GetFile(filename string) (File, error) {
+	if filename == "" {
+		return nil, errors.NotValidf("missing filename")
+	}
+	source, err := c.get("files/" + filename)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusNotFound {
+				return nil, errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			}
+		}
+		return nil, NewUnexpectedError(err)
+	}
+	file, err := readFile(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	file.controller = c
+	return file, nil
+}
+
+// AddFileArgs is an argument struct for passing information into AddFile.
+// One of Content or (Reader, Length) must be specified.
+type AddFileArgs struct {
+	Filename string
+	Content  []byte
+	Reader   io.Reader
+	Length   int64
+}
+
+// Validate checks to make sure the filename has no slashes, and that one of
+// Content or (Reader, Length) is specified.
+func (a *AddFileArgs) Validate() error {
+	dir, _ := path.Split(a.Filename)
+	if dir != "" {
+		return errors.NotValidf("paths in Filename %q", a.Filename)
+	}
+	if a.Filename == "" {
+		return errors.NotValidf("missing Filename")
+	}
+	if a.Content == nil {
+		if a.Reader == nil {
+			return errors.NotValidf("missing Content or Reader")
+		}
+		if a.Length == 0 {
+			return errors.NotValidf("missing Length")
+		}
+	} else {
+		if a.Reader != nil {
+			return errors.NotValidf("specifying Content and Reader")
+		}
+		if a.Length != 0 {
+			return errors.NotValidf("specifying Length and Content")
+		}
+	}
+	return nil
+}
+
+// AddFile implements Controller.
+func (c *controller) AddFile(args AddFileArgs) error {
+	if err := args.Validate(); err != nil {
+		return errors.Trace(err)
+	}
+	fileContent := args.Content
+	if fileContent == nil {
+		content, err := ioutil.ReadAll(io.LimitReader(args.Reader, args.Length))
+		if err != nil {
+			return errors.Annotatef(err, "cannot read file content")
+		}
+		fileContent = content
+	}
+	params := url.Values{"filename": {args.Filename}}
+	_, err := c.postFile("files", "", params, fileContent)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusBadRequest {
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
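+
+// A hedged sketch of uploading a small file (the name and content are
+// placeholders):
+//
+//   err := controller.AddFile(gomaasapi.AddFileArgs{
+//       Filename: "config.yaml",
+//       Content:  []byte("key: value\n"),
+//   })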
+
+func (c *controller) checkCreds() error {
+	if _, err := c.getOp("users", "whoami"); err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusUnauthorized {
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
+
+func (c *controller) put(path string, params url.Values) (interface{}, error) {
+	path = EnsureTrailingSlash(path)
+	requestID := nextRequestID()
+	logger.Tracef("request %x: PUT %s%s, params: %s", requestID, c.client.APIURL, path, params.Encode())
+	bytes, err := c.client.Put(&url.URL{Path: path}, params)
+	if err != nil {
+		logger.Tracef("response %x: error: %q", requestID, err.Error())
+		logger.Tracef("error detail: %#v", err)
+		return nil, errors.Trace(err)
+	}
+	logger.Tracef("response %x: %s", requestID, string(bytes))
+
+	var parsed interface{}
+	err = json.Unmarshal(bytes, &parsed)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return parsed, nil
+}
+
+func (c *controller) post(path, op string, params url.Values) (interface{}, error) {
+	bytes, err := c._postRaw(path, op, params, nil)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	var parsed interface{}
+	err = json.Unmarshal(bytes, &parsed)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return parsed, nil
+}
+
+func (c *controller) postFile(path, op string, params url.Values, fileContent []byte) (interface{}, error) {
+	// Only one file is ever sent at a time.
+	files := map[string][]byte{"file": fileContent}
+	return c._postRaw(path, op, params, files)
+}
+
+func (c *controller) _postRaw(path, op string, params url.Values, files map[string][]byte) ([]byte, error) {
+	path = EnsureTrailingSlash(path)
+	requestID := nextRequestID()
+	if logger.IsTraceEnabled() {
+		opArg := ""
+		if op != "" {
+			opArg = "?op=" + op
+		}
+		logger.Tracef("request %x: POST %s%s%s, params=%s", requestID, c.client.APIURL, path, opArg, params.Encode())
+	}
+	bytes, err := c.client.Post(&url.URL{Path: path}, op, params, files)
+	if err != nil {
+		logger.Tracef("response %x: error: %q", requestID, err.Error())
+		logger.Tracef("error detail: %#v", err)
+		return nil, errors.Trace(err)
+	}
+	logger.Tracef("response %x: %s", requestID, string(bytes))
+	return bytes, nil
+}
+
+func (c *controller) delete(path string) error {
+	path = EnsureTrailingSlash(path)
+	requestID := nextRequestID()
+	logger.Tracef("request %x: DELETE %s%s", requestID, c.client.APIURL, path)
+	err := c.client.Delete(&url.URL{Path: path})
+	if err != nil {
+		logger.Tracef("response %x: error: %q", requestID, err.Error())
+		logger.Tracef("error detail: %#v", err)
+		return errors.Trace(err)
+	}
+	logger.Tracef("response %x: complete", requestID)
+	return nil
+}
+
+func (c *controller) getQuery(path string, params url.Values) (interface{}, error) {
+	return c._get(path, "", params)
+}
+
+func (c *controller) get(path string) (interface{}, error) {
+	return c._get(path, "", nil)
+}
+
+func (c *controller) getOp(path, op string) (interface{}, error) {
+	return c._get(path, op, nil)
+}
+
+func (c *controller) _get(path, op string, params url.Values) (interface{}, error) {
+	bytes, err := c._getRaw(path, op, params)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var parsed interface{}
+	err = json.Unmarshal(bytes, &parsed)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return parsed, nil
+}
+
+func (c *controller) _getRaw(path, op string, params url.Values) ([]byte, error) {
+	path = EnsureTrailingSlash(path)
+	requestID := nextRequestID()
+	if logger.IsTraceEnabled() {
+		var query string
+		if params != nil {
+			query = "?" + params.Encode()
+		}
+		logger.Tracef("request %x: GET %s%s%s", requestID, c.client.APIURL, path, query)
+	}
+	bytes, err := c.client.Get(&url.URL{Path: path}, op, params)
+	if err != nil {
+		logger.Tracef("response %x: error: %q", requestID, err.Error())
+		logger.Tracef("error detail: %#v", err)
+		return nil, errors.Trace(err)
+	}
+	logger.Tracef("response %x: %s", requestID, string(bytes))
+	return bytes, nil
+}
+
+func nextRequestID() int64 {
+	return atomic.AddInt64(&requestNumber, 1)
+}
+
+func (c *controller) readAPIVersion(apiVersion version.Number) (set.Strings, version.Number, error) {
+	parsed, err := c.get("version")
+	if err != nil {
+		return nil, apiVersion, errors.Trace(err)
+	}
+
+	// Currently we only care about the capabilities field; as other fields
+	// become needed, add them here.
+	fields := schema.Fields{
+		"capabilities": schema.List(schema.String()),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(parsed, nil)
+	if err != nil {
+		return nil, apiVersion, WrapWithDeserializationError(err, "version response")
+	}
+	// For now we don't handle any subversion, but once one comes into use we
+	// should parse and check it.
+
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+	capabilities := set.NewStrings()
+	capabilityValues := valid["capabilities"].([]interface{})
+	for _, value := range capabilityValues {
+		capabilities.Add(value.(string))
+	}
+
+	return capabilities, apiVersion, nil
+}
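+
+// To make the schema coercion above concrete: a decoded /version/ response
+// shaped like (illustrative values only)
+//
+//	map[string]interface{}{
+//		"capabilities": []interface{}{"networks-management", "static-ipaddresses"},
+//	}
+//
+// passes the field check, after which valid["capabilities"] is guaranteed to
+// hold only strings, so the type assertions above cannot panic.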
+
+func parseAllocateConstraintsResponse(source interface{}, machine *machine) (ConstraintMatches, error) {
+	var empty ConstraintMatches
+	matchFields := schema.Fields{
+		"storage":    schema.StringMap(schema.List(schema.ForceInt())),
+		"interfaces": schema.StringMap(schema.List(schema.ForceInt())),
+	}
+	matchDefaults := schema.Defaults{
+		"storage":    schema.Omit,
+		"interfaces": schema.Omit,
+	}
+	fields := schema.Fields{
+		"constraints_by_type": schema.FieldMap(matchFields, matchDefaults),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return empty, WrapWithDeserializationError(err, "allocation constraints response schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	constraintsMap := valid["constraints_by_type"].(map[string]interface{})
+	result := ConstraintMatches{
+		Interfaces: make(map[string][]Interface),
+		Storage:    make(map[string][]BlockDevice),
+	}
+
+	if interfaceMatches, found := constraintsMap["interfaces"]; found {
+		matches := convertConstraintMatches(interfaceMatches)
+		for label, ids := range matches {
+			interfaces := make([]Interface, len(ids))
+			for index, id := range ids {
+				iface := machine.Interface(id)
+				if iface == nil {
+					return empty, NewDeserializationError("constraint match interface %q: %d does not match an interface for the machine", label, id)
+				}
+				interfaces[index] = iface
+			}
+			result.Interfaces[label] = interfaces
+		}
+	}
+
+	if storageMatches, found := constraintsMap["storage"]; found {
+		matches := convertConstraintMatches(storageMatches)
+		for label, ids := range matches {
+			blockDevices := make([]BlockDevice, len(ids))
+			for index, id := range ids {
+				blockDevice := machine.PhysicalBlockDevice(id)
+				if blockDevice == nil {
+					return empty, NewDeserializationError("constraint match storage %q: %d does not match a physical block device for the machine", label, id)
+				}
+				blockDevices[index] = blockDevice
+			}
+			result.Storage[label] = blockDevices
+		}
+	}
+	return result, nil
+}
+
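+// For reference, the allocation response fragment parsed above has roughly
+// this shape (an assumed example, not captured from a live controller):
+//
+//	{
+//	  "constraints_by_type": {
+//	    "interfaces": {"database": [42]},
+//	    "storage":    {"root": [7]}
+//	  }
+//	}
+//
+// where each integer is the ID of an interface or physical block device on
+// the allocated machine.
+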
+func convertConstraintMatches(source interface{}) map[string][]int {
+	// These type assertions are all safe because of the schema check.
+	result := make(map[string][]int)
+	matchMap := source.(map[string]interface{})
+	for label, values := range matchMap {
+		items := values.([]interface{})
+		result[label] = make([]int, len(items))
+		for index, value := range items {
+			result[label][index] = value.(int)
+		}
+	}
+	return result
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/dependencies.tsv b/automation/vendor/github.com/juju/gomaasapi/dependencies.tsv
new file mode 100644
index 0000000..4cd966f
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/dependencies.tsv
@@ -0,0 +1,11 @@
+github.com/juju/errors	git	1b5e39b83d1835fa480e0c2ddefb040ee82d58b3	2015-09-16T12:56:42Z
+github.com/juju/loggo	git	8477fc936adf0e382d680310047ca27e128a309a	2015-05-27T03:58:39Z
+github.com/juju/names	git	8a0aa0963bbacdc790914892e9ff942e94d6f795	2016-03-30T15:05:33Z
+github.com/juju/schema	git	075de04f9b7d7580d60a1e12a0b3f50bb18e6998	2016-04-20T04:42:03Z
+github.com/juju/testing	git	162fafccebf20a4207ab93d63b986c230e3f4d2e	2016-04-04T09:43:17Z
+github.com/juju/utils	git	eb6cb958762135bb61aed1e0951f657c674d427f	2016-04-11T02:40:59Z
+github.com/juju/version	git	ef897ad7f130870348ce306f61332f5335355063	2015-11-27T20:34:00Z
+golang.org/x/crypto	git	aedad9a179ec1ea11b7064c57cbc6dc30d7724ec	2015-08-30T18:06:42Z
+gopkg.in/check.v1	git	4f90aeace3a26ad7021961c297b22c42160c7b25	2016-01-05T16:49:36Z
+gopkg.in/mgo.v2	git	4d04138ffef2791c479c0c8bbffc30b34081b8d9	2015-10-26T16:34:53Z
+gopkg.in/yaml.v2	git	a83829b6f1293c91addabc89d0571c246397bbf4	2016-03-01T20:40:22Z
diff --git a/automation/vendor/github.com/juju/gomaasapi/device.go b/automation/vendor/github.com/juju/gomaasapi/device.go
new file mode 100644
index 0000000..7c9bc70
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/device.go
@@ -0,0 +1,293 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type device struct {
+	controller *controller
+
+	resourceURI string
+
+	systemID string
+	hostname string
+	fqdn     string
+
+	parent string
+	owner  string
+
+	ipAddresses  []string
+	interfaceSet []*interface_
+	zone         *zone
+}
+
+// SystemID implements Device.
+func (d *device) SystemID() string {
+	return d.systemID
+}
+
+// Hostname implements Device.
+func (d *device) Hostname() string {
+	return d.hostname
+}
+
+// FQDN implements Device.
+func (d *device) FQDN() string {
+	return d.fqdn
+}
+
+// Parent implements Device.
+func (d *device) Parent() string {
+	return d.parent
+}
+
+// Owner implements Device.
+func (d *device) Owner() string {
+	return d.owner
+}
+
+// IPAddresses implements Device.
+func (d *device) IPAddresses() []string {
+	return d.ipAddresses
+}
+
+// Zone implements Device.
+func (d *device) Zone() Zone {
+	if d.zone == nil {
+		return nil
+	}
+	return d.zone
+}
+
+// InterfaceSet implements Device.
+func (d *device) InterfaceSet() []Interface {
+	result := make([]Interface, len(d.interfaceSet))
+	for i, v := range d.interfaceSet {
+		v.controller = d.controller
+		result[i] = v
+	}
+	return result
+}
+
+// CreateInterfaceArgs is an argument struct for passing parameters to
+// the Device.CreateInterface method.
+type CreateInterfaceArgs struct {
+	// Name of the interface (required).
+	Name string
+	// MACAddress is the MAC address of the interface (required).
+	MACAddress string
+	// VLAN is the untagged VLAN the interface is connected to (required).
+	VLAN VLAN
+	// Tags to attach to the interface (optional).
+	Tags []string
+	// MTU - Maximum transmission unit. (optional)
+	MTU int
+	// AcceptRA - Accept router advertisements. (IPv6 only)
+	AcceptRA bool
+	// Autoconf - Perform stateless autoconfiguration. (IPv6 only)
+	Autoconf bool
+}
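+
+// A minimal usage sketch (the device and VLAN values are assumed to have been
+// obtained from the controller already):
+//
+//	iface, err := device.CreateInterface(CreateInterfaceArgs{
+//		Name:       "eth1",
+//		MACAddress: "52:54:00:12:34:56",
+//		VLAN:       vlan,
+//	})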
+
+// Validate checks the required fields are set for the arg structure.
+func (a *CreateInterfaceArgs) Validate() error {
+	if a.Name == "" {
+		return errors.NotValidf("missing Name")
+	}
+	if a.MACAddress == "" {
+		return errors.NotValidf("missing MACAddress")
+	}
+	if a.VLAN == nil {
+		return errors.NotValidf("missing VLAN")
+	}
+	return nil
+}
+
+// interfacesURI returns the URI used to add interfaces for this device. The
+// operations are on the nodes endpoint, not devices.
+func (d *device) interfacesURI() string {
+	return strings.Replace(d.resourceURI, "devices", "nodes", 1) + "interfaces/"
+}
+
+// CreateInterface implements Device.
+func (d *device) CreateInterface(args CreateInterfaceArgs) (Interface, error) {
+	if err := args.Validate(); err != nil {
+		return nil, errors.Trace(err)
+	}
+	params := NewURLParams()
+	params.Values.Add("name", args.Name)
+	params.Values.Add("mac_address", args.MACAddress)
+	params.Values.Add("vlan", fmt.Sprint(args.VLAN.ID()))
+	params.MaybeAdd("tags", strings.Join(args.Tags, ","))
+	params.MaybeAddInt("mtu", args.MTU)
+	params.MaybeAddBool("accept_ra", args.AcceptRA)
+	params.MaybeAddBool("autoconf", args.Autoconf)
+	result, err := d.controller.post(d.interfacesURI(), "create_physical", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound, http.StatusConflict:
+				return nil, errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return nil, errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			case http.StatusServiceUnavailable:
+				return nil, errors.Wrap(err, NewCannotCompleteError(svrErr.BodyMessage))
+			}
+		}
+		return nil, NewUnexpectedError(err)
+	}
+
+	iface, err := readInterface(d.controller.apiVersion, result)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	iface.controller = d.controller
+
+	// TODO: add to the interfaces for the device when the interfaces are returned.
+	// lp:bug 1567213.
+	return iface, nil
+}
+
+// Delete implements Device.
+func (d *device) Delete() error {
+	err := d.controller.delete(d.resourceURI)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
+
+func readDevice(controllerVersion version.Number, source interface{}) (*device, error) {
+	readFunc, err := getDeviceDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.StringMap(schema.Any())
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "device base schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	return readFunc(valid)
+}
+
+func readDevices(controllerVersion version.Number, source interface{}) ([]*device, error) {
+	readFunc, err := getDeviceDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "device base schema check failed")
+	}
+	valid := coerced.([]interface{})
+	return readDeviceList(valid, readFunc)
+}
+
+func getDeviceDeserializationFunc(controllerVersion version.Number) (deviceDeserializationFunc, error) {
+	var deserialisationVersion version.Number
+	for v := range deviceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no device read func for version %s", controllerVersion)
+	}
+	return deviceDeserializationFuncs[deserialisationVersion], nil
+}
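+
+// To illustrate the selection loop above: with read funcs registered for
+// versions 2.0 and (hypothetically) 2.5, a controller reporting 2.3 gets the
+// 2.0 deserializer, while one reporting 1.9 matches nothing and yields an
+// UnsupportedVersionError. The same pattern recurs for fabrics, files, and
+// interfaces.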
+
+// readDeviceList expects the values of the sourceList to be string maps.
+func readDeviceList(sourceList []interface{}, readFunc deviceDeserializationFunc) ([]*device, error) {
+	result := make([]*device, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for device %d, %T", i, value)
+		}
+		device, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "device %d", i)
+		}
+		result = append(result, device)
+	}
+	return result, nil
+}
+
+type deviceDeserializationFunc func(map[string]interface{}) (*device, error)
+
+var deviceDeserializationFuncs = map[version.Number]deviceDeserializationFunc{
+	twoDotOh: device_2_0,
+}
+
+func device_2_0(source map[string]interface{}) (*device, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"system_id": schema.String(),
+		"hostname":  schema.String(),
+		"fqdn":      schema.String(),
+		"parent":    schema.OneOf(schema.Nil(""), schema.String()),
+		"owner":     schema.OneOf(schema.Nil(""), schema.String()),
+
+		"ip_addresses":  schema.List(schema.String()),
+		"interface_set": schema.List(schema.StringMap(schema.Any())),
+		"zone":          schema.StringMap(schema.Any()),
+	}
+	defaults := schema.Defaults{
+		"owner":  "",
+		"parent": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "device 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	interfaceSet, err := readInterfaceList(valid["interface_set"].([]interface{}), interface_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	zone, err := zone_2_0(valid["zone"].(map[string]interface{}))
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	owner, _ := valid["owner"].(string)
+	parent, _ := valid["parent"].(string)
+	result := &device{
+		resourceURI: valid["resource_uri"].(string),
+
+		systemID: valid["system_id"].(string),
+		hostname: valid["hostname"].(string),
+		fqdn:     valid["fqdn"].(string),
+		parent:   parent,
+		owner:    owner,
+
+		ipAddresses:  convertToStringSlice(valid["ip_addresses"]),
+		interfaceSet: interfaceSet,
+		zone:         zone,
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/enum.go b/automation/vendor/github.com/juju/gomaasapi/enum.go
new file mode 100644
index 0000000..a516d6b
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/enum.go
@@ -0,0 +1,57 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+const (
+	// NodeStatus* values represent the vocabulary of a Node's possible statuses.
+
+	// The node has been created and has a system ID assigned to it.
+	NodeStatusDeclared = "0"
+
+	// Testing and other commissioning steps are taking place.
+	NodeStatusCommissioning = "1"
+
+	// Smoke or burn-in testing has found a problem.
+	NodeStatusFailedTests = "2"
+
+	// The node can't be contacted.
+	NodeStatusMissing = "3"
+
+	// The node is in the general pool ready to be deployed.
+	NodeStatusReady = "4"
+
+	// The node is ready for named deployment.
+	NodeStatusReserved = "5"
+
+	// The node is powering a service from a charm or is ready for use with a fresh Ubuntu install.
+	NodeStatusDeployed = "6"
+
+	// The node has been removed from service manually until an admin overrides the retirement.
+	NodeStatusRetired = "7"
+
+	// The node is broken: a step in the node lifecycle failed. More details
+	// can be found in the node's event log.
+	NodeStatusBroken = "8"
+
+	// The node is being installed.
+	NodeStatusDeploying = "9"
+
+	// The node has been allocated to a user and is ready for deployment.
+	NodeStatusAllocated = "10"
+
+	// The deployment of the node failed.
+	NodeStatusFailedDeployment = "11"
+
+	// The node is powering down after a release request.
+	NodeStatusReleasing = "12"
+
+	// The releasing of the node failed.
+	NodeStatusFailedReleasing = "13"
+
+	// The node is erasing its disks.
+	NodeStatusDiskErasing = "14"
+
+	// The node failed to erase its disks.
+	NodeStatusFailedDiskErasing = "15"
+)
diff --git a/automation/vendor/github.com/juju/gomaasapi/errors.go b/automation/vendor/github.com/juju/gomaasapi/errors.go
new file mode 100644
index 0000000..8931d56
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/errors.go
@@ -0,0 +1,161 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+
+	"github.com/juju/errors"
+)
+
+// NoMatchError is returned when the requested action cannot be performed
+// because no entities are available that match the request.
+type NoMatchError struct {
+	errors.Err
+}
+
+// NewNoMatchError constructs a new NoMatchError and sets the location.
+func NewNoMatchError(message string) error {
+	err := &NoMatchError{Err: errors.NewErr(message)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsNoMatchError returns true if err is a NoMatchError.
+func IsNoMatchError(err error) bool {
+	_, ok := errors.Cause(err).(*NoMatchError)
+	return ok
+}
+
+// UnexpectedError is an error for a condition that hasn't been classified
+// into one of the more specific error types in this package.
+type UnexpectedError struct {
+	errors.Err
+}
+
+// NewUnexpectedError constructs a new UnexpectedError and sets the location.
+func NewUnexpectedError(err error) error {
+	uerr := &UnexpectedError{Err: errors.NewErr("unexpected: %v", err)}
+	uerr.SetLocation(1)
+	return errors.Wrap(err, uerr)
+}
+
+// IsUnexpectedError returns true if err is an UnexpectedError.
+func IsUnexpectedError(err error) bool {
+	_, ok := errors.Cause(err).(*UnexpectedError)
+	return ok
+}
+
+// UnsupportedVersionError refers to calls made to an unsupported api version.
+type UnsupportedVersionError struct {
+	errors.Err
+}
+
+// NewUnsupportedVersionError constructs a new UnsupportedVersionError and sets the location.
+func NewUnsupportedVersionError(format string, args ...interface{}) error {
+	err := &UnsupportedVersionError{Err: errors.NewErr(format, args...)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsUnsupportedVersionError returns true if err is an UnsupportedVersionError.
+func IsUnsupportedVersionError(err error) bool {
+	_, ok := errors.Cause(err).(*UnsupportedVersionError)
+	return ok
+}
+
+// A DeserializationError is returned when the JSON data from the controller
+// doesn't match the code's expectations.
+type DeserializationError struct {
+	errors.Err
+}
+
+// NewDeserializationError constructs a new DeserializationError and sets the location.
+func NewDeserializationError(format string, args ...interface{}) error {
+	err := &DeserializationError{Err: errors.NewErr(format, args...)}
+	err.SetLocation(1)
+	return err
+}
+
+// WrapWithDeserializationError constructs a new DeserializationError with the
+// specified message, and sets the location and returns a new error with the
+// full error stack set including the error passed in.
+func WrapWithDeserializationError(err error, format string, args ...interface{}) error {
+	message := fmt.Sprintf(format, args...)
+	// We want the deserialization error message to include the error text of the
+	// previous error, but wrap it in the new type.
+	derr := &DeserializationError{Err: errors.NewErr(message + ": " + err.Error())}
+	derr.SetLocation(1)
+	wrapped := errors.Wrap(err, derr)
+	// We want the location of the wrapped error to be the caller of this function,
+	// not the line above.
+	if errType, ok := wrapped.(*errors.Err); ok {
+		// We know it is because that is what Wrap returns.
+		errType.SetLocation(1)
+	}
+	return wrapped
+}
+
+// IsDeserializationError returns true if err is a DeserializationError.
+func IsDeserializationError(err error) bool {
+	_, ok := errors.Cause(err).(*DeserializationError)
+	return ok
+}
+
+// BadRequestError is returned when the requested action cannot be performed
+// due to bad or incorrect parameters passed to the server.
+type BadRequestError struct {
+	errors.Err
+}
+
+// NewBadRequestError constructs a new BadRequestError and sets the location.
+func NewBadRequestError(message string) error {
+	err := &BadRequestError{Err: errors.NewErr(message)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsBadRequestError returns true if err is a BadRequestError.
+func IsBadRequestError(err error) bool {
+	_, ok := errors.Cause(err).(*BadRequestError)
+	return ok
+}
+
+// PermissionError is returned when the user does not have permission to do the
+// requested action.
+type PermissionError struct {
+	errors.Err
+}
+
+// NewPermissionError constructs a new PermissionError and sets the location.
+func NewPermissionError(message string) error {
+	err := &PermissionError{Err: errors.NewErr(message)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsPermissionError returns true if err is a PermissionError.
+func IsPermissionError(err error) bool {
+	_, ok := errors.Cause(err).(*PermissionError)
+	return ok
+}
+
+// CannotCompleteError is returned when the requested action is unable to
+// complete for some server side reason.
+type CannotCompleteError struct {
+	errors.Err
+}
+
+// NewCannotCompleteError constructs a new CannotCompleteError and sets the location.
+func NewCannotCompleteError(message string) error {
+	err := &CannotCompleteError{Err: errors.NewErr(message)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsCannotCompleteError returns true if err is a CannotCompleteError.
+func IsCannotCompleteError(err error) bool {
+	_, ok := errors.Cause(err).(*CannotCompleteError)
+	return ok
+}
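+
+// A sketch of how callers can use these predicates to branch on failure
+// (hypothetical calling code, not part of this package):
+//
+//	if err := machine.Start(args); err != nil {
+//		switch {
+//		case IsBadRequestError(err):
+//			// arguments were rejected; fix them before retrying
+//		case IsCannotCompleteError(err):
+//			// transient server-side condition; retry later
+//		default:
+//			// bubble up anything unexpected
+//		}
+//	}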
diff --git a/automation/vendor/github.com/juju/gomaasapi/fabric.go b/automation/vendor/github.com/juju/gomaasapi/fabric.go
new file mode 100644
index 0000000..e38a61a
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/fabric.go
@@ -0,0 +1,128 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type fabric struct {
+	// Add the controller in when we need to do things with the fabric.
+	// controller Controller
+
+	resourceURI string
+
+	id        int
+	name      string
+	classType string
+
+	vlans []*vlan
+}
+
+// ID implements Fabric.
+func (f *fabric) ID() int {
+	return f.id
+}
+
+// Name implements Fabric.
+func (f *fabric) Name() string {
+	return f.name
+}
+
+// ClassType implements Fabric.
+func (f *fabric) ClassType() string {
+	return f.classType
+}
+
+// VLANs implements Fabric.
+func (f *fabric) VLANs() []VLAN {
+	var result []VLAN
+	for _, v := range f.vlans {
+		result = append(result, v)
+	}
+	return result
+}
+
+func readFabrics(controllerVersion version.Number, source interface{}) ([]*fabric, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "fabric base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range fabricDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no fabric read func for version %s", controllerVersion)
+	}
+	readFunc := fabricDeserializationFuncs[deserialisationVersion]
+	return readFabricList(valid, readFunc)
+}
+
+// readFabricList expects the values of the sourceList to be string maps.
+func readFabricList(sourceList []interface{}, readFunc fabricDeserializationFunc) ([]*fabric, error) {
+	result := make([]*fabric, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for fabric %d, %T", i, value)
+		}
+		fabric, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "fabric %d", i)
+		}
+		result = append(result, fabric)
+	}
+	return result, nil
+}
+
+type fabricDeserializationFunc func(map[string]interface{}) (*fabric, error)
+
+var fabricDeserializationFuncs = map[version.Number]fabricDeserializationFunc{
+	twoDotOh: fabric_2_0,
+}
+
+func fabric_2_0(source map[string]interface{}) (*fabric, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+		"id":           schema.ForceInt(),
+		"name":         schema.String(),
+		"class_type":   schema.OneOf(schema.Nil(""), schema.String()),
+		"vlans":        schema.List(schema.StringMap(schema.Any())),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "fabric 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	vlans, err := readVLANList(valid["vlans"].([]interface{}), vlan_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	// Since class_type is optional, we use the two-value type assertion. If
+	// the assertion fails we get the zero value, which is the empty string we
+	// want as the default.
+	classType, _ := valid["class_type"].(string)
+
+	result := &fabric{
+		resourceURI: valid["resource_uri"].(string),
+		id:          valid["id"].(int),
+		name:        valid["name"].(string),
+		classType:   classType,
+		vlans:       vlans,
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/file.go b/automation/vendor/github.com/juju/gomaasapi/file.go
new file mode 100644
index 0000000..63fb854
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/file.go
@@ -0,0 +1,181 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/base64"
+	"net/http"
+	"net/url"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type file struct {
+	controller *controller
+
+	resourceURI  string
+	filename     string
+	anonymousURI *url.URL
+	content      string
+}
+
+// Filename implements File.
+func (f *file) Filename() string {
+	return f.filename
+}
+
+// AnonymousURL implements File.
+func (f *file) AnonymousURL() string {
+	url := f.controller.client.GetURL(f.anonymousURI)
+	return url.String()
+}
+
+// Delete implements File.
+func (f *file) Delete() error {
+	err := f.controller.delete(f.resourceURI)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
+
+// ReadAll implements File.
+func (f *file) ReadAll() ([]byte, error) {
+	if f.content == "" {
+		return f.readFromServer()
+	}
+	bytes, err := base64.StdEncoding.DecodeString(f.content)
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	return bytes, nil
+}
+
+func (f *file) readFromServer() ([]byte, error) {
+	// The content wasn't included in the file details, so fetch the raw
+	// bytes from the server using the "get" op.
+	args := make(url.Values)
+	args.Add("filename", f.filename)
+	bytes, err := f.controller._getRaw("files", "get", args)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return nil, errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return nil, errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return nil, NewUnexpectedError(err)
+	}
+	return bytes, nil
+}
+
+func readFiles(controllerVersion version.Number, source interface{}) ([]*file, error) {
+	readFunc, err := getFileDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "file base schema check failed")
+	}
+	valid := coerced.([]interface{})
+	return readFileList(valid, readFunc)
+}
+
+func readFile(controllerVersion version.Number, source interface{}) (*file, error) {
+	readFunc, err := getFileDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.StringMap(schema.Any())
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "file base schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	return readFunc(valid)
+}
+
+func getFileDeserializationFunc(controllerVersion version.Number) (fileDeserializationFunc, error) {
+	var deserialisationVersion version.Number
+	for v := range fileDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no file read func for version %s", controllerVersion)
+	}
+	return fileDeserializationFuncs[deserialisationVersion], nil
+}
+
+// readFileList expects the values of the sourceList to be string maps.
+func readFileList(sourceList []interface{}, readFunc fileDeserializationFunc) ([]*file, error) {
+	result := make([]*file, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for file %d, %T", i, value)
+		}
+		file, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "file %d", i)
+		}
+		result = append(result, file)
+	}
+	return result, nil
+}
+
+type fileDeserializationFunc func(map[string]interface{}) (*file, error)
+
+var fileDeserializationFuncs = map[version.Number]fileDeserializationFunc{
+	twoDotOh: file_2_0,
+}
+
+func file_2_0(source map[string]interface{}) (*file, error) {
+	fields := schema.Fields{
+		"resource_uri":      schema.String(),
+		"filename":          schema.String(),
+		"anon_resource_uri": schema.String(),
+		"content":           schema.String(),
+	}
+	defaults := schema.Defaults{
+		"content": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "file 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	anonURI, err := url.ParseRequestURI(valid["anon_resource_uri"].(string))
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+
+	result := &file{
+		resourceURI:  valid["resource_uri"].(string),
+		filename:     valid["filename"].(string),
+		anonymousURI: anonURI,
+		content:      valid["content"].(string),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/filesystem.go b/automation/vendor/github.com/juju/gomaasapi/filesystem.go
new file mode 100644
index 0000000..4514e52
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/filesystem.go
@@ -0,0 +1,69 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import "github.com/juju/schema"
+
+type filesystem struct {
+	fstype     string
+	mountPoint string
+	label      string
+	uuid       string
+	// no idea what the mount_options are as a value type, so ignoring for now.
+}
+
+// Type implements FileSystem.
+func (f *filesystem) Type() string {
+	return f.fstype
+}
+
+// MountPoint implements FileSystem.
+func (f *filesystem) MountPoint() string {
+	return f.mountPoint
+}
+
+// Label implements FileSystem.
+func (f *filesystem) Label() string {
+	return f.label
+}
+
+// UUID implements FileSystem.
+func (f *filesystem) UUID() string {
+	return f.uuid
+}
+
+// There is no controller-based parsing of filesystems yet; currently the
+// filesystem reading is only called from the partition parsing.
+
+func filesystem2_0(source map[string]interface{}) (*filesystem, error) {
+	fields := schema.Fields{
+		"fstype":      schema.String(),
+		"mount_point": schema.OneOf(schema.Nil(""), schema.String()),
+		"label":       schema.OneOf(schema.Nil(""), schema.String()),
+		"uuid":        schema.String(),
+		// TODO: mount_options when we know the type (note it can be
+		// nil).
+	}
+	defaults := schema.Defaults{
+		"mount_point": "",
+		"label":       "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "filesystem 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+	mountPoint, _ := valid["mount_point"].(string)
+	label, _ := valid["label"].(string)
+	result := &filesystem{
+		fstype:     valid["fstype"].(string),
+		mountPoint: mountPoint,
+		label:      label,
+		uuid:       valid["uuid"].(string),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/gomaasapi.go b/automation/vendor/github.com/juju/gomaasapi/gomaasapi.go
new file mode 100644
index 0000000..f457e29
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/gomaasapi.go
@@ -0,0 +1,4 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
diff --git a/automation/vendor/github.com/juju/gomaasapi/interface.go b/automation/vendor/github.com/juju/gomaasapi/interface.go
new file mode 100644
index 0000000..f30a9a8
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/interface.go
@@ -0,0 +1,440 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+// Can't use "interface" as a type name, since it's a Go keyword, so add an
+// underscore.
+type interface_ struct {
+	controller *controller
+
+	resourceURI string
+
+	id      int
+	name    string
+	type_   string
+	enabled bool
+	tags    []string
+
+	vlan  *vlan
+	links []*link
+
+	macAddress   string
+	effectiveMTU int
+
+	parents  []string
+	children []string
+}
+
+func (i *interface_) updateFrom(other *interface_) {
+	i.resourceURI = other.resourceURI
+	i.id = other.id
+	i.name = other.name
+	i.type_ = other.type_
+	i.enabled = other.enabled
+	i.tags = other.tags
+	i.vlan = other.vlan
+	i.links = other.links
+	i.macAddress = other.macAddress
+	i.effectiveMTU = other.effectiveMTU
+	i.parents = other.parents
+	i.children = other.children
+}
+
+// ID implements Interface.
+func (i *interface_) ID() int {
+	return i.id
+}
+
+// Name implements Interface.
+func (i *interface_) Name() string {
+	return i.name
+}
+
+// Parents implements Interface.
+func (i *interface_) Parents() []string {
+	return i.parents
+}
+
+// Children implements Interface.
+func (i *interface_) Children() []string {
+	return i.children
+}
+
+// Type implements Interface.
+func (i *interface_) Type() string {
+	return i.type_
+}
+
+// Enabled implements Interface.
+func (i *interface_) Enabled() bool {
+	return i.enabled
+}
+
+// Tags implements Interface.
+func (i *interface_) Tags() []string {
+	return i.tags
+}
+
+// VLAN implements Interface.
+func (i *interface_) VLAN() VLAN {
+	if i.vlan == nil {
+		return nil
+	}
+	return i.vlan
+}
+
+// Links implements Interface.
+func (i *interface_) Links() []Link {
+	result := make([]Link, len(i.links))
+	for i, link := range i.links {
+		result[i] = link
+	}
+	return result
+}
+
+// MACAddress implements Interface.
+func (i *interface_) MACAddress() string {
+	return i.macAddress
+}
+
+// EffectiveMTU implements Interface.
+func (i *interface_) EffectiveMTU() int {
+	return i.effectiveMTU
+}
+
+// UpdateInterfaceArgs is an argument struct for calling Interface.Update.
+type UpdateInterfaceArgs struct {
+	Name       string
+	MACAddress string
+	VLAN       VLAN
+}
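+
+// Only fields that are set get sent; a zero-value args struct is a no-op (see
+// Update below). For example (a sketch, assuming iface is an Interface):
+//
+//	err := iface.Update(UpdateInterfaceArgs{Name: "eth0.100"})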
+
+func (a *UpdateInterfaceArgs) vlanID() int {
+	if a.VLAN == nil {
+		return 0
+	}
+	return a.VLAN.ID()
+}
+
+// Update implements Interface.
+func (i *interface_) Update(args UpdateInterfaceArgs) error {
+	var empty UpdateInterfaceArgs
+	if args == empty {
+		return nil
+	}
+	params := NewURLParams()
+	params.MaybeAdd("name", args.Name)
+	params.MaybeAdd("mac_address", args.MACAddress)
+	params.MaybeAddInt("vlan", args.vlanID())
+	source, err := i.controller.put(i.resourceURI, params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	response, err := readInterface(i.controller.apiVersion, source)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	i.updateFrom(response)
+	return nil
+}
+
+// Delete implements Interface.
+func (i *interface_) Delete() error {
+	err := i.controller.delete(i.resourceURI)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
+
+// InterfaceLinkMode is the type of the various link mode constants used for
+// LinkSubnetArgs.
+type InterfaceLinkMode string
+
+const (
+	// LinkModeDHCP - Bring the interface up with DHCP on the given subnet. Only
+	// one subnet can be set to DHCP. If the subnet is managed this interface
+	// will pull from the dynamic IP range.
+	LinkModeDHCP InterfaceLinkMode = "DHCP"
+
+	// LinkModeStatic - Bring the interface up with a STATIC IP address on the
+	// given subnet. Any number of STATIC links can exist on an interface.
+	LinkModeStatic InterfaceLinkMode = "STATIC"
+
+	// LinkModeLinkUp - Bring the interface up only on the given subnet. No IP
+	// address will be assigned to this interface. The interface cannot have any
+	// current DHCP or STATIC links.
+	LinkModeLinkUp InterfaceLinkMode = "LINK_UP"
+)
+
+// LinkSubnetArgs is an argument struct for passing parameters to
+// the Interface.LinkSubnet method.
+type LinkSubnetArgs struct {
+	// Mode is used to describe how the address is provided for the Link.
+	// Required field.
+	Mode InterfaceLinkMode
+	// Subnet is the subnet to link to. Required field.
+	Subnet Subnet
+	// IPAddress is only valid when the Mode is set to LinkModeStatic. If
+	// not specified with a Mode of LinkModeStatic, an IP address from the
+	// subnet will be auto selected.
+	IPAddress string
+	// DefaultGateway will set the gateway IP address for the Subnet as the
+	// default gateway for the machine or device the interface belongs to.
+	// Option can only be used with mode LinkModeStatic.
+	DefaultGateway bool
+}
+
+// Validate ensures that the Mode and Subnet are set, and that the other options
+// are consistent with the Mode.
+func (a *LinkSubnetArgs) Validate() error {
+	switch a.Mode {
+	case LinkModeDHCP, LinkModeLinkUp, LinkModeStatic:
+	case "":
+		return errors.NotValidf("missing Mode")
+	default:
+		return errors.NotValidf("unknown Mode value (%q)", a.Mode)
+	}
+	if a.Subnet == nil {
+		return errors.NotValidf("missing Subnet")
+	}
+	if a.IPAddress != "" && a.Mode != LinkModeStatic {
+		return errors.NotValidf("setting IP Address when Mode is not LinkModeStatic")
+	}
+	if a.DefaultGateway && a.Mode != LinkModeStatic {
+		return errors.NotValidf("specifying DefaultGateway for Mode %q", a.Mode)
+	}
+	return nil
+}
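+
+// A hedged example of a static link (subnet is assumed to have been looked up
+// through the controller beforehand):
+//
+//	err := iface.LinkSubnet(LinkSubnetArgs{
+//		Mode:      LinkModeStatic,
+//		Subnet:    subnet,
+//		IPAddress: "192.168.1.10",
+//	})
+//
+// Omitting IPAddress with LinkModeStatic lets the controller auto-select an
+// address from the subnet.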
+
+// LinkSubnet implements Interface.
+func (i *interface_) LinkSubnet(args LinkSubnetArgs) error {
+	if err := args.Validate(); err != nil {
+		return errors.Trace(err)
+	}
+	params := NewURLParams()
+	params.Values.Add("mode", string(args.Mode))
+	params.Values.Add("subnet", fmt.Sprint(args.Subnet.ID()))
+	params.MaybeAdd("ip_address", args.IPAddress)
+	params.MaybeAddBool("default_gateway", args.DefaultGateway)
+	source, err := i.controller.post(i.resourceURI, "link_subnet", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound, http.StatusBadRequest:
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			case http.StatusServiceUnavailable:
+				return errors.Wrap(err, NewCannotCompleteError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	response, err := readInterface(i.controller.apiVersion, source)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	i.updateFrom(response)
+	return nil
+}
+
+func (i *interface_) linkForSubnet(subnet Subnet) *link {
+	for _, link := range i.links {
+		if s := link.Subnet(); s != nil && s.ID() == subnet.ID() {
+			return link
+		}
+	}
+	return nil
+}
+
+// UnlinkSubnet implements Interface.
+func (i *interface_) UnlinkSubnet(subnet Subnet) error {
+	if subnet == nil {
+		return errors.NotValidf("missing Subnet")
+	}
+	link := i.linkForSubnet(subnet)
+	if link == nil {
+		return errors.NotValidf("unlinked Subnet")
+	}
+	params := NewURLParams()
+	params.Values.Add("id", fmt.Sprint(link.ID()))
+	source, err := i.controller.post(i.resourceURI, "unlink_subnet", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound, http.StatusBadRequest:
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	response, err := readInterface(i.controller.apiVersion, source)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	i.updateFrom(response)
+
+	return nil
+}
+
+func readInterface(controllerVersion version.Number, source interface{}) (*interface_, error) {
+	readFunc, err := getInterfaceDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.StringMap(schema.Any())
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "interface base schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	return readFunc(valid)
+}
+
+func readInterfaces(controllerVersion version.Number, source interface{}) ([]*interface_, error) {
+	readFunc, err := getInterfaceDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "interface base schema check failed")
+	}
+	valid := coerced.([]interface{})
+	return readInterfaceList(valid, readFunc)
+}
+
+func getInterfaceDeserializationFunc(controllerVersion version.Number) (interfaceDeserializationFunc, error) {
+	var deserialisationVersion version.Number
+	for v := range interfaceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no interface read func for version %s", controllerVersion)
+	}
+	return interfaceDeserializationFuncs[deserialisationVersion], nil
+}
+
+func readInterfaceList(sourceList []interface{}, readFunc interfaceDeserializationFunc) ([]*interface_, error) {
+	result := make([]*interface_, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for interface %d, %T", i, value)
+		}
+		read, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "interface %d", i)
+		}
+		result = append(result, read)
+	}
+	return result, nil
+}
+
+type interfaceDeserializationFunc func(map[string]interface{}) (*interface_, error)
+
+var interfaceDeserializationFuncs = map[version.Number]interfaceDeserializationFunc{
+	twoDotOh: interface_2_0,
+}
+
+func interface_2_0(source map[string]interface{}) (*interface_, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"id":      schema.ForceInt(),
+		"name":    schema.String(),
+		"type":    schema.String(),
+		"enabled": schema.Bool(),
+		"tags":    schema.OneOf(schema.Nil(""), schema.List(schema.String())),
+
+		"vlan":  schema.OneOf(schema.Nil(""), schema.StringMap(schema.Any())),
+		"links": schema.List(schema.StringMap(schema.Any())),
+
+		"mac_address":   schema.OneOf(schema.Nil(""), schema.String()),
+		"effective_mtu": schema.ForceInt(),
+
+		"parents":  schema.List(schema.String()),
+		"children": schema.List(schema.String()),
+	}
+	defaults := schema.Defaults{
+		"mac_address": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "interface 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	var vlan *vlan
+	// If it's not an attribute map then we know it's nil from the schema check.
+	if vlanMap, ok := valid["vlan"].(map[string]interface{}); ok {
+		vlan, err = vlan_2_0(vlanMap)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+
+	links, err := readLinkList(valid["links"].([]interface{}), link_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	macAddress, _ := valid["mac_address"].(string)
+	result := &interface_{
+		resourceURI: valid["resource_uri"].(string),
+
+		id:      valid["id"].(int),
+		name:    valid["name"].(string),
+		type_:   valid["type"].(string),
+		enabled: valid["enabled"].(bool),
+		tags:    convertToStringSlice(valid["tags"]),
+
+		vlan:  vlan,
+		links: links,
+
+		macAddress:   macAddress,
+		effectiveMTU: valid["effective_mtu"].(int),
+
+		parents:  convertToStringSlice(valid["parents"]),
+		children: convertToStringSlice(valid["children"]),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/interfaces.go b/automation/vendor/github.com/juju/gomaasapi/interfaces.go
new file mode 100644
index 0000000..6b80115
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/interfaces.go
@@ -0,0 +1,362 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import "github.com/juju/utils/set"
+
+const (
+	// Capability constants.
+	NetworksManagement      = "networks-management"
+	StaticIPAddresses       = "static-ipaddresses"
+	IPv6DeploymentUbuntu    = "ipv6-deployment-ubuntu"
+	DevicesManagement       = "devices-management"
+	StorageDeploymentUbuntu = "storage-deployment-ubuntu"
+	NetworkDeploymentUbuntu = "network-deployment-ubuntu"
+)
+
+// Controller represents an API connection to a MAAS Controller. Since the API
+// is restful, there is no long held connection to the API server, but instead
+// HTTP calls are made and JSON response structures parsed.
+type Controller interface {
+
+	// Capabilities returns a set of capabilities as defined by the string
+	// constants.
+	Capabilities() set.Strings
+
+	BootResources() ([]BootResource, error)
+
+	// Fabrics returns the list of Fabrics defined in the MAAS controller.
+	Fabrics() ([]Fabric, error)
+
+	// Spaces returns the list of Spaces defined in the MAAS controller.
+	Spaces() ([]Space, error)
+
+	// Zones lists all the zones known to the MAAS controller.
+	Zones() ([]Zone, error)
+
+	// Machines returns a list of machines that match the params.
+	Machines(MachinesArgs) ([]Machine, error)
+
+	// AllocateMachine will attempt to allocate a machine to the user.
+	// If successful, the allocated machine is returned.
+	AllocateMachine(AllocateMachineArgs) (Machine, ConstraintMatches, error)
+
+	// ReleaseMachines will stop the specified machines, and release them
+	// from the user making them available to be allocated again.
+	ReleaseMachines(ReleaseMachinesArgs) error
+
+	// Devices returns a list of devices that match the params.
+	Devices(DevicesArgs) ([]Device, error)
+
+	// CreateDevice creates and returns a new Device.
+	CreateDevice(CreateDeviceArgs) (Device, error)
+
+	// Files returns all the files that match the specified prefix.
+	Files(prefix string) ([]File, error)
+
+	// GetFile returns a single file by its filename.
+	GetFile(filename string) (File, error)
+
+	// AddFile adds or replaces the content of the specified filename.
+	// If or when the MAAS api is able to return metadata about a single
+	// file without sending the content of the file, we can return a File
+	// instance here too.
+	AddFile(AddFileArgs) error
+}
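+
+// A sketch of typical client usage, assuming the NewController constructor
+// and ControllerArgs struct defined elsewhere in this package (the URL and
+// key below are placeholders):
+//
+//	controller, err := NewController(ControllerArgs{
+//		BaseURL: "http://maas.example.com/MAAS",
+//		APIKey:  "<consumer>:<token>:<secret>",
+//	})
+//	if err != nil {
+//		// handle the error
+//	}
+//	machines, err := controller.Machines(MachinesArgs{})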
+
+// File represents a file stored in the MAAS controller.
+type File interface {
+	// Filename is the name of the file. No path, just the filename.
+	Filename() string
+
+	// AnonymousURL is a URL that can be used to retrieve the contents of the
+	// file without credentials.
+	AnonymousURL() string
+
+	// Delete removes the file from the MAAS controller.
+	Delete() error
+
+	// ReadAll returns the content of the file.
+	ReadAll() ([]byte, error)
+}
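+
+// A sketch of storing and retrieving a file (names and content here are
+// illustrative):
+//
+//	err := controller.AddFile(AddFileArgs{
+//		Filename: "cloud-init.userdata",
+//		Content:  []byte("#cloud-config\n"),
+//	})
+//	file, err := controller.GetFile("cloud-init.userdata")
+//	data, err := file.ReadAll()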
+
+// Fabric represents a set of interconnected VLANs that are capable of mutual
+// communication. A fabric can be thought of as a logical grouping in which
+// VLANs can be considered unique.
+//
+// For example, a distributed network may have a fabric in London containing
+// VLAN 100, while a separate fabric in San Francisco may contain a VLAN 100,
+// whose attached subnets are completely different and unrelated.
+type Fabric interface {
+	ID() int
+	Name() string
+	ClassType() string
+
+	VLANs() []VLAN
+}
+
+// VLAN represents an instance of a Virtual LAN. VLANs are a common way to
+// create logically separate networks using the same physical infrastructure.
+//
+// Managed switches can assign VLANs to each port in either a “tagged” or an
+// “untagged” manner. A VLAN is said to be “untagged” on a particular port when
+// it is the default VLAN for that port, and requires no special configuration
+// in order to access.
+//
+// “Tagged” VLANs (traditionally used by network administrators in order to
+// aggregate multiple networks over inter-switch “trunk” lines) can also be used
+// with nodes in MAAS. That is, if a switch port is configured such that
+// “tagged” VLAN frames can be sent and received by a MAAS node, that MAAS node
+// can be configured to automatically bring up VLAN interfaces, so that the
+// deployed node can make use of them.
+//
+// A “Default VLAN” is created for every Fabric, to which every new VLAN-aware
+// object in the fabric will be associated to by default (unless otherwise
+// specified).
+type VLAN interface {
+	ID() int
+	Name() string
+	Fabric() string
+
+	// VID is the VLAN ID. eth0.10 -> VID = 10.
+	VID() int
+	// MTU (maximum transmission unit) is the largest size packet or frame,
+	// specified in octets (eight-bit bytes), that can be sent.
+	MTU() int
+	DHCP() bool
+
+	PrimaryRack() string
+	SecondaryRack() string
+}
+
+// Zone represents a physical zone that a Machine is in. The meaning of a
+// physical zone is up to you: it could identify e.g. a server rack, a network,
+// or a data centre. Users can then allocate nodes from specific physical zones,
+// to suit their redundancy or performance requirements.
+type Zone interface {
+	Name() string
+	Description() string
+}
+
+// BootResource represents an image or other resource that the MAAS controller
+// can use to boot and install a node.
+type BootResource interface {
+	ID() int
+	Name() string
+	Type() string
+	Architecture() string
+	SubArchitectures() set.Strings
+	KernelFlavor() string
+}
+
+// Device represents some form of device in MAAS.
+type Device interface {
+	// TODO: add domain
+	SystemID() string
+	Hostname() string
+	FQDN() string
+	IPAddresses() []string
+	Zone() Zone
+
+	// Parent returns the SystemID of the Parent. Most often this will be a
+	// Machine.
+	Parent() string
+
+	// Owner is the username of the user that created the device.
+	Owner() string
+
+	// InterfaceSet returns all the interfaces for the Device.
+	InterfaceSet() []Interface
+
+	// CreateInterface will create a physical interface for this machine.
+	CreateInterface(CreateInterfaceArgs) (Interface, error)
+
+	// Delete will remove this Device.
+	Delete() error
+}
+
+// Machine represents a physical machine.
+type Machine interface {
+	OwnerDataHolder
+
+	SystemID() string
+	Hostname() string
+	FQDN() string
+	Tags() []string
+
+	OperatingSystem() string
+	DistroSeries() string
+	Architecture() string
+	Memory() int
+	CPUCount() int
+
+	IPAddresses() []string
+	PowerState() string
+
+	// Devices returns a list of devices that match the params and have
+	// this Machine as the parent.
+	Devices(DevicesArgs) ([]Device, error)
+
+	// Consider bundling the status values into a single struct, but first
+	// check for a consistent representation if exposed on other entities.
+
+	StatusName() string
+	StatusMessage() string
+
+	// BootInterface returns the interface that was used to boot the Machine.
+	BootInterface() Interface
+	// InterfaceSet returns all the interfaces for the Machine.
+	InterfaceSet() []Interface
+	// Interface returns the interface for the machine that matches the id
+	// specified. If there is no match, nil is returned.
+	Interface(id int) Interface
+
+	// PhysicalBlockDevices returns all the physical block devices on the machine.
+	PhysicalBlockDevices() []BlockDevice
+	// PhysicalBlockDevice returns the physical block device for the machine
+	// that matches the id specified. If there is no match, nil is returned.
+	PhysicalBlockDevice(id int) BlockDevice
+
+	// BlockDevices returns all the physical and virtual block devices on the machine.
+	BlockDevices() []BlockDevice
+
+	Zone() Zone
+
+	// Start the machine and install the operating system specified in the args.
+	Start(StartArgs) error
+
+	// CreateDevice creates a new Device with this Machine as the parent.
+	// The device will have one interface that is linked to the specified subnet.
+	CreateDevice(CreateMachineDeviceArgs) (Device, error)
+}
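+
+// A sketch of the allocate-and-deploy flow (constraints are omitted, and the
+// StartArgs field shown is an assumption about its definition in machine.go):
+//
+//	machine, matches, err := controller.AllocateMachine(AllocateMachineArgs{})
+//	if err == nil {
+//		err = machine.Start(StartArgs{DistroSeries: "xenial"})
+//	}
+//	_ = matches // constraint matches are populated when constraints are given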
+
+// Space is a name for a collection of Subnets.
+type Space interface {
+	ID() int
+	Name() string
+	Subnets() []Subnet
+}
+
+// Subnet refers to an IP range on a VLAN.
+type Subnet interface {
+	ID() int
+	Name() string
+	Space() string
+	VLAN() VLAN
+
+	Gateway() string
+	CIDR() string
+	// dns_mode
+
+	// DNSServers is a list of ip addresses of the DNS servers for the subnet.
+	// This list may be empty.
+	DNSServers() []string
+}
+
+// Interface represents a physical or virtual network interface on a Machine.
+type Interface interface {
+	ID() int
+	Name() string
+	// The parents of an interface are the names of interfaces that must exist
+	// for this interface to exist. For example, a parent of "eth0.100" would be
+	// "eth0". Parents may be empty.
+	Parents() []string
+	// The children interfaces are the names of those that are dependent on this
+	// interface existing. Children may be empty.
+	Children() []string
+	Type() string
+	Enabled() bool
+	Tags() []string
+
+	VLAN() VLAN
+	Links() []Link
+
+	MACAddress() string
+	EffectiveMTU() int
+
+	// Params is a JSON field, and defaults to an empty string, but is almost
+	// always a JSON object in practice. Gleefully ignoring it until we need it.
+
+	// Update the name, mac address or VLAN.
+	Update(UpdateInterfaceArgs) error
+
+	// Delete this interface.
+	Delete() error
+
+	// LinkSubnet will attempt to make this interface available on the specified
+	// Subnet.
+	LinkSubnet(LinkSubnetArgs) error
+
+	// UnlinkSubnet will remove the Link to the subnet, and release the IP
+	// address associated if there is one.
+	UnlinkSubnet(Subnet) error
+}
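+
+// Editor's illustrative sketch: linking an interface to a subnet with a
+// statically assigned address, using the LinkModeStatic constant defined
+// elsewhere in this package.
+//
+//	func attach(iface Interface, s Subnet) error {
+//		return iface.LinkSubnet(LinkSubnetArgs{Mode: LinkModeStatic, Subnet: s})
+//	}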
+
+// Link represents a network link between an Interface and a Subnet.
+type Link interface {
+	ID() int
+	Mode() string
+	Subnet() Subnet
+	// IPAddress returns the address if one has been assigned.
+	// If unavailable, the address will be empty.
+	IPAddress() string
+}
+
+// FileSystem represents a formatted filesystem mounted at a location.
+type FileSystem interface {
+	// Type is the format type, e.g. "ext4".
+	Type() string
+
+	MountPoint() string
+	Label() string
+	UUID() string
+}
+
+// Partition represents a partition of a block device. It may be mounted
+// as a filesystem.
+type Partition interface {
+	ID() int
+	Path() string
+	// FileSystem may be nil if not mounted.
+	FileSystem() FileSystem
+	UUID() string
+	// UsedFor is a human-readable string.
+	UsedFor() string
+	// Size is the number of bytes in the partition.
+	Size() uint64
+}
+
+// BlockDevice represents an entire block device on the machine.
+type BlockDevice interface {
+	ID() int
+	Name() string
+	Model() string
+	Path() string
+	UsedFor() string
+	Tags() []string
+
+	BlockSize() uint64
+	UsedSize() uint64
+	Size() uint64
+
+	Partitions() []Partition
+
+	// There are some other attributes for block devices, but we can
+	// expose them on an as-needed basis.
+}
+
+// OwnerDataHolder represents any MAAS object that can store key/value
+// data.
+type OwnerDataHolder interface {
+	// OwnerData returns a copy of the key/value data stored for this
+	// object.
+	OwnerData() map[string]string
+
+	// SetOwnerData updates the key/value data stored for this object
+	// with the values passed in. Existing keys that aren't specified
+	// in the map passed in will be left in place; to clear a key set
+	// its value to "". All owner data is cleared when the object is
+	// released.
+	SetOwnerData(map[string]string) error
+}
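+
+// Editor's illustrative sketch of the semantics described above: a single
+// call can both set and clear keys ("holder" is any OwnerDataHolder).
+//
+//	err := holder.SetOwnerData(map[string]string{
+//		"owner":    "automation", // added or updated
+//		"obsolete": "",           // cleared
+//	})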
diff --git a/automation/vendor/github.com/juju/gomaasapi/jsonobject.go b/automation/vendor/github.com/juju/gomaasapi/jsonobject.go
new file mode 100644
index 0000000..cdd3dc1
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/jsonobject.go
@@ -0,0 +1,215 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+)
+
+// JSONObject is a wrapper around a JSON structure which provides
+// methods to extract data from that structure.
+// A JSONObject provides a simple structure consisting of the data types
+// defined in JSON: string, number, object, list, and bool.  To get the
+// value you want out of a JSONObject, you must know (or figure out) which
+// kind of value you have, and then call the appropriate Get*() method to
+// get at it.  Reading an item as the wrong type will return an error.
+// For instance, if your JSONObject consists of a number, call GetFloat64()
+// to get the value as a float64.  If it's a list, call GetArray() to get
+// a slice of JSONObjects.  To read any given item from the slice, you'll
+// need to "Get" that as the right type as well.
+// There is one exception: a MAASObject is really a special kind of map,
+// so you can read it as either.
+// Reading a null item is also an error.  So before you try obj.Get*(),
+// first check obj.IsNil().
+type JSONObject struct {
+	// Parsed value.  May actually be any of the types a JSONObject can
+	// wrap, except raw bytes.  If the object can only be interpreted
+	// as raw bytes, this will be nil.
+	value interface{}
+	// Raw bytes, if this object was parsed directly from an API response.
+	// Is nil for sub-objects found within other objects.  An object that
+	// was parsed directly from a response can be both raw bytes and some
+	// other value at the same time.
+	// For example, "[]" looks like a JSON list, so you can read it as an
+	// array.  But it may also be the raw contents of a file that just
+	// happens to look like JSON, and so you can read it as raw bytes as
+	// well.
+	bytes []byte
+	// Client for further communication with the API.
+	client Client
+	// Is this a JSON null?
+	isNull bool
+}
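+
+// Editor's illustrative sketch: decoding a response and walking it with the
+// Get* methods ("client" is a previously constructed Client; error handling
+// elided).
+//
+//	obj, _ := Parse(client, []byte(`{"name": "rack1", "count": 3}`))
+//	m, _ := obj.GetMap()
+//	name, _ := m["name"].GetString()
+//	count, _ := m["count"].GetFloat64() // JSON numbers decode as float64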
+
+// Our JSON processor distinguishes a MAASObject from a jsonMap by the fact
+// that it contains a key "resource_uri".  (A regular map might contain the
+// same key through sheer coincidence, but never mind: you can still treat it
+// as a jsonMap and never notice the difference.)
+const resourceURI = "resource_uri"
+
+// maasify turns a completely untyped json.Unmarshal result into a JSONObject
+// (with the appropriate implementation of course).  This function is
+// recursive.  Maps and arrays are deep-copied, with each individual value
+// being converted to a JSONObject type.
+func maasify(client Client, value interface{}) JSONObject {
+	if value == nil {
+		return JSONObject{isNull: true}
+	}
+	switch value.(type) {
+	case string, float64, bool:
+		return JSONObject{value: value}
+	case map[string]interface{}:
+		original := value.(map[string]interface{})
+		result := make(map[string]JSONObject, len(original))
+		for key, value := range original {
+			result[key] = maasify(client, value)
+		}
+		return JSONObject{value: result, client: client}
+	case []interface{}:
+		original := value.([]interface{})
+		result := make([]JSONObject, len(original))
+		for index, value := range original {
+			result[index] = maasify(client, value)
+		}
+		return JSONObject{value: result}
+	}
+	msg := fmt.Sprintf("Unknown JSON type, can't be converted to JSONObject: %v", value)
+	panic(msg)
+}
+
+// Parse a JSON blob into a JSONObject.
+func Parse(client Client, input []byte) (JSONObject, error) {
+	var obj JSONObject
+	if input == nil {
+		panic(errors.New("Parse() called with nil input"))
+	}
+	var parsed interface{}
+	err := json.Unmarshal(input, &parsed)
+	if err == nil {
+		obj = maasify(client, parsed)
+		obj.bytes = input
+	} else {
+		switch err.(type) {
+		case *json.InvalidUTF8Error, *json.SyntaxError:
+			// This isn't JSON.  Treat it as raw binary data.
+		default:
+			return obj, err
+		}
+		obj = JSONObject{value: nil, client: client, bytes: input}
+	}
+	return obj, nil
+}
+
+// JSONObjectFromStruct takes a struct and converts it to a JSONObject
+func JSONObjectFromStruct(client Client, input interface{}) (JSONObject, error) {
+	j, err := json.MarshalIndent(input, "", "  ")
+	if err != nil {
+		return JSONObject{}, err
+	}
+	return Parse(client, j)
+}
+
+// Return error value for failed type conversion.
+func failConversion(wantedType string, obj JSONObject) error {
+	msg := fmt.Sprintf("Requested %v, got %T.", wantedType, obj.value)
+	return errors.New(msg)
+}
+
+// MarshalJSON tells the standard json package how to serialize a JSONObject
+// back to JSON.
+func (obj JSONObject) MarshalJSON() ([]byte, error) {
+	if obj.IsNil() {
+		return json.Marshal(nil)
+	}
+	return json.MarshalIndent(obj.value, "", "  ")
+}
+
+// With MarshalJSON, JSONObject implements json.Marshaler.
+var _ json.Marshaler = (*JSONObject)(nil)
+
+// IsNil tells you whether a JSONObject is a JSON "null."
+// There is one irregularity.  If the original JSON blob was actually raw
+// data, not JSON, then its IsNil will return false because the object
+// contains the binary data as a non-nil value.  But, if the original JSON
+// blob consisted of a null, then IsNil returns true even though you can
+// still retrieve binary data from it.
+func (obj JSONObject) IsNil() bool {
+	if obj.value != nil {
+		return false
+	}
+	if obj.bytes == nil {
+		return true
+	}
+	// This may be a JSON null.  We can't expect every JSON null to look
+	// the same; there may be leading or trailing space.
+	return obj.isNull
+}
+
+// GetString retrieves the object's value as a string.  If the value wasn't
+// a JSON string, that's an error.
+func (obj JSONObject) GetString() (value string, err error) {
+	value, ok := obj.value.(string)
+	if !ok {
+		err = failConversion("string", obj)
+	}
+	return
+}
+
+// GetFloat64 retrieves the object's value as a float64.  If the value wasn't
+// a JSON number, that's an error.
+func (obj JSONObject) GetFloat64() (value float64, err error) {
+	value, ok := obj.value.(float64)
+	if !ok {
+		err = failConversion("float64", obj)
+	}
+	return
+}
+
+// GetMap retrieves the object's value as a map.  If the value wasn't a JSON
+// object, that's an error.
+func (obj JSONObject) GetMap() (value map[string]JSONObject, err error) {
+	value, ok := obj.value.(map[string]JSONObject)
+	if !ok {
+		err = failConversion("map", obj)
+	}
+	return
+}
+
+// GetArray retrieves the object's value as an array.  If the value wasn't a
+// JSON list, that's an error.
+func (obj JSONObject) GetArray() (value []JSONObject, err error) {
+	value, ok := obj.value.([]JSONObject)
+	if !ok {
+		err = failConversion("array", obj)
+	}
+	return
+}
+
+// GetBool retrieves the object's value as a bool.  If the value wasn't a JSON
+// bool, that's an error.
+func (obj JSONObject) GetBool() (value bool, err error) {
+	value, ok := obj.value.(bool)
+	if !ok {
+		err = failConversion("bool", obj)
+	}
+	return
+}
+
+// GetBytes retrieves the object's value as raw bytes.  A JSONObject that was
+// parsed from the original input (as opposed to one that's embedded in
+// another JSONObject) can contain both the raw bytes and the parsed JSON
+// value, but either can be the case without the other.
+// If this object wasn't parsed directly from the original input, that's an
+// error.
+// If the object was parsed from an original input that just said "null", then
+// IsNil will return true but the raw bytes are still available from GetBytes.
+func (obj JSONObject) GetBytes() ([]byte, error) {
+	if obj.bytes == nil {
+		return nil, failConversion("bytes", obj)
+	}
+	return obj.bytes, nil
+}
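+
+// Editor's illustrative sketch: input that fails to parse as JSON is kept as
+// raw bytes, so Parse does not error and GetBytes recovers the input
+// ("client" is a previously constructed Client).
+//
+//	obj, _ := Parse(client, []byte("not json at all"))
+//	raw, err := obj.GetBytes() // raw == input, err == nil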
diff --git a/automation/vendor/github.com/juju/gomaasapi/link.go b/automation/vendor/github.com/juju/gomaasapi/link.go
new file mode 100644
index 0000000..9e930e1
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/link.go
@@ -0,0 +1,124 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type link struct {
+	id        int
+	mode      string
+	subnet    *subnet
+	ipAddress string
+}
+
+// NOTE: a lowercase letter L is avoided as the receiver name because it is
+// too easily misread; 'k' is used instead.
+
+// ID implements Link.
+func (k *link) ID() int {
+	return k.id
+}
+
+// Mode implements Link.
+func (k *link) Mode() string {
+	return k.mode
+}
+
+// Subnet implements Link.
+func (k *link) Subnet() Subnet {
+	if k.subnet == nil {
+		return nil
+	}
+	return k.subnet
+}
+
+// IPAddress implements Link.
+func (k *link) IPAddress() string {
+	return k.ipAddress
+}
+
+func readLinks(controllerVersion version.Number, source interface{}) ([]*link, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "link base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range linkDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no link read func for version %s", controllerVersion)
+	}
+	readFunc := linkDeserializationFuncs[deserialisationVersion]
+	return readLinkList(valid, readFunc)
+}
+
+// readLinkList expects the values of the sourceList to be string maps.
+func readLinkList(sourceList []interface{}, readFunc linkDeserializationFunc) ([]*link, error) {
+	result := make([]*link, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for link %d, %T", i, value)
+		}
+		link, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "link %d", i)
+		}
+		result = append(result, link)
+	}
+	return result, nil
+}
+
+type linkDeserializationFunc func(map[string]interface{}) (*link, error)
+
+var linkDeserializationFuncs = map[version.Number]linkDeserializationFunc{
+	twoDotOh: link_2_0,
+}
+
+func link_2_0(source map[string]interface{}) (*link, error) {
+	fields := schema.Fields{
+		"id":         schema.ForceInt(),
+		"mode":       schema.String(),
+		"subnet":     schema.StringMap(schema.Any()),
+		"ip_address": schema.String(),
+	}
+	defaults := schema.Defaults{
+		"ip_address": "",
+		"subnet":     schema.Omit,
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "link 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	var subnet *subnet
+	if value, ok := valid["subnet"]; ok {
+		subnet, err = subnet_2_0(value.(map[string]interface{}))
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+
+	result := &link{
+		id:        valid["id"].(int),
+		mode:      valid["mode"].(string),
+		subnet:    subnet,
+		ipAddress: valid["ip_address"].(string),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/maas.go b/automation/vendor/github.com/juju/gomaasapi/maas.go
new file mode 100644
index 0000000..cd6ce29
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/maas.go
@@ -0,0 +1,11 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+// NewMAAS returns an interface to the MAAS API as a *MAASObject.
+func NewMAAS(client Client) *MAASObject {
+	attrs := map[string]interface{}{resourceURI: client.APIURL.String()}
+	obj := newJSONMAASObject(attrs, client)
+	return &obj
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/maasobject.go b/automation/vendor/github.com/juju/gomaasapi/maasobject.go
new file mode 100644
index 0000000..3978252
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/maasobject.go
@@ -0,0 +1,197 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/url"
+)
+
+// MAASObject represents a MAAS object as returned by the MAAS API, such as a
+// Node or a Tag.
+// You can extract a MAASObject out of a JSONObject using
+// JSONObject.GetMAASObject.  A MAAS API call will usually return either a
+// MAASObject or a list of MAASObjects.  The list itself would be wrapped in
+// a JSONObject, so if an API call returns a list of objects "l", you first
+// obtain the array using l.GetArray().  Then, for each item "i" in the array,
+// obtain the matching MAASObject using i.GetMAASObject().
+type MAASObject struct {
+	values map[string]JSONObject
+	client Client
+	uri    *url.URL
+}
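+
+// Editor's illustrative sketch of the pattern described above: unpacking a
+// list result ("listing" is a JSONObject returned by some API call).
+//
+//	items, _ := listing.GetArray()
+//	for _, item := range items {
+//		obj, err := item.GetMAASObject()
+//		// use obj, handle err ...
+//	}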
+
+// newJSONMAASObject creates a new MAAS object.  It will panic if the given map
+// does not contain a valid URL for the 'resource_uri' key.
+func newJSONMAASObject(jmap map[string]interface{}, client Client) MAASObject {
+	obj, err := maasify(client, jmap).GetMAASObject()
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// MarshalJSON tells the standard json package how to serialize a MAASObject.
+func (obj MAASObject) MarshalJSON() ([]byte, error) {
+	return json.MarshalIndent(obj.GetMap(), "", "  ")
+}
+
+// With MarshalJSON, MAASObject implements json.Marshaler.
+var _ json.Marshaler = (*MAASObject)(nil)
+
+func marshalNode(node MAASObject) string {
+	res, _ := json.MarshalIndent(node, "", "  ")
+	return string(res)
+}
+
+var noResourceURI = errors.New("not a MAAS object: no 'resource_uri' key")
+
+// extractURI obtains the "resource_uri" string from a JSONObject map.
+func extractURI(attrs map[string]JSONObject) (*url.URL, error) {
+	uriEntry, ok := attrs[resourceURI]
+	if !ok {
+		return nil, noResourceURI
+	}
+	uri, err := uriEntry.GetString()
+	if err != nil {
+		return nil, fmt.Errorf("invalid resource_uri: %v", err)
+	}
+	resourceURL, err := url.Parse(uri)
+	if err != nil {
+		return nil, fmt.Errorf("resource_uri does not contain a valid URL: %v", uri)
+	}
+	return resourceURL, nil
+}
+
+// JSONObject getter for a MAAS object.  From a decoding perspective, a
+// MAASObject is just like a map except it contains a key "resource_uri", and
+// it keeps track of the Client you got it from so that you can invoke API
+// methods directly on their MAAS objects.
+func (obj JSONObject) GetMAASObject() (MAASObject, error) {
+	attrs, err := obj.GetMap()
+	if err != nil {
+		return MAASObject{}, err
+	}
+	uri, err := extractURI(attrs)
+	if err != nil {
+		return MAASObject{}, err
+	}
+	return MAASObject{values: attrs, client: obj.client, uri: uri}, nil
+}
+
+// GetField extracts a string field from this MAAS object.
+func (obj MAASObject) GetField(name string) (string, error) {
+	return obj.values[name].GetString()
+}
+
+// URI is the resource URI for this MAAS object.  It is an absolute path, but
+// without a network part.
+func (obj MAASObject) URI() *url.URL {
+	// Duplicate the URL.
+	uri, err := url.Parse(obj.uri.String())
+	if err != nil {
+		panic(err)
+	}
+	return uri
+}
+
+// URL returns a full absolute URL (including network part) for this MAAS
+// object on the API.
+func (obj MAASObject) URL() *url.URL {
+	return obj.client.GetURL(obj.URI())
+}
+
+// GetMap returns all of the object's attributes in the form of a map.
+func (obj MAASObject) GetMap() map[string]JSONObject {
+	return obj.values
+}
+
+// GetSubObject returns a new MAASObject representing the API resource found
+// at a given sub-path of the current object's resource URI.
+func (obj MAASObject) GetSubObject(name string) MAASObject {
+	uri := obj.URI()
+	newURL := url.URL{Path: name}
+	resURL := uri.ResolveReference(&newURL)
+	resURL.Path = EnsureTrailingSlash(resURL.Path)
+	input := map[string]interface{}{resourceURI: resURL.String()}
+	return newJSONMAASObject(input, obj.client)
+}
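+
+// Editor's illustrative sketch: navigating from the API root to a
+// sub-resource (the "nodes" path is illustrative).
+//
+//	maas := NewMAAS(client)
+//	nodes := maas.GetSubObject("nodes")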
+
+var NotImplemented = errors.New("Not implemented")
+
+// Get retrieves a fresh copy of this MAAS object from the API.
+func (obj MAASObject) Get() (MAASObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Get(uri, "", url.Values{})
+	if err != nil {
+		return MAASObject{}, err
+	}
+	jsonObj, err := Parse(obj.client, result)
+	if err != nil {
+		return MAASObject{}, err
+	}
+	return jsonObj.GetMAASObject()
+}
+
+// Post overwrites this object's existing value on the API with those given
+// in "params."  It returns the object's new value as received from the API.
+func (obj MAASObject) Post(params url.Values) (JSONObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Post(uri, "", params, nil)
+	if err != nil {
+		return JSONObject{}, err
+	}
+	return Parse(obj.client, result)
+}
+
+// Update modifies this object on the API, based on the values given in
+// "params."  It returns the object's new value as received from the API.
+func (obj MAASObject) Update(params url.Values) (MAASObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Put(uri, params)
+	if err != nil {
+		return MAASObject{}, err
+	}
+	jsonObj, err := Parse(obj.client, result)
+	if err != nil {
+		return MAASObject{}, err
+	}
+	return jsonObj.GetMAASObject()
+}
+
+// Delete removes this object on the API.
+func (obj MAASObject) Delete() error {
+	uri := obj.URI()
+	return obj.client.Delete(uri)
+}
+
+// CallGet invokes an idempotent API method on this object.
+func (obj MAASObject) CallGet(operation string, params url.Values) (JSONObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Get(uri, operation, params)
+	if err != nil {
+		return JSONObject{}, err
+	}
+	return Parse(obj.client, result)
+}
+
+// CallPost invokes a non-idempotent API method on this object.
+func (obj MAASObject) CallPost(operation string, params url.Values) (JSONObject, error) {
+	return obj.CallPostFiles(operation, params, nil)
+}
+
+// CallPostFiles invokes a non-idempotent API method on this object.  It is
+// similar to CallPost but has an extra parameter, 'files', which should
+// contain the files that will be uploaded to the API.
+func (obj MAASObject) CallPostFiles(operation string, params url.Values, files map[string][]byte) (JSONObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Post(uri, operation, params, files)
+	if err != nil {
+		return JSONObject{}, err
+	}
+	return Parse(obj.client, result)
+}
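+
+// Editor's illustrative sketch: invoking a named operation on an object (the
+// "start" operation name here is hypothetical).
+//
+//	result, err := obj.CallPost("start", url.Values{})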
diff --git a/automation/vendor/github.com/juju/gomaasapi/machine.go b/automation/vendor/github.com/juju/gomaasapi/machine.go
new file mode 100644
index 0000000..8518d94
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/machine.go
@@ -0,0 +1,584 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type machine struct {
+	controller *controller
+
+	resourceURI string
+
+	systemID  string
+	hostname  string
+	fqdn      string
+	tags      []string
+	ownerData map[string]string
+
+	operatingSystem string
+	distroSeries    string
+	architecture    string
+	memory          int
+	cpuCount        int
+
+	ipAddresses []string
+	powerState  string
+
+	// NOTE: consider some form of status struct
+	statusName    string
+	statusMessage string
+
+	bootInterface *interface_
+	interfaceSet  []*interface_
+	zone          *zone
+	// physicalBlockDevices holds only physical devices; blockDevices also
+	// includes virtual block devices (see the Machine interface docs).
+	physicalBlockDevices []*blockdevice
+	blockDevices         []*blockdevice
+}
+
+func (m *machine) updateFrom(other *machine) {
+	m.resourceURI = other.resourceURI
+	m.systemID = other.systemID
+	m.hostname = other.hostname
+	m.fqdn = other.fqdn
+	m.operatingSystem = other.operatingSystem
+	m.distroSeries = other.distroSeries
+	m.architecture = other.architecture
+	m.memory = other.memory
+	m.cpuCount = other.cpuCount
+	m.ipAddresses = other.ipAddresses
+	m.powerState = other.powerState
+	m.statusName = other.statusName
+	m.statusMessage = other.statusMessage
+	m.zone = other.zone
+	m.tags = other.tags
+	m.ownerData = other.ownerData
+}
+
+// SystemID implements Machine.
+func (m *machine) SystemID() string {
+	return m.systemID
+}
+
+// Hostname implements Machine.
+func (m *machine) Hostname() string {
+	return m.hostname
+}
+
+// FQDN implements Machine.
+func (m *machine) FQDN() string {
+	return m.fqdn
+}
+
+// Tags implements Machine.
+func (m *machine) Tags() []string {
+	return m.tags
+}
+
+// IPAddresses implements Machine.
+func (m *machine) IPAddresses() []string {
+	return m.ipAddresses
+}
+
+// Memory implements Machine.
+func (m *machine) Memory() int {
+	return m.memory
+}
+
+// CPUCount implements Machine.
+func (m *machine) CPUCount() int {
+	return m.cpuCount
+}
+
+// PowerState implements Machine.
+func (m *machine) PowerState() string {
+	return m.powerState
+}
+
+// Zone implements Machine.
+func (m *machine) Zone() Zone {
+	if m.zone == nil {
+		return nil
+	}
+	return m.zone
+}
+
+// BootInterface implements Machine.
+func (m *machine) BootInterface() Interface {
+	if m.bootInterface == nil {
+		return nil
+	}
+	m.bootInterface.controller = m.controller
+	return m.bootInterface
+}
+
+// InterfaceSet implements Machine.
+func (m *machine) InterfaceSet() []Interface {
+	result := make([]Interface, len(m.interfaceSet))
+	for i, v := range m.interfaceSet {
+		v.controller = m.controller
+		result[i] = v
+	}
+	return result
+}
+
+// Interface implements Machine.
+func (m *machine) Interface(id int) Interface {
+	for _, iface := range m.interfaceSet {
+		if iface.ID() == id {
+			iface.controller = m.controller
+			return iface
+		}
+	}
+	return nil
+}
+
+// OperatingSystem implements Machine.
+func (m *machine) OperatingSystem() string {
+	return m.operatingSystem
+}
+
+// DistroSeries implements Machine.
+func (m *machine) DistroSeries() string {
+	return m.distroSeries
+}
+
+// Architecture implements Machine.
+func (m *machine) Architecture() string {
+	return m.architecture
+}
+
+// StatusName implements Machine.
+func (m *machine) StatusName() string {
+	return m.statusName
+}
+
+// StatusMessage implements Machine.
+func (m *machine) StatusMessage() string {
+	return m.statusMessage
+}
+
+// PhysicalBlockDevices implements Machine.
+func (m *machine) PhysicalBlockDevices() []BlockDevice {
+	result := make([]BlockDevice, len(m.physicalBlockDevices))
+	for i, v := range m.physicalBlockDevices {
+		result[i] = v
+	}
+	return result
+}
+
+// PhysicalBlockDevice implements Machine.
+func (m *machine) PhysicalBlockDevice(id int) BlockDevice {
+	for _, blockDevice := range m.physicalBlockDevices {
+		if blockDevice.ID() == id {
+			return blockDevice
+		}
+	}
+	return nil
+}
+
+// BlockDevices implements Machine.
+func (m *machine) BlockDevices() []BlockDevice {
+	result := make([]BlockDevice, len(m.blockDevices))
+	for i, v := range m.blockDevices {
+		result[i] = v
+	}
+	return result
+}
+
+// Devices implements Machine.
+func (m *machine) Devices(args DevicesArgs) ([]Device, error) {
+	// Perhaps in the future, MAAS will give us a way to query just for the
+	// devices for a particular parent.
+	devices, err := m.controller.Devices(args)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Device
+	for _, device := range devices {
+		if device.Parent() == m.SystemID() {
+			result = append(result, device)
+		}
+	}
+	return result, nil
+}
+
+// StartArgs is an argument struct for passing parameters to the Machine.Start
+// method.
+type StartArgs struct {
+	// UserData needs to be Base64 encoded user data for cloud-init.
+	UserData     string
+	DistroSeries string
+	Kernel       string
+	Comment      string
+}
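+
+// Editor's illustrative sketch: deploying with cloud-init user data (the
+// series name is hypothetical; note the Base64 encoding required above).
+//
+//	userData := base64.StdEncoding.EncodeToString([]byte("#cloud-config\n"))
+//	err := machine.Start(StartArgs{
+//		UserData:     userData,
+//		DistroSeries: "xenial",
+//	})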
+
+// Start implements Machine.
+func (m *machine) Start(args StartArgs) error {
+	params := NewURLParams()
+	params.MaybeAdd("user_data", args.UserData)
+	params.MaybeAdd("distro_series", args.DistroSeries)
+	params.MaybeAdd("hwe_kernel", args.Kernel)
+	params.MaybeAdd("comment", args.Comment)
+	result, err := m.controller.post(m.resourceURI, "deploy", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound, http.StatusConflict:
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			case http.StatusServiceUnavailable:
+				return errors.Wrap(err, NewCannotCompleteError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	machine, err := readMachine(m.controller.apiVersion, result)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	m.updateFrom(machine)
+	return nil
+}
+
+// CreateMachineDeviceArgs is an argument structure for Machine.CreateDevice.
+// Only the InterfaceName and MACAddress fields are required; the others are
+// used only if set. If Subnet and VLAN are both set, Subnet.VLAN() must match
+// the given VLAN. On failure, returns an error satisfying errors.IsNotValid().
+type CreateMachineDeviceArgs struct {
+	Hostname      string
+	InterfaceName string
+	MACAddress    string
+	Subnet        Subnet
+	VLAN          VLAN
+}
+
+// Validate ensures that all required values are non-empty.
+func (a *CreateMachineDeviceArgs) Validate() error {
+	if a.InterfaceName == "" {
+		return errors.NotValidf("missing InterfaceName")
+	}
+
+	if a.MACAddress == "" {
+		return errors.NotValidf("missing MACAddress")
+	}
+
+	if a.Subnet != nil && a.VLAN != nil && a.Subnet.VLAN() != a.VLAN {
+		msg := fmt.Sprintf(
+			"given subnet %q on VLAN %d does not match given VLAN %d",
+			a.Subnet.CIDR(), a.Subnet.VLAN().ID(), a.VLAN.ID(),
+		)
+		return errors.NewNotValid(nil, msg)
+	}
+
+	return nil
+}
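+
+// Editor's illustrative sketch: a minimal argument set that passes Validate
+// (all values are hypothetical).
+//
+//	args := CreateMachineDeviceArgs{
+//		Hostname:      "dev-1",
+//		InterfaceName: "eth0",
+//		MACAddress:    "00:16:3e:00:00:01",
+//	}
+//	if err := args.Validate(); err != nil {
+//		// not valid
+//	}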
+
+// CreateDevice implements Machine
+func (m *machine) CreateDevice(args CreateMachineDeviceArgs) (_ Device, err error) {
+	if err := args.Validate(); err != nil {
+		return nil, errors.Trace(err)
+	}
+	device, err := m.controller.CreateDevice(CreateDeviceArgs{
+		Hostname:     args.Hostname,
+		MACAddresses: []string{args.MACAddress},
+		Parent:       m.SystemID(),
+	})
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	defer func(err *error) {
+		// If an error is being returned, at least try to delete the device we just created.
+		if *err != nil {
+			if innerErr := device.Delete(); innerErr != nil {
+				logger.Warningf("could not delete device %q", device.SystemID())
+			}
+		}
+	}(&err)
+
+	// Update the VLAN to use for the interface, if given.
+	vlanToUse := args.VLAN
+	if vlanToUse == nil && args.Subnet != nil {
+		vlanToUse = args.Subnet.VLAN()
+	}
+
+	// There should be one interface created for each MAC address, and since we
+	// only specified one, the interface set should contain exactly one entry.
+	interfaces := device.InterfaceSet()
+	if count := len(interfaces); count != 1 {
+		err := errors.Errorf("unexpected interface count for device: %d", count)
+		return nil, NewUnexpectedError(err)
+	}
+	iface := interfaces[0]
+	nameToUse := args.InterfaceName
+
+	if err := m.updateDeviceInterface(iface, nameToUse, vlanToUse); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	if args.Subnet == nil {
+		// Nothing further to update.
+		return device, nil
+	}
+
+	if err := m.linkDeviceInterfaceToSubnet(iface, args.Subnet); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return device, nil
+}
+
+func (m *machine) updateDeviceInterface(iface Interface, nameToUse string, vlanToUse VLAN) error {
+	updateArgs := UpdateInterfaceArgs{}
+	updateArgs.Name = nameToUse
+
+	if vlanToUse != nil {
+		updateArgs.VLAN = vlanToUse
+	}
+
+	if err := iface.Update(updateArgs); err != nil {
+		return errors.Annotatef(err, "updating device interface %q failed", iface.Name())
+	}
+
+	return nil
+}
+
+func (m *machine) linkDeviceInterfaceToSubnet(iface Interface, subnetToUse Subnet) error {
+	err := iface.LinkSubnet(LinkSubnetArgs{
+		Mode:   LinkModeStatic,
+		Subnet: subnetToUse,
+	})
+	if err != nil {
+		return errors.Annotatef(
+			err, "linking device interface %q to subnet %q failed",
+			iface.Name(), subnetToUse.CIDR())
+	}
+
+	return nil
+}
+
+// OwnerData implements OwnerDataHolder.
+func (m *machine) OwnerData() map[string]string {
+	result := make(map[string]string)
+	for key, value := range m.ownerData {
+		result[key] = value
+	}
+	return result
+}
+
+// SetOwnerData implements OwnerDataHolder.
+func (m *machine) SetOwnerData(ownerData map[string]string) error {
+	params := make(url.Values)
+	for key, value := range ownerData {
+		params.Add(key, value)
+	}
+	result, err := m.controller.post(m.resourceURI, "set_owner_data", params)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	machine, err := readMachine(m.controller.apiVersion, result)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	m.updateFrom(machine)
+	return nil
+}
+
+func readMachine(controllerVersion version.Number, source interface{}) (*machine, error) {
+	readFunc, err := getMachineDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.StringMap(schema.Any())
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "machine base schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	return readFunc(valid)
+}
+
+func readMachines(controllerVersion version.Number, source interface{}) ([]*machine, error) {
+	readFunc, err := getMachineDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "machine base schema check failed")
+	}
+	valid := coerced.([]interface{})
+	return readMachineList(valid, readFunc)
+}
+
+func getMachineDeserializationFunc(controllerVersion version.Number) (machineDeserializationFunc, error) {
+	var deserialisationVersion version.Number
+	for v := range machineDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no machine read func for version %s", controllerVersion)
+	}
+	return machineDeserializationFuncs[deserialisationVersion], nil
+}
+
+func readMachineList(sourceList []interface{}, readFunc machineDeserializationFunc) ([]*machine, error) {
+	result := make([]*machine, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for machine %d, %T", i, value)
+		}
+		machine, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "machine %d", i)
+		}
+		result = append(result, machine)
+	}
+	return result, nil
+}
+
+type machineDeserializationFunc func(map[string]interface{}) (*machine, error)
+
+var machineDeserializationFuncs = map[version.Number]machineDeserializationFunc{
+	twoDotOh: machine_2_0,
+}
+
+func machine_2_0(source map[string]interface{}) (*machine, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"system_id":  schema.String(),
+		"hostname":   schema.String(),
+		"fqdn":       schema.String(),
+		"tag_names":  schema.List(schema.String()),
+		"owner_data": schema.StringMap(schema.String()),
+
+		"osystem":       schema.String(),
+		"distro_series": schema.String(),
+		"architecture":  schema.OneOf(schema.Nil(""), schema.String()),
+		"memory":        schema.ForceInt(),
+		"cpu_count":     schema.ForceInt(),
+
+		"ip_addresses":   schema.List(schema.String()),
+		"power_state":    schema.String(),
+		"status_name":    schema.String(),
+		"status_message": schema.OneOf(schema.Nil(""), schema.String()),
+
+		"boot_interface": schema.OneOf(schema.Nil(""), schema.StringMap(schema.Any())),
+		"interface_set":  schema.List(schema.StringMap(schema.Any())),
+		"zone":           schema.StringMap(schema.Any()),
+
+		"physicalblockdevice_set": schema.List(schema.StringMap(schema.Any())),
+		"blockdevice_set":         schema.List(schema.StringMap(schema.Any())),
+	}
+	defaults := schema.Defaults{
+		"architecture": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "machine 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	var bootInterface *interface_
+	if ifaceMap, ok := valid["boot_interface"].(map[string]interface{}); ok {
+		bootInterface, err = interface_2_0(ifaceMap)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+
+	interfaceSet, err := readInterfaceList(valid["interface_set"].([]interface{}), interface_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	zone, err := zone_2_0(valid["zone"].(map[string]interface{}))
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	physicalBlockDevices, err := readBlockDeviceList(valid["physicalblockdevice_set"].([]interface{}), blockdevice_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	blockDevices, err := readBlockDeviceList(valid["blockdevice_set"].([]interface{}), blockdevice_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	architecture, _ := valid["architecture"].(string)
+	statusMessage, _ := valid["status_message"].(string)
+	result := &machine{
+		resourceURI: valid["resource_uri"].(string),
+
+		systemID:  valid["system_id"].(string),
+		hostname:  valid["hostname"].(string),
+		fqdn:      valid["fqdn"].(string),
+		tags:      convertToStringSlice(valid["tag_names"]),
+		ownerData: convertToStringMap(valid["owner_data"]),
+
+		operatingSystem: valid["osystem"].(string),
+		distroSeries:    valid["distro_series"].(string),
+		architecture:    architecture,
+		memory:          valid["memory"].(int),
+		cpuCount:        valid["cpu_count"].(int),
+
+		ipAddresses:   convertToStringSlice(valid["ip_addresses"]),
+		powerState:    valid["power_state"].(string),
+		statusName:    valid["status_name"].(string),
+		statusMessage: statusMessage,
+
+		bootInterface:        bootInterface,
+		interfaceSet:         interfaceSet,
+		zone:                 zone,
+		physicalBlockDevices: physicalBlockDevices,
+		blockDevices:         blockDevices,
+	}
+
+	return result, nil
+}
+
+func convertToStringSlice(field interface{}) []string {
+	if field == nil {
+		return nil
+	}
+	fieldSlice := field.([]interface{})
+	result := make([]string, len(fieldSlice))
+	for i, value := range fieldSlice {
+		result[i] = value.(string)
+	}
+	return result
+}
+
+func convertToStringMap(field interface{}) map[string]string {
+	if field == nil {
+		return nil
+	}
+	// This function is only called after a schema Coerce, so it's
+	// safe.
+	fieldMap := field.(map[string]interface{})
+	result := make(map[string]string)
+	for key, value := range fieldMap {
+		result[key] = value.(string)
+	}
+	return result
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/oauth.go b/automation/vendor/github.com/juju/gomaasapi/oauth.go
new file mode 100644
index 0000000..920960d
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/oauth.go
@@ -0,0 +1,80 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"crypto/rand"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// generateNonce is not a true UUID generator, but it at least produces random
+// output of the same length.
+func generateNonce() (string, error) {
+	randBytes := make([]byte, 16)
+	_, err := rand.Read(randBytes)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%16x", randBytes), nil
+}
+
+func generateTimestamp() string {
+	return strconv.Itoa(int(time.Now().Unix()))
+}
+
+type OAuthSigner interface {
+	OAuthSign(request *http.Request) error
+}
+
+type OAuthToken struct {
+	ConsumerKey    string
+	ConsumerSecret string
+	TokenKey       string
+	TokenSecret    string
+}
+
+// Trick to ensure *plainTextOAuthSigner implements the OAuthSigner interface.
+var _ OAuthSigner = (*plainTextOAuthSigner)(nil)
+
+type plainTextOAuthSigner struct {
+	token *OAuthToken
+	realm string
+}
+
+func NewPlainTestOAuthSigner(token *OAuthToken, realm string) (OAuthSigner, error) {
+	return &plainTextOAuthSigner{token, realm}, nil
+}
+
+// OAuthSign signs the provided request using the OAuth PLAINTEXT
+// method: http://oauth.net/core/1.0/#anchor22.
+func (signer plainTextOAuthSigner) OAuthSign(request *http.Request) error {
+
+	signature := signer.token.ConsumerSecret + `&` + signer.token.TokenSecret
+	nonce, err := generateNonce()
+	if err != nil {
+		return err
+	}
+	authData := map[string]string{
+		"realm":                  signer.realm,
+		"oauth_consumer_key":     signer.token.ConsumerKey,
+		"oauth_token":            signer.token.TokenKey,
+		"oauth_signature_method": "PLAINTEXT",
+		"oauth_signature":        signature,
+		"oauth_timestamp":        generateTimestamp(),
+		"oauth_nonce":            nonce,
+		"oauth_version":          "1.0",
+	}
+	// Build OAuth header.
+	var authHeader []string
+	for key, value := range authData {
+		authHeader = append(authHeader, fmt.Sprintf(`%s="%s"`, key, url.QueryEscape(value)))
+	}
+	strHeader := "OAuth " + strings.Join(authHeader, ", ")
+	request.Header.Add("Authorization", strHeader)
+	return nil
+}
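+
+// Editor's illustrative sketch: signing an outgoing request (the key material
+// is hypothetical; note the constructor's actual name, NewPlainTestOAuthSigner).
+//
+//	token := &OAuthToken{ConsumerKey: "ck", ConsumerSecret: "cs",
+//		TokenKey: "tk", TokenSecret: "ts"}
+//	signer, _ := NewPlainTestOAuthSigner(token, "")
+//	req, _ := http.NewRequest("GET", "http://maas.example/api", nil)
+//	_ = signer.OAuthSign(req) // adds the Authorization header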
diff --git a/automation/vendor/github.com/juju/gomaasapi/partition.go b/automation/vendor/github.com/juju/gomaasapi/partition.go
new file mode 100644
index 0000000..f6d6afa
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/partition.go
@@ -0,0 +1,145 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type partition struct {
+	resourceURI string
+
+	id   int
+	path string
+	uuid string
+
+	usedFor string
+	size    uint64
+
+	filesystem *filesystem
+}
+
+// ID implements Partition.
+func (p *partition) ID() int {
+	return p.id
+}
+
+// Path implements Partition.
+func (p *partition) Path() string {
+	return p.path
+}
+
+// FileSystem implements Partition.
+func (p *partition) FileSystem() FileSystem {
+	if p.filesystem == nil {
+		return nil
+	}
+	return p.filesystem
+}
+
+// UUID implements Partition.
+func (p *partition) UUID() string {
+	return p.uuid
+}
+
+// UsedFor implements Partition.
+func (p *partition) UsedFor() string {
+	return p.usedFor
+}
+
+// Size implements Partition.
+func (p *partition) Size() uint64 {
+	return p.size
+}
+
+func readPartitions(controllerVersion version.Number, source interface{}) ([]*partition, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "partition base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range partitionDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no partition read func for version %s", controllerVersion)
+	}
+	readFunc := partitionDeserializationFuncs[deserialisationVersion]
+	return readPartitionList(valid, readFunc)
+}
+
+// readPartitionList expects the values of the sourceList to be string maps.
+func readPartitionList(sourceList []interface{}, readFunc partitionDeserializationFunc) ([]*partition, error) {
+	result := make([]*partition, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for partition %d, %T", i, value)
+		}
+		partition, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "partition %d", i)
+		}
+		result = append(result, partition)
+	}
+	return result, nil
+}
+
+type partitionDeserializationFunc func(map[string]interface{}) (*partition, error)
+
+var partitionDeserializationFuncs = map[version.Number]partitionDeserializationFunc{
+	twoDotOh: partition_2_0,
+}
+
+func partition_2_0(source map[string]interface{}) (*partition, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"id":   schema.ForceInt(),
+		"path": schema.String(),
+		"uuid": schema.OneOf(schema.Nil(""), schema.String()),
+
+		"used_for": schema.String(),
+		"size":     schema.ForceUint(),
+
+		"filesystem": schema.OneOf(schema.Nil(""), schema.StringMap(schema.Any())),
+	}
+	defaults := schema.Defaults{
+		"uuid": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "partition 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	var filesystem *filesystem
+	if fsSource := valid["filesystem"]; fsSource != nil {
+		filesystem, err = filesystem2_0(fsSource.(map[string]interface{}))
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+	uuid, _ := valid["uuid"].(string)
+	result := &partition{
+		resourceURI: valid["resource_uri"].(string),
+		id:          valid["id"].(int),
+		path:        valid["path"].(string),
+		uuid:        uuid,
+		usedFor:     valid["used_for"].(string),
+		size:        valid["size"].(uint64),
+		filesystem:  filesystem,
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/space.go b/automation/vendor/github.com/juju/gomaasapi/space.go
new file mode 100644
index 0000000..5b8b8cf
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/space.go
@@ -0,0 +1,115 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type space struct {
+	// Add the controller in when we need to do things with the space.
+	// controller Controller
+
+	resourceURI string
+
+	id   int
+	name string
+
+	subnets []*subnet
+}
+
+// Id implements Space.
+func (s *space) ID() int {
+	return s.id
+}
+
+// Name implements Space.
+func (s *space) Name() string {
+	return s.name
+}
+
+// Subnets implements Space.
+func (s *space) Subnets() []Subnet {
+	var result []Subnet
+	for _, subnet := range s.subnets {
+		result = append(result, subnet)
+	}
+	return result
+}
+
+func readSpaces(controllerVersion version.Number, source interface{}) ([]*space, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "space base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range spaceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no space read func for version %s", controllerVersion)
+	}
+	readFunc := spaceDeserializationFuncs[deserialisationVersion]
+	return readSpaceList(valid, readFunc)
+}
+
+// readSpaceList expects the values of the sourceList to be string maps.
+func readSpaceList(sourceList []interface{}, readFunc spaceDeserializationFunc) ([]*space, error) {
+	result := make([]*space, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for space %d, %T", i, value)
+		}
+		space, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "space %d", i)
+		}
+		result = append(result, space)
+	}
+	return result, nil
+}
+
+type spaceDeserializationFunc func(map[string]interface{}) (*space, error)
+
+var spaceDeserializationFuncs = map[version.Number]spaceDeserializationFunc{
+	twoDotOh: space_2_0,
+}
+
+func space_2_0(source map[string]interface{}) (*space, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+		"id":           schema.ForceInt(),
+		"name":         schema.String(),
+		"subnets":      schema.List(schema.StringMap(schema.Any())),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "space 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	subnets, err := readSubnetList(valid["subnets"].([]interface{}), subnet_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	result := &space{
+		resourceURI: valid["resource_uri"].(string),
+		id:          valid["id"].(int),
+		name:        valid["name"].(string),
+		subnets:     subnets,
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/subnet.go b/automation/vendor/github.com/juju/gomaasapi/subnet.go
new file mode 100644
index 0000000..f509ccd
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/subnet.go
@@ -0,0 +1,152 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type subnet struct {
+	// Add the controller in when we need to do things with the subnet.
+	// controller Controller
+
+	resourceURI string
+
+	id    int
+	name  string
+	space string
+	vlan  *vlan
+
+	gateway string
+	cidr    string
+
+	dnsServers []string
+}
+
+// ID implements Subnet.
+func (s *subnet) ID() int {
+	return s.id
+}
+
+// Name implements Subnet.
+func (s *subnet) Name() string {
+	return s.name
+}
+
+// Space implements Subnet.
+func (s *subnet) Space() string {
+	return s.space
+}
+
+// VLAN implements Subnet.
+func (s *subnet) VLAN() VLAN {
+	if s.vlan == nil {
+		return nil
+	}
+	return s.vlan
+}
+
+// Gateway implements Subnet.
+func (s *subnet) Gateway() string {
+	return s.gateway
+}
+
+// CIDR implements Subnet.
+func (s *subnet) CIDR() string {
+	return s.cidr
+}
+
+// DNSServers implements Subnet.
+func (s *subnet) DNSServers() []string {
+	return s.dnsServers
+}
+
+func readSubnets(controllerVersion version.Number, source interface{}) ([]*subnet, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "subnet base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range subnetDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no subnet read func for version %s", controllerVersion)
+	}
+	readFunc := subnetDeserializationFuncs[deserialisationVersion]
+	return readSubnetList(valid, readFunc)
+}
+
+// readSubnetList expects the values of the sourceList to be string maps.
+func readSubnetList(sourceList []interface{}, readFunc subnetDeserializationFunc) ([]*subnet, error) {
+	result := make([]*subnet, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for subnet %d, %T", i, value)
+		}
+		subnet, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "subnet %d", i)
+		}
+		result = append(result, subnet)
+	}
+	return result, nil
+}
+
+type subnetDeserializationFunc func(map[string]interface{}) (*subnet, error)
+
+var subnetDeserializationFuncs = map[version.Number]subnetDeserializationFunc{
+	twoDotOh: subnet_2_0,
+}
+
+func subnet_2_0(source map[string]interface{}) (*subnet, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+		"id":           schema.ForceInt(),
+		"name":         schema.String(),
+		"space":        schema.String(),
+		"gateway_ip":   schema.OneOf(schema.Nil(""), schema.String()),
+		"cidr":         schema.String(),
+		"vlan":         schema.StringMap(schema.Any()),
+		"dns_servers":  schema.OneOf(schema.Nil(""), schema.List(schema.String())),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "subnet 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	vlan, err := vlan_2_0(valid["vlan"].(map[string]interface{}))
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	// Since gateway_ip is optional, we use the two-part type assertion. If
+	// the assertion fails, we get the default value we care about: the
+	// empty string.
+	gateway, _ := valid["gateway_ip"].(string)
+
+	result := &subnet{
+		resourceURI: valid["resource_uri"].(string),
+		id:          valid["id"].(int),
+		name:        valid["name"].(string),
+		space:       valid["space"].(string),
+		vlan:        vlan,
+		gateway:     gateway,
+		cidr:        valid["cidr"].(string),
+		dnsServers:  convertToStringSlice(valid["dns_servers"]),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/testing.go b/automation/vendor/github.com/juju/gomaasapi/testing.go
new file mode 100644
index 0000000..54d67aa
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/testing.go
@@ -0,0 +1,222 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+)
+
+type singleServingServer struct {
+	*httptest.Server
+	requestContent *string
+	requestHeader  *http.Header
+}
+
+// newSingleServingServer creates a single-serving test http server which will
+// return only one response as defined by the passed arguments.
+func newSingleServingServer(uri string, response string, code int) *singleServingServer {
+	var requestContent string
+	var requestHeader http.Header
+	var requested bool
+	handler := func(writer http.ResponseWriter, request *http.Request) {
+		if requested {
+			http.Error(writer, "Already requested", http.StatusServiceUnavailable)
+			return
+		}
+		res, err := readAndClose(request.Body)
+		if err != nil {
+			panic(err)
+		}
+		requestContent = string(res)
+		requestHeader = request.Header
+		if request.URL.String() != uri {
+			errorMsg := fmt.Sprintf("Error 404: page not found (expected '%v', got '%v').", uri, request.URL.String())
+			http.Error(writer, errorMsg, http.StatusNotFound)
+		} else {
+			writer.WriteHeader(code)
+			fmt.Fprint(writer, response)
+		}
+		requested = true
+	}
+	server := httptest.NewServer(http.HandlerFunc(handler))
+	return &singleServingServer{server, &requestContent, &requestHeader}
+}
+
+type flakyServer struct {
+	*httptest.Server
+	nbRequests *int
+	requests   *[][]byte
+}
+
+// newFlakyServer creates a "flaky" test http server which will
+// return `nbFlakyResponses` responses with the given code and then a 200 response.
+func newFlakyServer(uri string, code int, nbFlakyResponses int) *flakyServer {
+	nbRequests := 0
+	requests := make([][]byte, nbFlakyResponses+1)
+	handler := func(writer http.ResponseWriter, request *http.Request) {
+		nbRequests += 1
+		body, err := readAndClose(request.Body)
+		if err != nil {
+			panic(err)
+		}
+		requests[nbRequests-1] = body
+		if request.URL.String() != uri {
+			errorMsg := fmt.Sprintf("Error 404: page not found (expected '%v', got '%v').", uri, request.URL.String())
+			http.Error(writer, errorMsg, http.StatusNotFound)
+		} else if nbRequests <= nbFlakyResponses {
+			if code == http.StatusServiceUnavailable {
+				writer.Header().Set("Retry-After", "0")
+			}
+			writer.WriteHeader(code)
+			fmt.Fprint(writer, "flaky")
+		} else {
+			writer.WriteHeader(http.StatusOK)
+			fmt.Fprint(writer, "ok")
+		}
+	}
+	server := httptest.NewServer(http.HandlerFunc(handler))
+	return &flakyServer{server, &nbRequests, &requests}
+}
+
+type simpleResponse struct {
+	status int
+	body   string
+}
+
+type SimpleTestServer struct {
+	*httptest.Server
+
+	getResponses        map[string][]simpleResponse
+	getResponseIndex    map[string]int
+	putResponses        map[string][]simpleResponse
+	putResponseIndex    map[string]int
+	postResponses       map[string][]simpleResponse
+	postResponseIndex   map[string]int
+	deleteResponses     map[string][]simpleResponse
+	deleteResponseIndex map[string]int
+
+	requests []*http.Request
+}
+
+func NewSimpleServer() *SimpleTestServer {
+	server := &SimpleTestServer{
+		getResponses:        make(map[string][]simpleResponse),
+		getResponseIndex:    make(map[string]int),
+		putResponses:        make(map[string][]simpleResponse),
+		putResponseIndex:    make(map[string]int),
+		postResponses:       make(map[string][]simpleResponse),
+		postResponseIndex:   make(map[string]int),
+		deleteResponses:     make(map[string][]simpleResponse),
+		deleteResponseIndex: make(map[string]int),
+	}
+	server.Server = httptest.NewUnstartedServer(http.HandlerFunc(server.handler))
+	return server
+}
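+
+// Editor's illustrative sketch: canned responses are consumed in the order
+// they were added; the embedded server is unstarted, so call Start before use.
+//
+//	server := NewSimpleServer()
+//	server.AddGetResponse("/api/2.0/machines/", http.StatusOK, "[]")
+//	server.Start()
+//	defer server.Close()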
+
+func (s *SimpleTestServer) AddGetResponse(path string, status int, body string) {
+	logger.Debugf("add get response for: %s, %d", path, status)
+	s.getResponses[path] = append(s.getResponses[path], simpleResponse{status: status, body: body})
+}
+
+func (s *SimpleTestServer) AddPutResponse(path string, status int, body string) {
+	logger.Debugf("add put response for: %s, %d", path, status)
+	s.putResponses[path] = append(s.putResponses[path], simpleResponse{status: status, body: body})
+}
+
+func (s *SimpleTestServer) AddPostResponse(path string, status int, body string) {
+	logger.Debugf("add post response for: %s, %d", path, status)
+	s.postResponses[path] = append(s.postResponses[path], simpleResponse{status: status, body: body})
+}
+
+func (s *SimpleTestServer) AddDeleteResponse(path string, status int, body string) {
+	logger.Debugf("add delete response for: %s, %d", path, status)
+	s.deleteResponses[path] = append(s.deleteResponses[path], simpleResponse{status: status, body: body})
+}
+
+func (s *SimpleTestServer) LastRequest() *http.Request {
+	pos := len(s.requests) - 1
+	if pos < 0 {
+		return nil
+	}
+	return s.requests[pos]
+}
+
+func (s *SimpleTestServer) LastNRequests(n int) []*http.Request {
+	start := len(s.requests) - n
+	if start < 0 {
+		start = 0
+	}
+	return s.requests[start:]
+}
+
+func (s *SimpleTestServer) RequestCount() int {
+	return len(s.requests)
+}
+
+func (s *SimpleTestServer) ResetRequests() {
+	s.requests = nil
+}
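+
+// Editor's sketch of a typical flow (hypothetical path and body): queue
+// canned responses per method and path, start the server, then assert on
+// the recorded requests. Successive requests to the same path consume the
+// queued responses in order.
+//
+//	server := NewSimpleServer()
+//	server.AddGetResponse("/machines/", http.StatusOK, "[]")
+//	server.Start()
+//	defer server.Close()
+//	resp, _ := http.Get(server.URL + "/machines/")
+//	fmt.Println(resp.StatusCode, server.RequestCount())  // 200 1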
+
+func (s *SimpleTestServer) handler(writer http.ResponseWriter, request *http.Request) {
+	method := request.Method
+	var (
+		err           error
+		responses     map[string][]simpleResponse
+		responseIndex map[string]int
+	)
+	switch method {
+	case "GET":
+		responses = s.getResponses
+		responseIndex = s.getResponseIndex
+		_, err = readAndClose(request.Body)
+		if err != nil {
+			panic(err) // it is a test, panic should be fine
+		}
+	case "PUT":
+		responses = s.putResponses
+		responseIndex = s.putResponseIndex
+		err = request.ParseForm()
+		if err != nil {
+			panic(err)
+		}
+	case "POST":
+		responses = s.postResponses
+		responseIndex = s.postResponseIndex
+		contentType := request.Header.Get("Content-Type")
+		if strings.HasPrefix(contentType, "multipart/form-data;") {
+			err = request.ParseMultipartForm(2 << 20)
+		} else {
+			err = request.ParseForm()
+		}
+		if err != nil {
+			panic(err)
+		}
+	case "DELETE":
+		responses = s.deleteResponses
+		responseIndex = s.deleteResponseIndex
+		_, err := readAndClose(request.Body)
+		if err != nil {
+			panic(err)
+		}
+	default:
+		panic("unsupported method " + method)
+	}
+	s.requests = append(s.requests, request)
+	uri := request.URL.String()
+	testResponses, found := responses[uri]
+	if !found {
+		errorMsg := fmt.Sprintf("Error 404: page not found ('%v').", uri)
+		http.Error(writer, errorMsg, http.StatusNotFound)
+	} else {
+		index := responseIndex[uri]
+		response := testResponses[index]
+		responseIndex[uri] = index + 1
+
+		writer.WriteHeader(response.status)
+		fmt.Fprint(writer, response.body)
+	}
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/testservice.go b/automation/vendor/github.com/juju/gomaasapi/testservice.go
new file mode 100644
index 0000000..aa582da
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/testservice.go
@@ -0,0 +1,1672 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"mime/multipart"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"text/template"
+	"time"
+
+	"gopkg.in/mgo.v2/bson"
+)
+
+// TestMAASObject is a fake MAAS server MAASObject.
+type TestMAASObject struct {
+	MAASObject
+	TestServer *TestServer
+}
+
+// checkError is a shorthand helper that panics if err is not nil.
+func checkError(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
+
+// NewTestMAAS returns a TestMAASObject that implements the MAASObject
+// interface and thus can be used as a test object instead of the one returned
+// by gomaasapi.NewMAAS().
+func NewTestMAAS(version string) *TestMAASObject {
+	server := NewTestServer(version)
+	authClient, err := NewAnonymousClient(server.URL, version)
+	checkError(err)
+	maas := NewMAAS(*authClient)
+	return &TestMAASObject{*maas, server}
+}
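+
+// Editor's note, a minimal usage sketch: construct the fake MAAS, seed it
+// with state through TestServer, and point gomaasapi calls at it.
+//
+//	maas := NewTestMAAS("1.0")
+//	defer maas.Close()
+//	maas.TestServer.NewNode(`{"system_id": "node-1"}`)
+//	nodes := maas.TestServer.Nodes()  // map keyed by system_id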
+
+// Close shuts down the test server.
+func (testMAASObject *TestMAASObject) Close() {
+	testMAASObject.TestServer.Close()
+}
+
+// A TestServer is an HTTP server listening on a system-chosen port on the
+// local loopback interface, which simulates the behavior of a MAAS server.
+// It is intended for use in end-to-end HTTP tests using the gomaasapi
+// library.
+type TestServer struct {
+	*httptest.Server
+	serveMux   *http.ServeMux
+	client     Client
+	nodes      map[string]MAASObject
+	ownedNodes map[string]bool
+	// mapping system_id -> list of operations performed.
+	nodeOperations map[string][]string
+	// list of operations performed at the /nodes/ level.
+	nodesOperations []string
+	// mapping system_id -> list of Values passed when performing
+	// operations
+	nodeOperationRequestValues map[string][]url.Values
+	// list of Values passed when performing operations at the
+	// /nodes/ level.
+	nodesOperationRequestValues []url.Values
+	nodeMetadata                map[string]Node
+	files                       map[string]MAASObject
+	networks                    map[string]MAASObject
+	networksPerNode             map[string][]string
+	ipAddressesPerNetwork       map[string][]string
+	version                     string
+	macAddressesPerNetwork      map[string]map[string]JSONObject
+	nodeDetails                 map[string]string
+	zones                       map[string]JSONObject
+	// bootImages is a map of nodegroup UUIDs to boot-image objects.
+	bootImages map[string][]JSONObject
+	// nodegroupsInterfaces is a map of nodegroup UUIDs to interface
+	// objects.
+	nodegroupsInterfaces map[string][]JSONObject
+
+	// versionJSON is the response to the /version/ endpoint listing the
+	// capabilities of the MAAS server.
+	versionJSON string
+
+	// devices is a map of device UUIDs to devices.
+	devices map[string]*TestDevice
+
+	subnets        map[uint]TestSubnet
+	subnetNameToID map[string]uint
+	nextSubnet     uint
+	spaces         map[uint]*TestSpace
+	spaceNameToID  map[string]uint
+	nextSpace      uint
+	vlans          map[int]TestVLAN
+	nextVLAN       int
+}
+
+type TestDevice struct {
+	IPAddresses  []string
+	SystemId     string
+	MACAddresses []string
+	Parent       string
+	Hostname     string
+
+	// Not part of the device definition but used by the template.
+	APIVersion string
+}
+
+func getNodesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/nodes/", version)
+}
+
+func getNodeURL(version, systemId string) string {
+	return fmt.Sprintf("/api/%s/nodes/%s/", version, systemId)
+}
+
+func getNodeURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/nodes/([^/]*)/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getDevicesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/devices/", version)
+}
+
+func getDeviceURL(version, systemId string) string {
+	return fmt.Sprintf("/api/%s/devices/%s/", version, systemId)
+}
+
+func getDeviceURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/devices/([^/]*)/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getFilesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/files/", version)
+}
+
+func getFileURL(version, filename string) string {
+	// Use a url.URL value so the filename is correctly percent-escaped.
+	u := url.URL{}
+	u.Path = fmt.Sprintf("/api/%s/files/%s/", version, filename)
+	return u.String()
+}
+
+func getFileURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/files/(.*)/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getNetworksEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/networks/", version)
+}
+
+func getNetworkURL(version, name string) string {
+	return fmt.Sprintf("/api/%s/networks/%s/", version, name)
+}
+
+func getNetworkURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/networks/(.*)/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getIPAddressesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/ipaddresses/", version)
+}
+
+func getMACAddressURL(version, systemId, macAddress string) string {
+	return fmt.Sprintf("/api/%s/nodes/%s/macs/%s/", version, systemId, url.QueryEscape(macAddress))
+}
+
+func getVersionURL(version string) string {
+	return fmt.Sprintf("/api/%s/version/", version)
+}
+
+func getNodegroupsEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/nodegroups/", version)
+}
+
+func getNodegroupURL(version, uuid string) string {
+	return fmt.Sprintf("/api/%s/nodegroups/%s/", version, uuid)
+}
+
+func getNodegroupsInterfacesURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/nodegroups/([^/]*)/interfaces/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getBootimagesURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/nodegroups/([^/]*)/boot-images/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getZonesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/zones/", version)
+}
+
+// Clear clears all the fake data stored and recorded by the test server
+// (nodes, recorded operations, etc.).
+func (server *TestServer) Clear() {
+	server.nodes = make(map[string]MAASObject)
+	server.ownedNodes = make(map[string]bool)
+	server.nodesOperations = make([]string, 0)
+	server.nodeOperations = make(map[string][]string)
+	server.nodesOperationRequestValues = make([]url.Values, 0)
+	server.nodeOperationRequestValues = make(map[string][]url.Values)
+	server.nodeMetadata = make(map[string]Node)
+	server.files = make(map[string]MAASObject)
+	server.networks = make(map[string]MAASObject)
+	server.networksPerNode = make(map[string][]string)
+	server.ipAddressesPerNetwork = make(map[string][]string)
+	server.macAddressesPerNetwork = make(map[string]map[string]JSONObject)
+	server.nodeDetails = make(map[string]string)
+	server.bootImages = make(map[string][]JSONObject)
+	server.nodegroupsInterfaces = make(map[string][]JSONObject)
+	server.zones = make(map[string]JSONObject)
+	server.versionJSON = `{"capabilities": ["networks-management","static-ipaddresses","devices-management","network-deployment-ubuntu"]}`
+	server.devices = make(map[string]*TestDevice)
+	server.subnets = make(map[uint]TestSubnet)
+	server.subnetNameToID = make(map[string]uint)
+	server.nextSubnet = 1
+	server.spaces = make(map[uint]*TestSpace)
+	server.spaceNameToID = make(map[string]uint)
+	server.nextSpace = 1
+	server.vlans = make(map[int]TestVLAN)
+	server.nextVLAN = 1
+}
+
+// SetVersionJSON sets the JSON response (capabilities) returned from the
+// /version/ endpoint.
+func (server *TestServer) SetVersionJSON(json string) {
+	server.versionJSON = json
+}
+
+// NodesOperations returns the list of operations performed at the /nodes/
+// level.
+func (server *TestServer) NodesOperations() []string {
+	return server.nodesOperations
+}
+
+// NodeOperations returns the map containing the list of the operations
+// performed for each node.
+func (server *TestServer) NodeOperations() map[string][]string {
+	return server.nodeOperations
+}
+
+// NodesOperationRequestValues returns the list of url.Values extracted
+// from the request used when performing operations at the /nodes/ level.
+func (server *TestServer) NodesOperationRequestValues() []url.Values {
+	return server.nodesOperationRequestValues
+}
+
+// NodeOperationRequestValues returns the map containing the list of the
+// url.Values extracted from the request used when performing operations
+// on nodes.
+func (server *TestServer) NodeOperationRequestValues() map[string][]url.Values {
+	return server.nodeOperationRequestValues
+}
+
+func parseRequestValues(request *http.Request) url.Values {
+	var requestValues url.Values
+	if request.Header.Get("Content-Type") == "application/x-www-form-urlencoded" {
+		if request.PostForm == nil {
+			if err := request.ParseForm(); err != nil {
+				panic(err)
+			}
+		}
+		requestValues = request.PostForm
+	}
+	return requestValues
+}
+
+func (server *TestServer) addNodesOperation(operation string, request *http.Request) url.Values {
+	requestValues := parseRequestValues(request)
+	server.nodesOperations = append(server.nodesOperations, operation)
+	server.nodesOperationRequestValues = append(server.nodesOperationRequestValues, requestValues)
+	return requestValues
+}
+
+func (server *TestServer) addNodeOperation(systemId, operation string, request *http.Request) url.Values {
+	operations, present := server.nodeOperations[systemId]
+	operationRequestValues, present2 := server.nodeOperationRequestValues[systemId]
+	if present != present2 {
+		panic("inconsistent state: nodeOperations and nodeOperationRequestValues don't have the same keys.")
+	}
+	requestValues := parseRequestValues(request)
+	if !present {
+		operations = []string{operation}
+		operationRequestValues = []url.Values{requestValues}
+	} else {
+		operations = append(operations, operation)
+		operationRequestValues = append(operationRequestValues, requestValues)
+	}
+	server.nodeOperations[systemId] = operations
+	server.nodeOperationRequestValues[systemId] = operationRequestValues
+	return requestValues
+}
+
+// NewNode creates a MAAS node.  The provided string should be a valid json
+// string representing a map and contain a string value for the key
+// 'system_id'.  e.g. `{"system_id": "mysystemid"}`.
+// If one of these conditions is not met, NewNode panics.
+func (server *TestServer) NewNode(jsonText string) MAASObject {
+	var attrs map[string]interface{}
+	err := json.Unmarshal([]byte(jsonText), &attrs)
+	checkError(err)
+	systemIdEntry, hasSystemId := attrs["system_id"]
+	if !hasSystemId {
+		panic("The given map json string does not contain a 'system_id' value.")
+	}
+	systemId := systemIdEntry.(string)
+	attrs[resourceURI] = getNodeURL(server.version, systemId)
+	if _, hasStatus := attrs["status"]; !hasStatus {
+		attrs["status"] = NodeStatusDeployed
+	}
+	obj := newJSONMAASObject(attrs, server.client)
+	server.nodes[systemId] = obj
+	return obj
+}
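+
+// For instance (editor's note), the call below seeds a node whose status
+// defaults to NodeStatusDeployed because no "status" key is given; any
+// extra attributes are stored verbatim:
+//
+//	server.NewNode(`{"system_id": "mysystemid", "hostname": "host1"}`)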
+
+// Nodes returns a map associating all the nodes' system ids with the nodes'
+// objects.
+func (server *TestServer) Nodes() map[string]MAASObject {
+	return server.nodes
+}
+
+// OwnedNodes returns a map whose keys represent the nodes that are currently
+// allocated.
+func (server *TestServer) OwnedNodes() map[string]bool {
+	return server.ownedNodes
+}
+
+// NewFile creates a file in the test MAAS server.
+func (server *TestServer) NewFile(filename string, filecontent []byte) MAASObject {
+	attrs := make(map[string]interface{})
+	attrs[resourceURI] = getFileURL(server.version, filename)
+	base64Content := base64.StdEncoding.EncodeToString(filecontent)
+	attrs["content"] = base64Content
+	attrs["filename"] = filename
+
+	// Allocate an arbitrary URL here.  It would be nice if the caller
+	// could do this, but that would change the API and require many
+	// changes.
+	escapedName := url.QueryEscape(filename)
+	attrs["anon_resource_uri"] = "/maas/1.0/files/?op=get_by_key&key=" + escapedName + "_key"
+
+	obj := newJSONMAASObject(attrs, server.client)
+	server.files[filename] = obj
+	return obj
+}
+
+func (server *TestServer) Files() map[string]MAASObject {
+	return server.files
+}
+
+// ChangeNode updates a node with the given key/value.
+func (server *TestServer) ChangeNode(systemId, key, value string) {
+	node, found := server.nodes[systemId]
+	if !found {
+		panic("No node with such 'system_id'.")
+	}
+	node.GetMap()[key] = maasify(server.client, value)
+}
+
+// NewIPAddress creates a new static IP address reservation for the
+// given network/subnet and ipAddress. While networks are being deprecated,
+// try the given name as both a network and a subnet.
+func (server *TestServer) NewIPAddress(ipAddress, networkOrSubnet string) {
+	_, foundNetwork := server.networks[networkOrSubnet]
+	subnetID, foundSubnet := server.subnetNameToID[networkOrSubnet]
+
+	if !foundNetwork && !foundSubnet {
+		panic("No such network or subnet: " + networkOrSubnet)
+	}
+	if foundNetwork {
+		ips, found := server.ipAddressesPerNetwork[networkOrSubnet]
+		if found {
+			ips = append(ips, ipAddress)
+		} else {
+			ips = []string{ipAddress}
+		}
+		server.ipAddressesPerNetwork[networkOrSubnet] = ips
+	} else {
+		subnet := server.subnets[subnetID]
+		netIp := net.ParseIP(ipAddress)
+		if netIp == nil {
+			panic(ipAddress + " is invalid")
+		}
+		ip := IPFromNetIP(netIp)
+		ip.Purpose = []string{"assigned-ip"}
+		subnet.InUseIPAddresses = append(subnet.InUseIPAddresses, ip)
+		server.subnets[subnetID] = subnet
+	}
+}
+
+// RemoveIPAddress removes the given existing ipAddress and returns
+// whether it was actually removed.
+func (server *TestServer) RemoveIPAddress(ipAddress string) bool {
+	for network, ips := range server.ipAddressesPerNetwork {
+		for i, ip := range ips {
+			if ip == ipAddress {
+				ips = append(ips[:i], ips[i+1:]...)
+				server.ipAddressesPerNetwork[network] = ips
+				return true
+			}
+		}
+	}
+	for _, device := range server.devices {
+		for i, addr := range device.IPAddresses {
+			if addr == ipAddress {
+				device.IPAddresses = append(device.IPAddresses[:i], device.IPAddresses[i+1:]...)
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// IPAddresses returns the map with network names as keys and slices
+// of IP addresses belonging to each network as values.
+func (server *TestServer) IPAddresses() map[string][]string {
+	return server.ipAddressesPerNetwork
+}
+
+// NewNetwork creates a network in the test MAAS server
+func (server *TestServer) NewNetwork(jsonText string) MAASObject {
+	var attrs map[string]interface{}
+	err := json.Unmarshal([]byte(jsonText), &attrs)
+	checkError(err)
+	nameEntry, hasName := attrs["name"]
+	_, hasIP := attrs["ip"]
+	_, hasNetmask := attrs["netmask"]
+	if !hasName || !hasIP || !hasNetmask {
+		panic("The given map json string does not contain a 'name', 'ip', or 'netmask' value.")
+	}
+	// TODO(gz): sanity-check the other fields as well.
+	name := nameEntry.(string)
+	attrs[resourceURI] = getNetworkURL(server.version, name)
+	obj := newJSONMAASObject(attrs, server.client)
+	server.networks[name] = obj
+	return obj
+}
+
+// NewNodegroupInterface adds a nodegroup-interface for the specified
+// nodegroup in the test MAAS server.
+func (server *TestServer) NewNodegroupInterface(uuid, jsonText string) JSONObject {
+	_, ok := server.bootImages[uuid]
+	if !ok {
+		panic("no nodegroup with the given UUID")
+	}
+	var attrs map[string]interface{}
+	err := json.Unmarshal([]byte(jsonText), &attrs)
+	checkError(err)
+	requiredMembers := []string{"ip_range_high", "ip_range_low", "broadcast_ip", "static_ip_range_low", "static_ip_range_high", "name", "ip", "subnet_mask", "management", "interface"}
+	for _, member := range requiredMembers {
+		_, hasMember := attrs[member]
+		if !hasMember {
+			panic(fmt.Sprintf("The given map json string does not contain a required %q", member))
+		}
+	}
+	obj := maasify(server.client, attrs)
+	server.nodegroupsInterfaces[uuid] = append(server.nodegroupsInterfaces[uuid], obj)
+	return obj
+}
+
+func (server *TestServer) ConnectNodeToNetwork(systemId, name string) {
+	_, hasNode := server.nodes[systemId]
+	if !hasNode {
+		panic("no node with the given system id")
+	}
+	_, hasNetwork := server.networks[name]
+	if !hasNetwork {
+		panic("no network with the given name")
+	}
+	networkNames := server.networksPerNode[systemId]
+	server.networksPerNode[systemId] = append(networkNames, name)
+}
+
+func (server *TestServer) ConnectNodeToNetworkWithMACAddress(systemId, networkName, macAddress string) {
+	node, hasNode := server.nodes[systemId]
+	if !hasNode {
+		panic("no node with the given system id")
+	}
+	if _, hasNetwork := server.networks[networkName]; !hasNetwork {
+		panic("no network with the given name")
+	}
+	networkNames := server.networksPerNode[systemId]
+	server.networksPerNode[systemId] = append(networkNames, networkName)
+	attrs := make(map[string]interface{})
+	attrs[resourceURI] = getMACAddressURL(server.version, systemId, macAddress)
+	attrs["mac_address"] = macAddress
+	array := []JSONObject{}
+	if set, ok := node.GetMap()["macaddress_set"]; ok {
+		var err error
+		array, err = set.GetArray()
+		if err != nil {
+			panic(err)
+		}
+	}
+	array = append(array, maasify(server.client, attrs))
+	node.GetMap()["macaddress_set"] = JSONObject{value: array, client: server.client}
+	if _, ok := server.macAddressesPerNetwork[networkName]; !ok {
+		server.macAddressesPerNetwork[networkName] = map[string]JSONObject{}
+	}
+	server.macAddressesPerNetwork[networkName][systemId] = maasify(server.client, attrs)
+}
+
+// AddBootImage adds a boot-image object to the specified nodegroup.
+func (server *TestServer) AddBootImage(nodegroupUUID string, jsonText string) {
+	var attrs map[string]interface{}
+	err := json.Unmarshal([]byte(jsonText), &attrs)
+	checkError(err)
+	if _, ok := attrs["architecture"]; !ok {
+		panic("The boot-image json string does not contain an 'architecture' value.")
+	}
+	if _, ok := attrs["release"]; !ok {
+		panic("The boot-image json string does not contain a 'release' value.")
+	}
+	obj := maasify(server.client, attrs)
+	server.bootImages[nodegroupUUID] = append(server.bootImages[nodegroupUUID], obj)
+}
+
+// AddZone adds a physical zone to the server.
+func (server *TestServer) AddZone(name, description string) {
+	attrs := map[string]interface{}{
+		"name":        name,
+		"description": description,
+	}
+	obj := maasify(server.client, attrs)
+	server.zones[name] = obj
+}
+
+func (server *TestServer) AddDevice(device *TestDevice) {
+	server.devices[device.SystemId] = device
+}
+
+func (server *TestServer) Devices() map[string]*TestDevice {
+	return server.devices
+}
+
+// NewTestServer starts and returns a new MAAS test server. The caller should call Close when finished, to shut it down.
+func NewTestServer(version string) *TestServer {
+	server := &TestServer{version: version}
+
+	serveMux := http.NewServeMux()
+	devicesURL := getDevicesEndpoint(server.version)
+	// Register handler for '/api/<version>/devices/*'.
+	serveMux.HandleFunc(devicesURL, func(w http.ResponseWriter, r *http.Request) {
+		devicesHandler(server, w, r)
+	})
+	nodesURL := getNodesEndpoint(server.version)
+	// Register handler for '/api/<version>/nodes/*'.
+	serveMux.HandleFunc(nodesURL, func(w http.ResponseWriter, r *http.Request) {
+		nodesHandler(server, w, r)
+	})
+	filesURL := getFilesEndpoint(server.version)
+	// Register handler for '/api/<version>/files/*'.
+	serveMux.HandleFunc(filesURL, func(w http.ResponseWriter, r *http.Request) {
+		filesHandler(server, w, r)
+	})
+	networksURL := getNetworksEndpoint(server.version)
+	// Register handler for '/api/<version>/networks/'.
+	serveMux.HandleFunc(networksURL, func(w http.ResponseWriter, r *http.Request) {
+		networksHandler(server, w, r)
+	})
+	ipAddressesURL := getIPAddressesEndpoint(server.version)
+	// Register handler for '/api/<version>/ipaddresses/'.
+	serveMux.HandleFunc(ipAddressesURL, func(w http.ResponseWriter, r *http.Request) {
+		ipAddressesHandler(server, w, r)
+	})
+	versionURL := getVersionURL(server.version)
+	// Register handler for '/api/<version>/version/'.
+	serveMux.HandleFunc(versionURL, func(w http.ResponseWriter, r *http.Request) {
+		versionHandler(server, w, r)
+	})
+	// Register handler for '/api/<version>/nodegroups/*'.
+	nodegroupsURL := getNodegroupsEndpoint(server.version)
+	serveMux.HandleFunc(nodegroupsURL, func(w http.ResponseWriter, r *http.Request) {
+		nodegroupsHandler(server, w, r)
+	})
+
+	// Register handler for '/api/<version>/zones/*'.
+	zonesURL := getZonesEndpoint(server.version)
+	serveMux.HandleFunc(zonesURL, func(w http.ResponseWriter, r *http.Request) {
+		zonesHandler(server, w, r)
+	})
+
+	subnetsURL := getSubnetsEndpoint(server.version)
+	serveMux.HandleFunc(subnetsURL, func(w http.ResponseWriter, r *http.Request) {
+		subnetsHandler(server, w, r)
+	})
+
+	spacesURL := getSpacesEndpoint(server.version)
+	serveMux.HandleFunc(spacesURL, func(w http.ResponseWriter, r *http.Request) {
+		spacesHandler(server, w, r)
+	})
+
+	vlansURL := getVLANsEndpoint(server.version)
+	serveMux.HandleFunc(vlansURL, func(w http.ResponseWriter, r *http.Request) {
+		vlansHandler(server, w, r)
+	})
+
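+	// Editor's note: the mutex below serializes every request, so the
+	// handlers can mutate the server's maps without further locking; the
+	// fake server intentionally processes one request at a time.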
+	var mu sync.Mutex
+	singleFile := func(w http.ResponseWriter, req *http.Request) {
+		mu.Lock()
+		defer mu.Unlock()
+		serveMux.ServeHTTP(w, req)
+	}
+
+	newServer := httptest.NewServer(http.HandlerFunc(singleFile))
+	client, err := NewAnonymousClient(newServer.URL, "1.0")
+	checkError(err)
+	server.Server = newServer
+	server.serveMux = serveMux
+	server.client = *client
+	server.Clear()
+	return server
+}
+
+// devicesHandler handles requests for '/api/<version>/devices/*'.
+func devicesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	deviceURLRE := getDeviceURLRE(server.version)
+	deviceURLMatch := deviceURLRE.FindStringSubmatch(r.URL.Path)
+	devicesURL := getDevicesEndpoint(server.version)
+	switch {
+	case r.URL.Path == devicesURL:
+		devicesTopLevelHandler(server, w, r, op)
+	case deviceURLMatch != nil:
+		// Request for a single device.
+		deviceHandler(server, w, r, deviceURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// devicesTopLevelHandler handles a request for /api/<version>/devices/
+// (with no device id following as part of the path).
+func devicesTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
+	switch {
+	case r.Method == "GET" && op == "list":
+		// Device listing operation.
+		deviceListingHandler(server, w, r)
+	case r.Method == "POST" && op == "new":
+		newDeviceHandler(server, w, r)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+func macMatches(mac string, device *TestDevice) bool {
+	return contains(device.MACAddresses, mac)
+}
+
+// deviceListingHandler handles requests for '/devices/'.
+func deviceListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	// TODO(mfoord): support filtering by hostname and id
+	macs, hasMac := values["mac_address"]
+	var matchedDevices []*TestDevice
+	if !hasMac {
+		for _, device := range server.devices {
+			matchedDevices = append(matchedDevices, device)
+		}
+	} else {
+		for _, mac := range macs {
+			for _, device := range server.devices {
+				if macMatches(mac, device) {
+					matchedDevices = append(matchedDevices, device)
+				}
+			}
+		}
+	}
+	deviceChunks := make([]string, len(matchedDevices))
+	for i := range matchedDevices {
+		deviceChunks[i] = renderDevice(matchedDevices[i])
+	}
+	jsonText := fmt.Sprintf("[%v]", strings.Join(deviceChunks, ", "))
+
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, jsonText)
+}
+
+var templateFuncs = template.FuncMap{
+	"quotedList": func(items []string) string {
+		var pieces []string
+		for _, item := range items {
+			pieces = append(pieces, fmt.Sprintf("%q", item))
+		}
+		return strings.Join(pieces, ", ")
+	},
+	"last": func(items []string) []string {
+		if len(items) == 0 {
+			return []string{}
+		}
+		return items[len(items)-1:]
+	},
+	"allButLast": func(items []string) []string {
+		if len(items) < 2 {
+			return []string{}
+		}
+		return items[0 : len(items)-1]
+	},
+}
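+
+// Editor's note: for MACAddresses = []string{"aa:bb", "cc:dd"} the helpers
+// above evaluate as follows inside deviceTemplate below; splitting the list
+// into allButLast/last is what lets the template emit valid JSON without a
+// trailing comma.
+//
+//	quotedList -> "aa:bb", "cc:dd"
+//	allButLast -> ["aa:bb"]   (each entry rendered with a trailing comma)
+//	last       -> ["cc:dd"]   (rendered without one)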
+
+const (
+	// The json template for generating new devices.
+	// TODO(mfoord): set resource_uri in MAC addresses
+	deviceTemplate = `{
+	"macaddress_set": [{{range .MACAddresses | allButLast}}
+	    {
+		"mac_address": "{{.}}"
+	    },{{end}}{{range .MACAddresses | last}}
+	    {
+		"mac_address": "{{.}}"
+	    }{{end}}
+	],
+	"zone": {
+	    "resource_uri": "/MAAS/api/{{.APIVersion}}/zones/default/",
+	    "name": "default",
+	    "description": ""
+	},
+	"parent": "{{.Parent}}",
+	"ip_addresses": [{{.IPAddresses | quotedList }}],
+	"hostname": "{{.Hostname}}",
+	"tag_names": [],
+	"owner": "maas-admin",
+	"system_id": "{{.SystemId}}",
+	"resource_uri": "/MAAS/api/{{.APIVersion}}/devices/{{.SystemId}}/"
+}`
+)
+
+func renderDevice(device *TestDevice) string {
+	t := template.New("Device template")
+	t = t.Funcs(templateFuncs)
+	t, err := t.Parse(deviceTemplate)
+	checkError(err)
+	var buf bytes.Buffer
+	err = t.Execute(&buf, device)
+	checkError(err)
+	return buf.String()
+}
+
+func getValue(values url.Values, value string) (string, bool) {
+	result, hasResult := values[value]
+	if !hasResult || len(result) != 1 || result[0] == "" {
+		return "", false
+	}
+	return result[0], true
+}
+
+func getValues(values url.Values, key string) ([]string, bool) {
+	result, hasResult := values[key]
+	if !hasResult {
+		return nil, false
+	}
+	var output []string
+	for _, val := range result {
+		if val != "" {
+			output = append(output, val)
+		}
+	}
+	if len(output) == 0 {
+		return nil, false
+	}
+	return output, true
+}
+
+// newDeviceHandler creates, stores and returns new devices.
+func newDeviceHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	err := r.ParseForm()
+	checkError(err)
+	values := r.PostForm
+
+	// TODO(mfoord): generate a "proper" UUID for the system id.
+	uuid, err := generateNonce()
+	checkError(err)
+	systemId := fmt.Sprintf("node-%v", uuid)
+	// At least one MAC address must be specified.
+	// TODO(mfoord) we only support a single MAC in the test server.
+	macs, hasMacs := getValues(values, "mac_addresses")
+
+	// hostname and parent are optional.
+	// TODO(mfoord): we require both to be set in the test server.
+	hostname, hasHostname := getValue(values, "hostname")
+	parent, hasParent := getValue(values, "parent")
+	if !hasHostname || !hasMacs || !hasParent {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	device := &TestDevice{
+		MACAddresses: macs,
+		APIVersion:   server.version,
+		Parent:       parent,
+		Hostname:     hostname,
+		SystemId:     systemId,
+	}
+
+	deviceJSON := renderDevice(device)
+	server.devices[systemId] = device
+
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, deviceJSON)
+}
+
+// deviceHandler handles requests for '/api/<version>/devices/<system_id>/'.
+func deviceHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string, operation string) {
+	device, ok := server.devices[systemId]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	if r.Method == "GET" {
+		deviceJSON := renderDevice(device)
+		if operation == "" {
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, deviceJSON)
+			return
+		} else {
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	}
+	if r.Method == "POST" {
+		if operation == "claim_sticky_ip_address" {
+			err := r.ParseForm()
+			checkError(err)
+			values := r.PostForm
+			// TODO(mfoord): support optional mac_address parameter
+			// TODO(mfoord): requested_address should be optional
+			// and we should generate one if it isn't provided.
+			address, hasAddress := getValue(values, "requested_address")
+			if !hasAddress {
+				w.WriteHeader(http.StatusBadRequest)
+				return
+			}
+			device.IPAddresses = append(device.IPAddresses, address)
+			deviceJSON := renderDevice(device)
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, deviceJSON)
+			return
+		} else {
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	} else if r.Method == "DELETE" {
+		delete(server.devices, systemId)
+		w.WriteHeader(http.StatusNoContent)
+		return
+	}
+
+	// TODO(mfoord): support PUT method for updating device
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
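+
+// Editor's sketch of a claim_sticky_ip_address round trip against this
+// handler (hypothetical values):
+//
+//	POST /api/1.0/devices/node-1/?op=claim_sticky_ip_address
+//	     with form value requested_address=10.0.0.5
+//	// 200 OK; the returned device JSON now lists "10.0.0.5" in ip_addresses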
+
+// nodesHandler handles requests for '/api/<version>/nodes/*'.
+func nodesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	nodeURLRE := getNodeURLRE(server.version)
+	nodeURLMatch := nodeURLRE.FindStringSubmatch(r.URL.Path)
+	nodesURL := getNodesEndpoint(server.version)
+	switch {
+	case r.URL.Path == nodesURL:
+		nodesTopLevelHandler(server, w, r, op)
+	case nodeURLMatch != nil:
+		// Request for a single node.
+		nodeHandler(server, w, r, nodeURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// nodeHandler handles requests for '/api/<version>/nodes/<system_id>/'.
+func nodeHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string, operation string) {
+	node, ok := server.nodes[systemId]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	UUID, UUIDError := node.values["system_id"].GetString()
+	if UUIDError == nil {
+		i, err := JSONObjectFromStruct(server.client, server.nodeMetadata[UUID].Interfaces)
+		checkError(err)
+		node.values["interface_set"] = i
+	}
+
+	if r.Method == "GET" {
+		if operation == "" {
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, marshalNode(node))
+			return
+		} else if operation == "details" {
+			nodeDetailsHandler(server, w, r, systemId)
+			return
+		} else {
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	}
+	if r.Method == "POST" {
+		// The only operations supported are "start", "stop" and "release".
+		if operation == "start" || operation == "stop" || operation == "release" {
+			// Record operation on node.
+			server.addNodeOperation(systemId, operation, r)
+
+			if operation == "release" {
+				delete(server.OwnedNodes(), systemId)
+			}
+
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, marshalNode(node))
+			return
+		}
+
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+	if r.Method == "DELETE" {
+		delete(server.nodes, systemId)
+		w.WriteHeader(http.StatusOK)
+		return
+	}
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
+
+func contains(slice []string, val string) bool {
+	for _, item := range slice {
+		if item == val {
+			return true
+		}
+	}
+	return false
+}
+
+// nodeListingHandler handles requests for '/nodes/'.
+func nodeListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	ids, hasId := values["id"]
+	var convertedNodes = []map[string]JSONObject{}
+	for systemId, node := range server.nodes {
+		if !hasId || contains(ids, systemId) {
+			convertedNodes = append(convertedNodes, node.GetMap())
+		}
+	}
+	res, err := json.MarshalIndent(convertedNodes, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// nodeDeploymentStatusHandler handles requests for '/nodes/?op=deployment_status'.
+func nodeDeploymentStatusHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	nodes := values["nodes"]
+	var nodeStatus = make(map[string]interface{})
+	for _, systemId := range nodes {
+		node := server.nodes[systemId]
+		field, err := node.GetField("status")
+		if err != nil {
+			continue
+		}
+		switch field {
+		case NodeStatusDeployed:
+			nodeStatus[systemId] = "Deployed"
+		case NodeStatusFailedDeployment:
+			nodeStatus[systemId] = "Failed deployment"
+		default:
+			nodeStatus[systemId] = "Not in Deployment"
+		}
+	}
+	obj := maasify(server.client, nodeStatus)
+	res, err := json.MarshalIndent(obj, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// findFreeNode looks for a node that is currently available, and
+// matches the specified filter.
+func findFreeNode(server *TestServer, filter url.Values) *MAASObject {
+	for systemID, node := range server.Nodes() {
+		_, present := server.OwnedNodes()[systemID]
+		if !present {
+			var agentName, nodeName, zoneName, mem, cpuCores, arch string
+			for k := range filter {
+				switch k {
+				case "agent_name":
+					agentName = filter.Get(k)
+				case "name":
+					nodeName = filter.Get(k)
+				case "zone":
+					zoneName = filter.Get(k)
+				case "mem":
+					mem = filter.Get(k)
+				case "arch":
+					arch = filter.Get(k)
+				case "cpu-cores":
+					cpuCores = filter.Get(k)
+				}
+			}
+			if nodeName != "" && !matchField(node, "hostname", nodeName) {
+				continue
+			}
+			if zoneName != "" && !matchField(node, "zone", zoneName) {
+				continue
+			}
+			if mem != "" && !matchNumericField(node, "memory", mem) {
+				continue
+			}
+			if arch != "" && !matchArchitecture(node, "architecture", arch) {
+				continue
+			}
+			if cpuCores != "" && !matchNumericField(node, "cpu_count", cpuCores) {
+				continue
+			}
+			if agentName != "" {
+				agentNameObj := maasify(server.client, agentName)
+				node.GetMap()["agent_name"] = agentNameObj
+			} else {
+				delete(node.GetMap(), "agent_name")
+			}
+			return &node
+		}
+	}
+	return nil
+}
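+
+// Editor's note, a worked example: a node with "memory" 2048, "cpu_count" 4,
+// and "architecture" "amd64/generic" matches the filter
+// mem=1024&cpu-cores=4&arch=amd64, since numeric constraints are lower
+// bounds (see matchNumericField) and only the architecture segment before
+// '/' is compared (see matchArchitecture).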
+
+func matchArchitecture(node MAASObject, k, v string) bool {
+	field, err := node.GetField(k)
+	if err != nil {
+		return false
+	}
+	baseArch := strings.Split(field, "/")
+	return v == baseArch[0]
+}
+
+func matchNumericField(node MAASObject, k, v string) bool {
+	field, ok := node.GetMap()[k]
+	if !ok {
+		return false
+	}
+	nodeVal, err := field.GetFloat64()
+	if err != nil {
+		return false
+	}
+	constraintVal, err := strconv.ParseFloat(v, 64)
+	if err != nil {
+		return false
+	}
+	return constraintVal <= nodeVal
+}
+
+func matchField(node MAASObject, k, v string) bool {
+	field, err := node.GetField(k)
+	if err != nil {
+		return false
+	}
+	return field == v
+}
+
+// nodesAcquireHandler simulates acquiring a node.
+func nodesAcquireHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	requestValues := server.addNodesOperation("acquire", r)
+	node := findFreeNode(server, requestValues)
+	if node == nil {
+		w.WriteHeader(http.StatusConflict)
+	} else {
+		systemId, err := node.GetField("system_id")
+		checkError(err)
+		server.OwnedNodes()[systemId] = true
+		res, err := json.MarshalIndent(node, "", "  ")
+		checkError(err)
+		// Record operation.
+		server.addNodeOperation(systemId, "acquire", r)
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprint(w, string(res))
+	}
+}
+
+// nodesReleaseHandler simulates releasing multiple nodes.
+func nodesReleaseHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	server.addNodesOperation("release", r)
+	values := server.NodesOperationRequestValues()
+	systemIds := values[len(values)-1]["nodes"]
+	var unknown []string
+	for _, systemId := range systemIds {
+		if _, ok := server.Nodes()[systemId]; !ok {
+			unknown = append(unknown, systemId)
+		}
+	}
+	if len(unknown) > 0 {
+		w.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(w, "Unknown node(s): %s.", strings.Join(unknown, ", "))
+		return
+	}
+	var releasedNodes = []map[string]JSONObject{}
+	for _, systemId := range systemIds {
+		if _, ok := server.OwnedNodes()[systemId]; !ok {
+			continue
+		}
+		delete(server.OwnedNodes(), systemId)
+		node := server.Nodes()[systemId]
+		releasedNodes = append(releasedNodes, node.GetMap())
+	}
+	res, err := json.MarshalIndent(releasedNodes, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// nodesTopLevelHandler handles a request for /api/<version>/nodes/
+// (with no node id following as part of the path).
+func nodesTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
+	switch {
+	case r.Method == "GET" && op == "list":
+		// Node listing operation.
+		nodeListingHandler(server, w, r)
+	case r.Method == "GET" && op == "deployment_status":
+		// Node deployment_status operation.
+		nodeDeploymentStatusHandler(server, w, r)
+	case r.Method == "POST" && op == "acquire":
+		nodesAcquireHandler(server, w, r)
+	case r.Method == "POST" && op == "release":
+		nodesReleaseHandler(server, w, r)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+// AddNodeDetails stores node details, expected in XML format.
+func (server *TestServer) AddNodeDetails(systemId, xmlText string) {
+	_, hasNode := server.nodes[systemId]
+	if !hasNode {
+		panic("no node with the given system id")
+	}
+	server.nodeDetails[systemId] = xmlText
+}
+
+const lldpXML = `
+<?xml version="1.0" encoding="UTF-8"?>
+<lldp label="LLDP neighbors"/>`
+
+// nodeDetailsHandler handles requests for '/api/<version>/nodes/<system_id>/?op=details'.
+func nodeDetailsHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string) {
+	attrs := make(map[string]interface{})
+	attrs["lldp"] = lldpXML
+	xmlText := server.nodeDetails[systemId]
+	attrs["lshw"] = []byte(xmlText)
+	res, err := bson.Marshal(attrs)
+	checkError(err)
+	w.Header().Set("Content-Type", "application/bson")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
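+
+// Editor's sketch: callers decode the payload with gopkg.in/mgo.v2/bson,
+// e.g. (with body holding the raw response bytes):
+//
+//	var details map[string]interface{}
+//	checkError(bson.Unmarshal(body, &details))
+//	lshw := details["lshw"]  // the XML registered via AddNodeDetails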
+
+// filesHandler handles requests for '/api/<version>/files/*'.
+func filesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	fileURLRE := getFileURLRE(server.version)
+	fileURLMatch := fileURLRE.FindStringSubmatch(r.URL.Path)
+	fileListingURL := getFilesEndpoint(server.version)
+	switch {
+	case r.Method == "GET" && op == "list" && r.URL.Path == fileListingURL:
+		// File listing operation.
+		fileListingHandler(server, w, r)
+	case op == "get" && r.Method == "GET" && r.URL.Path == fileListingURL:
+		getFileHandler(server, w, r)
+	case op == "add" && r.Method == "POST" && r.URL.Path == fileListingURL:
+		addFileHandler(server, w, r)
+	case fileURLMatch != nil:
+		// Request for a single file.
+		fileHandler(server, w, r, fileURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// listFilenames returns the names of those uploaded files whose names start
+// with the given prefix, sorted lexicographically.
+func listFilenames(server *TestServer, prefix string) []string {
+	var filenames = make([]string, 0)
+	for filename := range server.files {
+		if strings.HasPrefix(filename, prefix) {
+			filenames = append(filenames, filename)
+		}
+	}
+	sort.Strings(filenames)
+	return filenames
+}
+
+// stripContent copies a map of attributes representing an uploaded file,
+// but with the "content" attribute removed.
+func stripContent(original map[string]JSONObject) map[string]JSONObject {
+	newMap := make(map[string]JSONObject, len(original)-1)
+	for key, value := range original {
+		if key != "content" {
+			newMap[key] = value
+		}
+	}
+	return newMap
+}
+
+// fileListingHandler handles requests for '/api/<version>/files/?op=list'.
+func fileListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	prefix := values.Get("prefix")
+	filenames := listFilenames(server, prefix)
+
+	// Build a sorted list of the files as map[string]JSONObject objects.
+	convertedFiles := make([]map[string]JSONObject, 0)
+	for _, filename := range filenames {
+		// The "content" attribute is not in the listing.
+		fileMap := stripContent(server.files[filename].GetMap())
+		convertedFiles = append(convertedFiles, fileMap)
+	}
+	res, err := json.MarshalIndent(convertedFiles, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// fileHandler handles requests for '/api/<version>/files/<filename>/'.
+func fileHandler(server *TestServer, w http.ResponseWriter, r *http.Request, filename string, operation string) {
+	switch {
+	case r.Method == "DELETE":
+		delete(server.files, filename)
+		w.WriteHeader(http.StatusOK)
+	case r.Method == "GET":
+		// Retrieve a file's information (including content) as a JSON
+		// object.
+		file, ok := server.files[filename]
+		if !ok {
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+		jsonText, err := json.MarshalIndent(file, "", "  ")
+		if err != nil {
+			panic(err)
+		}
+		w.WriteHeader(http.StatusOK)
+		w.Write(jsonText)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// InternalError replies to the request with an HTTP 500 internal error.
+func InternalError(w http.ResponseWriter, r *http.Request, err error) {
+	http.Error(w, err.Error(), http.StatusInternalServerError)
+}
+
+// getFileHandler handles requests for
+// '/api/<version>/files/?op=get&filename=filename'.
+func getFileHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	filename := values.Get("filename")
+	file, found := server.files[filename]
+	if !found {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	base64Content, err := file.GetField("content")
+	if err != nil {
+		InternalError(w, r, err)
+		return
+	}
+	content, err := base64.StdEncoding.DecodeString(base64Content)
+	if err != nil {
+		InternalError(w, r, err)
+		return
+	}
+	w.Write(content)
+}
+
+func readMultipart(upload *multipart.FileHeader) ([]byte, error) {
+	file, err := upload.Open()
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	reader := bufio.NewReader(file)
+	return ioutil.ReadAll(reader)
+}
+
+// addFileHandler handles requests for '/api/<version>/files/?op=add&filename=filename'.
+func addFileHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	err := r.ParseMultipartForm(10000000)
+	checkError(err)
+
+	filename := r.Form.Get("filename")
+	if filename == "" {
+		panic("upload has no filename")
+	}
+
+	uploads := r.MultipartForm.File
+	if len(uploads) != 1 {
+		panic("the payload should contain one file and one file only")
+	}
+	var upload *multipart.FileHeader
+	for _, uploadContent := range uploads {
+		upload = uploadContent[0]
+	}
+	content, err := readMultipart(upload)
+	checkError(err)
+	server.NewFile(filename, content)
+	w.WriteHeader(http.StatusOK)
+}
+
+// networkListConnectedMACSHandler handles requests for '/api/<version>/networks/<network>/?op=list_connected_macs'
+func networkListConnectedMACSHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	networkURLRE := getNetworkURLRE(server.version)
+	networkURLREMatch := networkURLRE.FindStringSubmatch(r.URL.Path)
+	if networkURLREMatch == nil {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	networkName := networkURLREMatch[1]
+	convertedMacAddresses := []map[string]JSONObject{}
+	if macAddresses, ok := server.macAddressesPerNetwork[networkName]; ok {
+		for _, macAddress := range macAddresses {
+			m, err := macAddress.GetMap()
+			checkError(err)
+			convertedMacAddresses = append(convertedMacAddresses, m)
+		}
+	}
+	res, err := json.MarshalIndent(convertedMacAddresses, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// networksHandler handles requests for '/api/<version>/networks/?node=system_id'.
+func networksHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		panic("only networks GET operation implemented")
+	}
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	systemId := values.Get("node")
+	if op == "list_connected_macs" {
+		networkListConnectedMACSHandler(server, w, r)
+		return
+	}
+	if op != "" {
+		panic("only list_connected_macs and default operations implemented")
+	}
+	if systemId == "" {
+		panic("network missing associated node system id")
+	}
+	networks := []MAASObject{}
+	if networkNames, hasNetworks := server.networksPerNode[systemId]; hasNetworks {
+		networks = make([]MAASObject, len(networkNames))
+		for i, networkName := range networkNames {
+			networks[i] = server.networks[networkName]
+		}
+	}
+	res, err := json.MarshalIndent(networks, "", "  ")
+	checkError(err)
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// ipAddressesHandler handles requests for '/api/<version>/ipaddresses/'.
+func ipAddressesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	err := r.ParseForm()
+	checkError(err)
+	values := r.Form
+	op := values.Get("op")
+
+	switch r.Method {
+	case "GET":
+		if op != "" {
+			panic("expected empty op for GET, got " + op)
+		}
+		listIPAddressesHandler(server, w, r)
+		return
+	case "POST":
+		switch op {
+		case "reserve":
+			reserveIPAddressHandler(server, w, r, values.Get("network"), values.Get("requested_address"))
+			return
+		case "release":
+			releaseIPAddressHandler(server, w, r, values.Get("ip"))
+			return
+		default:
+			panic("expected op=release|reserve for POST, got " + op)
+		}
+	}
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
+
+func marshalIPAddress(server *TestServer, ipAddress string) (JSONObject, error) {
+	jsonTemplate := `{"alloc_type": 4, "ip": %q, "resource_uri": %q, "created": %q}`
+	uri := getIPAddressesEndpoint(server.version)
+	now := time.Now().UTC().Format(time.RFC3339)
+	bytes := []byte(fmt.Sprintf(jsonTemplate, ipAddress, uri, now))
+	return Parse(server.client, bytes)
+}
+
+func badRequestError(w http.ResponseWriter, err error) {
+	w.WriteHeader(http.StatusBadRequest)
+	fmt.Fprint(w, err.Error())
+}
+
+func listIPAddressesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	results := []MAASObject{}
+	for _, ips := range server.IPAddresses() {
+		for _, ip := range ips {
+			jsonObj, err := marshalIPAddress(server, ip)
+			if err != nil {
+				badRequestError(w, err)
+				return
+			}
+			maasObj, err := jsonObj.GetMAASObject()
+			if err != nil {
+				badRequestError(w, err)
+				return
+			}
+			results = append(results, maasObj)
+		}
+	}
+	res, err := json.MarshalIndent(results, "", "  ")
+	checkError(err)
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+func reserveIPAddressHandler(server *TestServer, w http.ResponseWriter, r *http.Request, network, reqAddress string) {
+	_, ipNet, err := net.ParseCIDR(network)
+	if err != nil {
+		badRequestError(w, fmt.Errorf("Invalid network parameter %s", network))
+		return
+	}
+	if reqAddress != "" {
+		// Validate "requested_address" parameter.
+		reqIP := net.ParseIP(reqAddress)
+		if reqIP == nil {
+			badRequestError(w, fmt.Errorf("failed to detect a valid IP address from u'%s'", reqAddress))
+			return
+		}
+		if !ipNet.Contains(reqIP) {
+			badRequestError(w, fmt.Errorf("%s is not inside the range %s", reqAddress, ipNet.String()))
+			return
+		}
+	}
+	// Find the network name matching the parsed CIDR.
+	foundNetworkName := ""
+	for netName, netObj := range server.networks {
+		// Get the "ip" and "netmask" attributes of the network.
+		netIP, err := netObj.GetField("ip")
+		checkError(err)
+		netMask, err := netObj.GetField("netmask")
+		checkError(err)
+
+		// Convert the netmask string to net.IPMask.
+		parts := strings.Split(netMask, ".")
+		ipMask := make(net.IPMask, len(parts))
+		for i, part := range parts {
+			intPart, err := strconv.Atoi(part)
+			checkError(err)
+			ipMask[i] = byte(intPart)
+		}
+		netNet := &net.IPNet{IP: net.ParseIP(netIP), Mask: ipMask}
+		if netNet.String() == network {
+			// Exact match found.
+			foundNetworkName = netName
+			break
+		}
+	}
+	if foundNetworkName == "" {
+		badRequestError(w, fmt.Errorf("No network found matching %s", network))
+		return
+	}
+	ips, found := server.ipAddressesPerNetwork[foundNetworkName]
+	if !found {
+		// This will be the first address.
+		ips = []string{}
+	}
+	reservedIP := ""
+	if reqAddress != "" {
+		// Use what the user provided. NOTE: Because this is testing
+		// code, no duplicates check is done.
+		reservedIP = reqAddress
+	} else {
+		// Generate an IP in the network range by incrementing the
+		// last byte of the network's IP.
+		firstIP := ipNet.IP
+		firstIP[len(firstIP)-1] += byte(len(ips) + 1)
+		reservedIP = firstIP.String()
+	}
+	ips = append(ips, reservedIP)
+	server.ipAddressesPerNetwork[foundNetworkName] = ips
+	jsonObj, err := marshalIPAddress(server, reservedIP)
+	checkError(err)
+	maasObj, err := jsonObj.GetMAASObject()
+	checkError(err)
+	res, err := json.MarshalIndent(maasObj, "", "  ")
+	checkError(err)
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
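+
+// Editor's sketch (hypothetical values): with a network registered as
+// {"name": "net1", "ip": "192.168.1.0", "netmask": "255.255.255.0"}, a POST
+// to /api/<version>/ipaddresses/?op=reserve with network=192.168.1.0/24 and
+// no requested_address hands out 192.168.1.1, then 192.168.1.2, and so on,
+// since the last byte is bumped by the number of existing reservations plus
+// one.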
+
+func releaseIPAddressHandler(server *TestServer, w http.ResponseWriter, r *http.Request, ip string) {
+	if netIP := net.ParseIP(ip); netIP == nil {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	if server.RemoveIPAddress(ip) {
+		w.WriteHeader(http.StatusOK)
+		return
+	}
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
+
+// versionHandler handles requests for '/api/<version>/version/'.
+func versionHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		panic("only version GET operation implemented")
+	}
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, server.versionJSON)
+}
+
+// nodegroupsHandler handles requests for '/api/<version>/nodegroups/*'.
+func nodegroupsHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	bootimagesURLRE := getBootimagesURLRE(server.version)
+	bootimagesURLMatch := bootimagesURLRE.FindStringSubmatch(r.URL.Path)
+	nodegroupsInterfacesURLRE := getNodegroupsInterfacesURLRE(server.version)
+	nodegroupsInterfacesURLMatch := nodegroupsInterfacesURLRE.FindStringSubmatch(r.URL.Path)
+	nodegroupsURL := getNodegroupsEndpoint(server.version)
+	switch {
+	case r.URL.Path == nodegroupsURL:
+		nodegroupsTopLevelHandler(server, w, r, op)
+	case bootimagesURLMatch != nil:
+		bootimagesHandler(server, w, r, bootimagesURLMatch[1], op)
+	case nodegroupsInterfacesURLMatch != nil:
+		nodegroupsInterfacesHandler(server, w, r, nodegroupsInterfacesURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// nodegroupsTopLevelHandler handles requests for '/api/<version>/nodegroups/'.
+func nodegroupsTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
+	if r.Method != "GET" || op != "list" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	nodegroups := []JSONObject{}
+	for uuid := range server.bootImages {
+		attrs := map[string]interface{}{
+			"uuid":      uuid,
+			resourceURI: getNodegroupURL(server.version, uuid),
+		}
+		obj := maasify(server.client, attrs)
+		nodegroups = append(nodegroups, obj)
+	}
+
+	res, err := json.MarshalIndent(nodegroups, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// bootimagesHandler handles requests for '/api/<version>/nodegroups/<uuid>/boot-images/'.
+func bootimagesHandler(server *TestServer, w http.ResponseWriter, r *http.Request, nodegroupUUID, op string) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	bootImages, ok := server.bootImages[nodegroupUUID]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+
+	res, err := json.MarshalIndent(bootImages, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// nodegroupsInterfacesHandler handles requests for '/api/<version>/nodegroups/<uuid>/interfaces/'
+func nodegroupsInterfacesHandler(server *TestServer, w http.ResponseWriter, r *http.Request, nodegroupUUID, op string) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+	_, ok := server.bootImages[nodegroupUUID]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+
+	interfaces, ok := server.nodegroupsInterfaces[nodegroupUUID]
+	if !ok {
+		// we already checked the nodegroup exists, so return an empty list
+		interfaces = []JSONObject{}
+	}
+	res, err := json.MarshalIndent(interfaces, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// zonesHandler handles requests for '/api/<version>/zones/'.
+func zonesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	if len(server.zones) == 0 {
+		// Until a zone is registered, behave as if the endpoint
+		// does not exist. This way we can simulate older MAAS
+		// servers that do not support zones.
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+
+	zones := make([]JSONObject, 0, len(server.zones))
+	for _, zone := range server.zones {
+		zones = append(zones, zone)
+	}
+	res, err := json.MarshalIndent(zones, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/testservice_spaces.go b/automation/vendor/github.com/juju/gomaasapi/testservice_spaces.go
new file mode 100644
index 0000000..c6c1617
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/testservice_spaces.go
@@ -0,0 +1,132 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"regexp"
+)
+
+func getSpacesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/spaces/", version)
+}
+
+// TestSpace is the MAAS API space representation
+type TestSpace struct {
+	Name        string       `json:"name"`
+	Subnets     []TestSubnet `json:"subnets"`
+	ResourceURI string       `json:"resource_uri"`
+	ID          uint         `json:"id"`
+}
+
+// spacesHandler handles requests for '/api/<version>/spaces/'.
+func spacesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	if op != "" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	spacesURLRE := regexp.MustCompile(`/spaces/(.+?)/`)
+	spacesURLMatch := spacesURLRE.FindStringSubmatch(r.URL.Path)
+	spacesURL := getSpacesEndpoint(server.version)
+
+	var ID uint
+	var gotID bool
+	if spacesURLMatch != nil {
+		ID, err = NameOrIDToID(spacesURLMatch[1], server.spaceNameToID, 1, uint(len(server.spaces)))
+
+		if err != nil {
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		gotID = true
+	}
+
+	switch r.Method {
+	case "GET":
+		w.Header().Set("Content-Type", "application/vnd.api+json")
+		if len(server.spaces) == 0 {
+			// Until a space is registered, behave as if the endpoint
+			// does not exist. This way we can simulate older MAAS
+			// servers that do not support spaces.
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		if r.URL.Path == spacesURL {
+			var spaces []*TestSpace
+			// Iterating by id rather than a dictionary iteration
+			// preserves the order of the spaces in the result.
+			for i := uint(1); i < server.nextSpace; i++ {
+				s, ok := server.spaces[i]
+				if ok {
+					server.setSubnetsOnSpace(s)
+					spaces = append(spaces, s)
+				}
+			}
+			err = json.NewEncoder(w).Encode(spaces)
+		} else if !gotID {
+			w.WriteHeader(http.StatusBadRequest)
+		} else {
+			err = json.NewEncoder(w).Encode(server.spaces[ID])
+		}
+		checkError(err)
+	case "POST":
+		//server.NewSpace(r.Body)
+	case "PUT":
+		//server.UpdateSpace(r.Body)
+	case "DELETE":
+		delete(server.spaces, ID)
+		w.WriteHeader(http.StatusOK)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+// CreateSpace is used to create new spaces on the server.
+type CreateSpace struct {
+	Name string `json:"name"`
+}
+
+func decodePostedSpace(spaceJSON io.Reader) CreateSpace {
+	var postedSpace CreateSpace
+	decoder := json.NewDecoder(spaceJSON)
+	err := decoder.Decode(&postedSpace)
+	checkError(err)
+	return postedSpace
+}
+
+// NewSpace creates a space in the test server
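+//
+// Illustrative use (request body assumed for this sketch):
+//
+//	space := server.NewSpace(strings.NewReader(`{"name": "storage"}`))
+//	// space.ID and space.ResourceURI are assigned by the test server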
+func (server *TestServer) NewSpace(spaceJSON io.Reader) *TestSpace {
+	postedSpace := decodePostedSpace(spaceJSON)
+	newSpace := &TestSpace{Name: postedSpace.Name}
+	newSpace.ID = server.nextSpace
+	newSpace.ResourceURI = fmt.Sprintf("/api/%s/spaces/%d/", server.version, int(server.nextSpace))
+	server.spaces[server.nextSpace] = newSpace
+	server.spaceNameToID[newSpace.Name] = newSpace.ID
+
+	server.nextSpace++
+	return newSpace
+}
+
+// setSubnetsOnSpace fetches the subnets for the specified space and adds them
+// to it.
+func (server *TestServer) setSubnetsOnSpace(space *TestSpace) {
+	subnets := []TestSubnet{}
+	for i := uint(1); i < server.nextSubnet; i++ {
+		subnet, ok := server.subnets[i]
+		if ok && subnet.Space == space.Name {
+			subnets = append(subnets, subnet)
+		}
+	}
+	space.Subnets = subnets
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/testservice_subnets.go b/automation/vendor/github.com/juju/gomaasapi/testservice_subnets.go
new file mode 100644
index 0000000..5438669
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/testservice_subnets.go
@@ -0,0 +1,396 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"sort"
+	"strings"
+)
+
+func getSubnetsEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/subnets/", version)
+}
+
+// CreateSubnet is used to receive new subnets via the MAAS API
+type CreateSubnet struct {
+	DNSServers []string `json:"dns_servers"`
+	Name       string   `json:"name"`
+	Space      string   `json:"space"`
+	GatewayIP  string   `json:"gateway_ip"`
+	CIDR       string   `json:"cidr"`
+
+	// VLAN this subnet belongs to. Currently ignored.
+	// TODO: Defaults to the default VLAN
+	// for the provided fabric or defaults to the default VLAN
+	// in the default fabric.
+	VLAN *uint `json:"vlan"`
+
+	// Fabric for the subnet. Currently ignored.
+	// TODO: Defaults to the fabric the provided
+	// VLAN belongs to or defaults to the default fabric.
+	Fabric *uint `json:"fabric"`
+
+	// VID of the VLAN this subnet belongs to. Currently ignored.
+	// TODO: Only used when vlan
+	// is not provided. Picks the VLAN with this VID in the provided
+	// fabric or the default fabric if one is not given.
+	VID *uint `json:"vid"`
+
+	// This is used for updates (PUT) and is ignored by create (POST)
+	ID uint `json:"id"`
+}
+
+// TestSubnet is the MAAS API subnet representation
+type TestSubnet struct {
+	DNSServers []string `json:"dns_servers"`
+	Name       string   `json:"name"`
+	Space      string   `json:"space"`
+	VLAN       TestVLAN `json:"vlan"`
+	GatewayIP  string   `json:"gateway_ip"`
+	CIDR       string   `json:"cidr"`
+
+	ResourceURI        string         `json:"resource_uri"`
+	ID                 uint           `json:"id"`
+	InUseIPAddresses   []IP           `json:"-"`
+	FixedAddressRanges []AddressRange `json:"-"`
+}
+
+// AddFixedAddressRange adds an AddressRange to the list of fixed address ranges
+// that the given subnet stores.
+func (server *TestServer) AddFixedAddressRange(subnetID uint, ar AddressRange) {
+	subnet := server.subnets[subnetID]
+	ar.startUint = IPFromString(ar.Start).UInt64()
+	ar.endUint = IPFromString(ar.End).UInt64()
+	subnet.FixedAddressRanges = append(subnet.FixedAddressRanges, ar)
+	server.subnets[subnetID] = subnet
+}
+
+// subnetsHandler handles requests for '/api/<version>/subnets/'.
+func subnetsHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	var err error
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	includeRangesString := strings.ToLower(values.Get("include_ranges"))
+	subnetsURLRE := regexp.MustCompile(`/subnets/(.+?)/`)
+	subnetsURLMatch := subnetsURLRE.FindStringSubmatch(r.URL.Path)
+	subnetsURL := getSubnetsEndpoint(server.version)
+
+	var ID uint
+	var gotID bool
+	if subnetsURLMatch != nil {
+		ID, err = NameOrIDToID(subnetsURLMatch[1], server.subnetNameToID, 1, uint(len(server.subnets)))
+
+		if err != nil {
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		gotID = true
+	}
+
+	var includeRanges bool
+	switch includeRangesString {
+	case "true", "yes", "1":
+		includeRanges = true
+	}
+
+	switch r.Method {
+	case "GET":
+		w.Header().Set("Content-Type", "application/vnd.api+json")
+		if len(server.subnets) == 0 {
+			// Until a subnet is registered, behave as if the endpoint
+			// does not exist. This way we can simulate older MAAS
+			// servers that do not support subnets.
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		if r.URL.Path == subnetsURL {
+			var subnets []TestSubnet
+			for i := uint(1); i < server.nextSubnet; i++ {
+				s, ok := server.subnets[i]
+				if ok {
+					subnets = append(subnets, s)
+				}
+			}
+			PrettyJsonWriter(subnets, w)
+		} else if !gotID {
+			w.WriteHeader(http.StatusBadRequest)
+		} else {
+			switch op {
+			case "unreserved_ip_ranges":
+				PrettyJsonWriter(server.subnetUnreservedIPRanges(server.subnets[ID]), w)
+			case "reserved_ip_ranges":
+				PrettyJsonWriter(server.subnetReservedIPRanges(server.subnets[ID]), w)
+			case "statistics":
+				PrettyJsonWriter(server.subnetStatistics(server.subnets[ID], includeRanges), w)
+			default:
+				PrettyJsonWriter(server.subnets[ID], w)
+			}
+		}
+		checkError(err)
+	case "POST":
+		server.NewSubnet(r.Body)
+	case "PUT":
+		server.UpdateSubnet(r.Body)
+	case "DELETE":
+		delete(server.subnets, ID)
+		w.WriteHeader(http.StatusOK)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+type addressList []IP
+
+func (a addressList) Len() int           { return len(a) }
+func (a addressList) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a addressList) Less(i, j int) bool { return a[i].UInt64() < a[j].UInt64() }
+
+// AddressRange is used to generate reserved IP address range lists
+type AddressRange struct {
+	Start        string `json:"start"`
+	startUint    uint64
+	End          string `json:"end"`
+	endUint      uint64
+	Purpose      []string `json:"purpose,omitempty"`
+	NumAddresses uint     `json:"num_addresses"`
+}
+
+// AddressRangeList is a list of AddressRange
+type AddressRangeList struct {
+	ar []AddressRange
+}
+
+// Append appends a new AddressRange to an AddressRangeList
+func (ranges *AddressRangeList) Append(startIP, endIP IP) {
+	var i AddressRange
+	i.Start, i.End = startIP.String(), endIP.String()
+	i.startUint, i.endUint = startIP.UInt64(), endIP.UInt64()
+	i.NumAddresses = uint(1 + endIP.UInt64() - startIP.UInt64())
+	i.Purpose = startIP.Purpose
+	ranges.ar = append(ranges.ar, i)
+}
+
+func appendRangesToIPList(subnet TestSubnet, ipAddresses *[]IP) {
+	for _, r := range subnet.FixedAddressRanges {
+		for v := r.startUint; v <= r.endUint; v++ {
+			ip := IPFromInt64(v)
+			ip.Purpose = r.Purpose
+			*ipAddresses = append(*ipAddresses, ip)
+		}
+	}
+}
+
+func (server *TestServer) subnetUnreservedIPRanges(subnet TestSubnet) []AddressRange {
+	// Make a sorted copy of subnet.InUseIPAddresses
+	ipAddresses := make([]IP, len(subnet.InUseIPAddresses))
+	copy(ipAddresses, subnet.InUseIPAddresses)
+	appendRangesToIPList(subnet, &ipAddresses)
+	sort.Sort(addressList(ipAddresses))
+
+	// We need the first and last address in the subnet
+	var ranges AddressRangeList
+	var startIP, endIP, lastUsableIP IP
+
+	_, ipNet, err := net.ParseCIDR(subnet.CIDR)
+	checkError(err)
+	startIP = IPFromNetIP(ipNet.IP)
+	// Start with the lowest usable address in the range, which is 1 above
+	// what net.ParseCIDR will give back.
+	startIP.SetUInt64(startIP.UInt64() + 1)
+
+	ones, bits := ipNet.Mask.Size()
+	set := ^((^uint64(0)) << uint(bits-ones))
+
+	// The last usable address is one below the broadcast address, which is
+	// what you get by bitwise ORing 'set' with any IP address in the subnet.
+	lastUsableIP.SetUInt64((startIP.UInt64() | set) - 1)
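+	// For example, with 10.0.0.0/24: ones=24, bits=32 and set=0xff,
+	// so startIP is 10.0.0.1 and lastUsableIP is 10.0.0.254.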
+
+	for _, endIP = range ipAddresses {
+		end := endIP.UInt64()
+
+		if endIP.UInt64() == startIP.UInt64() {
+			if endIP.UInt64() != lastUsableIP.UInt64() {
+				startIP.SetUInt64(end + 1)
+			}
+			continue
+		}
+
+		if end == lastUsableIP.UInt64() {
+			continue
+		}
+
+		ranges.Append(startIP, IPFromInt64(end-1))
+		startIP.SetUInt64(end + 1)
+	}
+
+	if startIP.UInt64() != lastUsableIP.UInt64() {
+		ranges.Append(startIP, lastUsableIP)
+	}
+
+	return ranges.ar
+}
+
+func (server *TestServer) subnetReservedIPRanges(subnet TestSubnet) []AddressRange {
+	var ranges AddressRangeList
+	var startIP, thisIP IP
+
+	// Make a sorted copy of subnet.InUseIPAddresses
+	ipAddresses := make([]IP, len(subnet.InUseIPAddresses))
+	copy(ipAddresses, subnet.InUseIPAddresses)
+	appendRangesToIPList(subnet, &ipAddresses)
+	sort.Sort(addressList(ipAddresses))
+	if len(ipAddresses) == 0 {
+		ar := ranges.ar
+		if ar == nil {
+			ar = []AddressRange{}
+		}
+		return ar
+	}
+
+	startIP = ipAddresses[0]
+	lastIP := ipAddresses[0]
+	for _, thisIP = range ipAddresses {
+		var purposeMismatch bool
+		for i, p := range thisIP.Purpose {
+			if startIP.Purpose[i] != p {
+				purposeMismatch = true
+			}
+		}
+		if (thisIP.UInt64() != lastIP.UInt64() && thisIP.UInt64() != lastIP.UInt64()+1) || purposeMismatch {
+			ranges.Append(startIP, lastIP)
+			startIP = thisIP
+		}
+		lastIP = thisIP
+	}
+
+	if len(ranges.ar) == 0 || ranges.ar[len(ranges.ar)-1].endUint != lastIP.UInt64() {
+		ranges.Append(startIP, lastIP)
+	}
+
+	return ranges.ar
+}
+
+// SubnetStats holds statistics about a subnet
+type SubnetStats struct {
+	NumAvailable     uint           `json:"num_available"`
+	LargestAvailable uint           `json:"largest_available"`
+	NumUnavailable   uint           `json:"num_unavailable"`
+	TotalAddresses   uint           `json:"total_addresses"`
+	Usage            float32        `json:"usage"`
+	UsageString      string         `json:"usage_string"`
+	Ranges           []AddressRange `json:"ranges"`
+}
+
+func (server *TestServer) subnetStatistics(subnet TestSubnet, includeRanges bool) SubnetStats {
+	var stats SubnetStats
+	_, ipNet, err := net.ParseCIDR(subnet.CIDR)
+	checkError(err)
+
+	ones, bits := ipNet.Mask.Size()
+	stats.TotalAddresses = (1 << uint(bits-ones)) - 2
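+	// e.g. a /24 has 2^8 addresses; subtracting the network and
+	// broadcast addresses leaves 254.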
+	stats.NumUnavailable = uint(len(subnet.InUseIPAddresses))
+	stats.NumAvailable = stats.TotalAddresses - stats.NumUnavailable
+	stats.Usage = float32(stats.NumUnavailable) / float32(stats.TotalAddresses)
+	stats.UsageString = fmt.Sprintf("%0.1f%%", stats.Usage*100)
+
+	// Calculate stats.LargestAvailable - the largest contiguous block of IP addresses available
+	unreserved := server.subnetUnreservedIPRanges(subnet)
+	for _, addressRange := range unreserved {
+		if addressRange.NumAddresses > stats.LargestAvailable {
+			stats.LargestAvailable = addressRange.NumAddresses
+		}
+	}
+
+	if includeRanges {
+		stats.Ranges = unreserved
+	}
+
+	return stats
+}
+
+func decodePostedSubnet(subnetJSON io.Reader) CreateSubnet {
+	var postedSubnet CreateSubnet
+	decoder := json.NewDecoder(subnetJSON)
+	err := decoder.Decode(&postedSubnet)
+	checkError(err)
+	if postedSubnet.DNSServers == nil {
+		postedSubnet.DNSServers = []string{}
+	}
+	return postedSubnet
+}
+
+// UpdateSubnet updates an existing subnet in the test server
+func (server *TestServer) UpdateSubnet(subnetJSON io.Reader) TestSubnet {
+	postedSubnet := decodePostedSubnet(subnetJSON)
+	updatedSubnet := subnetFromCreateSubnet(postedSubnet)
+	server.subnets[updatedSubnet.ID] = updatedSubnet
+	return updatedSubnet
+}
+
+// NewSubnet creates a subnet in the test server
+func (server *TestServer) NewSubnet(subnetJSON io.Reader) *TestSubnet {
+	postedSubnet := decodePostedSubnet(subnetJSON)
+	newSubnet := subnetFromCreateSubnet(postedSubnet)
+	newSubnet.ID = server.nextSubnet
+	server.subnets[server.nextSubnet] = newSubnet
+	server.subnetNameToID[newSubnet.Name] = newSubnet.ID
+
+	server.nextSubnet++
+	return &newSubnet
+}
+
+// NodeNetworkInterface represents a network interface attached to a node
+type NodeNetworkInterface struct {
+	Name  string        `json:"name"`
+	Links []NetworkLink `json:"links"`
+}
+
+// Node represents a node
+type Node struct {
+	SystemID   string                 `json:"system_id"`
+	Interfaces []NodeNetworkInterface `json:"interface_set"`
+}
+
+// NetworkLink represents a MAAS network link
+type NetworkLink struct {
+	ID     uint        `json:"id"`
+	Mode   string      `json:"mode"`
+	Subnet *TestSubnet `json:"subnet"`
+}
+
+// SetNodeNetworkLink records that the given node + interface are in subnet
+func (server *TestServer) SetNodeNetworkLink(SystemID string, nodeNetworkInterface NodeNetworkInterface) {
+	for i, ni := range server.nodeMetadata[SystemID].Interfaces {
+		if ni.Name == nodeNetworkInterface.Name {
+			server.nodeMetadata[SystemID].Interfaces[i] = nodeNetworkInterface
+			return
+		}
+	}
+	n := server.nodeMetadata[SystemID]
+	n.Interfaces = append(n.Interfaces, nodeNetworkInterface)
+	server.nodeMetadata[SystemID] = n
+}
+
+// subnetFromCreateSubnet builds a TestSubnet from a posted CreateSubnet
+func subnetFromCreateSubnet(postedSubnet CreateSubnet) TestSubnet {
+	var newSubnet TestSubnet
+	newSubnet.DNSServers = postedSubnet.DNSServers
+	newSubnet.Name = postedSubnet.Name
+	newSubnet.Space = postedSubnet.Space
+	//TODO: newSubnet.VLAN = server.postedSubnetVLAN
+	newSubnet.GatewayIP = postedSubnet.GatewayIP
+	newSubnet.CIDR = postedSubnet.CIDR
+	newSubnet.ID = postedSubnet.ID
+	return newSubnet
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/testservice_utils.go b/automation/vendor/github.com/juju/gomaasapi/testservice_utils.go
new file mode 100644
index 0000000..8f941f1
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/testservice_utils.go
@@ -0,0 +1,119 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"errors"
+	"net"
+	"net/http"
+	"strconv"
+)
+
+// NameOrIDToID takes a string that contains either an integer ID or the
+// name of a thing. It returns the corresponding integer ID, or an error if
+// the value cannot be resolved or falls outside [minID, maxID].
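+//
+// An illustrative call (values assumed for this sketch):
+//
+//	id, err := NameOrIDToID("2", map[string]uint{"foo": 1}, 1, 10)
+//	// id == 2, err == nil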
+func NameOrIDToID(v string, nameToID map[string]uint, minID, maxID uint) (ID uint, err error) {
+	ID, ok := nameToID[v]
+	if !ok {
+		intID, err := strconv.Atoi(v)
+		if err != nil {
+			return 0, err
+		}
+		ID = uint(intID)
+	}
+
+	if ID < minID || ID > maxID {
+		return 0, errors.New("ID out of range")
+	}
+
+	return ID, nil
+}
+
+// IP is an enhanced net.IP
+type IP struct {
+	netIP   net.IP
+	Purpose []string
+}
+
+// IPFromNetIP creates a IP from a net.IP.
+func IPFromNetIP(netIP net.IP) IP {
+	var ip IP
+	ip.netIP = netIP
+	return ip
+}
+
+// IPFromString creates a new IP from a string IP address representation
+func IPFromString(v string) IP {
+	return IPFromNetIP(net.ParseIP(v))
+}
+
+// IPFromInt64 creates a new IP from a uint64 IP address representation
+func IPFromInt64(v uint64) IP {
+	var ip IP
+	ip.SetUInt64(v)
+	return ip
+}
+
+// To4 converts the IPv4 address ip to a 4-byte representation. If ip is not
+// an IPv4 address, To4 returns nil.
+func (ip IP) To4() net.IP {
+	return ip.netIP.To4()
+}
+
+// To16 converts the IP address ip to a 16-byte representation. If ip is not
+// an IP address (it is the wrong length), To16 returns nil.
+func (ip IP) To16() net.IP {
+	return ip.netIP.To16()
+}
+
+func (ip IP) String() string {
+	return ip.netIP.String()
+}
+
+// UInt64 returns a uint64 holding the IP address
+func (ip IP) UInt64() uint64 {
+	if len(ip.netIP) == 0 {
+		return uint64(0)
+	}
+
+	if ip.To4() != nil {
+		return uint64(binary.BigEndian.Uint32([]byte(ip.To4())))
+	}
+
+	return binary.BigEndian.Uint64([]byte(ip.To16()))
+}
+
+// SetUInt64 sets the IP value to v
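+//
+// Illustrative round trip (addresses assumed for this sketch):
+//
+//	ip := IPFromString("10.0.0.1")
+//	ip.SetUInt64(ip.UInt64() + 1) // ip now represents 10.0.0.2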
+func (ip *IP) SetUInt64(v uint64) {
+	if len(ip.netIP) == 0 {
+		// If we don't have allocated storage, make an educated guess
+		// as to whether the address we received is IPv4 or IPv6.
+		if v == (v & 0x00000000ffffFFFF) {
+			// Guessing IPv4
+			ip.netIP = net.ParseIP("0.0.0.0")
+		} else {
+			ip.netIP = net.ParseIP("2001:4860:0:2001::68")
+		}
+	}
+
+	bb := new(bytes.Buffer)
+	var first int
+	if ip.To4() != nil {
+		binary.Write(bb, binary.BigEndian, uint32(v))
+		first = len(ip.netIP) - 4
+	} else {
+		binary.Write(bb, binary.BigEndian, v)
+	}
+	copy(ip.netIP[first:], bb.Bytes())
+}
+
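+// PrettyJsonWriter marshals thing to indented JSON and writes it to w.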
+func PrettyJsonWriter(thing interface{}, w http.ResponseWriter) {
+	var out bytes.Buffer
+	b, err := json.MarshalIndent(thing, "", "  ")
+	checkError(err)
+	out.Write(b)
+	out.WriteTo(w)
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/testservice_vlan.go b/automation/vendor/github.com/juju/gomaasapi/testservice_vlan.go
new file mode 100644
index 0000000..e81eaaa
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/testservice_vlan.go
@@ -0,0 +1,33 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func getVLANsEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/vlans/", version)
+}
+
+// TestVLAN is the MAAS API VLAN representation
+type TestVLAN struct {
+	Name   string `json:"name"`
+	Fabric string `json:"fabric"`
+	VID    uint   `json:"vid"`
+
+	ResourceURI string `json:"resource_uri"`
+	ID          uint   `json:"id"`
+}
+
+// PostedVLAN is the MAAS API posted VLAN representation
+type PostedVLAN struct {
+	Name string `json:"name"`
+	VID  uint   `json:"vid"`
+}
+
+func vlansHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	//TODO
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/urlparams.go b/automation/vendor/github.com/juju/gomaasapi/urlparams.go
new file mode 100644
index 0000000..a6bab6e
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/urlparams.go
@@ -0,0 +1,48 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// URLParams wraps url.Values to easily add values while skipping empty ones.
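+//
+// Illustrative use (names and values assumed for this sketch):
+//
+//	params := NewURLParams()
+//	params.MaybeAdd("name", "node-1") // added
+//	params.MaybeAdd("zone", "")       // skipped: empty value
+//	params.MaybeAddInt("count", 0)    // skipped: zero value
+//	query := params.Values.Encode()   // "name=node-1"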
+type URLParams struct {
+	Values url.Values
+}
+
+// NewURLParams allocates a new URLParams type.
+func NewURLParams() *URLParams {
+	return &URLParams{Values: make(url.Values)}
+}
+
+// MaybeAdd adds the (name, value) pair iff value is not empty.
+func (p *URLParams) MaybeAdd(name, value string) {
+	if value != "" {
+		p.Values.Add(name, value)
+	}
+}
+
+// MaybeAddInt adds the (name, value) pair iff value is not zero.
+func (p *URLParams) MaybeAddInt(name string, value int) {
+	if value != 0 {
+		p.Values.Add(name, fmt.Sprint(value))
+	}
+}
+
+// MaybeAddBool adds the (name, value) pair iff value is true.
+func (p *URLParams) MaybeAddBool(name string, value bool) {
+	if value {
+		p.Values.Add(name, fmt.Sprint(value))
+	}
+}
+
+// MaybeAddMany adds the (name, value) for each value in values iff
+// value is not empty.
+func (p *URLParams) MaybeAddMany(name string, values []string) {
+	for _, value := range values {
+		p.MaybeAdd(name, value)
+	}
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/util.go b/automation/vendor/github.com/juju/gomaasapi/util.go
new file mode 100644
index 0000000..3f95ac9
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/util.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"strings"
+)
+
+// JoinURLs joins a base URL and a subpath together.
+// Regardless of whether baseURL ends in a trailing slash (or even multiple
+// trailing slashes), or whether there are any leading slashes at the beginning
+// of path, the two will always be joined together by a single slash.
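+//
+// For example (inputs assumed): JoinURLs("http://maas/", "/api/2.0/")
+// returns "http://maas/api/2.0/".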
+func JoinURLs(baseURL, path string) string {
+	return strings.TrimRight(baseURL, "/") + "/" + strings.TrimLeft(path, "/")
+}
+
+// EnsureTrailingSlash appends a slash at the end of the given string unless
+// there already is one.
+// This is used to create the kind of normalized URLs that Django expects.
+// (This avoids Django's redirection when a URL does not end with a slash.)
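+//
+// For example (input assumed): EnsureTrailingSlash("http://maas/api")
+// returns "http://maas/api/".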
+func EnsureTrailingSlash(URL string) string {
+	if strings.HasSuffix(URL, "/") {
+		return URL
+	}
+	return URL + "/"
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/vlan.go b/automation/vendor/github.com/juju/gomaasapi/vlan.go
new file mode 100644
index 0000000..c509d42
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/vlan.go
@@ -0,0 +1,154 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type vlan struct {
+	// Add the controller in when we need to do things with the vlan.
+	// controller Controller
+
+	resourceURI string
+
+	id     int
+	name   string
+	fabric string
+
+	vid  int
+	mtu  int
+	dhcp bool
+
+	primaryRack   string
+	secondaryRack string
+}
+
+// ID implements VLAN.
+func (v *vlan) ID() int {
+	return v.id
+}
+
+// Name implements VLAN.
+func (v *vlan) Name() string {
+	return v.name
+}
+
+// Fabric implements VLAN.
+func (v *vlan) Fabric() string {
+	return v.fabric
+}
+
+// VID implements VLAN.
+func (v *vlan) VID() int {
+	return v.vid
+}
+
+// MTU implements VLAN.
+func (v *vlan) MTU() int {
+	return v.mtu
+}
+
+// DHCP implements VLAN.
+func (v *vlan) DHCP() bool {
+	return v.dhcp
+}
+
+// PrimaryRack implements VLAN.
+func (v *vlan) PrimaryRack() string {
+	return v.primaryRack
+}
+
+// SecondaryRack implements VLAN.
+func (v *vlan) SecondaryRack() string {
+	return v.secondaryRack
+}
+
+func readVLANs(controllerVersion version.Number, source interface{}) ([]*vlan, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "vlan base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
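+	// Choose the newest deserialisation func whose version does not
+	// exceed the controller's version.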
+	for v := range vlanDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no vlan read func for version %s", controllerVersion)
+	}
+	readFunc := vlanDeserializationFuncs[deserialisationVersion]
+	return readVLANList(valid, readFunc)
+}
+
+func readVLANList(sourceList []interface{}, readFunc vlanDeserializationFunc) ([]*vlan, error) {
+	result := make([]*vlan, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for vlan %d, %T", i, value)
+		}
+		vlan, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "vlan %d", i)
+		}
+		result = append(result, vlan)
+	}
+	return result, nil
+}
+
+type vlanDeserializationFunc func(map[string]interface{}) (*vlan, error)
+
+var vlanDeserializationFuncs = map[version.Number]vlanDeserializationFunc{
+	twoDotOh: vlan_2_0,
+}
+
+func vlan_2_0(source map[string]interface{}) (*vlan, error) {
+	fields := schema.Fields{
+		"id":           schema.ForceInt(),
+		"resource_uri": schema.String(),
+		"name":         schema.OneOf(schema.Nil(""), schema.String()),
+		"fabric":       schema.String(),
+		"vid":          schema.ForceInt(),
+		"mtu":          schema.ForceInt(),
+		"dhcp_on":      schema.Bool(),
+		// racks are not always set.
+		"primary_rack":   schema.OneOf(schema.Nil(""), schema.String()),
+		"secondary_rack": schema.OneOf(schema.Nil(""), schema.String()),
+	}
+	checker := schema.FieldMap(fields, nil)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "vlan 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	// Since the primary and secondary racks are optional, we use the
+	// two-part cast assignment. If the cast fails, we get the zero value
+	// we care about, which is the empty string.
+	primary_rack, _ := valid["primary_rack"].(string)
+	secondary_rack, _ := valid["secondary_rack"].(string)
+	name, _ := valid["name"].(string)
+
+	result := &vlan{
+		resourceURI:   valid["resource_uri"].(string),
+		id:            valid["id"].(int),
+		name:          name,
+		fabric:        valid["fabric"].(string),
+		vid:           valid["vid"].(int),
+		mtu:           valid["mtu"].(int),
+		dhcp:          valid["dhcp_on"].(bool),
+		primaryRack:   primary_rack,
+		secondaryRack: secondary_rack,
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/zone.go b/automation/vendor/github.com/juju/gomaasapi/zone.go
new file mode 100644
index 0000000..6f10cb4
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/zone.go
@@ -0,0 +1,97 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type zone struct {
+	// Add the controller in when we need to do things with the zone.
+	// controller Controller
+
+	resourceURI string
+
+	name        string
+	description string
+}
+
+// Name implements Zone.
+func (z *zone) Name() string {
+	return z.name
+}
+
+// Description implements Zone.
+func (z *zone) Description() string {
+	return z.description
+}
+
+func readZones(controllerVersion version.Number, source interface{}) ([]*zone, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "zone base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
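+	// Choose the newest deserialisation func whose version does not
+	// exceed the controller's version.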
+	for v := range zoneDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no zone read func for version %s", controllerVersion)
+	}
+	readFunc := zoneDeserializationFuncs[deserialisationVersion]
+	return readZoneList(valid, readFunc)
+}
+
+// readZoneList expects the values of the sourceList to be string maps.
+func readZoneList(sourceList []interface{}, readFunc zoneDeserializationFunc) ([]*zone, error) {
+	result := make([]*zone, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for zone %d, %T", i, value)
+		}
+		zone, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "zone %d", i)
+		}
+		result = append(result, zone)
+	}
+	return result, nil
+}
+
+type zoneDeserializationFunc func(map[string]interface{}) (*zone, error)
+
+var zoneDeserializationFuncs = map[version.Number]zoneDeserializationFunc{
+	twoDotOh: zone_2_0,
+}
+
+func zone_2_0(source map[string]interface{}) (*zone, error) {
+	fields := schema.Fields{
+		"name":         schema.String(),
+		"description":  schema.String(),
+		"resource_uri": schema.String(),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "zone 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	result := &zone{
+		name:        valid["name"].(string),
+		description: valid["description"].(string),
+		resourceURI: valid["resource_uri"].(string),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/loggo/LICENSE b/automation/vendor/github.com/juju/loggo/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/loggo/Makefile b/automation/vendor/github.com/juju/loggo/Makefile
new file mode 100644
index 0000000..89afa49
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/Makefile
@@ -0,0 +1,11 @@
+default: check
+
+check:
+	go test
+
+docs:
+	godoc2md github.com/juju/loggo > README.md
+	sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/loggo?status.svg)](https://godoc.org/github.com/juju/loggo)|' README.md 
+
+
+.PHONY: default check docs
diff --git a/automation/vendor/github.com/juju/loggo/README.md b/automation/vendor/github.com/juju/loggo/README.md
new file mode 100644
index 0000000..a73a9db
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/README.md
@@ -0,0 +1,711 @@
+
+# loggo
+    import "github.com/juju/loggo"
+
+[![GoDoc](https://godoc.org/github.com/juju/loggo?status.svg)](https://godoc.org/github.com/juju/loggo)
+
+### Module level logging for Go
+This package provides an alternative to the standard library log package.
+
+The actual logging functions never return errors.  If you are logging
+something, you really don't want to be worried about the logging
+having trouble.
+
+Modules have names that are defined by dotted strings.
+
+
+	"first.second.third"
+
+There is a root module that has the name `""`.  Each module
+(except the root module) has a parent, identified by the part of
+the name without the last dotted value.
+* the parent of "first.second.third" is "first.second"
+* the parent of "first.second" is "first"
+* the parent of "first" is "" (the root module)
+
+Each module can specify its own severity level.  Logging calls that are of
+a lower severity than the module's effective severity level are not written
+out.
+
+Loggers are created using the GetLogger function.
+
+
+	logger := loggo.GetLogger("foo.bar")
+
+By default there is one writer registered, which will write to Stderr,
+and the root module is set to emit only warnings and above.
+If you want to continue using the default
+logger, but have it emit all logging levels, you need to do the following.
+
+
+	writer, err := loggo.RemoveWriter("default")
+	// err is non-nil if and only if the name isn't found.
+	loggo.RegisterWriter("default", writer)
+
+
+
+
+## Constants
+``` go
+const DefaultWriterName = "default"
+```
+DefaultWriterName is the name of the default writer for
+a Context.
+
+
+## Variables
+``` go
+var (
+    // SeverityColor defines the colors for the levels output by the ColorWriter.
+    SeverityColor = map[Level]*ansiterm.Context{
+        TRACE:   ansiterm.Foreground(ansiterm.Default),
+        DEBUG:   ansiterm.Foreground(ansiterm.Green),
+        INFO:    ansiterm.Foreground(ansiterm.BrightBlue),
+        WARNING: ansiterm.Foreground(ansiterm.Yellow),
+        ERROR:   ansiterm.Foreground(ansiterm.BrightRed),
+        CRITICAL: &ansiterm.Context{
+            Foreground: ansiterm.White,
+            Background: ansiterm.Red,
+        },
+    }
+    // LocationColor defines the colors for the location output by the ColorWriter.
+    LocationColor = ansiterm.Foreground(ansiterm.BrightBlue)
+)
+```
+``` go
+var TimeFormat = initTimeFormat()
+```
+TimeFormat is the time format used for the default writer.
+This can be set with the environment variable LOGGO_TIME_FORMAT.
+
+
+## func ConfigureLoggers
+``` go
+func ConfigureLoggers(specification string) error
+```
+ConfigureLoggers configures loggers according to the given string
+specification, which specifies a set of modules and their associated
+logging levels.  Loggers are colon- or semicolon-separated; each
+module is specified as <modulename>=<level>.  White space outside of
+module names and levels is ignored.  The root module is specified
+with the name "<root>".
+
+An example specification:
+
+
+	`<root>=ERROR; foo.bar=WARNING`
+
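+A minimal call using that specification (illustrative):
+
+``` go
+if err := loggo.ConfigureLoggers(`<root>=ERROR; foo.bar=WARNING`); err != nil {
+    // handle the invalid specification
+}
+```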
+
+## func DefaultFormatter
+``` go
+func DefaultFormatter(entry Entry) string
+```
+DefaultFormatter returns the parameters separated by spaces except for
+filename and line, which are separated by a colon.  The timestamp is shown
+to second resolution in UTC. For example:
+
+
+	2016-07-02 15:04:05
+
+
+## func LoggerInfo
+``` go
+func LoggerInfo() string
+```
+LoggerInfo returns information about the configured loggers and their
+logging levels. The information is returned in the format expected by
+ConfigureLoggers. Loggers with UNSPECIFIED level will not
+be included.
+
+
+## func RegisterWriter
+``` go
+func RegisterWriter(name string, writer Writer) error
+```
+RegisterWriter adds the writer to the list of writers in the DefaultContext
+that get notified when logging.  If there is already a registered writer
+with that name, an error is returned.
+
+
+## func ResetLogging
+``` go
+func ResetLogging()
+```
+ResetLogging iterates through the known modules and sets the levels of all
+to UNSPECIFIED, except for <root> which is set to WARNING. The call also
+removes all writers in the DefaultContext and puts the original default
+writer back as the only writer.
+
+
+## func ResetWriters
+``` go
+func ResetWriters()
+```
+ResetWriters puts the list of writers back into the initial state.
+
+
+
+## type Config
+``` go
+type Config map[string]Level
+```
+Config is a mapping of logger module names to logging severity levels.
+
+
+
+
+
+
+
+
+
+### func ParseConfigString
+``` go
+func ParseConfigString(specification string) (Config, error)
+```
+ParseConfigString parses a logger configuration string into a map of logger
+names and their associated log level. This method is provided to allow
+other programs to pre-validate a configuration string rather than just
+calling ConfigureLoggers.
+
+Logging modules are colon- or semicolon-separated; each module is specified
+as <modulename>=<level>.  White space outside of module names and levels is
+ignored.  The root module is specified with the name "<root>".
+
+As a special case, a log level may be specified on its own.
+This is equivalent to specifying the level of the root module,
+so "DEBUG" is equivalent to `<root>=DEBUG`
+
+An example specification:
+
+
+	`<root>=ERROR; foo.bar=WARNING`
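+
+For instance (illustrative):
+
+``` go
+cfg, err := loggo.ParseConfigString(`<root>=ERROR; foo.bar=WARNING`)
+// err == nil; cfg[""] == loggo.ERROR, cfg["foo.bar"] == loggo.WARNING
+```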
+
+
+
+
+### func (Config) String
+``` go
+func (c Config) String() string
+```
+String returns a logger configuration string that may be parsed
+using ParseConfigString.
+
+
+
+## type Context
+``` go
+type Context struct {
+    // contains filtered or unexported fields
+}
+```
+Context produces loggers for a hierarchy of modules. The context holds
+a collection of hierarchical loggers and their writers.
+
+
+
+
+
+
+
+
+
+### func DefaultContext
+``` go
+func DefaultContext() *Context
+```
+DefaultContext returns the global default logging context.
+
+
+### func NewContext
+``` go
+func NewContext(rootLevel Level) *Context
+```
+NewContext returns a new Context with no writers set.
+If the root level is UNSPECIFIED, WARNING is used.
+
+
+
+
+### func (\*Context) AddWriter
+``` go
+func (c *Context) AddWriter(name string, writer Writer) error
+```
+AddWriter adds a writer to the list to be called for each logging call.
+The name cannot be empty, and the writer cannot be nil. If a writer
+already exists with the specified name, an error is returned.
+
+
+
+### func (\*Context) ApplyConfig
+``` go
+func (c *Context) ApplyConfig(config Config)
+```
+ApplyConfig configures the logging modules according to the provided config.
+
+
+
+### func (\*Context) CompleteConfig
+``` go
+func (c *Context) CompleteConfig() Config
+```
+CompleteConfig returns all the loggers and their defined levels,
+even if that level is UNSPECIFIED.
+
+
+
+### func (\*Context) Config
+``` go
+func (c *Context) Config() Config
+```
+Config returns the current configuration of the Loggers. Loggers
+with UNSPECIFIED level will not be included.
+
+
+
+### func (\*Context) GetLogger
+``` go
+func (c *Context) GetLogger(name string) Logger
+```
+GetLogger returns a Logger for the given module name, creating it and
+its parents if necessary.
+
+
+
+### func (\*Context) RemoveWriter
+``` go
+func (c *Context) RemoveWriter(name string) (Writer, error)
+```
+RemoveWriter removes the specified writer. If a writer is not found with
+the specified name an error is returned. The writer that was removed is also
+returned.
+
+
+
+### func (\*Context) ReplaceWriter
+``` go
+func (c *Context) ReplaceWriter(name string, writer Writer) (Writer, error)
+```
+ReplaceWriter is a convenience method that does the equivalent of RemoveWriter
+followed by AddWriter with the same name. The replaced writer is returned.
+
+
+
+### func (\*Context) ResetLoggerLevels
+``` go
+func (c *Context) ResetLoggerLevels()
+```
+ResetLoggerLevels iterates through the known logging modules and sets the
+levels of all to UNSPECIFIED, except for <root> which is set to WARNING.
+
+
+
+### func (\*Context) ResetWriters
+``` go
+func (c *Context) ResetWriters()
+```
+ResetWriters is generally only used in testing and removes all the writers.
+
+
+
+## type Entry
+``` go
+type Entry struct {
+    // Level is the severity of the log message.
+    Level Level
+    // Module is the dotted module name from the logger.
+    Module string
+    // Filename is the full path of the file that logged the message.
+    Filename string
+    // Line is the line number of the Filename.
+    Line int
+    // Timestamp is when the log message was created.
+    Timestamp time.Time
+    // Message is the formatted string from the log call.
+    Message string
+}
+```
+Entry represents a single log message.
+
+
+
+
+
+
+
+
+
+
+
+## type Level
+``` go
+type Level uint32
+```
+Level holds a severity level.
+
+
+
+``` go
+const (
+    UNSPECIFIED Level = iota
+    TRACE
+    DEBUG
+    INFO
+    WARNING
+    ERROR
+    CRITICAL
+)
+```
+The severity levels. Higher values are considered more important.
+
+
+
+
+
+
+
+### func ParseLevel
+``` go
+func ParseLevel(level string) (Level, bool)
+```
+ParseLevel converts a string representation of a logging level to a
+Level. It returns the level and whether it was valid or not.
+
+
+
+
+### func (Level) Short
+``` go
+func (level Level) Short() string
+```
+Short returns a five character string to use in
+aligned logging output.
+
+
+
+### func (Level) String
+``` go
+func (level Level) String() string
+```
+String implements Stringer.
+
+
+
+## type Logger
+``` go
+type Logger struct {
+    // contains filtered or unexported fields
+}
+```
+A Logger represents a logging module. It has an associated logging
+level which can be changed; messages of lesser severity will
+be dropped. Loggers have a hierarchical relationship - see
+the package documentation.
+
+The zero Logger value is usable - any messages logged
+to it will be sent to the root Logger.
+
+
+
+
+
+
+
+
+
+### func GetLogger
+``` go
+func GetLogger(name string) Logger
+```
+GetLogger returns a Logger for the given module name,
+creating it and its parents if necessary.
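+
+For example (module name is a placeholder):
+
+``` go
+logger := loggo.GetLogger("maas.automation")
+logger.Infof("provisioning %d nodes", 3)
+```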
+
+
+
+
+### func (Logger) Criticalf
+``` go
+func (logger Logger) Criticalf(message string, args ...interface{})
+```
+Criticalf logs the printf-formatted message at critical level.
+
+
+
+### func (Logger) Debugf
+``` go
+func (logger Logger) Debugf(message string, args ...interface{})
+```
+Debugf logs the printf-formatted message at debug level.
+
+
+
+### func (Logger) EffectiveLogLevel
+``` go
+func (logger Logger) EffectiveLogLevel() Level
+```
+EffectiveLogLevel returns the effective min log level of
+the receiver - that is, messages with a lesser severity
+level will be discarded.
+
+If the log level of the receiver is unspecified,
+it will be taken from the effective log level of its
+parent.
+
+
+
+### func (Logger) Errorf
+``` go
+func (logger Logger) Errorf(message string, args ...interface{})
+```
+Errorf logs the printf-formatted message at error level.
+
+
+
+### func (Logger) Infof
+``` go
+func (logger Logger) Infof(message string, args ...interface{})
+```
+Infof logs the printf-formatted message at info level.
+
+
+
+### func (Logger) IsDebugEnabled
+``` go
+func (logger Logger) IsDebugEnabled() bool
+```
+IsDebugEnabled returns whether debugging is enabled
+at debug level.
+
+
+
+### func (Logger) IsErrorEnabled
+``` go
+func (logger Logger) IsErrorEnabled() bool
+```
+IsErrorEnabled returns whether logging is enabled
+at error level.
+
+
+
+### func (Logger) IsInfoEnabled
+``` go
+func (logger Logger) IsInfoEnabled() bool
+```
+IsInfoEnabled returns whether logging is enabled
+at info level.
+
+
+
+### func (Logger) IsLevelEnabled
+``` go
+func (logger Logger) IsLevelEnabled(level Level) bool
+```
+IsLevelEnabled returns whether logging is enabled
+for the given log level.
+
+
+
+### func (Logger) IsTraceEnabled
+``` go
+func (logger Logger) IsTraceEnabled() bool
+```
+IsTraceEnabled returns whether logging is enabled
+at trace level.
+
+
+
+### func (Logger) IsWarningEnabled
+``` go
+func (logger Logger) IsWarningEnabled() bool
+```
+IsWarningEnabled returns whether logging is enabled
+at warning level.
+
+
+
+### func (Logger) LogCallf
+``` go
+func (logger Logger) LogCallf(calldepth int, level Level, message string, args ...interface{})
+```
+LogCallf logs a printf-formatted message at the given level.
+The location of the call is indicated by the calldepth argument.
+A calldepth of 1 means the function that called this function.
+A message will be discarded if level is less than the
+effective log level of the logger.
+Note that the writers may also filter out messages that
+are less than their registered minimum severity level.
+
+
+
+### func (Logger) LogLevel
+``` go
+func (logger Logger) LogLevel() Level
+```
+LogLevel returns the configured min log level of the logger.
+
+
+
+### func (Logger) Logf
+``` go
+func (logger Logger) Logf(level Level, message string, args ...interface{})
+```
+Logf logs a printf-formatted message at the given level.
+A message will be discarded if level is less than the
+effective log level of the logger.
+Note that the writers may also filter out messages that
+are less than their registered minimum severity level.
+
+
+
+### func (Logger) Name
+``` go
+func (logger Logger) Name() string
+```
+Name returns the logger's module name.
+
+
+
+### func (Logger) SetLogLevel
+``` go
+func (logger Logger) SetLogLevel(level Level)
+```
+SetLogLevel sets the severity level of the given logger.
+The root logger cannot be set to UNSPECIFIED level.
+See EffectiveLogLevel for how this affects the
+actual messages logged.
+
+
+
+### func (Logger) Tracef
+``` go
+func (logger Logger) Tracef(message string, args ...interface{})
+```
+Tracef logs the printf-formatted message at trace level.
+
+
+
+### func (Logger) Warningf
+``` go
+func (logger Logger) Warningf(message string, args ...interface{})
+```
+Warningf logs the printf-formatted message at warning level.
+
+
+
+## type TestWriter
+``` go
+type TestWriter struct {
+    // contains filtered or unexported fields
+}
+```
+TestWriter is a useful Writer for testing purposes.  Each component of the
+logging message is stored in the Log array.
+
+
+
+
+
+
+
+
+
+
+
+### func (\*TestWriter) Clear
+``` go
+func (writer *TestWriter) Clear()
+```
+Clear removes any saved log messages.
+
+
+
+### func (\*TestWriter) Log
+``` go
+func (writer *TestWriter) Log() []Entry
+```
+Log returns a copy of the current logged values.
+
+
+
+### func (\*TestWriter) Write
+``` go
+func (writer *TestWriter) Write(entry Entry)
+```
+Write saves the entry by appending it to the Log array.
+
+
+
+## type Writer
+``` go
+type Writer interface {
+    // Write writes a message to the Writer with the given level and module
+    // name. The filename and line hold the file name and line number of the
+    // code that is generating the log message; the time stamp holds the time
+    // the log message was generated, and message holds the log message
+    // itself.
+    Write(entry Entry)
+}
+```
+Writer is implemented by any recipient of log messages.
+
+
+
+
+
+
+
+
+
+### func NewColorWriter
+``` go
+func NewColorWriter(writer io.Writer) Writer
+```
+NewColorWriter will write out colored severity levels if the writer is
+outputting to a terminal.
+
+
+### func NewMinimumLevelWriter
+``` go
+func NewMinimumLevelWriter(writer Writer, minLevel Level) Writer
+```
+NewMinimumLevelWriter returns a Writer that will only pass on the Write calls
+to the provided writer if the log level is at or above the specified
+minimum level.
+
+
+### func NewSimpleWriter
+``` go
+func NewSimpleWriter(writer io.Writer, formatter func(entry Entry) string) Writer
+```
+NewSimpleWriter returns a new writer that writes log messages to the given
+io.Writer formatting the messages with the given formatter.
+
+
+### func RemoveWriter
+``` go
+func RemoveWriter(name string) (Writer, error)
+```
+RemoveWriter removes the Writer identified by 'name' and returns it.
+If the Writer is not found, an error is returned.
+
+
+### func ReplaceDefaultWriter
+``` go
+func ReplaceDefaultWriter(writer Writer) (Writer, error)
+```
+ReplaceDefaultWriter is a convenience method that does the equivalent of
+RemoveWriter and then RegisterWriter with the name "default".  The previous
+default writer, if any, is returned.
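+
+A sketch that swaps in a new default writer and restores the previous one on
+exit:
+
+``` go
+package main
+
+import (
+	"os"
+
+	"github.com/juju/loggo"
+)
+
+func main() {
+	previous, err := loggo.ReplaceDefaultWriter(loggo.NewColorWriter(os.Stdout))
+	if err != nil {
+		panic(err)
+	}
+	defer loggo.ReplaceDefaultWriter(previous) // restore on exit
+
+	loggo.GetLogger("").Warningf("now written to stdout")
+}
+```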
+
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
diff --git a/automation/vendor/github.com/juju/loggo/config.go b/automation/vendor/github.com/juju/loggo/config.go
new file mode 100644
index 0000000..1b3eaa5
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/config.go
@@ -0,0 +1,96 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Config is a mapping of logger module names to logging severity levels.
+type Config map[string]Level
+
+// String returns a logger configuration string that may be parsed
+// using ParseConfigString.
+func (c Config) String() string {
+	if c == nil {
+		return ""
+	}
+	// output in alphabetical order.
+	names := []string{}
+	for name := range c {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	var entries []string
+	for _, name := range names {
+		level := c[name]
+		if name == "" {
+			name = rootString
+		}
+		entry := fmt.Sprintf("%s=%s", name, level)
+		entries = append(entries, entry)
+	}
+	return strings.Join(entries, ";")
+}
+
+func parseConfigValue(value string) (string, Level, error) {
+	pair := strings.SplitN(value, "=", 2)
+	if len(pair) < 2 {
+		return "", UNSPECIFIED, fmt.Errorf("config value expected '=', found %q", value)
+	}
+	name := strings.TrimSpace(pair[0])
+	if name == "" {
+		return "", UNSPECIFIED, fmt.Errorf("config value %q has missing module name", value)
+	}
+
+	levelStr := strings.TrimSpace(pair[1])
+	level, ok := ParseLevel(levelStr)
+	if !ok {
+		return "", UNSPECIFIED, fmt.Errorf("unknown severity level %q", levelStr)
+	}
+	if name == rootString {
+		name = ""
+	}
+	return name, level, nil
+}
+
+// ParseConfigString parses a logger configuration string into a map of logger
+// names and their associated log level. This method is provided to allow
+// other programs to pre-validate a configuration string rather than just
+// calling ConfigureLoggers.
+//
+// Logging modules are colon- or semicolon-separated; each module is specified
+// as <modulename>=<level>.  White space outside of module names and levels is
+// ignored.  The root module is specified with the name "<root>".
+//
+// As a special case, a log level may be specified on its own.
+// This is equivalent to specifying the level of the root module,
+// so "DEBUG" is equivalent to `<root>=DEBUG`
+//
+// An example specification:
+//	`<root>=ERROR; foo.bar=WARNING`
+func ParseConfigString(specification string) (Config, error) {
+	specification = strings.TrimSpace(specification)
+	if specification == "" {
+		return nil, nil
+	}
+	cfg := make(Config)
+	if level, ok := ParseLevel(specification); ok {
+		cfg[""] = level
+		return cfg, nil
+	}
+
+	values := strings.FieldsFunc(specification, func(r rune) bool { return r == ';' || r == ':' })
+	for _, value := range values {
+		name, level, err := parseConfigValue(value)
+		if err != nil {
+			return nil, err
+		}
+		cfg[name] = level
+	}
+	return cfg, nil
+}
diff --git a/automation/vendor/github.com/juju/loggo/context.go b/automation/vendor/github.com/juju/loggo/context.go
new file mode 100644
index 0000000..f5739d9
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/context.go
@@ -0,0 +1,198 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+// Context produces loggers for a hierarchy of modules. The context holds
+// a collection of hierarchical loggers and their writers.
+type Context struct {
+	root *module
+
+	// Perhaps have one mutex?
+	modulesMutex sync.Mutex
+	modules      map[string]*module
+
+	writersMutex sync.Mutex
+	writers      map[string]Writer
+
+	// writeMutex is used to serialise write operations.
+	writeMutex sync.Mutex
+}
+
+// NewContext returns a new Context with no writers set.
+// If the root level is UNSPECIFIED or otherwise invalid, WARNING is used.
+func NewContext(rootLevel Level) *Context {
+	if rootLevel < TRACE || rootLevel > CRITICAL {
+		rootLevel = WARNING
+	}
+	context := &Context{
+		modules: make(map[string]*module),
+		writers: make(map[string]Writer),
+	}
+	context.root = &module{
+		level:   rootLevel,
+		context: context,
+	}
+	context.modules[""] = context.root
+	return context
+}
+
+// GetLogger returns a Logger for the given module name, creating it and
+// its parents if necessary.
+func (c *Context) GetLogger(name string) Logger {
+	name = strings.TrimSpace(strings.ToLower(name))
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+	return Logger{c.getLoggerModule(name)}
+}
+
+func (c *Context) getLoggerModule(name string) *module {
+	if name == rootString {
+		name = ""
+	}
+	impl, found := c.modules[name]
+	if found {
+		return impl
+	}
+	parentName := ""
+	if i := strings.LastIndex(name, "."); i >= 0 {
+		parentName = name[0:i]
+	}
+	parent := c.getLoggerModule(parentName)
+	impl = &module{name, UNSPECIFIED, parent, c}
+	c.modules[name] = impl
+	return impl
+}
+
+// Config returns the current configuration of the Loggers. Loggers
+// with UNSPECIFIED level will not be included.
+func (c *Context) Config() Config {
+	result := make(Config)
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+
+	for name, module := range c.modules {
+		if module.level != UNSPECIFIED {
+			result[name] = module.level
+		}
+	}
+	return result
+}
+
+// CompleteConfig returns all the loggers and their defined levels,
+// even if that level is UNSPECIFIED.
+func (c *Context) CompleteConfig() Config {
+	result := make(Config)
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+
+	for name, module := range c.modules {
+		result[name] = module.level
+	}
+	return result
+}
+
+// ApplyConfig configures the logging modules according to the provided config.
+func (c *Context) ApplyConfig(config Config) {
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+	for name, level := range config {
+		module := c.getLoggerModule(name)
+		module.setLevel(level)
+	}
+}
+
+// ResetLoggerLevels iterates through the known logging modules and sets the
+// levels of all to UNSPECIFIED, except for <root> which is set to WARNING.
+func (c *Context) ResetLoggerLevels() {
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+	// Setting the root module to UNSPECIFIED will set it to WARNING.
+	for _, module := range c.modules {
+		module.setLevel(UNSPECIFIED)
+	}
+}
+
+func (c *Context) write(entry Entry) {
+	c.writeMutex.Lock()
+	defer c.writeMutex.Unlock()
+	for _, writer := range c.getWriters() {
+		writer.Write(entry)
+	}
+}
+
+func (c *Context) getWriters() []Writer {
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	var result []Writer
+	for _, writer := range c.writers {
+		result = append(result, writer)
+	}
+	return result
+}
+
+// AddWriter adds a writer to the list to be called for each logging call.
+// The name cannot be empty, and the writer cannot be nil. If an existing
+// writer exists with the specified name, an error is returned.
+func (c *Context) AddWriter(name string, writer Writer) error {
+	if name == "" {
+		return fmt.Errorf("name cannot be empty")
+	}
+	if writer == nil {
+		return fmt.Errorf("writer cannot be nil")
+	}
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	if _, found := c.writers[name]; found {
+		return fmt.Errorf("context already has a writer named %q", name)
+	}
+	c.writers[name] = writer
+	return nil
+}
+
+// RemoveWriter removes the specified writer. If no writer is found with
+// the specified name, an error is returned. The writer that was removed is
+// also returned.
+func (c *Context) RemoveWriter(name string) (Writer, error) {
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	reg, found := c.writers[name]
+	if !found {
+		return nil, fmt.Errorf("context has no writer named %q", name)
+	}
+	delete(c.writers, name)
+	return reg, nil
+}
+
+// ReplaceWriter is a convenience method that does the equivalent of RemoveWriter
+// followed by AddWriter with the same name. The replaced writer is returned.
+func (c *Context) ReplaceWriter(name string, writer Writer) (Writer, error) {
+	if name == "" {
+		return nil, fmt.Errorf("name cannot be empty")
+	}
+	if writer == nil {
+		return nil, fmt.Errorf("writer cannot be nil")
+	}
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	reg, found := c.writers[name]
+	if !found {
+		return nil, fmt.Errorf("context has no writer named %q", name)
+	}
+	oldWriter := reg
+	c.writers[name] = writer
+	return oldWriter, nil
+}
+
+// ResetWriters is generally only used in testing and removes all the writers.
+func (c *Context) ResetWriters() {
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	c.writers = make(map[string]Writer)
+}
diff --git a/automation/vendor/github.com/juju/loggo/dependencies.tsv b/automation/vendor/github.com/juju/loggo/dependencies.tsv
new file mode 100644
index 0000000..20daf32
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/dependencies.tsv
@@ -0,0 +1,5 @@
+github.com/juju/ansiterm	git	c368f42cb4b32a70389cded05c7345d9ccdce889	2016-08-17T02:52:20Z
+github.com/lunixbochs/vtclean	git	4fbf7632a2c6d3fbdb9931439bdbbeded02cbe36	2016-01-25T03:51:06Z
+github.com/mattn/go-colorable	git	ed8eb9e318d7a84ce5915b495b7d35e0cfe7b5a8	2016-07-31T23:54:17Z
+github.com/mattn/go-isatty	git	66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8	2016-08-06T12:27:52Z
+gopkg.in/check.v1	git	4f90aeace3a26ad7021961c297b22c42160c7b25	2016-01-05T16:49:36Z
diff --git a/automation/vendor/github.com/juju/loggo/doc.go b/automation/vendor/github.com/juju/loggo/doc.go
new file mode 100644
index 0000000..bb19cd5
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/doc.go
@@ -0,0 +1,42 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+/*
+[godoc-link-here]
+
+Module level logging for Go
+
+This package provides an alternative to the standard library log package.
+
+The actual logging functions never return errors.  If you are logging
+something, you really don't want to be worried about the logging
+having trouble.
+
+Modules have names that are defined by dotted strings.
+	"first.second.third"
+
+There is a root module that has the name `""`.  Each module
+(except the root module) has a parent, identified by the part of
+the name without the last dotted value.
+* the parent of "first.second.third" is "first.second"
+* the parent of "first.second" is "first"
+* the parent of "first" is "" (the root module)
+
+Each module can specify its own severity level.  Logging calls that are of
+a lower severity than the module's effective severity level are not written
+out.
+
+Loggers are created using the GetLogger function.
+	logger := loggo.GetLogger("foo.bar")
+
+By default there is one writer registered, which will write to Stderr,
+and the root module is set to emit only warnings and above.
+If you want to continue using the default
+writer, but have it emit all logging levels, you need to do the following.
+
+	writer, err := loggo.RemoveWriter("default")
+	// err is non-nil if and only if the name isn't found.
+	loggo.RegisterWriter("default", loggo.NewMinimumLevelWriter(writer, loggo.TRACE))
+
+*/
+package loggo
diff --git a/automation/vendor/github.com/juju/loggo/entry.go b/automation/vendor/github.com/juju/loggo/entry.go
new file mode 100644
index 0000000..b61aa54
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/entry.go
@@ -0,0 +1,22 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import "time"
+
+// Entry represents a single log message.
+type Entry struct {
+	// Level is the severity of the log message.
+	Level Level
+	// Module is the dotted module name from the logger.
+	Module string
+	// Filename is the full path of the file that logged the message.
+	Filename string
+	// Line is the line number of the Filename.
+	Line int
+	// Timestamp is when the log message was created.
+	Timestamp time.Time
+	// Message is the formatted string from the log call.
+	Message string
+}
diff --git a/automation/vendor/github.com/juju/loggo/formatter.go b/automation/vendor/github.com/juju/loggo/formatter.go
new file mode 100644
index 0000000..ef8aa7a
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/formatter.go
@@ -0,0 +1,38 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"time"
+)
+
+// DefaultFormatter returns the parameters separated by spaces, except for
+// filename and line, which are separated by a colon.  The timestamp is shown
+// to second resolution in UTC. For example:
+//   2016-07-02 15:04:05
+func DefaultFormatter(entry Entry) string {
+	ts := entry.Timestamp.In(time.UTC).Format("2006-01-02 15:04:05")
+	// Just get the basename from the filename
+	filename := filepath.Base(entry.Filename)
+	return fmt.Sprintf("%s %s %s %s:%d %s", ts, entry.Level, entry.Module, filename, entry.Line, entry.Message)
+}
+
+// TimeFormat is the time format used for the default writer.
+// This can be set with the environment variable LOGGO_TIME_FORMAT.
+var TimeFormat = initTimeFormat()
+
+func initTimeFormat() string {
+	format := os.Getenv("LOGGO_TIME_FORMAT")
+	if format != "" {
+		return format
+	}
+	return "15:04:05"
+}
+
+func formatTime(ts time.Time) string {
+	return ts.Format(TimeFormat)
+}
diff --git a/automation/vendor/github.com/juju/loggo/global.go b/automation/vendor/github.com/juju/loggo/global.go
new file mode 100644
index 0000000..7cf95ca
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/global.go
@@ -0,0 +1,85 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+var (
+	defaultContext = newDefaultContext()
+)
+
+func newDefaultContext() *Context {
+	ctx := NewContext(WARNING)
+	ctx.AddWriter(DefaultWriterName, defaultWriter())
+	return ctx
+}
+
+// DefaultContext returns the global default logging context.
+func DefaultContext() *Context {
+	return defaultContext
+}
+
+// LoggerInfo returns information about the configured loggers and their
+// logging levels. The information is returned in the format expected by
+// ConfigureLoggers. Loggers with UNSPECIFIED level will not
+// be included.
+func LoggerInfo() string {
+	return defaultContext.Config().String()
+}
+
+// GetLogger returns a Logger for the given module name,
+// creating it and its parents if necessary.
+func GetLogger(name string) Logger {
+	return defaultContext.GetLogger(name)
+}
+
+// ResetLogging iterates through the known modules and sets the levels of all
+// to UNSPECIFIED, except for <root> which is set to WARNING. The call also
+// removes all writers from the DefaultContext.
+func ResetLogging() {
+	defaultContext.ResetLoggerLevels()
+	defaultContext.ResetWriters()
+}
+
+// ResetWriters puts the list of writers back into the initial state.
+func ResetWriters() {
+	defaultContext.ResetWriters()
+}
+
+// ReplaceDefaultWriter is a convenience method that does the equivalent of
+// RemoveWriter and then RegisterWriter with the name "default".  The previous
+// default writer, if any, is returned.
+func ReplaceDefaultWriter(writer Writer) (Writer, error) {
+	return defaultContext.ReplaceWriter(DefaultWriterName, writer)
+}
+
+// RegisterWriter adds the writer to the list of writers in the DefaultContext
+// that get notified when logging.  If there is already a registered writer
+// with that name, an error is returned.
+func RegisterWriter(name string, writer Writer) error {
+	return defaultContext.AddWriter(name, writer)
+}
+
+// RemoveWriter removes the Writer identified by 'name' and returns it.
+// If the Writer is not found, an error is returned.
+func RemoveWriter(name string) (Writer, error) {
+	return defaultContext.RemoveWriter(name)
+}
+
+// ConfigureLoggers configures loggers according to the given string
+// specification, which specifies a set of modules and their associated
+// logging levels.  Loggers are colon- or semicolon-separated; each
+// module is specified as <modulename>=<level>.  White space outside of
+// module names and levels is ignored.  The root module is specified
+// with the name "<root>".
+//
+// An example specification:
+//	`<root>=ERROR; foo.bar=WARNING`
+func ConfigureLoggers(specification string) error {
+	config, err := ParseConfigString(specification)
+	if err != nil {
+		return err
+	}
+	defaultContext.ApplyConfig(config)
+	return nil
+}
diff --git a/automation/vendor/github.com/juju/loggo/level.go b/automation/vendor/github.com/juju/loggo/level.go
new file mode 100644
index 0000000..f6a5c4f
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/level.go
@@ -0,0 +1,102 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"strings"
+	"sync/atomic"
+)
+
+// The severity levels. Higher values are considered
+// more important.
+const (
+	UNSPECIFIED Level = iota
+	TRACE
+	DEBUG
+	INFO
+	WARNING
+	ERROR
+	CRITICAL
+)
+
+// Level holds a severity level.
+type Level uint32
+
+// ParseLevel converts a string representation of a logging level to a
+// Level. It returns the level and whether it was valid or not.
+func ParseLevel(level string) (Level, bool) {
+	level = strings.ToUpper(level)
+	switch level {
+	case "UNSPECIFIED":
+		return UNSPECIFIED, true
+	case "TRACE":
+		return TRACE, true
+	case "DEBUG":
+		return DEBUG, true
+	case "INFO":
+		return INFO, true
+	case "WARN", "WARNING":
+		return WARNING, true
+	case "ERROR":
+		return ERROR, true
+	case "CRITICAL":
+		return CRITICAL, true
+	default:
+		return UNSPECIFIED, false
+	}
+}
+
+// String implements Stringer.
+func (level Level) String() string {
+	switch level {
+	case UNSPECIFIED:
+		return "UNSPECIFIED"
+	case TRACE:
+		return "TRACE"
+	case DEBUG:
+		return "DEBUG"
+	case INFO:
+		return "INFO"
+	case WARNING:
+		return "WARNING"
+	case ERROR:
+		return "ERROR"
+	case CRITICAL:
+		return "CRITICAL"
+	default:
+		return "<unknown>"
+	}
+}
+
+// Short returns a five character string to use in
+// aligned logging output.
+func (level Level) Short() string {
+	switch level {
+	case TRACE:
+		return "TRACE"
+	case DEBUG:
+		return "DEBUG"
+	case INFO:
+		return "INFO "
+	case WARNING:
+		return "WARN "
+	case ERROR:
+		return "ERROR"
+	case CRITICAL:
+		return "CRITC"
+	default:
+		return "     "
+	}
+}
+
+// get atomically gets the value of the given level.
+func (level *Level) get() Level {
+	return Level(atomic.LoadUint32((*uint32)(level)))
+}
+
+// set atomically sets the value of the receiver
+// to the given level.
+func (level *Level) set(newLevel Level) {
+	atomic.StoreUint32((*uint32)(level), uint32(newLevel))
+}
diff --git a/automation/vendor/github.com/juju/loggo/logger.go b/automation/vendor/github.com/juju/loggo/logger.go
new file mode 100644
index 0000000..fbdfd9e
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/logger.go
@@ -0,0 +1,176 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"runtime"
+	"time"
+)
+
+// A Logger represents a logging module. It has an associated logging
+// level which can be changed; messages of lesser severity will
+// be dropped. Loggers have a hierarchical relationship - see
+// the package documentation.
+//
+// The zero Logger value is usable - any messages logged
+// to it will be sent to the root Logger.
+type Logger struct {
+	impl *module
+}
+
+func (logger Logger) getModule() *module {
+	if logger.impl == nil {
+		return defaultContext.root
+	}
+	return logger.impl
+}
+
+// Name returns the logger's module name.
+func (logger Logger) Name() string {
+	return logger.getModule().Name()
+}
+
+// LogLevel returns the configured min log level of the logger.
+func (logger Logger) LogLevel() Level {
+	return logger.getModule().level
+}
+
+// EffectiveLogLevel returns the effective min log level of
+// the receiver - that is, messages with a lesser severity
+// level will be discarded.
+//
+// If the log level of the receiver is unspecified,
+// it will be taken from the effective log level of its
+// parent.
+func (logger Logger) EffectiveLogLevel() Level {
+	return logger.getModule().getEffectiveLogLevel()
+}
+
+// SetLogLevel sets the severity level of the given logger.
+// The root logger cannot be set to UNSPECIFIED level.
+// See EffectiveLogLevel for how this affects the
+// actual messages logged.
+func (logger Logger) SetLogLevel(level Level) {
+	logger.getModule().setLevel(level)
+}
+
+// Logf logs a printf-formatted message at the given level.
+// A message will be discarded if level is less than
+// the effective log level of the logger.
+// Note that the writers may also filter out messages that
+// are less than their registered minimum severity level.
+func (logger Logger) Logf(level Level, message string, args ...interface{}) {
+	logger.LogCallf(2, level, message, args...)
+}
+
+// LogCallf logs a printf-formatted message at the given level.
+// The location of the call is indicated by the calldepth argument.
+// A calldepth of 1 means the function that called this function.
+// A message will be discarded if level is less than
+// the effective log level of the logger.
+// Note that the writers may also filter out messages that
+// are less than their registered minimum severity level.
+func (logger Logger) LogCallf(calldepth int, level Level, message string, args ...interface{}) {
+	module := logger.getModule()
+	if !module.willWrite(level) {
+		return
+	}
+	// Gather time, and filename, line number.
+	now := time.Now() // get this early.
+	// Param to Caller is the call depth.  Since this method is called from
+	// the Logger methods, we want the place that those were called from.
+	_, file, line, ok := runtime.Caller(calldepth + 1)
+	if !ok {
+		file = "???"
+		line = 0
+	}
+	// Trim newline off format string, following usual
+	// Go logging conventions.
+	if len(message) > 0 && message[len(message)-1] == '\n' {
+		message = message[0 : len(message)-1]
+	}
+
+	// To avoid having a proliferation of Info/Infof methods,
+	// only use Sprintf if there are any args, and rely on the
+	// `go vet` tool for the obvious cases where someone has forgotten
+	// to provide an arg.
+	formattedMessage := message
+	if len(args) > 0 {
+		formattedMessage = fmt.Sprintf(message, args...)
+	}
+	module.write(Entry{
+		Level:     level,
+		Filename:  file,
+		Line:      line,
+		Timestamp: now,
+		Message:   formattedMessage,
+	})
+}
+
+// Criticalf logs the printf-formatted message at critical level.
+func (logger Logger) Criticalf(message string, args ...interface{}) {
+	logger.Logf(CRITICAL, message, args...)
+}
+
+// Errorf logs the printf-formatted message at error level.
+func (logger Logger) Errorf(message string, args ...interface{}) {
+	logger.Logf(ERROR, message, args...)
+}
+
+// Warningf logs the printf-formatted message at warning level.
+func (logger Logger) Warningf(message string, args ...interface{}) {
+	logger.Logf(WARNING, message, args...)
+}
+
+// Infof logs the printf-formatted message at info level.
+func (logger Logger) Infof(message string, args ...interface{}) {
+	logger.Logf(INFO, message, args...)
+}
+
+// Debugf logs the printf-formatted message at debug level.
+func (logger Logger) Debugf(message string, args ...interface{}) {
+	logger.Logf(DEBUG, message, args...)
+}
+
+// Tracef logs the printf-formatted message at trace level.
+func (logger Logger) Tracef(message string, args ...interface{}) {
+	logger.Logf(TRACE, message, args...)
+}
+
+// IsLevelEnabled returns whether debugging is enabled
+// for the given log level.
+func (logger Logger) IsLevelEnabled(level Level) bool {
+	return logger.getModule().willWrite(level)
+}
+
+// IsErrorEnabled returns whether debugging is enabled
+// at error level.
+func (logger Logger) IsErrorEnabled() bool {
+	return logger.IsLevelEnabled(ERROR)
+}
+
+// IsWarningEnabled returns whether debugging is enabled
+// at warning level.
+func (logger Logger) IsWarningEnabled() bool {
+	return logger.IsLevelEnabled(WARNING)
+}
+
+// IsInfoEnabled returns whether debugging is enabled
+// at info level.
+func (logger Logger) IsInfoEnabled() bool {
+	return logger.IsLevelEnabled(INFO)
+}
+
+// IsDebugEnabled returns whether debugging is enabled
+// at debug level.
+func (logger Logger) IsDebugEnabled() bool {
+	return logger.IsLevelEnabled(DEBUG)
+}
+
+// IsTraceEnabled returns whether debugging is enabled
+// at trace level.
+func (logger Logger) IsTraceEnabled() bool {
+	return logger.IsLevelEnabled(TRACE)
+}
diff --git a/automation/vendor/github.com/juju/loggo/module.go b/automation/vendor/github.com/juju/loggo/module.go
new file mode 100644
index 0000000..8153be5
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/module.go
@@ -0,0 +1,61 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+// Do not change the root module's name: the lookup logic assumes it is "".
+const (
+	rootString       = "<root>"
+	defaultRootLevel = WARNING
+	defaultLevel     = UNSPECIFIED
+)
+
+type module struct {
+	name    string
+	level   Level
+	parent  *module
+	context *Context
+}
+
+// Name returns the module's name.
+func (module *module) Name() string {
+	if module.name == "" {
+		return rootString
+	}
+	return module.name
+}
+
+func (m *module) willWrite(level Level) bool {
+	if level < TRACE || level > CRITICAL {
+		return false
+	}
+	return level >= m.getEffectiveLogLevel()
+}
+
+func (module *module) getEffectiveLogLevel() Level {
+	// Note: the root module is guaranteed to have a
+	// specified logging level, so acts as a suitable sentinel
+	// for this loop.
+	for {
+		if level := module.level.get(); level != UNSPECIFIED {
+			return level
+		}
+		module = module.parent
+	}
+	panic("unreachable")
+}
+
+// setLevel sets the severity level of the given module.
+// The root module cannot be set to UNSPECIFIED level.
+func (module *module) setLevel(level Level) {
+	// The root module can't be unspecified.
+	if module.name == "" && level == UNSPECIFIED {
+		level = WARNING
+	}
+	module.level.set(level)
+}
+
+func (m *module) write(entry Entry) {
+	entry.Module = m.name
+	m.context.write(entry)
+}
diff --git a/automation/vendor/github.com/juju/loggo/testwriter.go b/automation/vendor/github.com/juju/loggo/testwriter.go
new file mode 100644
index 0000000..b20e470
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/testwriter.go
@@ -0,0 +1,40 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"path"
+	"sync"
+)
+
+// TestWriter is a useful Writer for testing purposes.  Each component of the
+// logging message is stored in the Log array.
+type TestWriter struct {
+	mu  sync.Mutex
+	log []Entry
+}
+
+// Write appends the entry to the writer's log, reducing Filename to its base name.
+func (writer *TestWriter) Write(entry Entry) {
+	writer.mu.Lock()
+	defer writer.mu.Unlock()
+	entry.Filename = path.Base(entry.Filename)
+	writer.log = append(writer.log, entry)
+}
+
+// Clear removes any saved log messages.
+func (writer *TestWriter) Clear() {
+	writer.mu.Lock()
+	defer writer.mu.Unlock()
+	writer.log = nil
+}
+
+// Log returns a copy of the current logged values.
+func (writer *TestWriter) Log() []Entry {
+	writer.mu.Lock()
+	defer writer.mu.Unlock()
+	v := make([]Entry, len(writer.log))
+	copy(v, writer.log)
+	return v
+}
diff --git a/automation/vendor/github.com/juju/loggo/writer.go b/automation/vendor/github.com/juju/loggo/writer.go
new file mode 100644
index 0000000..b3fe3e5
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/writer.go
@@ -0,0 +1,113 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/juju/ansiterm"
+)
+
+// DefaultWriterName is the name of the default writer for
+// a Context.
+const DefaultWriterName = "default"
+
+// Writer is implemented by any recipient of log messages.
+type Writer interface {
+	// Write writes a message to the Writer with the given level and module
+	// name. The filename and line hold the file name and line number of the
+	// code that is generating the log message; the time stamp holds the time
+	// the log message was generated, and message holds the log message
+	// itself.
+	Write(entry Entry)
+}
+
+// NewMinimumLevelWriter returns a Writer that will only pass on the Write calls
+// to the provided writer if the log level is at or above the specified
+// minimum level.
+func NewMinimumLevelWriter(writer Writer, minLevel Level) Writer {
+	return &minLevelWriter{
+		writer: writer,
+		level:  minLevel,
+	}
+}
+
+type minLevelWriter struct {
+	writer Writer
+	level  Level
+}
+
+// Write writes the log record.
+func (w minLevelWriter) Write(entry Entry) {
+	if entry.Level < w.level {
+		return
+	}
+	w.writer.Write(entry)
+}
+
+type simpleWriter struct {
+	writer    io.Writer
+	formatter func(entry Entry) string
+}
+
+// NewSimpleWriter returns a new writer that writes log messages to the given
+// io.Writer formatting the messages with the given formatter.
+func NewSimpleWriter(writer io.Writer, formatter func(entry Entry) string) Writer {
+	if formatter == nil {
+		formatter = DefaultFormatter
+	}
+	return &simpleWriter{writer, formatter}
+}
+
+func (simple *simpleWriter) Write(entry Entry) {
+	logLine := simple.formatter(entry)
+	fmt.Fprintln(simple.writer, logLine)
+}
+
+func defaultWriter() Writer {
+	return NewColorWriter(os.Stderr)
+}
+
+type colorWriter struct {
+	writer *ansiterm.Writer
+}
+
+var (
+	// SeverityColor defines the colors for the levels output by the ColorWriter.
+	SeverityColor = map[Level]*ansiterm.Context{
+		TRACE:   ansiterm.Foreground(ansiterm.Default),
+		DEBUG:   ansiterm.Foreground(ansiterm.Green),
+		INFO:    ansiterm.Foreground(ansiterm.BrightBlue),
+		WARNING: ansiterm.Foreground(ansiterm.Yellow),
+		ERROR:   ansiterm.Foreground(ansiterm.BrightRed),
+		CRITICAL: &ansiterm.Context{
+			Foreground: ansiterm.White,
+			Background: ansiterm.Red,
+		},
+	}
+	// LocationColor defines the colors for the location output by the ColorWriter.
+	LocationColor = ansiterm.Foreground(ansiterm.BrightBlue)
+)
+
+// NewColorWriter will write out colored severity levels if the writer is
+// outputting to a terminal.
+func NewColorWriter(writer io.Writer) Writer {
+	return &colorWriter{ansiterm.NewWriter(writer)}
+}
+
+// Write implements Writer.
+func (w *colorWriter) Write(entry Entry) {
+	ts := formatTime(entry.Timestamp)
+	// Just get the basename from the filename
+	filename := filepath.Base(entry.Filename)
+
+	fmt.Fprintf(w.writer, "%s ", ts)
+	SeverityColor[entry.Level].Fprintf(w.writer, entry.Level.Short())
+	fmt.Fprintf(w.writer, " %s ", entry.Module)
+	LocationColor.Fprintf(w.writer, "%s:%d ", filename, entry.Line)
+	fmt.Fprintln(w.writer, entry.Message)
+}
diff --git a/automation/vendor/github.com/juju/schema/LICENSE b/automation/vendor/github.com/juju/schema/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/schema/README.md b/automation/vendor/github.com/juju/schema/README.md
new file mode 100644
index 0000000..78ad8a1
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/README.md
@@ -0,0 +1,5 @@
+juju/schema
+===========
+
+This package provides helpers for coercing dynamically typed data structures
+into known forms.
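+
+A minimal sketch of coercing a loosely typed map (the field names are
+illustrative, and the import path `github.com/juju/schema` is assumed):
+
+``` go
+package main
+
+import (
+	"fmt"
+
+	"github.com/juju/schema"
+)
+
+func main() {
+	checker := schema.FieldMap(
+		schema.Fields{
+			"name": schema.String(),
+			"port": schema.ForceInt(),
+		},
+		schema.Defaults{"port": 8080},
+	)
+	out, err := checker.Coerce(map[string]interface{}{"name": "maas"}, nil)
+	fmt.Println(out, err) // map[name:maas port:8080] <nil>
+}
+```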
diff --git a/automation/vendor/github.com/juju/schema/checker.go b/automation/vendor/github.com/juju/schema/checker.go
new file mode 100644
index 0000000..8682136
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/checker.go
@@ -0,0 +1,71 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"strings"
+)
+
+// The Coerce method of the Checker interface is called recursively when
+// v is being validated.  If err is nil, newv is used as the new value
+// at the recursion point.  If err is non-nil, v is taken as invalid and
+// may either be ignored or cause an error, depending on where in the
+// schema checking process the error happened. Checkers like OneOf may continue
+// with an alternative, for instance.
+type Checker interface {
+	Coerce(v interface{}, path []string) (newv interface{}, err error)
+}
+
+// Any returns a Checker that succeeds with any input value and
+// results in the value itself unprocessed.
+func Any() Checker {
+	return anyC{}
+}
+
+type anyC struct{}
+
+func (c anyC) Coerce(v interface{}, path []string) (interface{}, error) {
+	return v, nil
+}
+
+// OneOf returns a Checker that attempts to Coerce the value with each
+// of the provided checkers. The value returned by the first checker
+// that succeeds will be returned by the OneOf checker itself.  If no
+// checker succeeds, OneOf will return an error on coercion.
+func OneOf(options ...Checker) Checker {
+	return oneOfC{options}
+}
+
+type oneOfC struct {
+	options []Checker
+}
+
+func (c oneOfC) Coerce(v interface{}, path []string) (interface{}, error) {
+	for _, o := range c.options {
+		newv, err := o.Coerce(v, path)
+		if err == nil {
+			return newv, nil
+		}
+	}
+	return nil, error_{"", v, path}
+}
+
+// pathAsPrefix returns a string consisting of the path elements
+// suitable for using as the prefix of an error message. If path
+// starts with a ".", the dot is omitted.
+func pathAsPrefix(path []string) string {
+	if len(path) == 0 {
+		return ""
+	}
+	var s string
+	if path[0] == "." {
+		s = strings.Join(path[1:], "")
+	} else {
+		s = strings.Join(path, "")
+	}
+	if s == "" {
+		return ""
+	}
+	return s + ": "
+}
diff --git a/automation/vendor/github.com/juju/schema/const.go b/automation/vendor/github.com/juju/schema/const.go
new file mode 100644
index 0000000..cbd03b8
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/const.go
@@ -0,0 +1,57 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Const returns a Checker that only succeeds if the input matches
+// value exactly.  The value is compared with reflect.DeepEqual.
+func Const(value interface{}) Checker {
+	return constC{value}
+}
+
+type constC struct {
+	value interface{}
+}
+
+func (c constC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if reflect.DeepEqual(v, c.value) {
+		return v, nil
+	}
+	return nil, error_{fmt.Sprintf("%#v", c.value), v, path}
+}
+
+// Nil returns a Checker that only succeeds if the input is nil. To tweak the
+// error message, valueLabel can contain a label of the value being checked to
+// be empty, e.g. "my special name". If valueLabel is "", "value" will be used
+// as a label instead.
+//
+// Example 1:
+// schema.Nil("widget").Coerce(42, nil) will return an error message
+// like `expected empty widget, got int(42)`.
+//
+// Example 2:
+// schema.Nil("").Coerce("", nil) will return an error message like
+// `expected empty value, got string("")`.
+func Nil(valueLabel string) Checker {
+	if valueLabel == "" {
+		valueLabel = "value"
+	}
+	return nilC{valueLabel}
+}
+
+type nilC struct {
+	valueLabel string
+}
+
+func (c nilC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if reflect.DeepEqual(v, nil) {
+		return v, nil
+	}
+	label := fmt.Sprintf("empty %s", c.valueLabel)
+	return nil, error_{label, v, path}
+}
diff --git a/automation/vendor/github.com/juju/schema/errors.go b/automation/vendor/github.com/juju/schema/errors.go
new file mode 100644
index 0000000..f62f58e
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/errors.go
@@ -0,0 +1,25 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+)
+
+type error_ struct {
+	want string
+	got  interface{}
+	path []string
+}
+
+func (e error_) Error() string {
+	path := pathAsPrefix(e.path)
+	if e.want == "" {
+		return fmt.Sprintf("%sunexpected value %#v", path, e.got)
+	}
+	if e.got == nil {
+		return fmt.Sprintf("%sexpected %s, got nothing", path, e.want)
+	}
+	return fmt.Sprintf("%sexpected %s, got %T(%#v)", path, e.want, e.got, e.got)
+}
diff --git a/automation/vendor/github.com/juju/schema/fieldmap.go b/automation/vendor/github.com/juju/schema/fieldmap.go
new file mode 100644
index 0000000..765f7e3
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/fieldmap.go
@@ -0,0 +1,170 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Omit is a marker for the FieldMap and StrictFieldMap defaults parameter.
+// If a field is not present in the map and its default is Omit, the missing
+// field will be omitted from the coerced map as well.
+var Omit omit
+
+type omit struct{}
+
+type Fields map[string]Checker
+type Defaults map[string]interface{}
+
+// FieldMap returns a Checker that accepts a map value with defined
+// string keys. Every key has an independent checker associated,
+// and processing will only succeed if all the values succeed
+// individually. If a field fails to be processed, processing stops
+// and returns with the underlying error.
+//
+// Fields in defaults will be set to the provided value if not present
+// in the coerced map. If the default value is schema.Omit, the
+// missing field will be omitted from the coerced map.
+//
+// The coerced output value has type map[string]interface{}.
+func FieldMap(fields Fields, defaults Defaults) Checker {
+	return fieldMapC{fields, defaults, false}
+}
+
+// StrictFieldMap returns a Checker that acts as the one returned by FieldMap,
+// but the Checker returns an error if it encounters an unknown key.
+func StrictFieldMap(fields Fields, defaults Defaults) Checker {
+	return fieldMapC{fields, defaults, true}
+}
+
+type fieldMapC struct {
+	fields   Fields
+	defaults Defaults
+	strict   bool
+}
+
+var stringType = reflect.TypeOf("")
+
+func hasStrictStringKeys(rv reflect.Value) bool {
+	if rv.Type().Key() == stringType {
+		return true
+	}
+	if rv.Type().Key().Kind() != reflect.Interface {
+		return false
+	}
+	for _, k := range rv.MapKeys() {
+		if k.Elem().Type() != stringType {
+			return false
+		}
+	}
+	return true
+}
+
+func (c fieldMapC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Map {
+		return nil, error_{"map", v, path}
+	}
+	if !hasStrictStringKeys(rv) {
+		return nil, error_{"map[string]", v, path}
+	}
+
+	if c.strict {
+		for _, k := range rv.MapKeys() {
+			ks := k.String()
+			if _, ok := c.fields[ks]; !ok {
+				return nil, fmt.Errorf("%sunknown key %q (value %#v)", pathAsPrefix(path), ks, rv.MapIndex(k).Interface())
+			}
+		}
+	}
+
+	vpath := append(path, ".", "?")
+
+	out := make(map[string]interface{}, rv.Len())
+	for k, checker := range c.fields {
+		valuev := rv.MapIndex(reflect.ValueOf(k))
+		var value interface{}
+		if valuev.IsValid() {
+			value = valuev.Interface()
+		} else if dflt, ok := c.defaults[k]; ok {
+			if dflt == Omit {
+				continue
+			}
+			value = dflt
+		}
+		vpath[len(vpath)-1] = k
+		newv, err := checker.Coerce(value, vpath)
+		if err != nil {
+			return nil, err
+		}
+		out[k] = newv
+	}
+	for k, v := range c.defaults {
+		if v == Omit {
+			continue
+		}
+		if _, ok := out[k]; !ok {
+			checker, ok := c.fields[k]
+			if !ok {
+				return nil, fmt.Errorf("got default value for unknown field %q", k)
+			}
+			vpath[len(vpath)-1] = k
+			newv, err := checker.Coerce(v, vpath)
+			if err != nil {
+				return nil, err
+			}
+			out[k] = newv
+		}
+	}
+	return out, nil
+}
+
+// FieldMapSet returns a Checker that accepts a map value checked
+// against one of several FieldMap checkers.  The actual checker
+// used is the first one whose checker associated with the selector
+// field processes the selector value correctly. If no checker processes
+// the selector value correctly, an error is returned.
+//
+// The coerced output value has type map[string]interface{}.
+func FieldMapSet(selector string, maps []Checker) Checker {
+	fmaps := make([]fieldMapC, len(maps))
+	for i, m := range maps {
+		if fmap, ok := m.(fieldMapC); ok {
+			if checker, _ := fmap.fields[selector]; checker == nil {
+				panic("FieldMapSet has a FieldMap with a missing selector")
+			}
+			fmaps[i] = fmap
+		} else {
+			panic("FieldMapSet got a non-FieldMap checker")
+		}
+	}
+	return mapSetC{selector, fmaps}
+}
+
+type mapSetC struct {
+	selector string
+	fmaps    []fieldMapC
+}
+
+func (c mapSetC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Map {
+		return nil, error_{"map", v, path}
+	}
+
+	var selector interface{}
+	selectorv := rv.MapIndex(reflect.ValueOf(c.selector))
+	if selectorv.IsValid() {
+		selector = selectorv.Interface()
+		for _, fmap := range c.fmaps {
+			_, err := fmap.fields[c.selector].Coerce(selector, path)
+			if err != nil {
+				continue
+			}
+			return fmap.Coerce(v, path)
+		}
+	}
+	return nil, error_{"supported selector", selector, append(path, ".", c.selector)}
+}
diff --git a/automation/vendor/github.com/juju/schema/lists.go b/automation/vendor/github.com/juju/schema/lists.go
new file mode 100644
index 0000000..635425a
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/lists.go
@@ -0,0 +1,44 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"reflect"
+	"strconv"
+)
+
+// List returns a Checker that accepts a slice value with values
+// that are processed with the elem checker.  If any element of the
+// provided slice value fails to be processed, processing will stop
+// and return with the obtained error.
+//
+// The coerced output value has type []interface{}.
+func List(elem Checker) Checker {
+	return listC{elem}
+}
+
+type listC struct {
+	elem Checker
+}
+
+func (c listC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Slice {
+		return nil, error_{"list", v, path}
+	}
+
+	path = append(path, "[", "?", "]")
+
+	l := rv.Len()
+	out := make([]interface{}, 0, l)
+	for i := 0; i != l; i++ {
+		path[len(path)-2] = strconv.Itoa(i)
+		elem, err := c.elem.Coerce(rv.Index(i).Interface(), path)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, elem)
+	}
+	return out, nil
+}
diff --git a/automation/vendor/github.com/juju/schema/maps.go b/automation/vendor/github.com/juju/schema/maps.go
new file mode 100644
index 0000000..31242a0
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/maps.go
@@ -0,0 +1,93 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Map returns a Checker that accepts a map value. Every key and value
+// in the map are processed with the respective checker, and if any
+// value fails to be coerced, processing stops and returns with the
+// underlying error.
+//
+// The coerced output value has type map[interface{}]interface{}.
+func Map(key Checker, value Checker) Checker {
+	return mapC{key, value}
+}
+
+type mapC struct {
+	key   Checker
+	value Checker
+}
+
+func (c mapC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Map {
+		return nil, error_{"map", v, path}
+	}
+
+	vpath := append(path, ".", "?")
+
+	l := rv.Len()
+	out := make(map[interface{}]interface{}, l)
+	keys := rv.MapKeys()
+	for i := 0; i != l; i++ {
+		k := keys[i]
+		newk, err := c.key.Coerce(k.Interface(), path)
+		if err != nil {
+			return nil, err
+		}
+		vpath[len(vpath)-1] = fmt.Sprint(k.Interface())
+		newv, err := c.value.Coerce(rv.MapIndex(k).Interface(), vpath)
+		if err != nil {
+			return nil, err
+		}
+		out[newk] = newv
+	}
+	return out, nil
+}
+
+// StringMap returns a Checker that accepts a map value. Every key in
+// the map must be a string, and every value in the map are processed
+// with the provided checker. If any value fails to be coerced,
+// processing stops and returns with the underlying error.
+//
+// The coerced output value has type map[string]interface{}.
+func StringMap(value Checker) Checker {
+	return stringMapC{value}
+}
+
+type stringMapC struct {
+	value Checker
+}
+
+func (c stringMapC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Map {
+		return nil, error_{"map", v, path}
+	}
+
+	vpath := append(path, ".", "?")
+	key := String()
+
+	l := rv.Len()
+	out := make(map[string]interface{}, l)
+	keys := rv.MapKeys()
+	for i := 0; i != l; i++ {
+		k := keys[i]
+		newk, err := key.Coerce(k.Interface(), path)
+		if err != nil {
+			return nil, err
+		}
+		vpath[len(vpath)-1] = fmt.Sprint(k.Interface())
+		newv, err := c.value.Coerce(rv.MapIndex(k).Interface(), vpath)
+		if err != nil {
+			return nil, err
+		}
+		out[newk.(string)] = newv
+	}
+	return out, nil
+}
diff --git a/automation/vendor/github.com/juju/schema/numeric.go b/automation/vendor/github.com/juju/schema/numeric.go
new file mode 100644
index 0000000..ec88e56
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/numeric.go
@@ -0,0 +1,197 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"reflect"
+	"strconv"
+)
+
+// Bool returns a Checker that accepts boolean values only.
+func Bool() Checker {
+	return boolC{}
+}
+
+type boolC struct{}
+
+func (c boolC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil {
+		switch reflect.TypeOf(v).Kind() {
+		case reflect.Bool:
+			return v, nil
+		case reflect.String:
+			val, err := strconv.ParseBool(reflect.ValueOf(v).String())
+			if err == nil {
+				return val, nil
+			}
+		}
+	}
+	return nil, error_{"bool", v, path}
+}
+
+// Int returns a Checker that accepts any integer value, and returns
+// the same value consistently typed as an int64.
+func Int() Checker {
+	return intC{}
+}
+
+type intC struct{}
+
+func (c intC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"int", v, path}
+	}
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.Int:
+	case reflect.Int8:
+	case reflect.Int16:
+	case reflect.Int32:
+	case reflect.Int64:
+	case reflect.String:
+		val, err := strconv.ParseInt(reflect.ValueOf(v).String(), 0, 64)
+		if err == nil {
+			return val, nil
+		} else {
+			return nil, error_{"int", v, path}
+		}
+	default:
+		return nil, error_{"int", v, path}
+	}
+	return reflect.ValueOf(v).Int(), nil
+}
+
+// Uint returns a Checker that accepts any signed or unsigned integer value,
+// and returns the same value consistently typed as a uint64. If the integer
+// value is negative an error is raised.
+func Uint() Checker {
+	return uintC{}
+}
+
+type uintC struct{}
+
+func (c uintC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"uint", v, path}
+	}
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return reflect.ValueOf(v).Uint(), nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		val := reflect.ValueOf(v).Int()
+		if val < 0 {
+			return nil, error_{"uint", v, path}
+		}
+		// All positive int64 values fit into uint64.
+		return uint64(val), nil
+	case reflect.String:
+		val, err := strconv.ParseUint(reflect.ValueOf(v).String(), 0, 64)
+		if err == nil {
+			return val, nil
+		} else {
+			return nil, error_{"uint", v, path}
+		}
+	default:
+		return nil, error_{"uint", v, path}
+	}
+}
+
+// ForceInt returns a Checker that accepts any integer or float value, and
+// returns the same value consistently typed as an int. This is required
+// in order to handle the interface{}/float64 type conversion performed by
+// the JSON serializer used as part of the API infrastructure.
+func ForceInt() Checker {
+	return forceIntC{}
+}
+
+type forceIntC struct{}
+
+func (c forceIntC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil {
+		switch vv := reflect.TypeOf(v); vv.Kind() {
+		case reflect.String:
+			vstr := reflect.ValueOf(v).String()
+			intValue, err := strconv.ParseInt(vstr, 0, 64)
+			if err == nil {
+				return int(intValue), nil
+			}
+			floatValue, err := strconv.ParseFloat(vstr, 64)
+			if err == nil {
+				return int(floatValue), nil
+			}
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			return int(reflect.ValueOf(v).Int()), nil
+		case reflect.Float32, reflect.Float64:
+			return int(reflect.ValueOf(v).Float()), nil
+		}
+	}
+	return nil, error_{"number", v, path}
+}
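+
+// Example (editorial sketch, not from upstream): ForceInt is useful after
+// JSON decoding, where all numbers arrive as float64.
+//
+//	var decoded interface{} = float64(3) // what encoding/json produces for 3
+//	v, err := schema.ForceInt().Coerce(decoded, nil)
+//	// v is int(3), err is nil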
+
+// ForceUint returns a Checker that accepts any integer or float value, and
+// returns the same value consistently typed as a uint64. This is required
+// in order to handle the interface{}/float64 type conversion performed by
+// the JSON serializer used as part of the API infrastructure. If the integer
+// value is negative, an error is returned.
+func ForceUint() Checker {
+	return forceUintC{}
+}
+
+type forceUintC struct{}
+
+func (c forceUintC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil {
+		switch vv := reflect.TypeOf(v); vv.Kind() {
+		case reflect.String:
+			vstr := reflect.ValueOf(v).String()
+			intValue, err := strconv.ParseUint(vstr, 0, 64)
+			if err == nil {
+				return intValue, nil
+			}
+			floatValue, err := strconv.ParseFloat(vstr, 64)
+			if err == nil {
+				if floatValue < 0 {
+					return nil, error_{"uint", v, path}
+				}
+				return uint64(floatValue), nil
+			}
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			return reflect.ValueOf(v).Uint(), nil
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			val := reflect.ValueOf(v).Int()
+			if val < 0 {
+				return nil, error_{"uint", v, path}
+			}
+			// All positive int64 values fit into uint64.
+			return uint64(val), nil
+		case reflect.Float32, reflect.Float64:
+			val := reflect.ValueOf(v).Float()
+			if val < 0 {
+				return nil, error_{"uint", v, path}
+			}
+			return uint64(val), nil
+		}
+	}
+	return nil, error_{"uint", v, path}
+}
+
+// Float returns a Checker that accepts any float value, and returns
+// the same value consistently typed as a float64.
+func Float() Checker {
+	return floatC{}
+}
+
+type floatC struct{}
+
+func (c floatC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"float", v, path}
+	}
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.Float32:
+	case reflect.Float64:
+	default:
+		return nil, error_{"float", v, path}
+	}
+	return reflect.ValueOf(v).Float(), nil
+}
diff --git a/automation/vendor/github.com/juju/schema/size.go b/automation/vendor/github.com/juju/schema/size.go
new file mode 100644
index 0000000..1ad0caa
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/size.go
@@ -0,0 +1,42 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"github.com/juju/utils"
+	"reflect"
+)
+
+// Size returns a Checker that accepts a string value, and returns
+// the parsed string as a size in mebibytes; see
+// https://godoc.org/github.com/juju/utils#ParseSize for details.
+func Size() Checker {
+	return sizeC{}
+}
+
+type sizeC struct{}
+
+// Coerce implements the Checker Coerce method.
+func (c sizeC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"string", v, path}
+	}
+
+	typeOf := reflect.TypeOf(v).Kind()
+	if typeOf != reflect.String {
+		return nil, error_{"string", v, path}
+	}
+
+	value := reflect.ValueOf(v).String()
+	if value == "" {
+		return nil, error_{"empty string", v, path}
+	}
+
+	v, err := utils.ParseSize(value)
+	if err != nil {
+		return nil, err
+	}
+	return v, nil
+}
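+
+// Example (editorial sketch, not from upstream; the exact suffixes accepted
+// are defined by utils.ParseSize, which is not shown here):
+//
+//	v, err := schema.Size().Coerce("1G", nil)
+//	// v would be uint64(1024) mebibytes, assuming ParseSize accepts a "G" suffix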
diff --git a/automation/vendor/github.com/juju/schema/strings.go b/automation/vendor/github.com/juju/schema/strings.go
new file mode 100644
index 0000000..75f6120
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/strings.go
@@ -0,0 +1,155 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+	"regexp"
+)
+
+// String returns a Checker that accepts a string value only and returns
+// it unprocessed.
+func String() Checker {
+	return stringC{}
+}
+
+type stringC struct{}
+
+func (c stringC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil && reflect.TypeOf(v).Kind() == reflect.String {
+		return reflect.ValueOf(v).String(), nil
+	}
+	return nil, error_{"string", v, path}
+}
+
+// URL returns a Checker that accepts a string value that must be parseable as a
+// URL, and returns a *url.URL.
+func URL() Checker {
+	return urlC{}
+}
+
+type urlC struct{}
+
+func (c urlC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil && reflect.TypeOf(v).Kind() == reflect.String {
+		s := reflect.ValueOf(v).String()
+		u, err := url.Parse(s)
+		if err != nil {
+			return nil, error_{"valid url", s, path}
+		}
+		return u, nil
+	}
+	return nil, error_{"url string", v, path}
+}
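+
+// Example (editorial sketch, not from upstream):
+//
+//	v, err := schema.URL().Coerce("https://example.com/x", nil)
+//	u := v.(*url.URL) // u.Host is "example.com", err is nil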
+
+// SimpleRegexp returns a checker that accepts a string value that is
+// a valid regular expression and returns it unprocessed.
+func SimpleRegexp() Checker {
+	return sregexpC{}
+}
+
+type sregexpC struct{}
+
+func (c sregexpC) Coerce(v interface{}, path []string) (interface{}, error) {
+	// XXX The regexp package happens to be extremely simple right now.
+	//     Once exp/regexp goes mainstream, we'll have to update this
+	//     logic to use a more widely accepted regexp subset.
+	if v != nil && reflect.TypeOf(v).Kind() == reflect.String {
+		s := reflect.ValueOf(v).String()
+		_, err := regexp.Compile(s)
+		if err != nil {
+			return nil, error_{"valid regexp", s, path}
+		}
+		return v, nil
+	}
+	return nil, error_{"regexp string", v, path}
+}
+
+// UUID returns a Checker that accepts a string value containing a
+// canonically formatted UUID and returns it unprocessed.
+func UUID() Checker {
+	return uuidC{}
+}
+
+type uuidC struct{}
+
+var uuidregex = regexp.MustCompile(`[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}`)
+
+func (c uuidC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil && reflect.TypeOf(v).Kind() == reflect.String {
+		uuid := reflect.ValueOf(v).String()
+		if uuidregex.MatchString(uuid) {
+			return uuid, nil
+		}
+	}
+	return nil, error_{"uuid", v, path}
+}
+
+// Stringified returns a checker that accepts a bool/int/float/string
+// value and returns its string representation. Other value types may be
+// supported by passing in their checkers.
+func Stringified(checkers ...Checker) Checker {
+	return stringifiedC{
+		checkers: checkers,
+	}
+}
+
+type stringifiedC struct {
+	checkers []Checker
+}
+
+func (c stringifiedC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if newStr, err := String().Coerce(v, path); err == nil {
+		return newStr, nil
+	}
+	_, err := OneOf(append(c.checkers,
+		Bool(),
+		Int(),
+		Float(),
+		String(),
+		URL(),
+	)...).Coerce(v, path)
+	if err != nil {
+		return nil, err
+	}
+	return fmt.Sprintf("%#v", v), nil
+}
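+
+// Example (editorial sketch, not from upstream): accepted values are
+// rendered with fmt.Sprintf("%#v").
+//
+//	v, err := schema.Stringified().Coerce(42, nil) // v is "42", err is nil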
+
+// NonEmptyString returns a Checker that only accepts non-empty strings. To
+// tweak the error message, valueLabel can contain a label of the value being
+// checked, e.g. "my special name". If valueLabel is "", "string" will be used
+// as a label instead.
+//
+// Example 1:
+// schema.NonEmptyString("widget").Coerce("", nil) will return an error message
+// like `expected non-empty widget, got string("")`.
+//
+// Example 2:
+// schema.NonEmptyString("").Coerce("", nil) will return an error message like
+// `expected non-empty string, got string("")`.
+func NonEmptyString(valueLabel string) Checker {
+	if valueLabel == "" {
+		valueLabel = "string"
+	}
+	return nonEmptyStringC{valueLabel}
+}
+
+type nonEmptyStringC struct {
+	valueLabel string
+}
+
+func (c nonEmptyStringC) Coerce(v interface{}, path []string) (interface{}, error) {
+	label := fmt.Sprintf("non-empty %s", c.valueLabel)
+	invalidError := error_{label, v, path}
+
+	if v == nil || reflect.TypeOf(v).Kind() != reflect.String {
+		return nil, invalidError
+	}
+	if stringValue := reflect.ValueOf(v).String(); stringValue != "" {
+		return stringValue, nil
+	}
+	return nil, invalidError
+}
diff --git a/automation/vendor/github.com/juju/schema/time.go b/automation/vendor/github.com/juju/schema/time.go
new file mode 100644
index 0000000..9521a2a
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/time.go
@@ -0,0 +1,41 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"reflect"
+	"time"
+)
+
+// Time returns a Checker that accepts a string or time.Time value, and
+// returns the parsed time.Time value. Empty strings are considered empty times.
+func Time() Checker {
+	return timeC{}
+}
+
+type timeC struct{}
+
+// Coerce implements the Checker Coerce method.
+func (c timeC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"string or time.Time", v, path}
+	}
+	var empty time.Time
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.TypeOf(empty).Kind():
+		return v, nil
+	case reflect.String:
+		vstr := reflect.ValueOf(v).String()
+		if vstr == "" {
+			return empty, nil
+		}
+		v, err := time.Parse(time.RFC3339Nano, vstr)
+		if err != nil {
+			return nil, err
+		}
+		return v, nil
+	default:
+		return nil, error_{"string or time.Time", v, path}
+	}
+}
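+
+// Example (editorial sketch, not from upstream): RFC3339Nano strings are
+// parsed; empty strings yield the zero time.Time.
+//
+//	v, err := schema.Time().Coerce("2017-01-02T15:04:05Z", nil)
+//	t := v.(time.Time) // t.Year() is 2017, err is nil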
diff --git a/automation/vendor/github.com/juju/schema/time_duration.go b/automation/vendor/github.com/juju/schema/time_duration.go
new file mode 100644
index 0000000..71e75fe
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/time_duration.go
@@ -0,0 +1,42 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"reflect"
+	"time"
+)
+
+// TimeDuration returns a Checker that accepts a string or time.Duration value,
+// and returns the parsed time.Duration value. Empty strings are treated as the
+// zero time.Duration.
+func TimeDuration() Checker {
+	return timeDurationC{}
+}
+
+type timeDurationC struct{}
+
+// Coerce implements the Checker Coerce method.
+func (c timeDurationC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"string or time.Duration", v, path}
+	}
+
+	var empty time.Duration
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.TypeOf(empty).Kind():
+		return v, nil
+	case reflect.String:
+		vstr := reflect.ValueOf(v).String()
+		if vstr == "" {
+			return empty, nil
+		}
+		v, err := time.ParseDuration(vstr)
+		if err != nil {
+			return nil, err
+		}
+		return v, nil
+	default:
+		return nil, error_{"string or time.Duration", v, path}
+	}
+}
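+
+// Example (editorial sketch, not from upstream):
+//
+//	v, err := schema.TimeDuration().Coerce("1h30m", nil)
+//	d := v.(time.Duration) // d is 90 * time.Minute, err is nil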
diff --git a/automation/vendor/github.com/juju/utils/LICENSE b/automation/vendor/github.com/juju/utils/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/utils/LICENSE.golang b/automation/vendor/github.com/juju/utils/LICENSE.golang
new file mode 100644
index 0000000..953076d
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/LICENSE.golang
@@ -0,0 +1,32 @@
+This licence applies to the following files:
+
+* filepath/stdlib.go
+* filepath/stdlibmatch.go
+
+Copyright (c) 2010 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/automation/vendor/github.com/juju/utils/Makefile b/automation/vendor/github.com/juju/utils/Makefile
new file mode 100644
index 0000000..9c69f32
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/Makefile
@@ -0,0 +1,10 @@
+PROJECT := github.com/juju/utils
+
+check-licence:
+	@(fgrep -rl "Licensed under the LGPLv3" .;\
+		fgrep -rl "MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT" .;\
+		find . -name "*.go") | sed -e 's,\./,,' | sort | uniq -u | \
+		xargs -I {} echo FAIL: licence missed: {}
+
+check: check-licence
+	go test $(PROJECT)/...
\ No newline at end of file
diff --git a/automation/vendor/github.com/juju/utils/README.md b/automation/vendor/github.com/juju/utils/README.md
new file mode 100644
index 0000000..ef08948
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/README.md
@@ -0,0 +1,4 @@
+juju/utils
+============
+
+This package provides general utility packages and functions.
diff --git a/automation/vendor/github.com/juju/utils/attempt.go b/automation/vendor/github.com/juju/utils/attempt.go
new file mode 100644
index 0000000..3becab2
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/attempt.go
@@ -0,0 +1,80 @@
+// Copyright 2011, 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"time"
+)
+
+// The Attempt and AttemptStrategy types are copied from those in launchpad.net/goamz/aws.
+
+// AttemptStrategy represents a strategy for waiting for an action
+// to complete successfully.
+type AttemptStrategy struct {
+	Total time.Duration // total duration of attempt.
+	Delay time.Duration // interval between each try in the burst.
+	Min   int           // minimum number of retries; overrides Total
+}
+
+type Attempt struct {
+	strategy AttemptStrategy
+	last     time.Time
+	end      time.Time
+	force    bool
+	count    int
+}
+
+// Start begins a new sequence of attempts for the given strategy.
+func (s AttemptStrategy) Start() *Attempt {
+	now := time.Now()
+	return &Attempt{
+		strategy: s,
+		last:     now,
+		end:      now.Add(s.Total),
+		force:    true,
+	}
+}
+
+// Next waits until it is time to perform the next attempt or returns
+// false if it is time to stop trying.
+// It always returns true the first time it is called - we are guaranteed to
+// make at least one attempt.
+func (a *Attempt) Next() bool {
+	now := time.Now()
+	sleep := a.nextSleep(now)
+	if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
+		return false
+	}
+	a.force = false
+	if sleep > 0 && a.count > 0 {
+		time.Sleep(sleep)
+		now = time.Now()
+	}
+	a.count++
+	a.last = now
+	return true
+}
+
+func (a *Attempt) nextSleep(now time.Time) time.Duration {
+	sleep := a.strategy.Delay - now.Sub(a.last)
+	if sleep < 0 {
+		return 0
+	}
+	return sleep
+}
+
+// HasNext returns whether another attempt will be made if the current
+// one fails. If it returns true, the following call to Next is
+// guaranteed to return true.
+func (a *Attempt) HasNext() bool {
+	if a.force || a.strategy.Min > a.count {
+		return true
+	}
+	now := time.Now()
+	if now.Add(a.nextSleep(now)).Before(a.end) {
+		a.force = true
+		return true
+	}
+	return false
+}
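+
+// Example (editorial sketch, not from upstream): the intended usage is a
+// retry loop; the strategy values and tryOperation are illustrative only.
+//
+//	strategy := utils.AttemptStrategy{Total: 10 * time.Second, Delay: 200 * time.Millisecond}
+//	for a := strategy.Start(); a.Next(); {
+//		if err := tryOperation(); err == nil { // tryOperation is hypothetical
+//			break
+//		}
+//	}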
diff --git a/automation/vendor/github.com/juju/utils/clock/clock.go b/automation/vendor/github.com/juju/utils/clock/clock.go
new file mode 100644
index 0000000..59a511d
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/clock/clock.go
@@ -0,0 +1,53 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package clock
+
+import "time"
+
+// Clock provides an interface for dealing with clocks.
+type Clock interface {
+	// Now returns the current clock time.
+	Now() time.Time
+
+	// After waits for the duration to elapse and then sends the
+	// current time on the returned channel.
+	After(time.Duration) <-chan time.Time
+
+	// AfterFunc waits for the duration to elapse and then calls f in its own goroutine.
+	// It returns a Timer that can be used to cancel the call using its Stop method.
+	AfterFunc(d time.Duration, f func()) Timer
+
+	// NewTimer creates a new Timer that will send the current time
+	// on its channel after at least duration d.
+	NewTimer(d time.Duration) Timer
+}
+
+// Alarm returns a channel that will have the time sent on it at some point
+// after the supplied time occurs.
+//
+// This is short for c.After(t.Sub(c.Now())).
+func Alarm(c Clock, t time.Time) <-chan time.Time {
+	return c.After(t.Sub(c.Now()))
+}
+
+// The Timer type represents a single event.
+// A Timer must be created with AfterFunc or NewTimer.
+// This interface follows time.Timer's methods but provides easier mocking.
+type Timer interface {
+	// When the Timer expires, the current time will be sent on the
+	// channel returned from Chan, unless the Timer was created by
+	// AfterFunc.
+	Chan() <-chan time.Time
+
+	// Reset changes the timer to expire after duration d.
+	// It returns true if the timer had been active, false if
+	// the timer had expired or been stopped.
+	Reset(time.Duration) bool
+
+	// Stop prevents the Timer from firing. It returns true if
+	// the call stops the timer, false if the timer has already expired or been stopped.
+	// Stop does not close the channel, to prevent a read
+	// from the channel succeeding incorrectly.
+	Stop() bool
+}
diff --git a/automation/vendor/github.com/juju/utils/clock/wall.go b/automation/vendor/github.com/juju/utils/clock/wall.go
new file mode 100644
index 0000000..9bfc351
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/clock/wall.go
@@ -0,0 +1,47 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package clock
+
+import (
+	"time"
+)
+
+// WallClock exposes wall-clock time via the Clock interface.
+var WallClock wallClock
+
+// ensure that WallClock does actually implement the Clock interface.
+var _ Clock = WallClock
+
+// WallClock exposes wall-clock time as returned by time.Now.
+type wallClock struct{}
+
+// Now is part of the Clock interface.
+func (wallClock) Now() time.Time {
+	return time.Now()
+}
+
+// After implements Clock.After.
+func (wallClock) After(d time.Duration) <-chan time.Time {
+	return time.After(d)
+}
+
+// AfterFunc implements Clock.AfterFunc.
+func (wallClock) AfterFunc(d time.Duration, f func()) Timer {
+	return wallTimer{time.AfterFunc(d, f)}
+}
+
+// NewTimer implements Clock.NewTimer.
+func (wallClock) NewTimer(d time.Duration) Timer {
+	return wallTimer{time.NewTimer(d)}
+}
+
+// wallTimer implements the Timer interface.
+type wallTimer struct {
+	*time.Timer
+}
+
+// Chan implements Timer.Chan.
+func (t wallTimer) Chan() <-chan time.Time {
+	return t.C
+}
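+
+// Example (editorial sketch, not from upstream): production code accepts a
+// clock.Clock and is handed clock.WallClock, so tests can substitute a fake
+// clock. The waitUntil helper is hypothetical.
+//
+//	func waitUntil(c clock.Clock, t time.Time) {
+//		<-clock.Alarm(c, t) // receives once t has passed
+//	}
+//	// waitUntil(clock.WallClock, deadline)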
diff --git a/automation/vendor/github.com/juju/utils/command.go b/automation/vendor/github.com/juju/utils/command.go
new file mode 100644
index 0000000..4bd51cd
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/command.go
@@ -0,0 +1,19 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"os/exec"
+)
+
+// RunCommand executes the command and return the combined output.
+func RunCommand(command string, args ...string) (output string, err error) {
+	cmd := exec.Command(command, args...)
+	out, err := cmd.CombinedOutput()
+	output = string(out)
+	if err != nil {
+		return output, err
+	}
+	return output, nil
+}
diff --git a/automation/vendor/github.com/juju/utils/dependencies.tsv b/automation/vendor/github.com/juju/utils/dependencies.tsv
new file mode 100644
index 0000000..76d098e
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/dependencies.tsv
@@ -0,0 +1,24 @@
+github.com/juju/cmd	git	7c57a7d5a20602e4563a83f2d530283ca0e6f481	2016-08-10T12:53:08Z
+github.com/juju/errors	git	1b5e39b83d1835fa480e0c2ddefb040ee82d58b3	2015-09-16T12:56:42Z
+github.com/juju/gnuflag	git	4e76c56581859c14d9d87e1ddbe29e1c0f10195f	2016-08-09T16:52:14Z
+github.com/juju/httpprof	git	14bf14c307672fd2456bdbf35d19cf0ccd3cf565	2014-12-17T16:00:36Z
+github.com/juju/httprequest	git	89d547093c45e293599088cc63e805c6f1205dc0	2016-03-02T10:09:58Z
+github.com/juju/loggo	git	15901ae4de786d05edae84a27c93d3fbef66c91e	2016-08-04T22:15:26Z
+github.com/juju/retry	git	62c62032529169c7ec02fa48f93349604c345e1f	2015-10-29T02:48:21Z
+github.com/juju/testing	git	7177264a582e2c00d08277eaa91d88f8eb0fd869	2016-09-26T12:59:16Z
+github.com/juju/version	git	4ae6172c00626779a5a462c3e3d22fc0e889431a	2016-06-03T19:49:58Z
+github.com/julienschmidt/httprouter	git	77a895ad01ebc98a4dc95d8355bc825ce80a56f6	2015-10-13T22:55:20Z
+github.com/masterzen/azure-sdk-for-go	git	ee4f0065d00cd12b542f18f5bc45799e88163b12	2016-07-20T05:16:58Z
+github.com/masterzen/simplexml	git	4572e39b1ab9fe03ee513ce6fc7e289e98482190	2016-06-08T18:30:07Z
+github.com/masterzen/winrm	git	7a535cd943fccaeed196718896beec3fb51aff41	2016-08-04T09:38:27Z
+github.com/masterzen/xmlpath	git	13f4951698adc0fa9c1dda3e275d489a24201161	2014-02-18T18:59:01Z
+github.com/nu7hatch/gouuid	git	179d4d0c4d8d407a32af483c2354df1d2c91e6c3	2016-02-18t18:59:01Z
+golang.org/x/crypto	git	aedad9a179ec1ea11b7064c57cbc6dc30d7724ec	2015-08-30T18:06:42Z
+golang.org/x/net	git	ea47fc708ee3e20177f3ca3716217c4ab75942cb	2015-08-29T23:03:18Z
+golang.org/x/text	git	b01949dc0793a9af5e4cb3fce4d42999e76e8ca1	2016-05-25T23:07:23Z
+gopkg.in/check.v1	git	4f90aeace3a26ad7021961c297b22c42160c7b25	2016-01-05T16:49:36Z
+gopkg.in/errgo.v1	git	66cb46252b94c1f3d65646f54ee8043ab38d766c	2015-10-07T15:31:57Z
+gopkg.in/juju/names.v2	git	e38bc90539f22af61a9c656d35068bd5f0a5b30a	2016-05-25T23:07:23Z
+gopkg.in/mgo.v2	git	4d04138ffef2791c479c0c8bbffc30b34081b8d9	2015-10-26T16:34:53Z
+gopkg.in/tomb.v1	git	dd632973f1e7218eb1089048e0798ec9ae7dceb8	2014-10-24T13:56:13Z
+gopkg.in/yaml.v2	git	a83829b6f1293c91addabc89d0571c246397bbf4	2016-03-01T20:40:22Z
diff --git a/automation/vendor/github.com/juju/utils/file.go b/automation/vendor/github.com/juju/utils/file.go
new file mode 100644
index 0000000..75fe5a6
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/file.go
@@ -0,0 +1,149 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"regexp"
+)
+
+// UserHomeDir returns the home directory for the specified user, or the
+// home directory for the current user if the specified user is empty.
+func UserHomeDir(userName string) (hDir string, err error) {
+	if userName == "" {
+		// TODO (wallyworld) - fix tests on Windows
+		// Ordinarily, we'd always use user.Current() to get the current user
+		// and then get the HomeDir from that. But our tests rely on poking
+		// a value into $HOME in order to override the normal home dir for the
+		// current user. So we're forced to use Home() to make the tests pass.
+		// All of our tests currently construct paths with the default user in
+		// mind eg "~/foo".
+		return Home(), nil
+	}
+	hDir, err = homeDir(userName)
+	if err != nil {
+		return "", err
+	}
+	return hDir, nil
+}
+
+// Only match paths starting with ~ (~user/test, ~/test). This will prevent
+// accidental expansion on Windows when short form paths are present (C:\users\ADMINI~1\test)
+var userHomePathRegexp = regexp.MustCompile("(^~(?P<user>[^/]*))(?P<path>.*)")
+
+// NormalizePath expands a path containing ~ to its absolute form,
+// and removes any .. or . path elements.
+func NormalizePath(dir string) (string, error) {
+	if userHomePathRegexp.MatchString(dir) {
+		user := userHomePathRegexp.ReplaceAllString(dir, "$user")
+		userHomeDir, err := UserHomeDir(user)
+		if err != nil {
+			return "", err
+		}
+		dir = userHomePathRegexp.ReplaceAllString(dir, fmt.Sprintf("%s$path", userHomeDir))
+	}
+	return filepath.Clean(dir), nil
+}
+
+// EnsureBaseDir ensures that path is always prefixed by baseDir,
+// allowing for the fact that path might have a Windows drive letter in
+// it.
+func EnsureBaseDir(baseDir, path string) string {
+	if baseDir == "" {
+		return path
+	}
+	volume := filepath.VolumeName(path)
+	return filepath.Join(baseDir, path[len(volume):])
+}
+
+// JoinServerPath joins any number of path elements into a single path, adding
+// a path separator (based on the current juju server OS) if necessary. The
+// result is Cleaned; in particular, all empty strings are ignored.
+func JoinServerPath(elem ...string) string {
+	return path.Join(elem...)
+}
+
+// UniqueDirectory returns "path/name" if that directory doesn't exist.  If it
+// does, the method starts appending .1, .2, etc until a unique name is found.
+func UniqueDirectory(path, name string) (string, error) {
+	dir := filepath.Join(path, name)
+	_, err := os.Stat(dir)
+	if os.IsNotExist(err) {
+		return dir, nil
+	}
+	for i := 1; ; i++ {
+		dir := filepath.Join(path, fmt.Sprintf("%s.%d", name, i))
+		_, err := os.Stat(dir)
+		if os.IsNotExist(err) {
+			return dir, nil
+		} else if err != nil {
+			return "", err
+		}
+	}
+}
+
+// CopyFile writes the contents of the given source file to dest.
+func CopyFile(dest, source string) error {
+	df, err := os.Create(dest)
+	if err != nil {
+		return err
+	}
+	f, err := os.Open(source)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = io.Copy(df, f)
+	return err
+}
+
+// AtomicWriteFileAndChange atomically writes the filename with the
+// given contents and calls the given function after the contents were
+// written, but before the file is renamed.
+func AtomicWriteFileAndChange(filename string, contents []byte, change func(*os.File) error) (err error) {
+	dir, file := filepath.Split(filename)
+	f, err := ioutil.TempFile(dir, file)
+	if err != nil {
+		return fmt.Errorf("cannot create temp file: %v", err)
+	}
+	defer f.Close()
+	defer func() {
+		if err != nil {
+			// Don't leave the temp file lying around on error.
+			// Close the file before removing. Trying to remove an open file on
+			// Windows will fail.
+			f.Close()
+			os.Remove(f.Name())
+		}
+	}()
+	if _, err := f.Write(contents); err != nil {
+		return fmt.Errorf("cannot write %q contents: %v", filename, err)
+	}
+	if err := change(f); err != nil {
+		return err
+	}
+	f.Close()
+	if err := ReplaceFile(f.Name(), filename); err != nil {
+		return fmt.Errorf("cannot replace %q with %q: %v", f.Name(), filename, err)
+	}
+	return nil
+}
+
+// AtomicWriteFile atomically writes the filename with the given
+// contents and permissions, replacing any existing file at the same
+// path.
+func AtomicWriteFile(filename string, contents []byte, perms os.FileMode) (err error) {
+	return AtomicWriteFileAndChange(filename, contents, func(f *os.File) error {
+		// File.Chmod() is not implemented on Windows; os.Chmod(), however, is
+		if err := os.Chmod(f.Name(), perms); err != nil {
+			return fmt.Errorf("cannot set permissions: %v", err)
+		}
+		return nil
+	})
+}
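+
+// Example (editorial sketch, not from upstream): the write-then-rename in
+// AtomicWriteFile means readers never observe a partially written file.
+// The path and contents below are illustrative only.
+//
+//	err := utils.AtomicWriteFile("/tmp/config.yaml", []byte("key: value\n"), 0644)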
diff --git a/automation/vendor/github.com/juju/utils/file_unix.go b/automation/vendor/github.com/juju/utils/file_unix.go
new file mode 100644
index 0000000..4380290
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/file_unix.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// +build !windows
+
+package utils
+
+import (
+	"fmt"
+	"os"
+	"os/user"
+	"strconv"
+	"strings"
+
+	"github.com/juju/errors"
+)
+
+func homeDir(userName string) (string, error) {
+	u, err := user.Lookup(userName)
+	if err != nil {
+		return "", errors.NewUserNotFound(err, "no such user")
+	}
+	return u.HomeDir, nil
+}
+
+// MoveFile atomically moves the source file to the destination, returning
+// whether the file was moved successfully. If the destination already exists,
+// it returns an error rather than overwrite it.
+//
+// On unix systems, an error may occur with a successful move, if the source
+// file location cannot be unlinked.
+func MoveFile(source, destination string) (bool, error) {
+	err := os.Link(source, destination)
+	if err != nil {
+		return false, err
+	}
+	err = os.Remove(source)
+	if err != nil {
+		return true, err
+	}
+	return true, nil
+}
+
+// ReplaceFile atomically replaces the destination file or directory
+// with the source. The errors that are returned are identical to
+// those returned by os.Rename.
+func ReplaceFile(source, destination string) error {
+	return os.Rename(source, destination)
+}
+
+// MakeFileURL returns a file URL if an absolute path is passed in;
+// otherwise it returns the input unchanged.
+func MakeFileURL(in string) string {
+	if strings.HasPrefix(in, "/") {
+		return "file://" + in
+	}
+	return in
+}
+
+// ChownPath sets the uid and gid of path to match that of the user
+// specified.
+func ChownPath(path, username string) error {
+	u, err := user.Lookup(username)
+	if err != nil {
+		return fmt.Errorf("cannot lookup %q user id: %v", username, err)
+	}
+	uid, err := strconv.Atoi(u.Uid)
+	if err != nil {
+		return fmt.Errorf("invalid user id %q: %v", u.Uid, err)
+	}
+	gid, err := strconv.Atoi(u.Gid)
+	if err != nil {
+		return fmt.Errorf("invalid group id %q: %v", u.Gid, err)
+	}
+	return os.Chown(path, uid, gid)
+}
diff --git a/automation/vendor/github.com/juju/utils/file_windows.go b/automation/vendor/github.com/juju/utils/file_windows.go
new file mode 100644
index 0000000..9bb3936
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/file_windows.go
@@ -0,0 +1,142 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// +build windows
+
+package utils
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+	"unsafe"
+
+	"github.com/juju/errors"
+)
+
+const (
+	movefile_replace_existing = 0x1
+	movefile_write_through    = 0x8
+)
+
+//sys moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) = MoveFileExW
+
+// MoveFile atomically moves the source file to the destination, returning
+// whether the file was moved successfully. If the destination already exists,
+// it returns an error rather than overwrite it.
+func MoveFile(source, destination string) (bool, error) {
+	src, err := syscall.UTF16PtrFromString(source)
+	if err != nil {
+		return false, &os.LinkError{"move", source, destination, err}
+	}
+	dest, err := syscall.UTF16PtrFromString(destination)
+	if err != nil {
+		return false, &os.LinkError{"move", source, destination, err}
+	}
+
+	// see http://msdn.microsoft.com/en-us/library/windows/desktop/aa365240(v=vs.85).aspx
+	if err := moveFileEx(src, dest, movefile_write_through); err != nil {
+		return false, &os.LinkError{"move", source, destination, err}
+	}
+	return true, nil
+
+}
+
+// ReplaceFile atomically replaces the destination file or directory with the source.
+// The errors that are returned are identical to those returned by os.Rename.
+func ReplaceFile(source, destination string) error {
+	src, err := syscall.UTF16PtrFromString(source)
+	if err != nil {
+		return &os.LinkError{"replace", source, destination, err}
+	}
+	dest, err := syscall.UTF16PtrFromString(destination)
+	if err != nil {
+		return &os.LinkError{"replace", source, destination, err}
+	}
+
+	// see http://msdn.microsoft.com/en-us/library/windows/desktop/aa365240(v=vs.85).aspx
+	if err := moveFileEx(src, dest, movefile_replace_existing|movefile_write_through); err != nil {
+		return &os.LinkError{"replace", source, destination, err}
+	}
+	return nil
+}
+
+// MakeFileURL returns a proper file URL for the given path/directory
+func MakeFileURL(in string) string {
+	in = filepath.ToSlash(in)
+	// For Windows a path should be at least "<letter>:" to be considered
+	// valid, so we can't do anything with less than that.
+	if len(in) < 2 {
+		return in
+	}
+	if string(in[1]) != ":" {
+		return in
+	}
+	// Since Go 1.6 the http client will only accept this format.
+	return "file://" + in
+}
+
+func getUserSID(username string) (string, error) {
+	sid, _, _, e := syscall.LookupSID("", username)
+	if e != nil {
+		return "", e
+	}
+	sidStr, err := sid.String()
+	return sidStr, err
+}
+
+func readRegString(h syscall.Handle, key string) (value string, err error) {
+	var typ uint32
+	var buf uint32
+
+	// Get size of registry key
+	err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(key), nil, &typ, nil, &buf)
+	if err != nil {
+		return value, err
+	}
+
+	n := make([]uint16, buf/2+1)
+	err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(key), nil, &typ, (*byte)(unsafe.Pointer(&n[0])), &buf)
+	if err != nil {
+		return value, err
+	}
+	return syscall.UTF16ToString(n[:]), err
+}
+
+func homeFromRegistry(sid string) (string, error) {
+	var h syscall.Handle
+	// This key will exist on all platforms we support the agent on (Windows Server 2008 and above).
+	keyPath := fmt.Sprintf("Software\\Microsoft\\Windows NT\\CurrentVersion\\ProfileList\\%s", sid)
+	err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
+		syscall.StringToUTF16Ptr(keyPath),
+		0, syscall.KEY_READ, &h)
+	if err != nil {
+		return "", err
+	}
+	defer syscall.RegCloseKey(h)
+	str, err := readRegString(h, "ProfileImagePath")
+	if err != nil {
+		return "", err
+	}
+	return str, nil
+}
+
+// homeDir returns a local user's home dir on Windows.
+// user.Lookup() does not populate Gid and HomeDir on Windows,
+// so we get it from the registry.
+func homeDir(user string) (string, error) {
+	u, err := getUserSID(user)
+	if err != nil {
+		return "", errors.NewUserNotFound(err, "no such user")
+	}
+	return homeFromRegistry(u)
+}
+
+// ChownPath is not implemented for Windows.
+func ChownPath(path, username string) error {
+	// This only exists to allow building on Windows. User lookup and
+	// file ownership needs to be handled in a completely different
+	// way and hasn't yet been implemented.
+	return nil
+}
diff --git a/automation/vendor/github.com/juju/utils/gomaxprocs.go b/automation/vendor/github.com/juju/utils/gomaxprocs.go
new file mode 100644
index 0000000..5977a86
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/gomaxprocs.go
@@ -0,0 +1,26 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"os"
+	"runtime"
+)
+
+var gomaxprocs = runtime.GOMAXPROCS
+var numCPU = runtime.NumCPU
+
+// UseMultipleCPUs sets GOMAXPROCS to the number of CPU cores unless it has
+// already been overridden by the GOMAXPROCS environment variable.
+func UseMultipleCPUs() {
+	if envGOMAXPROCS := os.Getenv("GOMAXPROCS"); envGOMAXPROCS != "" {
+		n := gomaxprocs(0)
+		logger.Debugf("GOMAXPROCS already set in environment to %q, %d internally",
+			envGOMAXPROCS, n)
+		return
+	}
+	n := numCPU()
+	logger.Debugf("setting GOMAXPROCS to %d", n)
+	gomaxprocs(n)
+}
diff --git a/automation/vendor/github.com/juju/utils/home_unix.go b/automation/vendor/github.com/juju/utils/home_unix.go
new file mode 100644
index 0000000..6b450be
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/home_unix.go
@@ -0,0 +1,19 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// +build !windows
+
+package utils
+
+import (
+	"os"
+)
+
+// Home returns the os-specific home path as specified in the environment.
+func Home() string {
+	return os.Getenv("HOME")
+}
+
+// SetHome sets the os-specific home path in the environment.
+func SetHome(s string) error {
+	return os.Setenv("HOME", s)
+}
diff --git a/automation/vendor/github.com/juju/utils/home_windows.go b/automation/vendor/github.com/juju/utils/home_windows.go
new file mode 100644
index 0000000..e61225c
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/home_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"os"
+	"path/filepath"
+)
+
+// Home returns the os-specific home path as specified in the environment.
+func Home() string {
+	return filepath.Join(os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"))
+}
+
+// SetHome sets the os-specific home path in the environment.
+func SetHome(s string) error {
+	v := filepath.VolumeName(s)
+	if v != "" {
+		if err := os.Setenv("HOMEDRIVE", v); err != nil {
+			return err
+		}
+	}
+	return os.Setenv("HOMEPATH", s[len(v):])
+}
diff --git a/automation/vendor/github.com/juju/utils/http-1_4.go b/automation/vendor/github.com/juju/utils/http-1_4.go
new file mode 100644
index 0000000..87bd5b1
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/http-1_4.go
@@ -0,0 +1,24 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+//+build !go1.7
+
+package utils
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+)
+
+// installHTTPDialShim patches the default HTTP transport so
+// that it fails when an attempt is made to dial a non-local
+// host.
+func installHTTPDialShim(t *http.Transport) {
+	t.Dial = func(network, addr string) (net.Conn, error) {
+		if !OutgoingAccessAllowed && !isLocalAddr(addr) {
+			return nil, fmt.Errorf("access to address %q not allowed", addr)
+		}
+		return net.Dial(network, addr)
+	}
+}
diff --git a/automation/vendor/github.com/juju/utils/http-1_7.go b/automation/vendor/github.com/juju/utils/http-1_7.go
new file mode 100644
index 0000000..d676dcb
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/http-1_7.go
@@ -0,0 +1,35 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+//+build go1.7
+
+package utils
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"time"
+)
+
+var ctxtDialer = &net.Dialer{
+	Timeout:   30 * time.Second,
+	KeepAlive: 30 * time.Second,
+}
+
+// installHTTPDialShim patches the default HTTP transport so
+// that it fails when an attempt is made to dial a non-local
+// host.
+//
+// Note that this is Go version dependent because in Go 1.7 and above,
+// the DialContext field was introduced (and set in http.DefaultTransport)
+// which overrides the Dial field.
+func installHTTPDialShim(t *http.Transport) {
+	t.DialContext = func(ctxt context.Context, network, addr string) (net.Conn, error) {
+		if !OutgoingAccessAllowed && !isLocalAddr(addr) {
+			return nil, fmt.Errorf("access to address %q not allowed", addr)
+		}
+		return ctxtDialer.DialContext(ctxt, network, addr)
+	}
+}
diff --git a/automation/vendor/github.com/juju/utils/http.go b/automation/vendor/github.com/juju/utils/http.go
new file mode 100644
index 0000000..2f5ddf3
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/http.go
@@ -0,0 +1,117 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"crypto/tls"
+	"encoding/base64"
+	"fmt"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+)
+
+var insecureClient = (*http.Client)(nil)
+var insecureClientMutex = sync.Mutex{}
+
+func init() {
+	defaultTransport := http.DefaultTransport.(*http.Transport)
+	installHTTPDialShim(defaultTransport)
+	registerFileProtocol(defaultTransport)
+}
+
+// registerFileProtocol registers support for file:// URLs on the given transport.
+func registerFileProtocol(transport *http.Transport) {
+	transport.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
+}
+
+// SSLHostnameVerification is used as a switch for when a given provider might
+// use self-signed credentials and we should not try to verify the hostname on
+// the TLS/SSL certificates
+type SSLHostnameVerification bool
+
+const (
+	// VerifySSLHostnames ensures we verify the hostname on the certificate
+	// matches the host we are connecting and is signed
+	VerifySSLHostnames = SSLHostnameVerification(true)
+	// NoVerifySSLHostnames informs us to skip verifying the hostname
+	// matches a valid certificate
+	NoVerifySSLHostnames = SSLHostnameVerification(false)
+)
+
+// GetHTTPClient returns either a standard http client or
+// non validating client depending on the value of verify.
+func GetHTTPClient(verify SSLHostnameVerification) *http.Client {
+	if verify == VerifySSLHostnames {
+		return GetValidatingHTTPClient()
+	}
+	return GetNonValidatingHTTPClient()
+}
+
+// GetValidatingHTTPClient returns a new http.Client that
+// verifies the server's certificate chain and hostname.
+func GetValidatingHTTPClient() *http.Client {
+	return &http.Client{}
+}
+
+// GetNonValidatingHTTPClient returns a new http.Client that
+// does not verify the server's certificate chain and hostname.
+func GetNonValidatingHTTPClient() *http.Client {
+	return &http.Client{
+		Transport: NewHttpTLSTransport(&tls.Config{
+			InsecureSkipVerify: true,
+		}),
+	}
+}
+
+// BasicAuthHeader creates a header that contains just the "Authorization"
+// entry. The implementation was originally taken from net/http but this is
+// needed externally from the http request object in order to use this with
+// our websockets. See section 2 (end of page 4) of http://www.ietf.org/rfc/rfc2617.txt:
+// "To receive authorization, the client sends the userid and password,
+// separated by a single colon (":") character, within a base64 encoded string
+// in the credentials."
+func BasicAuthHeader(username, password string) http.Header {
+	auth := username + ":" + password
+	encoded := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
+	return http.Header{
+		"Authorization": {encoded},
+	}
+}
+
+// ParseBasicAuthHeader attempts to find an Authorization header in the
+// supplied http.Header and, if found, parses it as a Basic header. See
+// section 2 (end of page 4) of http://www.ietf.org/rfc/rfc2617.txt: "To
+// receive authorization, the client sends the userid and password, separated
+// by a single colon (":") character, within a base64 encoded string in the
+// credentials."
+func ParseBasicAuthHeader(h http.Header) (userid, password string, err error) {
+	parts := strings.Fields(h.Get("Authorization"))
+	if len(parts) != 2 || parts[0] != "Basic" {
+		return "", "", fmt.Errorf("invalid or missing HTTP auth header")
+	}
+	// Challenge is a base64-encoded "tag:pass" string.
+	// See RFC 2617, Section 2.
+	challenge, err := base64.StdEncoding.DecodeString(parts[1])
+	if err != nil {
+		return "", "", fmt.Errorf("invalid HTTP auth encoding")
+	}
+	tokens := strings.SplitN(string(challenge), ":", 2)
+	if len(tokens) != 2 {
+		return "", "", fmt.Errorf("invalid HTTP auth contents")
+	}
+	return tokens[0], tokens[1], nil
+}
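+
+// Example (editorial sketch, not from upstream): the two helpers round-trip.
+//
+//	h := utils.BasicAuthHeader("user", "secret")
+//	u, p, err := utils.ParseBasicAuthHeader(h)
+//	// u is "user", p is "secret", err is nil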
+
+// OutgoingAccessAllowed determines whether connections other than
+// localhost can be dialled.
+var OutgoingAccessAllowed = true
+
+func isLocalAddr(addr string) bool {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		return false
+	}
+	return host == "localhost" || net.ParseIP(host).IsLoopback()
+}
diff --git a/automation/vendor/github.com/juju/utils/isubuntu.go b/automation/vendor/github.com/juju/utils/isubuntu.go
new file mode 100644
index 0000000..d85ed9a
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/isubuntu.go
@@ -0,0 +1,17 @@
+// Copyright 2011, 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"strings"
+)
+
+// IsUbuntu executes lxb_release to see if the host OS is Ubuntu.
+func IsUbuntu() bool {
+	out, err := RunCommand("lsb_release", "-i", "-s")
+	if err != nil {
+		return false
+	}
+	return strings.TrimSpace(out) == "Ubuntu"
+}
diff --git a/automation/vendor/github.com/juju/utils/limiter.go b/automation/vendor/github.com/juju/utils/limiter.go
new file mode 100644
index 0000000..60bd066
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/limiter.go
@@ -0,0 +1,59 @@
+// Copyright 2011, 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+)
+
+type empty struct{}
+type limiter chan empty
+
+// Limiter represents a limited resource (e.g. a semaphore).
+type Limiter interface {
+	// Acquire another unit of the resource.
+	// Acquire returns false to indicate there is no more availability,
+	// until another entity calls Release.
+	Acquire() bool
+	// AcquireWait requests a unit of resource, but blocks until one is
+	// available.
+	AcquireWait()
+	// Release returns a unit of the resource. Calling Release when there
+	// are no units Acquired is an error.
+	Release() error
+}
+
+func NewLimiter(max int) Limiter {
+	return make(limiter, max)
+}
+
+// Acquire requests some resources that you can return later. It returns
+// true if there are resources available, and false if there are not.
+// Callers are responsible for calling Release if this returns true, but
+// should not release if this returns false.
+func (l limiter) Acquire() bool {
+	e := empty{}
+	select {
+	case l <- e:
+		return true
+	default:
+		return false
+	}
+}
+
+// AcquireWait waits for the resource to become available before returning.
+func (l limiter) AcquireWait() {
+	e := empty{}
+	l <- e
+}
+
+// Release returns the resource to the available pool.
+func (l limiter) Release() error {
+	select {
+	case <-l:
+		return nil
+	default:
+		return fmt.Errorf("Release without an associated Acquire")
+	}
+}
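+
+// Example (editorial sketch, not from upstream): bounding concurrent work.
+//
+//	lim := utils.NewLimiter(2)
+//	if lim.Acquire() {
+//		defer lim.Release()
+//		// ... do rate-limited work ...
+//	}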
diff --git a/automation/vendor/github.com/juju/utils/multireader.go b/automation/vendor/github.com/juju/utils/multireader.go
new file mode 100644
index 0000000..b8431f9
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/multireader.go
@@ -0,0 +1,189 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"io"
+	"sort"
+
+	"github.com/juju/errors"
+)
+
+// SizeReaderAt combines io.ReaderAt with a Size method.
+type SizeReaderAt interface {
+	// Size returns the size of the data readable
+	// from the reader.
+	Size() int64
+	io.ReaderAt
+}
+
+// NewMultiReaderAt is like io.MultiReader but produces a ReaderAt
+// (and Size), instead of just a reader.
+//
+// Note: this implementation was taken from a talk given
+// by Brad Fitzpatrick at OSCON 2013.
+//
+// http://talks.golang.org/2013/oscon-dl.slide#49
+// https://github.com/golang/talks/blob/master/2013/oscon-dl/server-compose.go
+func NewMultiReaderAt(parts ...SizeReaderAt) SizeReaderAt {
+	m := &multiReaderAt{
+		parts: make([]offsetAndSource, 0, len(parts)),
+	}
+	var off int64
+	for _, p := range parts {
+		m.parts = append(m.parts, offsetAndSource{off, p})
+		off += p.Size()
+	}
+	m.size = off
+	return m
+}
+
+type offsetAndSource struct {
+	off int64
+	SizeReaderAt
+}
+
+type multiReaderAt struct {
+	parts []offsetAndSource
+	size  int64
+}
+
+func (m *multiReaderAt) Size() int64 {
+	return m.size
+}
+
+func (m *multiReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
+	wantN := len(p)
+
+	// Skip past the requested offset.
+	skipParts := sort.Search(len(m.parts), func(i int) bool {
+		// This function returns whether parts[i] will
+		// contribute any bytes to our output.
+		part := m.parts[i]
+		return part.off+part.Size() > off
+	})
+	parts := m.parts[skipParts:]
+
+	// How far to skip in the first part.
+	needSkip := off
+	if len(parts) > 0 {
+		needSkip -= parts[0].off
+	}
+
+	for len(parts) > 0 && len(p) > 0 {
+		readP := p
+		partSize := parts[0].Size()
+		if int64(len(readP)) > partSize-needSkip {
+			readP = readP[:partSize-needSkip]
+		}
+		pn, err0 := parts[0].ReadAt(readP, needSkip)
+		if err0 != nil {
+			return n, err0
+		}
+		n += pn
+		p = p[pn:]
+		if int64(pn)+needSkip == partSize {
+			parts = parts[1:]
+		}
+		needSkip = 0
+	}
+
+	if n != wantN {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+// NewMultiReaderSeeker returns an io.ReadSeeker that combines
+// all the given readers into a single one. It assumes that
+// all the seekers are initially positioned at the start.
+func NewMultiReaderSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
+	sreaders := make([]SizeReaderAt, len(readers))
+	for i, r := range readers {
+		r1, err := newSizeReaderAt(r)
+		if err != nil {
+			panic(err)
+		}
+		sreaders[i] = r1
+	}
+	return &readSeeker{
+		r: NewMultiReaderAt(sreaders...),
+	}
+}
+
+// newSizeReaderAt adapts an io.ReadSeeker to a SizeReaderAt.
+// Note that it doesn't strictly adhere to the ReaderAt
+// contract because it's not safe to call ReadAt concurrently.
+// This doesn't matter because io.ReadSeeker doesn't
+// need to be thread-safe and this is only used in that
+// context.
+func newSizeReaderAt(r io.ReadSeeker) (SizeReaderAt, error) {
+	size, err := r.Seek(0, 2)
+	if err != nil {
+		return nil, err
+	}
+	return &sizeReaderAt{
+		r:    r,
+		size: size,
+		off:  size,
+	}, nil
+}
+
+// sizeReaderAt adapts an io.ReadSeeker to a SizeReaderAt.
+type sizeReaderAt struct {
+	r    io.ReadSeeker
+	size int64
+	off  int64
+}
+
+// ReadAt implements SizeReaderAt.ReadAt.
+func (r *sizeReaderAt) ReadAt(buf []byte, off int64) (n int, err error) {
+	if off != r.off {
+		_, err = r.r.Seek(off, 0)
+		if err != nil {
+			return 0, err
+		}
+		r.off = off
+	}
+	n, err = io.ReadFull(r.r, buf)
+	r.off += int64(n)
+	return n, err
+}
+
+// Size implements SizeReaderAt.Size.
+func (r *sizeReaderAt) Size() int64 {
+	return r.size
+}
+
+// readSeeker adapts a SizeReaderAt to an io.ReadSeeker.
+type readSeeker struct {
+	r   SizeReaderAt
+	off int64
+}
+
+// Seek implements io.Seeker.Seek.
+func (r *readSeeker) Seek(off int64, whence int) (int64, error) {
+	switch whence {
+	case 0:
+	case 1:
+		off += r.off
+	case 2:
+		off = r.r.Size() + off
+	}
+	if off < 0 {
+		return 0, errors.New("negative position")
+	}
+	r.off = off
+	return off, nil
+}
+
+// Read implements io.Reader.Read.
+func (r *readSeeker) Read(buf []byte) (int, error) {
+	n, err := r.r.ReadAt(buf, r.off)
+	r.off += int64(n)
+	if err == io.ErrUnexpectedEOF {
+		err = io.EOF
+	}
+	return n, err
+}
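NewMultiReaderSeeker composes several io.ReadSeekers into one logical stream, so a seek and the following reads can cross part boundaries. A small sketch under the same import-path assumption:

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/juju/utils"
)

func main() {
	r := utils.NewMultiReaderSeeker(
		strings.NewReader("hello, "),
		strings.NewReader("world"),
	)

	// Seek past the whole first part, then read the remainder.
	if _, err := r.Seek(7, 0); err != nil {
		panic(err)
	}
	rest, err := ioutil.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(rest)) // world
}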
diff --git a/automation/vendor/github.com/juju/utils/naturalsort.go b/automation/vendor/github.com/juju/utils/naturalsort.go
new file mode 100644
index 0000000..d337093
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/naturalsort.go
@@ -0,0 +1,95 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"unicode"
+)
+
+// SortStringsNaturally sorts strings according to their natural sort order.
+func SortStringsNaturally(s []string) []string {
+	sort.Sort(naturally(s))
+	return s
+}
+
+type naturally []string
+
+func (n naturally) Len() int {
+	return len(n)
+}
+
+func (n naturally) Swap(a, b int) {
+	n[a], n[b] = n[b], n[a]
+}
+
+// Less sorts by non-numeric prefix and numeric suffix
+// when one exists.
+func (n naturally) Less(a, b int) bool {
+	aVal := n[a]
+	bVal := n[b]
+
+	for {
+		// If bVal is empty, then aVal can't be less than it.
+		if bVal == "" {
+			return false
+		}
+		// If aVal is empty here, then it must be less than bVal.
+		if aVal == "" {
+			return true
+		}
+
+		aPrefix, aNumber, aRemainder := splitAtNumber(aVal)
+		bPrefix, bNumber, bRemainder := splitAtNumber(bVal)
+		if aPrefix != bPrefix {
+			return aPrefix < bPrefix
+		}
+		if aNumber != bNumber {
+			return aNumber < bNumber
+		}
+
+		// Everything is the same so far, try again with the remainder.
+		aVal = aRemainder
+		bVal = bRemainder
+	}
+}
+
+// splitAtNumber splits given string at the first digit, returning the
+// prefix before the number, the integer represented by the first
+// series of digits, and the remainder of the string after the first
+// series of digits. If no digits are present, the number is returned
+// as -1 and the remainder is empty.
+func splitAtNumber(str string) (string, int, string) {
+	i := indexOfDigit(str)
+	if i == -1 {
+		// no numbers
+		return str, -1, ""
+	}
+	j := i + indexOfNonDigit(str[i:])
+	n, err := strconv.Atoi(str[i:j])
+	if err != nil {
+		panic(fmt.Sprintf("parsing number %v: %v", str[i:j], err)) // should never happen
+	}
+	return str[:i], n, str[j:]
+}
+
+func indexOfDigit(str string) int {
+	for i, rune := range str {
+		if unicode.IsDigit(rune) {
+			return i
+		}
+	}
+	return -1
+}
+
+func indexOfNonDigit(str string) int {
+	for i, rune := range str {
+		if !unicode.IsDigit(rune) {
+			return i
+		}
+	}
+	return len(str)
+}
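SortStringsNaturally orders numeric runs by value rather than lexically, which matters for generated names like node-10. A sketch:

package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	names := []string{"node-10", "node-2", "node-1"}
	utils.SortStringsNaturally(names)
	// A plain lexical sort would give [node-1 node-10 node-2].
	fmt.Println(names) // [node-1 node-2 node-10]
}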
diff --git a/automation/vendor/github.com/juju/utils/network.go b/automation/vendor/github.com/juju/utils/network.go
new file mode 100644
index 0000000..505a6e9
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/network.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/juju/loggo"
+)
+
+var logger = loggo.GetLogger("juju.utils")
+
+// GetIPv4Address iterates through the addresses, expecting the CIDR format
+// returned by func (ifi *net.Interface) Addrs() ([]net.Addr, error).
+func GetIPv4Address(addresses []net.Addr) (string, error) {
+	for _, addr := range addresses {
+		ip, _, err := net.ParseCIDR(addr.String())
+		if err != nil {
+			return "", err
+		}
+		ipv4 := ip.To4()
+		if ipv4 == nil {
+			continue
+		}
+		return ipv4.String(), nil
+	}
+	return "", fmt.Errorf("no addresses match")
+}
+
+// GetAddressForInterface looks for the network interface
+// and returns the IPv4 address from the possible addresses.
+func GetAddressForInterface(interfaceName string) (string, error) {
+	iface, err := net.InterfaceByName(interfaceName)
+	if err != nil {
+		logger.Errorf("cannot find network interface %q: %v", interfaceName, err)
+		return "", err
+	}
+	addrs, err := iface.Addrs()
+	if err != nil {
+		logger.Errorf("cannot get addresses for network interface %q: %v", interfaceName, err)
+		return "", err
+	}
+	return GetIPv4Address(addrs)
+}
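GetAddressForInterface chains the interface lookup and the IPv4 filter. A sketch; the interface name "eth0" is a hypothetical, host-specific value:

package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	addr, err := utils.GetAddressForInterface("eth0") // hypothetical interface name
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("eth0 IPv4 address:", addr)
}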
diff --git a/automation/vendor/github.com/juju/utils/os.go b/automation/vendor/github.com/juju/utils/os.go
new file mode 100644
index 0000000..146f793
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/os.go
@@ -0,0 +1,41 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+// These are the names of the operating systems recognized by Go.
+const (
+	OSWindows   = "windows"
+	OSDarwin    = "darwin"
+	OSDragonfly = "dragonfly"
+	OSFreebsd   = "freebsd"
+	OSLinux     = "linux"
+	OSNacl      = "nacl"
+	OSNetbsd    = "netbsd"
+	OSOpenbsd   = "openbsd"
+	OSSolaris   = "solaris"
+)
+
+// OSUnix is the list of unix-like operating systems recognized by Go.
+// See http://golang.org/src/path/filepath/path_unix.go.
+var OSUnix = []string{
+	OSDarwin,
+	OSDragonfly,
+	OSFreebsd,
+	OSLinux,
+	OSNacl,
+	OSNetbsd,
+	OSOpenbsd,
+	OSSolaris,
+}
+
+// OSIsUnix determines whether or not the given OS name is one of the
+// unix-like operating systems recognized by Go.
+func OSIsUnix(os string) bool {
+	for _, goos := range OSUnix {
+		if os == goos {
+			return true
+		}
+	}
+	return false
+}
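OSIsUnix is a simple membership test over the GOOS names above. For example:

package main

import (
	"fmt"
	"runtime"

	"github.com/juju/utils"
)

func main() {
	fmt.Println(utils.OSIsUnix(runtime.GOOS))    // true on Linux, macOS, ...
	fmt.Println(utils.OSIsUnix(utils.OSWindows)) // false
}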
diff --git a/automation/vendor/github.com/juju/utils/password.go b/automation/vendor/github.com/juju/utils/password.go
new file mode 100644
index 0000000..914e8e4
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/password.go
@@ -0,0 +1,92 @@
+// Copyright 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"crypto/rand"
+	"crypto/sha512"
+	"encoding/base64"
+	"fmt"
+	"io"
+
+	"golang.org/x/crypto/pbkdf2"
+)
+
+// CompatSalt is the hard-coded salt that Juju 1.16 and older used to compute
+// the password hash for all users and agents.
+var CompatSalt = string([]byte{0x75, 0x82, 0x81, 0xca})
+
+const randomPasswordBytes = 18
+
+// MinAgentPasswordLength describes how long agent passwords should be. We
+// require this length because we assume enough entropy in the Agent password
+// that it is safe to not do extra rounds of iterated hashing.
+var MinAgentPasswordLength = base64.StdEncoding.EncodedLen(randomPasswordBytes)
+
+// RandomBytes returns n random bytes.
+func RandomBytes(n int) ([]byte, error) {
+	buf := make([]byte, n)
+	_, err := io.ReadFull(rand.Reader, buf)
+	if err != nil {
+		return nil, fmt.Errorf("cannot read random bytes: %v", err)
+	}
+	return buf, nil
+}
+
+// RandomPassword generates a random base64-encoded password.
+func RandomPassword() (string, error) {
+	b, err := RandomBytes(randomPasswordBytes)
+	if err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(b), nil
+}
+
+// RandomSalt generates random base64 data suitable for use as a password
+// salt. The pbkdf2 guideline is to use 8 bytes of salt, so we do 12 raw bytes
+// into 16 base64 bytes. (The alternative is 6 raw into 8 base64.)
+func RandomSalt() (string, error) {
+	b, err := RandomBytes(12)
+	if err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(b), nil
+}
+
+// FastInsecureHash specifies whether a fast, insecure version of the hash
+// algorithm will be used.  Changing this will cause PasswordHash to
+// produce incompatible passwords.  It should only be changed for
+// testing purposes - to make tests run faster.
+var FastInsecureHash = false
+
+// UserPasswordHash returns a base64-encoded one-way hash of the password that
+// is computationally hard to crack by iterating through possible passwords.
+func UserPasswordHash(password string, salt string) string {
+	if salt == "" {
+		panic("salt is not allowed to be empty")
+	}
+	iter := 8192
+	if FastInsecureHash {
+		iter = 1
+	}
+	// Generate 18 byte passwords because we know that MongoDB
+	// uses the MD5 sum of the password anyway, so there's
+	// no point in using more bytes. (18 so we don't get base 64
+	// padding characters).
+	h := pbkdf2.Key([]byte(password), []byte(salt), iter, 18, sha512.New)
+	return base64.StdEncoding.EncodeToString(h)
+}
+
+// AgentPasswordHash returns a base64-encoded one-way hash of the password. This is
+// not suitable for User passwords because those will have limited entropy (see
+// UserPasswordHash). However, since we generate long random passwords for
+// agents, we can trust that there is sufficient entropy to prevent brute force
+// search. And using a faster hash allows us to restart the state machines and
+// have 1000s of agents log in in a reasonable amount of time.
+func AgentPasswordHash(password string) string {
+	sum := sha512.New()
+	sum.Write([]byte(password))
+	h := sum.Sum(nil)
+	return base64.StdEncoding.EncodeToString(h[:18])
+}
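The two hash helpers trade off differently: UserPasswordHash runs 8192 PBKDF2 iterations over a salt, while AgentPasswordHash is a single unsalted SHA-512 pass that relies on the agent password's entropy. A sketch:

package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	password, err := utils.RandomPassword()
	if err != nil {
		panic(err)
	}
	salt, err := utils.RandomSalt()
	if err != nil {
		panic(err)
	}
	fmt.Println(utils.UserPasswordHash(password, salt)) // slow, salted
	fmt.Println(utils.AgentPasswordHash(password))      // fast, unsalted
}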
diff --git a/automation/vendor/github.com/juju/utils/randomstring.go b/automation/vendor/github.com/juju/utils/randomstring.go
new file mode 100644
index 0000000..662f514
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/randomstring.go
@@ -0,0 +1,42 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// These can be used as sane default arguments for RandomString.
+var (
+	LowerAlpha = []rune("abcdefghijklmnopqrstuvwxyz")
+	UpperAlpha = []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+	Digits     = []rune("0123456789")
+)
+
+var (
+	randomStringMu   sync.Mutex
+	randomStringRand *rand.Rand
+)
+
+func init() {
+	randomStringRand = rand.New(
+		rand.NewSource(time.Now().UnixNano()),
+	)
+}
+
+// RandomString will return a string of length n that will only
+// contain runes inside validRunes.
+func RandomString(n int, validRunes []rune) string {
+	randomStringMu.Lock()
+	defer randomStringMu.Unlock()
+
+	runes := make([]rune, n)
+	for i := range runes {
+		runes[i] = validRunes[randomStringRand.Intn(len(validRunes))]
+	}
+
+	return string(runes)
+}
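RandomString draws from whatever rune set the caller supplies; the package-level slices are convenient building blocks. A sketch that copies the slices before combining them, so the defaults are not mutated:

package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	alphanum := append(append([]rune{}, utils.LowerAlpha...), utils.Digits...)
	fmt.Println(utils.RandomString(8, alphanum)) // e.g. "x7kq2m9a" (random)
}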
diff --git a/automation/vendor/github.com/juju/utils/relativeurl.go b/automation/vendor/github.com/juju/utils/relativeurl.go
new file mode 100644
index 0000000..b70f1d5
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/relativeurl.go
@@ -0,0 +1,62 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"strings"
+
+	"github.com/juju/errors"
+)
+
+// RelativeURLPath returns a relative URL path that is lexically
+// equivalent to targPath when interpreted by url.URL.ResolveReference.
+// On success, the returned path will always be non-empty and relative
+// to basePath, even if basePath and targPath share no elements.
+//
+// It is assumed that both basePath and targPath are normalized
+// (have no . or .. elements).
+//
+// An error is returned if basePath or targPath are not absolute paths.
+func RelativeURLPath(basePath, targPath string) (string, error) {
+	if !strings.HasPrefix(basePath, "/") {
+		return "", errors.New("non-absolute base URL")
+	}
+	if !strings.HasPrefix(targPath, "/") {
+		return "", errors.New("non-absolute target URL")
+	}
+	baseParts := strings.Split(basePath, "/")
+	targParts := strings.Split(targPath, "/")
+
+	// For the purposes of dotdot, the last elements of
+	// the paths are irrelevant. We save the last part
+	// of the target path for later.
+	lastElem := targParts[len(targParts)-1]
+	baseParts = baseParts[0 : len(baseParts)-1]
+	targParts = targParts[0 : len(targParts)-1]
+
+	// Find the common prefix between the two paths:
+	var i int
+	for ; i < len(baseParts); i++ {
+		if i >= len(targParts) || baseParts[i] != targParts[i] {
+			break
+		}
+	}
+	dotdotCount := len(baseParts) - i
+	targOnly := targParts[i:]
+	result := make([]string, 0, dotdotCount+len(targOnly)+1)
+	for i := 0; i < dotdotCount; i++ {
+		result = append(result, "..")
+	}
+	result = append(result, targOnly...)
+	result = append(result, lastElem)
+	final := strings.Join(result, "/")
+	if final == "" {
+		// If the final result is empty, the last element must
+		// have been empty, so the target was slash terminated
+		// and there were no previous elements, so "."
+		// is appropriate.
+		final = "."
+	}
+	return final, nil
+}
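A worked example of RelativeURLPath; both inputs must be absolute and already normalized:

package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	rel, err := utils.RelativeURLPath("/v2/model/config", "/v2/charms/wordpress")
	if err != nil {
		panic(err)
	}
	// Resolving this against /v2/model/config yields /v2/charms/wordpress again.
	fmt.Println(rel) // ../charms/wordpress
}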
diff --git a/automation/vendor/github.com/juju/utils/set/ints.go b/automation/vendor/github.com/juju/utils/set/ints.go
new file mode 100644
index 0000000..02009ea
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/set/ints.go
@@ -0,0 +1,112 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package set
+
+import (
+	"sort"
+)
+
+// Ints represents the classic "set" data structure, and contains ints.
+type Ints map[int]bool
+
+// NewInts creates and initializes an Ints and populates it with
+// initial values as specified in the parameters.
+func NewInts(initial ...int) Ints {
+	result := make(Ints)
+	for _, value := range initial {
+		result.Add(value)
+	}
+	return result
+}
+
+// Size returns the number of elements in the set.
+func (is Ints) Size() int {
+	return len(is)
+}
+
+// IsEmpty is true for empty or uninitialized sets.
+func (is Ints) IsEmpty() bool {
+	return len(is) == 0
+}
+
+// Add puts a value into the set.
+func (is Ints) Add(value int) {
+	if is == nil {
+		panic("uninitialised set")
+	}
+	is[value] = true
+}
+
+// Remove takes a value out of the set. If value wasn't in the set to start
+// with, this method silently succeeds.
+func (is Ints) Remove(value int) {
+	delete(is, value)
+}
+
+// Contains returns true if the value is in the set, and false otherwise.
+func (is Ints) Contains(value int) bool {
+	_, exists := is[value]
+	return exists
+}
+
+// Values returns an unordered slice containing all the values in the set.
+func (is Ints) Values() []int {
+	result := make([]int, len(is))
+	i := 0
+	for key := range is {
+		result[i] = key
+		i++
+	}
+	return result
+}
+
+// SortedValues returns an ordered slice containing all the values in the set.
+func (is Ints) SortedValues() []int {
+	values := is.Values()
+	sort.Ints(values)
+	return values
+}
+
+// Union returns a new Ints representing a union of the elements in the
+// method target and the parameter.
+func (is Ints) Union(other Ints) Ints {
+	result := make(Ints)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range is {
+		result[value] = true
+	}
+	for value := range other {
+		result[value] = true
+	}
+	return result
+}
+
+// Intersection returns a new Ints representing an intersection of the elements in the
+// method target and the parameter.
+func (is Ints) Intersection(other Ints) Ints {
+	result := make(Ints)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range is {
+		if other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
+
+// Difference returns a new Ints representing all the values in the
+// target that are not in the parameter.
+func (is Ints) Difference(other Ints) Ints {
+	result := make(Ints)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range is {
+		if !other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
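The set operations compose as expected; the Strings and Tags sets that follow expose the same API over other element types. A sketch:

package main

import (
	"fmt"

	"github.com/juju/utils/set"
)

func main() {
	odds := set.NewInts(1, 3, 5)
	primes := set.NewInts(2, 3, 5)

	fmt.Println(odds.Union(primes).SortedValues())        // [1 2 3 5]
	fmt.Println(odds.Intersection(primes).SortedValues()) // [3 5]
	fmt.Println(odds.Difference(primes).SortedValues())   // [1]
}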
diff --git a/automation/vendor/github.com/juju/utils/set/strings.go b/automation/vendor/github.com/juju/utils/set/strings.go
new file mode 100644
index 0000000..b89f932
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/set/strings.go
@@ -0,0 +1,112 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package set
+
+import (
+	"sort"
+)
+
+// Strings represents the classic "set" data structure, and contains strings.
+type Strings map[string]bool
+
+// NewStrings creates and initializes a Strings and populates it with
+// initial values as specified in the parameters.
+func NewStrings(initial ...string) Strings {
+	result := make(Strings)
+	for _, value := range initial {
+		result.Add(value)
+	}
+	return result
+}
+
+// Size returns the number of elements in the set.
+func (s Strings) Size() int {
+	return len(s)
+}
+
+// IsEmpty is true for empty or uninitialized sets.
+func (s Strings) IsEmpty() bool {
+	return len(s) == 0
+}
+
+// Add puts a value into the set.
+func (s Strings) Add(value string) {
+	if s == nil {
+		panic("uninitialised set")
+	}
+	s[value] = true
+}
+
+// Remove takes a value out of the set. If value wasn't in the set to start
+// with, this method silently succeeds.
+func (s Strings) Remove(value string) {
+	delete(s, value)
+}
+
+// Contains returns true if the value is in the set, and false otherwise.
+func (s Strings) Contains(value string) bool {
+	_, exists := s[value]
+	return exists
+}
+
+// Values returns an unordered slice containing all the values in the set.
+func (s Strings) Values() []string {
+	result := make([]string, len(s))
+	i := 0
+	for key := range s {
+		result[i] = key
+		i++
+	}
+	return result
+}
+
+// SortedValues returns an ordered slice containing all the values in the set.
+func (s Strings) SortedValues() []string {
+	values := s.Values()
+	sort.Strings(values)
+	return values
+}
+
+// Union returns a new Strings representing a union of the elements in the
+// method target and the parameter.
+func (s Strings) Union(other Strings) Strings {
+	result := make(Strings)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range s {
+		result[value] = true
+	}
+	for value := range other {
+		result[value] = true
+	}
+	return result
+}
+
+// Intersection returns a new Strings representing an intersection of the elements in the
+// method target and the parameter.
+func (s Strings) Intersection(other Strings) Strings {
+	result := make(Strings)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range s {
+		if other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
+
+// Difference returns a new Strings representing all the values in the
+// target that are not in the parameter.
+func (s Strings) Difference(other Strings) Strings {
+	result := make(Strings)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range s {
+		if !other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
diff --git a/automation/vendor/github.com/juju/utils/set/tags.go b/automation/vendor/github.com/juju/utils/set/tags.go
new file mode 100644
index 0000000..0fb6f87
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/set/tags.go
@@ -0,0 +1,150 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package set
+
+import (
+	"sort"
+
+	"github.com/juju/errors"
+	"gopkg.in/juju/names.v2"
+)
+
+// Tags represents the Set data structure; it implements tagSet
+// and contains names.Tags.
+type Tags map[names.Tag]bool
+
+// NewTags creates and initializes a Tags and populates it with
+// initial values as specified in the parameters.
+func NewTags(initial ...names.Tag) Tags {
+	result := make(Tags)
+	for _, value := range initial {
+		result.Add(value)
+	}
+	return result
+}
+
+// NewTagsFromStrings creates and initializes a Tags and populates it
+// by using names.ParseTag on the initial values specified in the parameters.
+func NewTagsFromStrings(initial ...string) (Tags, error) {
+	result := make(Tags)
+	for _, value := range initial {
+		tag, err := names.ParseTag(value)
+		if err != nil {
+			return result, errors.Trace(err)
+		}
+		result.Add(tag)
+	}
+	return result, nil
+}
+
+// Size returns the number of elements in the set.
+func (t Tags) Size() int {
+	return len(t)
+}
+
+// IsEmpty is true for empty or uninitialized sets.
+func (t Tags) IsEmpty() bool {
+	return len(t) == 0
+}
+
+// Add puts a value into the set.
+func (t Tags) Add(value names.Tag) {
+	if t == nil {
+		panic("uninitialised set")
+	}
+	t[value] = true
+}
+
+// Remove takes a value out of the set.  If value wasn't in the set to start
+// with, this method silently succeeds.
+func (t Tags) Remove(value names.Tag) {
+	delete(t, value)
+}
+
+// Contains returns true if the value is in the set, and false otherwise.
+func (t Tags) Contains(value names.Tag) bool {
+	_, exists := t[value]
+	return exists
+}
+
+// Values returns an unordered slice containing all the values in the set.
+func (t Tags) Values() []names.Tag {
+	result := make([]names.Tag, len(t))
+	i := 0
+	for key := range t {
+		result[i] = key
+		i++
+	}
+	return result
+}
+
+// stringValues returns a list of strings that represent a names.Tag.
+// It is used internally by the SortedValues method.
+func (t Tags) stringValues() []string {
+	result := make([]string, t.Size())
+	i := 0
+	for key := range t {
+		result[i] = key.String()
+		i++
+	}
+	return result
+}
+
+// SortedValues returns an ordered slice containing all the values in the set.
+func (t Tags) SortedValues() []names.Tag {
+	values := t.stringValues()
+	sort.Strings(values)
+
+	result := make([]names.Tag, len(values))
+	for i, value := range values {
+		// We already know only good strings can live in the Tags set
+		// so we can safely ignore the error here.
+		tag, _ := names.ParseTag(value)
+		result[i] = tag
+	}
+	return result
+}
+
+// Union returns a new Tags representing a union of the elements in the
+// method target and the parameter.
+func (t Tags) Union(other Tags) Tags {
+	result := make(Tags)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range t {
+		result[value] = true
+	}
+	for value := range other {
+		result[value] = true
+	}
+	return result
+}
+
+// Intersection returns a new Tags representing an intersection of the elements in the
+// method target and the parameter.
+func (t Tags) Intersection(other Tags) Tags {
+	result := make(Tags)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range t {
+		if other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
+
+// Difference returns a new Tags representing all the values in the
+// target that are not in the parameter.
+func (t Tags) Difference(other Tags) Tags {
+	result := make(Tags)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range t {
+		if !other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
diff --git a/automation/vendor/github.com/juju/utils/size.go b/automation/vendor/github.com/juju/utils/size.go
new file mode 100644
index 0000000..8f6f88d
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/size.go
@@ -0,0 +1,78 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"math"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"github.com/juju/errors"
+)
+
+// ParseSize parses the string as a size, in mebibytes.
+//
+// The string must be a non-negative number with
+// an optional multiplier suffix (M, G, T, P, E, Z, or Y).
+// If the suffix is not specified, "M" is implied.
+func ParseSize(str string) (MB uint64, err error) {
+	// Find the first non-digit/period:
+	i := strings.IndexFunc(str, func(r rune) bool {
+		return r != '.' && !unicode.IsDigit(r)
+	})
+	var multiplier float64 = 1
+	if i > 0 {
+		suffix := str[i:]
+		multiplier = 0
+		for j := 0; j < len(sizeSuffixes); j++ {
+			base := string(sizeSuffixes[j])
+			// M, MB, or MiB are all valid.
+			switch suffix {
+			case base, base + "B", base + "iB":
+				multiplier = float64(sizeSuffixMultiplier(j))
+				break
+			}
+		}
+		if multiplier == 0 {
+			return 0, errors.Errorf("invalid multiplier suffix %q, expected one of %s", suffix, []byte(sizeSuffixes))
+		}
+		str = str[:i]
+	}
+
+	val, err := strconv.ParseFloat(str, 64)
+	if err != nil || val < 0 {
+		return 0, errors.Errorf("expected a non-negative number, got %q", str)
+	}
+	val *= multiplier
+	return uint64(math.Ceil(val)), nil
+}
+
+var sizeSuffixes = "MGTPEZY"
+
+func sizeSuffixMultiplier(i int) int {
+	return 1 << uint(i*10)
+}
+
+// SizeTracker tracks the number of bytes passing through
+// its Write method (which is otherwise a no-op).
+//
+// Use SizeTracker with io.MultiWriter() to track number of bytes
+// written. Use with io.TeeReader() to track number of bytes read.
+type SizeTracker struct {
+	// size is the number of bytes written so far.
+	size int64
+}
+
+// Size returns the number of bytes written so far.
+func (st SizeTracker) Size() int64 {
+	return st.size
+}
+
+// Write implements io.Writer.
+func (st *SizeTracker) Write(data []byte) (n int, err error) {
+	n = len(data)
+	st.size += int64(n)
+	return n, nil
+}
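ParseSize accepts bare numbers (implying mebibytes) and M/G/T/P/E/Z/Y suffixes, with B and iB variants. For example:

package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	for _, s := range []string{"512", "1.5G", "2TiB"} {
		mb, err := utils.ParseSize(s)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s = %d MiB\n", s, mb) // 512, 1536, 2097152
	}
}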
diff --git a/automation/vendor/github.com/juju/utils/systemerrmessages_unix.go b/automation/vendor/github.com/juju/utils/systemerrmessages_unix.go
new file mode 100644
index 0000000..7a0edd4
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/systemerrmessages_unix.go
@@ -0,0 +1,16 @@
+// Copyright 2014 Canonical Ltd.
+// Copyright 2014 Cloudbase Solutions SRL
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// +build !windows
+
+package utils
+
+// The following are strings/regex-es which match common Unix error messages
+// that may be returned in case of failed calls to the system.
+// Any extra leading/trailing regex-es are left to be added by the developer.
+const (
+	NoSuchUserErrRegexp = `user: unknown user [a-z0-9_-]*`
+	NoSuchFileErrRegexp = `no such file or directory`
+	MkdirFailErrRegexp  = `.* not a directory`
+)
diff --git a/automation/vendor/github.com/juju/utils/systemerrmessages_windows.go b/automation/vendor/github.com/juju/utils/systemerrmessages_windows.go
new file mode 100644
index 0000000..b24d453
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/systemerrmessages_windows.go
@@ -0,0 +1,14 @@
+// Copyright 2014 Canonical Ltd.
+// Copyright 2014 Cloudbase Solutions SRL
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+// The following are strings/regex-es which match common Windows error messages
+// that may be returned in case of failed calls to the system.
+// Any extra leading/trailing regex-es are left to be added by the developer.
+const (
+	NoSuchUserErrRegexp = `No mapping between account names and security IDs was done\.`
+	NoSuchFileErrRegexp = `The system cannot find the (file|path) specified\.`
+	MkdirFailErrRegexp  = `mkdir .*` + NoSuchFileErrRegexp
+)
diff --git a/automation/vendor/github.com/juju/utils/timeit.go b/automation/vendor/github.com/juju/utils/timeit.go
new file mode 100644
index 0000000..172b593
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/timeit.go
@@ -0,0 +1,57 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+	"os"
+	"time"
+)
+
+type timer struct {
+	action     string
+	start      time.Time
+	depth      int
+	duration   time.Duration
+	subActions []*timer
+}
+
+func (t *timer) String() string {
+	this := fmt.Sprintf("%.3fs %*s%s\n", t.duration.Seconds(), t.depth, "", t.action)
+	for _, sub := range t.subActions {
+		this += sub.String()
+	}
+	return this
+}
+
+var stack []*timer
+
+// Timeit starts a timer, used for tracking time spent.
+// It is generally used with defer, as in:
+//  defer utils.Timeit("my func")()
+// which will track how much time is spent in your function. Or,
+// if you want to track the time spent in a function you are calling,
+// then you would use:
+//  toc := utils.Timeit("anotherFunc()")
+//  anotherFunc()
+//  toc()
+// This tracks nested calls by indenting the output, and will print out the
+// full stack of timing when we reach the top of the stack.
+func Timeit(action string) func() {
+	cur := &timer{action: action, start: time.Now(), depth: len(stack)}
+	if len(stack) != 0 {
+		tip := stack[len(stack)-1]
+		tip.subActions = append(tip.subActions, cur)
+	}
+	stack = append(stack, cur)
+	return func() {
+		cur.duration = time.Since(cur.start)
+		if len(stack) == 0 || cur == stack[0] {
+			fmt.Fprint(os.Stderr, cur)
+			stack = nil
+		} else {
+			stack = stack[0 : len(stack)-1]
+		}
+	}
+}
diff --git a/automation/vendor/github.com/juju/utils/timer.go b/automation/vendor/github.com/juju/utils/timer.go
new file mode 100644
index 0000000..6b32f09
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/timer.go
@@ -0,0 +1,124 @@
+// Copyright 2015 Canonical Ltd.
+// Copyright 2015 Cloudbase Solutions SRL
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"math/rand"
+	"time"
+
+	"github.com/juju/utils/clock"
+)
+
+// Countdown implements a timer that will call a provided function
+// after an internally stored duration. The steps as well as min and max
+// durations are declared upon initialization and depend on
+// the particular implementation.
+//
+// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427
+type Countdown interface {
+	// Reset stops the timer and resets its duration to the minimum one.
+	// Start must be called to start the timer again.
+	Reset()
+
+	// Start starts the internal timer.
+	// At the end of the timer, if Reset hasn't been called in the mean time
+	// Func will be called and the duration is increased for the next call.
+	Start()
+}
+
+// NewBackoffTimer creates and initializes a new BackoffTimer.
+// A backoff timer starts at min and gets multiplied by factor
+// until it reaches max. Jitter determines whether a small
+// randomization is added to the duration.
+//
+// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427
+func NewBackoffTimer(config BackoffTimerConfig) *BackoffTimer {
+	return &BackoffTimer{
+		config:          config,
+		currentDuration: config.Min,
+	}
+}
+
+// BackoffTimer implements Countdown.
+// A backoff timer starts at min and gets multiplied by factor
+// until it reaches max. Jitter determines whether a small
+// randomization is added to the duration.
+//
+// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427
+type BackoffTimer struct {
+	config BackoffTimerConfig
+
+	timer           clock.Timer
+	currentDuration time.Duration
+}
+
+// BackoffTimerConfig is a helper struct for backoff timer
+// that encapsulates config information.
+//
+// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427
+type BackoffTimerConfig struct {
+	// The minimum duration after which Func is called.
+	Min time.Duration
+
+	// The maximum duration after which Func is called.
+	Max time.Duration
+
+	// Determines whether a small randomization is applied to
+	// the duration.
+	Jitter bool
+
+	// The factor by which you want the duration to increase
+	// every time.
+	Factor int64
+
+	// Func is the function that will be called when the countdown reaches 0.
+	Func func()
+
+	// Clock provides the AfterFunc function used to call func.
+	// It is exposed here so it's easier to mock it in tests.
+	Clock clock.Clock
+}
+
+// Start implements the Timer interface.
+// Any existing timer execution is stopped before
+// a new one is created.
+func (t *BackoffTimer) Start() {
+	if t.timer != nil {
+		t.timer.Stop()
+	}
+	t.timer = t.config.Clock.AfterFunc(t.currentDuration, t.config.Func)
+
+	// Since it's a backoff timer we will increase
+	// the duration after each signal.
+	t.increaseDuration()
+}
+
+// Reset implements the Timer interface.
+func (t *BackoffTimer) Reset() {
+	if t.timer != nil {
+		t.timer.Stop()
+	}
+	if t.currentDuration > t.config.Min {
+		t.currentDuration = t.config.Min
+	}
+}
+
+// increaseDuration will increase the duration based on
+// the current value and the factor. If jitter is true
+// it will add up to ±3% of jitter to the final value.
+func (t *BackoffTimer) increaseDuration() {
+	current := int64(t.currentDuration)
+	nextDuration := time.Duration(current * t.config.Factor)
+	if t.config.Jitter {
+		// Get a factor in [-1; 1].
+		randFactor := (rand.Float64() * 2) - 1
+		jitter := float64(nextDuration) * randFactor * 0.03
+		nextDuration = nextDuration + time.Duration(jitter)
+	}
+	if nextDuration > t.config.Max {
+		nextDuration = t.config.Max
+	}
+	t.currentDuration = nextDuration
+}
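A sketch of driving the BackoffTimer; it assumes clock.WallClock from the companion github.com/juju/utils/clock package (vendored elsewhere in this change) as the Clock implementation:

package main

import (
	"fmt"
	"time"

	"github.com/juju/utils"
	"github.com/juju/utils/clock"
)

func main() {
	fired := make(chan struct{})
	t := utils.NewBackoffTimer(utils.BackoffTimerConfig{
		Min:    100 * time.Millisecond,
		Max:    5 * time.Second,
		Jitter: true,
		Factor: 2, // 100ms, 200ms, 400ms, ... capped at 5s
		Func:   func() { close(fired) },
		Clock:  clock.WallClock, // assumed wall-clock implementation
	})
	t.Start()
	<-fired
	fmt.Println("first backoff interval elapsed")
}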
diff --git a/automation/vendor/github.com/juju/utils/tls.go b/automation/vendor/github.com/juju/utils/tls.go
new file mode 100644
index 0000000..b805af8
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/tls.go
@@ -0,0 +1,72 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"crypto/tls"
+	"net/http"
+	"time"
+)
+
+// NewHttpTLSTransport returns a new http.Transport constructed with the TLS config
+// and the necessary parameters for Juju.
+func NewHttpTLSTransport(tlsConfig *tls.Config) *http.Transport {
+	// See https://code.google.com/p/go/issues/detail?id=4677
+	// We need to force the connection to close each time so that we don't
+	// hit the above Go bug.
+	transport := &http.Transport{
+		Proxy:               http.ProxyFromEnvironment,
+		TLSClientConfig:     tlsConfig,
+		DisableKeepAlives:   true,
+		TLSHandshakeTimeout: 10 * time.Second,
+	}
+	installHTTPDialShim(transport)
+	registerFileProtocol(transport)
+	return transport
+}
+
+// knownGoodCipherSuites contains the list of secure cipher suites to use
+// with tls.Config. This list matches those that Go 1.6 implements from
+// https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations.
+//
+// https://tools.ietf.org/html/rfc7525#section-4.2 excludes RSA exchange completely
+// so we could be more strict if all our clients will support
+// TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256/384. Unfortunately Go's crypto library
+// is limited and doesn't support DHE-RSA-AES256-GCM-SHA384 and
+// DHE-RSA-AES256-SHA256, which are part of the recommended set.
+//
+// Unfortunately we can't drop the RSA algorithms because our servers aren't
+// generating ECDHE keys.
+var knownGoodCipherSuites = []uint16{
+	// These are technically useless for Juju, since we use an RSA certificate,
+	// but they also don't hurt anything, and supporting an ECDSA certificate
+	// could be useful in the future.
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+
+	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+
+	// Windows doesn't support GCM currently, so we need these for RSA support.
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+
+	// We need this so that we have at least one suite in common
+	// with the default gnutls installed for precise and trusty.
+	tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+}
+
+// SecureTLSConfig returns a tls.Config that conforms to Juju's security
+// standards, so as to avoid known security vulnerabilities in certain
+// configurations.
+//
+// Currently it excludes RC4 implementations from the available ciphersuites,
+// requires ciphersuites that provide forward secrecy, and sets the minimum TLS
+// version to 1.2.
+func SecureTLSConfig() *tls.Config {
+	return &tls.Config{
+		CipherSuites: knownGoodCipherSuites,
+		MinVersion:   tls.VersionTLS12,
+	}
+}
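Combining the two helpers gives an HTTP client pinned to TLS 1.2 and the vetted cipher suites; a sketch:

package main

import (
	"net/http"

	"github.com/juju/utils"
)

func main() {
	client := &http.Client{
		Transport: utils.NewHttpTLSTransport(utils.SecureTLSConfig()),
	}
	_ = client // use as a normal *http.Client
}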
diff --git a/automation/vendor/github.com/juju/utils/trivial.go b/automation/vendor/github.com/juju/utils/trivial.go
new file mode 100644
index 0000000..642e213
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/trivial.go
@@ -0,0 +1,147 @@
+// Copyright 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"encoding/hex"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"unicode"
+)
+
+// TODO(ericsnow) Move the quoting helpers into the shell package?
+
+// ShQuote quotes s so that when read by bash, no metacharacters
+// within s will be interpreted as such.
+func ShQuote(s string) string {
+	// single-quote becomes single-quote, double-quote, single-quote, double-quote, single-quote
+	return `'` + strings.Replace(s, `'`, `'"'"'`, -1) + `'`
+}
+
+// WinPSQuote quotes s so that when read by powershell, no metacharacters
+// within s will be interpreted as such.
+func WinPSQuote(s string) string {
+	// See http://ss64.com/ps/syntax-esc.html#quotes.
+	// Double quotes inside single quotes are fine, double single quotes inside
+	// single quotes, not so much so. Having double quoted strings inside single
+	// quoted strings, ensure no expansion happens.
+	return `'` + strings.Replace(s, `'`, `"`, -1) + `'`
+}
+
+// WinCmdQuote quotes s so that when read by cmd.exe, no metacharacters
+// within s will be interpreted as such.
+func WinCmdQuote(s string) string {
+	// See http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx.
+	quoted := winCmdQuote(s)
+	return winCmdEscapeMeta(quoted)
+}
+
+func winCmdQuote(s string) string {
+	var escaped string
+	for _, c := range s {
+		switch c {
+		case '\\', '"':
+			escaped += `\`
+		}
+		escaped += string(c)
+	}
+	return `"` + escaped + `"`
+}
+
+func winCmdEscapeMeta(str string) string {
+	const meta = `()%!^"<>&|`
+	var newStr string
+	for _, c := range str {
+		if strings.Contains(meta, string(c)) {
+			newStr += "^"
+		}
+		newStr += string(c)
+	}
+	return newStr
+}
+
+// CommandString flattens a sequence of command arguments into a
+// string suitable for executing in a shell, escaping slashes,
+// variables and quotes as necessary; each argument is double-quoted
+// if and only if necessary.
+func CommandString(args ...string) string {
+	var buf bytes.Buffer
+	for i, arg := range args {
+		needsQuotes := false
+		var argBuf bytes.Buffer
+		for _, r := range arg {
+			if unicode.IsSpace(r) {
+				needsQuotes = true
+			} else if r == '"' || r == '$' || r == '\\' {
+				needsQuotes = true
+				argBuf.WriteByte('\\')
+			}
+			argBuf.WriteRune(r)
+		}
+		if i > 0 {
+			buf.WriteByte(' ')
+		}
+		if needsQuotes {
+			buf.WriteByte('"')
+			argBuf.WriteTo(&buf)
+			buf.WriteByte('"')
+		} else {
+			argBuf.WriteTo(&buf)
+		}
+	}
+	return buf.String()
+}
+
+// Gzip compresses the given data.
+func Gzip(data []byte) []byte {
+	var buf bytes.Buffer
+	w := gzip.NewWriter(&buf)
+	if _, err := w.Write(data); err != nil {
+		// Compression should never fail unless it fails
+		// to write to the underlying writer, which is a bytes.Buffer
+		// that never fails.
+		panic(err)
+	}
+	if err := w.Close(); err != nil {
+		panic(err)
+	}
+	return buf.Bytes()
+}
+
+// Gunzip uncompresses the given data.
+func Gunzip(data []byte) ([]byte, error) {
+	r, err := gzip.NewReader(bytes.NewReader(data))
+	if err != nil {
+		return nil, err
+	}
+	return ioutil.ReadAll(r)
+}
+
+// ReadSHA256 returns the SHA256 hash of the contents read from source
+// (hex encoded) and the size of the source in bytes.
+func ReadSHA256(source io.Reader) (string, int64, error) {
+	hash := sha256.New()
+	size, err := io.Copy(hash, source)
+	if err != nil {
+		return "", 0, err
+	}
+	digest := hex.EncodeToString(hash.Sum(nil))
+	return digest, size, nil
+}
+
+// ReadFileSHA256 is like ReadSHA256 but reads the contents of the
+// given file.
+func ReadFileSHA256(filename string) (string, int64, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return "", 0, err
+	}
+	defer f.Close()
+	return ReadSHA256(f)
+}
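A few of the helpers above in action:

package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	fmt.Println(utils.ShQuote("it's done"))         // 'it'"'"'s done'
	fmt.Println(utils.CommandString("echo", "a b")) // echo "a b"

	// Round-trip through gzip.
	data, err := utils.Gunzip(utils.Gzip([]byte("payload")))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // payload
}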
diff --git a/automation/vendor/github.com/juju/utils/username.go b/automation/vendor/github.com/juju/utils/username.go
new file mode 100644
index 0000000..5107e3b
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/username.go
@@ -0,0 +1,77 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"os"
+	"os/user"
+
+	"github.com/juju/errors"
+)
+
+// ResolveSudo returns the original username if sudo was used. The
+// original username is extracted from the OS environment.
+func ResolveSudo(username string) string {
+	return resolveSudo(username, os.Getenv)
+}
+
+func resolveSudo(username string, getenvFunc func(string) string) string {
+	if username != "root" {
+		return username
+	}
+	// sudo was probably called, get the original user.
+	if username := getenvFunc("SUDO_USER"); username != "" {
+		return username
+	}
+	return username
+}
+
+// EnvUsername returns the username from the OS environment.
+func EnvUsername() (string, error) {
+	return os.Getenv("USER"), nil
+}
+
+// OSUsername returns the username of the current OS user (based on UID).
+func OSUsername() (string, error) {
+	u, err := user.Current()
+	if err != nil {
+		return "", errors.Trace(err)
+	}
+	return u.Username, nil
+}
+
+// ResolveUsername returns the username determined by the provided
+// functions. The functions are tried in the same order in which they
+// were passed in. An error returned from any of them is immediately
+// returned. If an empty string is returned then that signals that the
+// function did not find the username and the next function is tried.
+// Once a username is found, the provided resolveSudo func (if any) is
+// called with that username and the result is returned. If no username
+// is found then errors.NotFound is returned.
+func ResolveUsername(resolveSudo func(string) string, usernameFuncs ...func() (string, error)) (string, error) {
+	for _, usernameFunc := range usernameFuncs {
+		username, err := usernameFunc()
+		if err != nil {
+			return "", errors.Trace(err)
+		}
+		if username != "" {
+			if resolveSudo != nil {
+				if original := resolveSudo(username); original != "" {
+					username = original
+				}
+			}
+			return username, nil
+		}
+	}
+	return "", errors.NotFoundf("username")
+}
+
+// LocalUsername determines the current username on the local host.
+func LocalUsername() (string, error) {
+	username, err := ResolveUsername(ResolveSudo, EnvUsername, OSUsername)
+	if err != nil {
+		return "", errors.Annotatef(err, "cannot get current user from the environment: %v", os.Environ())
+	}
+	return username, nil
+}
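LocalUsername wires the pieces together: $USER first, then the UID lookup, with sudo unwrapping applied to whichever hit. A sketch:

package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	username, err := utils.LocalUsername()
	if err != nil {
		panic(err)
	}
	fmt.Println("running as:", username)
}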
diff --git a/automation/vendor/github.com/juju/utils/uuid.go b/automation/vendor/github.com/juju/utils/uuid.go
new file mode 100644
index 0000000..2404efc
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/uuid.go
@@ -0,0 +1,90 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"regexp"
+	"strings"
+)
+
+// UUID represents a universal identifier with 16 octets.
+type UUID [16]byte
+
+// regex for validating that the UUID matches RFC 4122.
+// This package generates version 4 UUIDs but
+// accepts any UUID version.
+// http://www.ietf.org/rfc/rfc4122.txt
+var (
+	block1 = "[0-9a-f]{8}"
+	block2 = "[0-9a-f]{4}"
+	block3 = "[0-9a-f]{4}"
+	block4 = "[0-9a-f]{4}"
+	block5 = "[0-9a-f]{12}"
+
+	UUIDSnippet = block1 + "-" + block2 + "-" + block3 + "-" + block4 + "-" + block5
+	validUUID   = regexp.MustCompile("^" + UUIDSnippet + "$")
+)
+
+func UUIDFromString(s string) (UUID, error) {
+	if !IsValidUUIDString(s) {
+		return UUID{}, fmt.Errorf("invalid UUID: %q", s)
+	}
+	s = strings.Replace(s, "-", "", 4)
+	raw, err := hex.DecodeString(s)
+	if err != nil {
+		return UUID{}, err
+	}
+	var uuid UUID
+	copy(uuid[:], raw)
+	return uuid, nil
+}
+
+// IsValidUUIDString returns true if the given string matches a valid UUID (any version).
+func IsValidUUIDString(s string) bool {
+	return validUUID.MatchString(s)
+}
+
+// MustNewUUID returns a new uuid, if an error occurs it panics.
+func MustNewUUID() UUID {
+	uuid, err := NewUUID()
+	if err != nil {
+		panic(err)
+	}
+	return uuid
+}
+
+// NewUUID generates a new version 4 UUID relying only on random numbers.
+func NewUUID() (UUID, error) {
+	uuid := UUID{}
+	if _, err := io.ReadFull(rand.Reader, []byte(uuid[0:16])); err != nil {
+		return UUID{}, err
+	}
+	// Set version (4) and variant (2) according to RFC 4122.
+	var version byte = 4 << 4
+	var variant byte = 8 << 4
+	uuid[6] = version | (uuid[6] & 15)
+	uuid[8] = variant | (uuid[8] & 15)
+	return uuid, nil
+}
+
+// Copy returns a copy of the UUID.
+func (uuid UUID) Copy() UUID {
+	uuidCopy := uuid
+	return uuidCopy
+}
+
+// Raw returns a copy of the UUID bytes.
+func (uuid UUID) Raw() [16]byte {
+	return [16]byte(uuid)
+}
+
+// String returns a hexadecimal string representation with
+// standardized separators.
+func (uuid UUID) String() string {
+	return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:16])
+}
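Generation and parsing round-trip through the canonical string form:

package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	uuid := utils.MustNewUUID()
	fmt.Println(uuid) // e.g. 1b4e28ba-2fa1-41d2-883f-0016d3cca427 (random example)

	parsed, err := utils.UUIDFromString(uuid.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed == uuid) // true; UUID is a comparable [16]byte
}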
diff --git a/automation/vendor/github.com/juju/utils/yaml.go b/automation/vendor/github.com/juju/utils/yaml.go
new file mode 100644
index 0000000..2d443a0
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/yaml.go
@@ -0,0 +1,107 @@
+// Copyright 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/juju/errors"
+
+	"gopkg.in/yaml.v2"
+)
+
+// WriteYaml marshals obj as yaml to a temporary file in the same directory
+// as path, then atomically replaces path with the temporary file.
+func WriteYaml(path string, obj interface{}) error {
+	data, err := yaml.Marshal(obj)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	dir := filepath.Dir(path)
+	f, err := ioutil.TempFile(dir, "juju")
+	if err != nil {
+		return errors.Trace(err)
+	}
+	tmp := f.Name()
+	if _, err := f.Write(data); err != nil {
+		f.Close()      // don't leak file handle
+		os.Remove(tmp) // don't leak half written files on disk
+		return errors.Trace(err)
+	}
+	// Explicitly close the file before moving it. This is needed on Windows
+	// where the OS will not allow us to move a file that still has an open
+	// file handle. Must check the error on close because filesystems can delay
+	// reporting errors until the file is closed.
+	if err := f.Close(); err != nil {
+		os.Remove(tmp) // don't leak half written files on disk
+		return errors.Trace(err)
+	}
+
+	// ioutil.TempFile creates files 0600, but this function has a contract
+	// that files will be world readable, 0644 after replacement.
+	if err := os.Chmod(tmp, 0644); err != nil {
+		os.Remove(tmp) // remove file with incorrect permissions.
+		return errors.Trace(err)
+	}
+
+	return ReplaceFile(tmp, path)
+}
+
+// ReadYaml unmarshals the yaml contained in the file at path into obj. See
+// goyaml.Unmarshal. If path is not found, the error returned will be compatible
+// with os.IsNotExist.
+func ReadYaml(path string, obj interface{}) error {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return err // cannot wrap here because callers check for NotFound.
+	}
+	return yaml.Unmarshal(data, obj)
+}
+
+// ConformYAML ensures all keys of any nested maps are strings.  This is
+// necessary because YAML unmarshals map[interface{}]interface{} in nested
+// maps, which cannot be serialized by json or bson. Also, handle
+// []interface{}. cf. gopkg.in/juju/charm.v4/actions.go cleanse
+func ConformYAML(input interface{}) (interface{}, error) {
+	switch typedInput := input.(type) {
+
+	case map[string]interface{}:
+		newMap := make(map[string]interface{})
+		for key, value := range typedInput {
+			newValue, err := ConformYAML(value)
+			if err != nil {
+				return nil, err
+			}
+			newMap[key] = newValue
+		}
+		return newMap, nil
+
+	case map[interface{}]interface{}:
+		newMap := make(map[string]interface{})
+		for key, value := range typedInput {
+			typedKey, ok := key.(string)
+			if !ok {
+				return nil, errors.New("map keyed with non-string value")
+			}
+			newMap[typedKey] = value
+		}
+		return ConformYAML(newMap)
+
+	case []interface{}:
+		newSlice := make([]interface{}, len(typedInput))
+		for i, sliceValue := range typedInput {
+			newSliceValue, err := ConformYAML(sliceValue)
+			if err != nil {
+				return nil, err
+			}
+			newSlice[i] = newSliceValue
+		}
+		return newSlice, nil
+
+	default:
+		return input, nil
+	}
+}
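WriteYaml and ReadYaml pair up for atomic, world-readable config files; the path below is hypothetical:

package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	path := "/tmp/example-config.yaml" // hypothetical path
	cfg := map[string]interface{}{"name": "node-1", "port": 8080}

	if err := utils.WriteYaml(path, cfg); err != nil {
		panic(err)
	}
	var loaded map[string]interface{}
	if err := utils.ReadYaml(path, &loaded); err != nil {
		panic(err)
	}
	fmt.Println(loaded["name"], loaded["port"]) // node-1 8080
}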
diff --git a/automation/vendor/github.com/juju/utils/zfile_windows.go b/automation/vendor/github.com/juju/utils/zfile_windows.go
new file mode 100644
index 0000000..b1a50f1
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/zfile_windows.go
@@ -0,0 +1,28 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// mksyscall_windows.pl -l32 file_windows.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+package utils
+
+import "unsafe"
+import "syscall"
+
+var (
+	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+	procMoveFileExW = modkernel32.NewProc("MoveFileExW")
+)
+
+func moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(lpExistingFileName)), uintptr(unsafe.Pointer(lpNewFileName)), uintptr(dwFlags))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = error(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
diff --git a/automation/vendor/github.com/juju/version/LICENSE b/automation/vendor/github.com/juju/version/LICENSE
new file mode 100644
index 0000000..5cec73f
--- /dev/null
+++ b/automation/vendor/github.com/juju/version/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply. 
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/version/README.md b/automation/vendor/github.com/juju/version/README.md
new file mode 100644
index 0000000..2d1b48b
--- /dev/null
+++ b/automation/vendor/github.com/juju/version/README.md
@@ -0,0 +1,3 @@
+# Version [![GoDoc](https://godoc.org/github.com/juju/version?status.svg)](https://godoc.org/github.com/juju/version) 
+version is a go package for intelligent version comparisons.
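+
+A minimal usage sketch (an editor's illustration, not part of the upstream
+README), relying only on `Parse`, `MustParse`, and `Compare` from the
+`version.go` shown below:
+
+```Go
+package main
+
+import (
+    "fmt"
+
+    "github.com/juju/version"
+)
+
+func main() {
+    release := version.MustParse("2.0.1")
+    beta, err := version.Parse("2.0-beta1")
+    if err != nil {
+        panic(err)
+    }
+    // Tagged versions sort before their untagged release, so this prints 1:
+    // 2.0.1 > 2.0-beta1.
+    fmt.Println(release.Compare(beta))
+}
+```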
+
diff --git a/automation/vendor/github.com/juju/version/version.go b/automation/vendor/github.com/juju/version/version.go
new file mode 100644
index 0000000..026e99f
--- /dev/null
+++ b/automation/vendor/github.com/juju/version/version.go
@@ -0,0 +1,297 @@
+// Copyright 2012, 2013 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package version implements version parsing.
+package version
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"gopkg.in/mgo.v2/bson"
+)
+
+// Number represents a version number.
+type Number struct {
+	Major int
+	Minor int
+	Tag   string
+	Patch int
+	Build int
+}
+
+// Zero is occasionally convenient and readable.
+// Please don't change its value.
+var Zero = Number{}
+
+// Binary specifies a binary version of juju.
+type Binary struct {
+	Number
+	Series string
+	Arch   string
+}
+
+// String returns the string representation of the binary version.
+func (b Binary) String() string {
+	return fmt.Sprintf("%v-%s-%s", b.Number, b.Series, b.Arch)
+}
+
+// GetBSON implements bson.Getter.
+func (b Binary) GetBSON() (interface{}, error) {
+	return b.String(), nil
+}
+
+// SetBSON implements bson.Setter.
+func (b *Binary) SetBSON(raw bson.Raw) error {
+	var s string
+	err := raw.Unmarshal(&s)
+	if err != nil {
+		return err
+	}
+	v, err := ParseBinary(s)
+	if err != nil {
+		return err
+	}
+	*b = v
+	return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (b Binary) MarshalJSON() ([]byte, error) {
+	return json.Marshal(b.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (b *Binary) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	v, err := ParseBinary(s)
+	if err != nil {
+		return err
+	}
+	*b = v
+	return nil
+}
+
+// MarshalYAML implements yaml.v2.Marshaller interface.
+func (b Binary) MarshalYAML() (interface{}, error) {
+	return b.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaller interface.
+func (b *Binary) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var vstr string
+	err := unmarshal(&vstr)
+	if err != nil {
+		return err
+	}
+	v, err := ParseBinary(vstr)
+	if err != nil {
+		return err
+	}
+	*b = v
+	return nil
+}
+
+var (
+	binaryPat = regexp.MustCompile(`^(\d{1,9})\.(\d{1,9})(?:\.|-([a-z]+))(\d{1,9})(\.\d{1,9})?-([^-]+)-([^-]+)$`)
+	numberPat = regexp.MustCompile(`^(\d{1,9})\.(\d{1,9})(?:\.|-([a-z]+))(\d{1,9})(\.\d{1,9})?$`)
+)
+
+// MustParse parses a version and panics if it does
+// not parse correctly.
+func MustParse(s string) Number {
+	v, err := Parse(s)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// MustParseBinary parses a binary version and panics if it does
+// not parse correctly.
+func MustParseBinary(s string) Binary {
+	b, err := ParseBinary(s)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
+// ParseBinary parses a binary version of the form "1.2.3-series-arch".
+func ParseBinary(s string) (Binary, error) {
+	m := binaryPat.FindStringSubmatch(s)
+	if m == nil {
+		return Binary{}, fmt.Errorf("invalid binary version %q", s)
+	}
+	var b Binary
+	b.Major = atoi(m[1])
+	b.Minor = atoi(m[2])
+	b.Tag = m[3]
+	b.Patch = atoi(m[4])
+	if m[5] != "" {
+		b.Build = atoi(m[5][1:])
+	}
+	b.Series = m[6]
+	b.Arch = m[7]
+	return b, nil
+}
+
+// Parse parses the version, which is of the form 1.2.3
+// giving the major, minor and patch versions
+// respectively.
+func Parse(s string) (Number, error) {
+	m := numberPat.FindStringSubmatch(s)
+	if m == nil {
+		return Number{}, fmt.Errorf("invalid version %q", s)
+	}
+	var n Number
+	n.Major = atoi(m[1])
+	n.Minor = atoi(m[2])
+	n.Tag = m[3]
+	n.Patch = atoi(m[4])
+	if m[5] != "" {
+		n.Build = atoi(m[5][1:])
+	}
+	return n, nil
+}
+
+// atoi is the same as strconv.Atoi but assumes that
+// the string has been verified to be a valid integer.
+func atoi(s string) int {
+	n, err := strconv.Atoi(s)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+// String returns the string representation of this Number.
+func (n Number) String() string {
+	var s string
+	if n.Tag == "" {
+		s = fmt.Sprintf("%d.%d.%d", n.Major, n.Minor, n.Patch)
+	} else {
+		s = fmt.Sprintf("%d.%d-%s%d", n.Major, n.Minor, n.Tag, n.Patch)
+	}
+	if n.Build > 0 {
+		s += fmt.Sprintf(".%d", n.Build)
+	}
+	return s
+}
+
+// Compare returns -1, 0 or 1 depending on whether
+// n is less than, equal to or greater than other.
+// The comparison considers Major, then Minor, then Tag, then Patch, then
+// Build, using the first difference as the result.
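+// For example, MustParse("2.0-beta1").Compare(MustParse("2.0.0")) is -1,
+// since an empty Tag sorts after any tag.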
+func (n Number) Compare(other Number) int {
+	if n == other {
+		return 0
+	}
+	less := false
+	switch {
+	case n.Major != other.Major:
+		less = n.Major < other.Major
+	case n.Minor != other.Minor:
+		less = n.Minor < other.Minor
+	case n.Tag != other.Tag:
+		switch {
+		case n.Tag == "":
+			less = false
+		case other.Tag == "":
+			less = true
+		default:
+			less = n.Tag < other.Tag
+		}
+	case n.Patch != other.Patch:
+		less = n.Patch < other.Patch
+	case n.Build != other.Build:
+		less = n.Build < other.Build
+	}
+	if less {
+		return -1
+	}
+	return 1
+}
+
+// GetBSON implements bson.Getter.
+func (n Number) GetBSON() (interface{}, error) {
+	return n.String(), nil
+}
+
+// SetBSON implements bson.Setter.
+func (n *Number) SetBSON(raw bson.Raw) error {
+	var s string
+	err := raw.Unmarshal(&s)
+	if err != nil {
+		return err
+	}
+	v, err := Parse(s)
+	if err != nil {
+		return err
+	}
+	*n = v
+	return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (n Number) MarshalJSON() ([]byte, error) {
+	return json.Marshal(n.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (n *Number) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	v, err := Parse(s)
+	if err != nil {
+		return err
+	}
+	*n = v
+	return nil
+}
+
+// MarshalYAML implements yaml.v2.Marshaller interface
+func (n Number) MarshalYAML() (interface{}, error) {
+	return n.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaller interface
+func (n *Number) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var vstr string
+	err := unmarshal(&vstr)
+	if err != nil {
+		return err
+	}
+	v, err := Parse(vstr)
+	if err != nil {
+		return err
+	}
+	*n = v
+	return nil
+}
+
+// ParseMajorMinor takes an argument of the form "major.minor" and returns ints major and minor.
+func ParseMajorMinor(vers string) (int, int, error) {
+	parts := strings.Split(vers, ".")
+	major, err := strconv.Atoi(parts[0])
+	minor := -1
+	if err != nil {
+		return -1, -1, fmt.Errorf("invalid major version number %s: %v", parts[0], err)
+	}
+	if len(parts) == 2 {
+		minor, err = strconv.Atoi(parts[1])
+		if err != nil {
+			return -1, -1, fmt.Errorf("invalid minor version number %s: %v", parts[1], err)
+		}
+	} else if len(parts) > 2 {
+		return -1, -1, fmt.Errorf("invalid major.minor version number %s", vers)
+	}
+	return major, minor, nil
+}
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/LICENSE b/automation/vendor/github.com/kelseyhightower/envconfig/LICENSE
new file mode 100644
index 0000000..4bfa7a8
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Kelsey Hightower
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS b/automation/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
new file mode 100644
index 0000000..6527a9f
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
@@ -0,0 +1,2 @@
+Kelsey Hightower kelsey.hightower@gmail.com github.com/kelseyhightower
+Travis Parker    travis.parker@gmail.com    github.com/teepark
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/README.md b/automation/vendor/github.com/kelseyhightower/envconfig/README.md
new file mode 100644
index 0000000..09de74b
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/README.md
@@ -0,0 +1,175 @@
+# envconfig
+
+[![Build Status](https://travis-ci.org/kelseyhightower/envconfig.png)](https://travis-ci.org/kelseyhightower/envconfig)
+
+```Go
+import "github.com/kelseyhightower/envconfig"
+```
+
+## Documentation
+
+See [godoc](http://godoc.org/github.com/kelseyhightower/envconfig)
+
+## Usage
+
+Set some environment variables:
+
+```Bash
+export MYAPP_DEBUG=false
+export MYAPP_PORT=8080
+export MYAPP_USER=Kelsey
+export MYAPP_RATE="0.5"
+export MYAPP_TIMEOUT="3m"
+export MYAPP_USERS="rob,ken,robert"
+```
+
+Write some code:
+
+```Go
+package main
+
+import (
+    "fmt"
+    "log"
+    "time"
+
+    "github.com/kelseyhightower/envconfig"
+)
+
+type Specification struct {
+    Debug   bool
+    Port    int
+    User    string
+    Users   []string
+    Rate    float32
+    Timeout time.Duration
+}
+
+func main() {
+    var s Specification
+    err := envconfig.Process("myapp", &s)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+    format := "Debug: %v\nPort: %d\nUser: %s\nRate: %f\nTimeout: %s\n"
+    _, err = fmt.Printf(format, s.Debug, s.Port, s.User, s.Rate, s.Timeout)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+
+    fmt.Println("Users:")
+    for _, u := range s.Users {
+        fmt.Printf("  %s\n", u)
+    }
+}
+```
+
+Results:
+
+```Bash
+Debug: false
+Port: 8080
+User: Kelsey
+Rate: 0.500000
+Timeout: 3m0s
+Users:
+  rob
+  ken
+  robert
+```
+
+## Struct Tag Support
+
+Envconfig supports the use of struct tags to specify alternate, default, and required
+environment variables.
+
+For example, consider the following struct:
+
+```Go
+type Specification struct {
+    ManualOverride1 string `envconfig:"manual_override_1"`
+    DefaultVar      string `default:"foobar"`
+    RequiredVar     string `required:"true"`
+    IgnoredVar      string `ignored:"true"`
+    AutoSplitVar    string `split_words:"true"`
+}
+```
+
+Envconfig has automatic support for CamelCased struct elements when the
+`split_words:"true"` tag is supplied. Without this tag, `AutoSplitVar` above
+would look for an environment variable called `MYAPP_AUTOSPLITVAR`. With the
+setting applied it will look for `MYAPP_AUTO_SPLIT_VAR`. Note that numbers
+will get globbed into the previous word. If the setting does not do the
+right thing, you may use a manual override.
+
+Envconfig will process the value for `ManualOverride1` by populating it with the
+value for `MYAPP_MANUAL_OVERRIDE_1`. Without this struct tag, it would have
+instead looked up `MYAPP_MANUALOVERRIDE1`. With the `split_words:"true"` tag
+it would have looked up `MYAPP_MANUAL_OVERRIDE1`.
+
+```Bash
+export MYAPP_MANUAL_OVERRIDE_1="this will be the value"
+
+# export MYAPP_MANUALOVERRIDE1="and this will not"
+```
+
+If envconfig can't find an environment variable value for `MYAPP_DEFAULTVAR`,
+it will populate it with "foobar" as a default value.
+
+If envconfig can't find an environment variable value for `MYAPP_REQUIREDVAR`,
+it will return an error when asked to process the struct.
+
+If envconfig can't find an environment variable in the form `PREFIX_MYVAR`, and there
+is a struct tag defined, it will try to populate your variable with an environment
+variable that directly matches the envconfig tag in your struct definition:
+
+```shell
+export SERVICE_HOST=127.0.0.1
+export MYAPP_DEBUG=true
+```
+```Go
+type Specification struct {
+    ServiceHost string `envconfig:"SERVICE_HOST"`
+    Debug       bool
+}
+```
+
+Envconfig won't process a field with the "ignored" tag set to "true", even if a corresponding
+environment variable is set.
+
+## Supported Struct Field Types
+
+envconfig supports these struct field types:
+
+  * string
+  * int, int8, int16, int32, int64
+  * uint, uint8, uint16, uint32, uint64
+  * bool
+  * float32, float64
+  * time.Duration
+  * slices of any supported type
+  * [encoding.TextUnmarshaler](https://golang.org/pkg/encoding/#TextUnmarshaler)
+
+Embedded structs using these fields are also supported.
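+
+As a sketch (an editor's illustration, not part of the upstream README), an
+anonymous embedded struct shares its parent's prefix, so both fields of
+`Server` below resolve against `MYAPP_`:
+
+```Go
+package main
+
+import (
+    "fmt"
+    "log"
+
+    "github.com/kelseyhightower/envconfig"
+)
+
+type Server struct {
+    Host string // looked up as MYAPP_HOST
+    Port int    // looked up as MYAPP_PORT
+}
+
+type Specification struct {
+    Server      // embedded, so no extra prefix segment is added
+    Debug  bool // looked up as MYAPP_DEBUG
+}
+
+func main() {
+    var s Specification
+    if err := envconfig.Process("myapp", &s); err != nil {
+        log.Fatal(err)
+    }
+    fmt.Printf("%s:%d debug=%v\n", s.Host, s.Port, s.Debug)
+}
+```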
+
+## Custom Decoders
+
+Any field whose type (or pointer-to-type) implements `envconfig.Decoder` can
+control its own deserialization:
+
+```Bash
+export DNS_SERVER=8.8.8.8
+```
+
+```Go
+type IPDecoder net.IP
+
+func (ipd *IPDecoder) Decode(value string) error {
+    *ipd = IPDecoder(net.ParseIP(value))
+    return nil
+}
+
+type DNSConfig struct {
+    Address IPDecoder `envconfig:"DNS_SERVER"`
+}
+```
+
+Also, envconfig will use a `Set(string) error` method like from the
+[flag.Value](https://godoc.org/flag#Value) interface if implemented.
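+
+For example (an editor's sketch, not from the upstream README; assumes
+`strings` and `strconv` are imported), a type with a flag.Value-style `Set`
+method:
+
+```Go
+type CommaInts []int
+
+// Set satisfies the same contract as flag.Value.
+func (c *CommaInts) Set(value string) error {
+    for _, part := range strings.Split(value, ",") {
+        n, err := strconv.Atoi(part)
+        if err != nil {
+            return err
+        }
+        *c = append(*c, n)
+    }
+    return nil
+}
+
+type PortConfig struct {
+    Ports CommaInts `envconfig:"PORTS"`
+}
+```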
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/doc.go b/automation/vendor/github.com/kelseyhightower/envconfig/doc.go
new file mode 100644
index 0000000..f28561c
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/doc.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+// Package envconfig implements decoding of environment variables based on a user
+// defined specification. A typical use is using environment variables for
+// configuration settings.
+package envconfig
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/env_os.go b/automation/vendor/github.com/kelseyhightower/envconfig/env_os.go
new file mode 100644
index 0000000..a6a014a
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/env_os.go
@@ -0,0 +1,7 @@
+// +build appengine
+
+package envconfig
+
+import "os"
+
+var lookupEnv = os.LookupEnv
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/env_syscall.go b/automation/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
new file mode 100644
index 0000000..9d98085
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
@@ -0,0 +1,7 @@
+// +build !appengine
+
+package envconfig
+
+import "syscall"
+
+var lookupEnv = syscall.Getenv
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/envconfig.go b/automation/vendor/github.com/kelseyhightower/envconfig/envconfig.go
new file mode 100644
index 0000000..3ad5e7d
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/envconfig.go
@@ -0,0 +1,298 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// ErrInvalidSpecification indicates that a specification is of the wrong type.
+var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
+
+// A ParseError occurs when an environment variable cannot be converted to
+// the type required by a struct field during assignment.
+type ParseError struct {
+	KeyName   string
+	FieldName string
+	TypeName  string
+	Value     string
+	Err       error
+}
+
+// Decoder has the same semantics as Setter, but takes higher precedence.
+// It is provided for historical compatibility.
+type Decoder interface {
+	Decode(value string) error
+}
+
+// Setter is implemented by types that can self-deserialize values.
+// Any type that implements flag.Value also implements Setter.
+type Setter interface {
+	Set(value string) error
+}
+
+func (e *ParseError) Error() string {
+	return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s. details: %[5]s", e.KeyName, e.FieldName, e.Value, e.TypeName, e.Err)
+}
+
+// varInfo maintains information about the configuration variable
+type varInfo struct {
+	Name  string
+	Alt   string
+	Key   string
+	Field reflect.Value
+	Tags  reflect.StructTag
+}
+
+// gatherInfo gathers information about the specified struct.
+func gatherInfo(prefix string, spec interface{}) ([]varInfo, error) {
+	expr := regexp.MustCompile("([^A-Z]+|[A-Z][^A-Z]+|[A-Z]+)")
+	s := reflect.ValueOf(spec)
+
+	if s.Kind() != reflect.Ptr {
+		return nil, ErrInvalidSpecification
+	}
+	s = s.Elem()
+	if s.Kind() != reflect.Struct {
+		return nil, ErrInvalidSpecification
+	}
+	typeOfSpec := s.Type()
+
+	// over allocate an info array, we will extend if needed later
+	infos := make([]varInfo, 0, s.NumField())
+	for i := 0; i < s.NumField(); i++ {
+		f := s.Field(i)
+		ftype := typeOfSpec.Field(i)
+		if !f.CanSet() || ftype.Tag.Get("ignored") == "true" {
+			continue
+		}
+
+		for f.Kind() == reflect.Ptr {
+			if f.IsNil() {
+				if f.Type().Elem().Kind() != reflect.Struct {
+					// nil pointer to a non-struct: leave it alone
+					break
+				}
+				// nil pointer to struct: create a zero instance
+				f.Set(reflect.New(f.Type().Elem()))
+			}
+			f = f.Elem()
+		}
+
+		// Capture information about the config variable
+		info := varInfo{
+			Name:  ftype.Name,
+			Field: f,
+			Tags:  ftype.Tag,
+			Alt:   strings.ToUpper(ftype.Tag.Get("envconfig")),
+		}
+
+		// Default to the field name as the env var name (will be upcased)
+		info.Key = info.Name
+
+		// Best effort to un-pick camel casing as separate words
+		if ftype.Tag.Get("split_words") == "true" {
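+			// e.g. "AutoSplitVar" splits into "Auto", "Split", "Var", is joined
+			// as "Auto_Split_Var", and is upcased to AUTO_SPLIT_VAR below.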
+			words := expr.FindAllStringSubmatch(ftype.Name, -1)
+			if len(words) > 0 {
+				var name []string
+				for _, words := range words {
+					name = append(name, words[0])
+				}
+
+				info.Key = strings.Join(name, "_")
+			}
+		}
+		if info.Alt != "" {
+			info.Key = info.Alt
+		}
+		if prefix != "" {
+			info.Key = fmt.Sprintf("%s_%s", prefix, info.Key)
+		}
+		info.Key = strings.ToUpper(info.Key)
+		infos = append(infos, info)
+
+		if f.Kind() == reflect.Struct {
+			// honor Decode if present
+			if decoderFrom(f) == nil && setterFrom(f) == nil && textUnmarshaler(f) == nil {
+				innerPrefix := prefix
+				if !ftype.Anonymous {
+					innerPrefix = info.Key
+				}
+
+				embeddedPtr := f.Addr().Interface()
+				embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr)
+				if err != nil {
+					return nil, err
+				}
+				infos = append(infos[:len(infos)-1], embeddedInfos...)
+
+				continue
+			}
+		}
+	}
+	return infos, nil
+}
+
+// Process populates the specified struct based on environment variables
+func Process(prefix string, spec interface{}) error {
+	infos, err := gatherInfo(prefix, spec)
+
+	for _, info := range infos {
+
+		// `os.Getenv` cannot differentiate between an explicitly set empty value
+		// and an unset value. `os.LookupEnv` is preferred to `syscall.Getenv`,
+		// but it is only available in go1.5 or newer. We're using Go build tags
+		// here to use os.LookupEnv for >=go1.5
+		value, ok := lookupEnv(info.Key)
+		if !ok && info.Alt != "" {
+			value, ok = lookupEnv(info.Alt)
+		}
+
+		def := info.Tags.Get("default")
+		if def != "" && !ok {
+			value = def
+		}
+
+		req := info.Tags.Get("required")
+		if !ok && def == "" {
+			if req == "true" {
+				return fmt.Errorf("required key %s missing value", info.Key)
+			}
+			continue
+		}
+
+		err := processField(value, info.Field)
+		if err != nil {
+			return &ParseError{
+				KeyName:   info.Key,
+				FieldName: info.Name,
+				TypeName:  info.Field.Type().String(),
+				Value:     value,
+				Err:       err,
+			}
+		}
+	}
+
+	return err
+}
+
+// MustProcess is the same as Process but panics if an error occurs
+func MustProcess(prefix string, spec interface{}) {
+	if err := Process(prefix, spec); err != nil {
+		panic(err)
+	}
+}
+
+func processField(value string, field reflect.Value) error {
+	typ := field.Type()
+
+	decoder := decoderFrom(field)
+	if decoder != nil {
+		return decoder.Decode(value)
+	}
+	// look for Set method if Decode not defined
+	setter := setterFrom(field)
+	if setter != nil {
+		return setter.Set(value)
+	}
+
+	if t := textUnmarshaler(field); t != nil {
+		return t.UnmarshalText([]byte(value))
+	}
+
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		if field.IsNil() {
+			field.Set(reflect.New(typ))
+		}
+		field = field.Elem()
+	}
+
+	switch typ.Kind() {
+	case reflect.String:
+		field.SetString(value)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		var (
+			val int64
+			err error
+		)
+		if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" {
+			var d time.Duration
+			d, err = time.ParseDuration(value)
+			val = int64(d)
+		} else {
+			val, err = strconv.ParseInt(value, 0, typ.Bits())
+		}
+		if err != nil {
+			return err
+		}
+
+		field.SetInt(val)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		val, err := strconv.ParseUint(value, 0, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetUint(val)
+	case reflect.Bool:
+		val, err := strconv.ParseBool(value)
+		if err != nil {
+			return err
+		}
+		field.SetBool(val)
+	case reflect.Float32, reflect.Float64:
+		val, err := strconv.ParseFloat(value, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetFloat(val)
+	case reflect.Slice:
+		vals := strings.Split(value, ",")
+		sl := reflect.MakeSlice(typ, len(vals), len(vals))
+		for i, val := range vals {
+			err := processField(val, sl.Index(i))
+			if err != nil {
+				return err
+			}
+		}
+		field.Set(sl)
+	}
+
+	return nil
+}
+
+func interfaceFrom(field reflect.Value, fn func(interface{}, *bool)) {
+	// it may be impossible for a struct field to fail this check
+	if !field.CanInterface() {
+		return
+	}
+	var ok bool
+	fn(field.Interface(), &ok)
+	if !ok && field.CanAddr() {
+		fn(field.Addr().Interface(), &ok)
+	}
+}
+
+func decoderFrom(field reflect.Value) (d Decoder) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { d, *ok = v.(Decoder) })
+	return d
+}
+
+func setterFrom(field reflect.Value) (s Setter) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { s, *ok = v.(Setter) })
+	return s
+}
+
+func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) })
+	return t
+}
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/usage.go b/automation/vendor/github.com/kelseyhightower/envconfig/usage.go
new file mode 100644
index 0000000..4870237
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/usage.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2016 Kelsey Hightower and others. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"text/template"
+)
+
+const (
+	// DefaultListFormat constant to use to display usage in a list format
+	DefaultListFormat = `This application is configured via the environment. The following environment
+variables can be used:
+{{range .}}
+{{usage_key .}}
+  [description] {{usage_description .}}
+  [type]        {{usage_type .}}
+  [default]     {{usage_default .}}
+  [required]    {{usage_required .}}{{end}}
+`
+	// DefaultTableFormat constant to use to display usage in a tabular format
+	DefaultTableFormat = `This application is configured via the environment. The following environment
+variables can be used:
+
+KEY	TYPE	DEFAULT	REQUIRED	DESCRIPTION
+{{range .}}{{usage_key .}}	{{usage_type .}}	{{usage_default .}}	{{usage_required .}}	{{usage_description .}}
+{{end}}`
+)
+
+var (
+	decoderType     = reflect.TypeOf((*Decoder)(nil)).Elem()
+	setterType      = reflect.TypeOf((*Setter)(nil)).Elem()
+	unmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+func implementsInterface(t reflect.Type) bool {
+	return t.Implements(decoderType) ||
+		reflect.PtrTo(t).Implements(decoderType) ||
+		t.Implements(setterType) ||
+		reflect.PtrTo(t).Implements(setterType) ||
+		t.Implements(unmarshalerType) ||
+		reflect.PtrTo(t).Implements(unmarshalerType)
+}
+
+// toTypeDescription converts Go types into a human readable description
+func toTypeDescription(t reflect.Type) string {
+	switch t.Kind() {
+	case reflect.Array, reflect.Slice:
+		return fmt.Sprintf("Comma-separated list of %s", toTypeDescription(t.Elem()))
+	case reflect.Ptr:
+		return toTypeDescription(t.Elem())
+	case reflect.Struct:
+		if implementsInterface(t) && t.Name() != "" {
+			return t.Name()
+		}
+		return ""
+	case reflect.String:
+		name := t.Name()
+		if name != "" && name != "string" {
+			return name
+		}
+		return "String"
+	case reflect.Bool:
+		name := t.Name()
+		if name != "" && name != "bool" {
+			return name
+		}
+		return "True or False"
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "int") {
+			return name
+		}
+		return "Integer"
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "uint") {
+			return name
+		}
+		return "Unsigned Integer"
+	case reflect.Float32, reflect.Float64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "float") {
+			return name
+		}
+		return "Float"
+	}
+	return fmt.Sprintf("%+v", t)
+}
+
+// Usage writes usage information to stdout using the default header and table format.
+func Usage(prefix string, spec interface{}) error {
+	// The default is to output the usage information as a table
+	// Create tabwriter instance to support table output
+	tabs := tabwriter.NewWriter(os.Stdout, 1, 0, 4, ' ', 0)
+
+	err := Usagef(prefix, spec, tabs, DefaultTableFormat)
+	tabs.Flush()
+	return err
+}
+
+// Usagef writes usage information to the specified io.Writer using the specified template specification.
+func Usagef(prefix string, spec interface{}, out io.Writer, format string) error {
+
+	// Specify the default usage template functions
+	functions := template.FuncMap{
+		"usage_key":         func(v varInfo) string { return v.Key },
+		"usage_description": func(v varInfo) string { return v.Tags.Get("desc") },
+		"usage_type":        func(v varInfo) string { return toTypeDescription(v.Field.Type()) },
+		"usage_default":     func(v varInfo) string { return v.Tags.Get("default") },
+		"usage_required": func(v varInfo) (string, error) {
+			req := v.Tags.Get("required")
+			if req != "" {
+				reqB, err := strconv.ParseBool(req)
+				if err != nil {
+					return "", err
+				}
+				if reqB {
+					req = "true"
+				}
+			}
+			return req, nil
+		},
+	}
+
+	tmpl, err := template.New("envconfig").Funcs(functions).Parse(format)
+	if err != nil {
+		return err
+	}
+
+	return Usaget(prefix, spec, out, tmpl)
+}
+
+// Usaget writes usage information to the specified io.Writer using the specified template
+func Usaget(prefix string, spec interface{}, out io.Writer, tmpl *template.Template) error {
+	// gather first
+	infos, err := gatherInfo(prefix, spec)
+	if err != nil {
+		return err
+	}
+
+	return tmpl.Execute(out, infos)
+}
diff --git a/automation/vendor/github.com/lunixbochs/vtclean/LICENSE b/automation/vendor/github.com/lunixbochs/vtclean/LICENSE
new file mode 100644
index 0000000..42e8263
--- /dev/null
+++ b/automation/vendor/github.com/lunixbochs/vtclean/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Ryan Hileman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/automation/vendor/github.com/lunixbochs/vtclean/README.md b/automation/vendor/github.com/lunixbochs/vtclean/README.md
new file mode 100644
index 0000000..ddd1372
--- /dev/null
+++ b/automation/vendor/github.com/lunixbochs/vtclean/README.md
@@ -0,0 +1,44 @@
+vtclean
+----
+
+Clean up raw terminal output by stripping escape sequences, optionally preserving color.
+
+Get it: `go get github.com/lunixbochs/vtclean/vtclean`
+
+API:
+
+    import "github.com/lunixbochs/vtclean"
+    vtclean.Clean(line string, color bool) string
+
+Command line example:
+
+    $ echo -e '\x1b[1;32mcolor example
+    color forced to stop at end of line
+    backspace is ba\b\bgood
+    no beeps!\x07\x07' | ./vtclean -color
+
+    color example
+    color forced to stop at end of line
+    backspace is good
+    no beeps!
+
+Go example:
+
+    package main
+
+    import (
+        "fmt"
+        "github.com/lunixbochs/vtclean"
+    )
+
+    func main() {
+        line := vtclean.Clean(
+            "\033[1;32mcolor, " +
+            "curs\033[Aor, " +
+            "backspace\b\b\b\b\b\b\b\b\b\b\b\033[K", false)
+        fmt.Println(line)
+    }
+
+Output:
+
+    color, cursor
diff --git a/automation/vendor/github.com/lunixbochs/vtclean/io.go b/automation/vendor/github.com/lunixbochs/vtclean/io.go
new file mode 100644
index 0000000..31be007
--- /dev/null
+++ b/automation/vendor/github.com/lunixbochs/vtclean/io.go
@@ -0,0 +1,93 @@
+package vtclean
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+)
+
+type reader struct {
+	io.Reader
+	scanner *bufio.Scanner
+	buf     []byte
+
+	color bool
+}
+
+func NewReader(r io.Reader, color bool) io.Reader {
+	return &reader{Reader: r, color: color}
+}
+
+func (r *reader) scan() bool {
+	if r.scanner == nil {
+		r.scanner = bufio.NewScanner(r.Reader)
+	}
+	if len(r.buf) > 0 {
+		return true
+	}
+	if r.scanner.Scan() {
+		r.buf = []byte(Clean(r.scanner.Text(), r.color) + "\n")
+		return true
+	}
+	return false
+}
+
+func (r *reader) fill(p []byte) int {
+	n := len(r.buf)
+	copy(p, r.buf)
+	if len(p) < len(r.buf) {
+		r.buf = r.buf[len(p):]
+		n = len(p)
+	} else {
+		r.buf = nil
+	}
+	return n
+}
+
+func (r *reader) Read(p []byte) (int, error) {
+	n := r.fill(p)
+	if n < len(p) {
+		if !r.scan() {
+			if n == 0 {
+				return 0, io.EOF
+			}
+			return n, nil
+		}
+		n += r.fill(p[n:])
+	}
+	return n, nil
+}
+
+type writer struct {
+	io.Writer
+	buf   []byte
+	color bool
+}
+
+func NewWriter(w io.Writer, color bool) io.WriteCloser {
+	return &writer{Writer: w, color: color}
+}
+
+func (w *writer) Write(p []byte) (int, error) {
+	buf := append(w.buf, p...)
+	lines := bytes.Split(buf, []byte("\n"))
+	if len(lines) > 0 {
+		last := len(lines) - 1
+		w.buf = lines[last]
+		count := 0
+		for _, line := range lines[:last] {
+			n, err := w.Writer.Write([]byte(Clean(string(line), w.color) + "\n"))
+			count += n
+			if err != nil {
+				return count, err
+			}
+		}
+	}
+	return len(p), nil
+}
+
+func (w *writer) Close() error {
+	cl := Clean(string(w.buf), w.color)
+	_, err := w.Writer.Write([]byte(cl))
+	return err
+}
diff --git a/automation/vendor/github.com/lunixbochs/vtclean/line.go b/automation/vendor/github.com/lunixbochs/vtclean/line.go
new file mode 100644
index 0000000..7272d91
--- /dev/null
+++ b/automation/vendor/github.com/lunixbochs/vtclean/line.go
@@ -0,0 +1,107 @@
+package vtclean
+
+type char struct {
+	char  byte
+	vt100 []byte
+}
+
+func chars(p []byte) []char {
+	tmp := make([]char, len(p))
+	for i, v := range p {
+		tmp[i].char = v
+	}
+	return tmp
+}
+
+type lineEdit struct {
+	buf       []char
+	pos, size int
+	vt100     []byte
+}
+
+func newLineEdit(length int) *lineEdit {
+	return &lineEdit{buf: make([]char, length)}
+}
+
+func (l *lineEdit) Vt100(p []byte) {
+	l.vt100 = p
+}
+
+func (l *lineEdit) Move(x int) {
+	if x < 0 && l.pos <= -x {
+		l.pos = 0
+	} else if x > 0 && l.pos+x > l.size {
+		l.pos = l.size
+	} else {
+		l.pos += x
+	}
+}
+
+func (l *lineEdit) Write(p []byte) {
+	c := chars(p)
+	if len(c) > 0 {
+		c[0].vt100 = l.vt100
+		l.vt100 = nil
+	}
+	if len(l.buf)-l.pos < len(c) {
+		l.buf = append(l.buf[:l.pos], c...)
+	} else {
+		copy(l.buf[l.pos:], c)
+	}
+	l.pos += len(c)
+	if l.pos > l.size {
+		l.size = l.pos
+	}
+}
+
+func (l *lineEdit) Insert(p []byte) {
+	c := chars(p)
+	if len(c) > 0 {
+		c[0].vt100 = l.vt100
+		l.vt100 = nil
+	}
+	l.size += len(c)
+	c = append(c, l.buf[l.pos:]...)
+	l.buf = append(l.buf[:l.pos], c...)
+}
+
+func (l *lineEdit) Delete(n int) {
+	most := l.size - l.pos
+	if n > most {
+		n = most
+	}
+	copy(l.buf[l.pos:], l.buf[l.pos+n:])
+	l.size -= n
+}
+
+func (l *lineEdit) Clear() {
+	for i := 0; i < len(l.buf); i++ {
+		l.buf[i].char = ' '
+	}
+}
+func (l *lineEdit) ClearLeft() {
+	for i := 0; i < l.pos+1; i++ {
+		l.buf[i].char = ' '
+	}
+}
+func (l *lineEdit) ClearRight() {
+	l.size = l.pos
+}
+
+func (l *lineEdit) Bytes() []byte {
+	length := 0
+	buf := l.buf[:l.size]
+	for _, v := range buf {
+		length += 1 + len(v.vt100)
+	}
+	tmp := make([]byte, 0, length)
+	for _, v := range buf {
+		tmp = append(tmp, v.vt100...)
+		tmp = append(tmp, v.char)
+	}
+	return tmp
+}
+
+func (l *lineEdit) String() string {
+	return string(l.Bytes())
+}
diff --git a/automation/vendor/github.com/lunixbochs/vtclean/regex.txt b/automation/vendor/github.com/lunixbochs/vtclean/regex.txt
new file mode 100644
index 0000000..e55e7f2
--- /dev/null
+++ b/automation/vendor/github.com/lunixbochs/vtclean/regex.txt
@@ -0,0 +1,14 @@
+these are the source definitions for the scary escape code regex
+
+# from tests in Terminal.app, this regex should cover all basic \e[ and \e] cases
+^([\[\]]([\d\?]+)?(;[\d\?]+)*)?.
+
+# this catches any case the above does not
+# make sure to not include any special characters the main regex finds (like ?)
+\[[^a-zA-Z0-9@\?]+.
+
+# esc + paren + any single char
+[\(\)].
+
+# didn't re-check this one (not included)
+[\[K]\d+;\d+
diff --git a/automation/vendor/github.com/lunixbochs/vtclean/vtclean.go b/automation/vendor/github.com/lunixbochs/vtclean/vtclean.go
new file mode 100644
index 0000000..1e74237
--- /dev/null
+++ b/automation/vendor/github.com/lunixbochs/vtclean/vtclean.go
@@ -0,0 +1,81 @@
+package vtclean
+
+import (
+	"bytes"
+	"regexp"
+	"strconv"
+)
+
+// see regex.txt for a slightly separated version of this regex
+var vt100re = regexp.MustCompile(`^\033([\[\]]([\d\?]+)?(;[\d\?]+)*)?(.)`)
+var vt100exc = regexp.MustCompile(`^\033(\[[^a-zA-Z0-9@\?]+|[\(\)]).`)
+
+func Clean(line string, color bool) string {
+	var edit = newLineEdit(len(line))
+	lineb := []byte(line)
+
+	hadColor := false
+	for i := 0; i < len(lineb); {
+		c := lineb[i]
+		switch c {
+		case '\b':
+			edit.Move(-1)
+		case '\033':
+			// set terminal title
+			if bytes.HasPrefix(lineb[i:], []byte("\x1b]0;")) {
+				pos := bytes.Index(lineb[i:], []byte("\a"))
+				if pos != -1 {
+					i += pos + 1
+					continue
+				}
+			}
+			if m := vt100exc.Find(lineb[i:]); m != nil {
+				i += len(m)
+			} else if m := vt100re.FindSubmatch(lineb[i:]); m != nil {
+				i += len(m[0])
+				num := string(m[2])
+				n, err := strconv.Atoi(num)
+				if err != nil || n > 10000 {
+					n = 1
+				}
+				switch m[4][0] {
+				case 'm':
+					if color {
+						hadColor = true
+						edit.Vt100(m[0])
+					}
+				case '@':
+					edit.Insert(bytes.Repeat([]byte{' '}, n))
+				case 'C':
+					edit.Move(n)
+				case 'D':
+					edit.Move(-n)
+				case 'P':
+					edit.Delete(n)
+				case 'K':
+					switch num {
+					case "", "0":
+						edit.ClearRight()
+					case "1":
+						edit.ClearLeft()
+					case "2":
+						edit.Clear()
+					}
+				}
+			} else {
+				i += 1
+			}
+			continue
+		default:
+			if c == '\n' || c >= ' ' {
+				edit.Write([]byte{c})
+			}
+		}
+		i += 1
+	}
+	out := edit.Bytes()
+	if hadColor {
+		out = append(out, []byte("\033[0m")...)
+	}
+	return string(out)
+}
diff --git a/automation/vendor/github.com/mattn/go-colorable/LICENSE b/automation/vendor/github.com/mattn/go-colorable/LICENSE
new file mode 100644
index 0000000..91b5cef
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-colorable/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/automation/vendor/github.com/mattn/go-colorable/README.md b/automation/vendor/github.com/mattn/go-colorable/README.md
new file mode 100644
index 0000000..e84226a
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-colorable/README.md
@@ -0,0 +1,43 @@
+# go-colorable
+
+Colorable writer for windows.
+
+For example, most logger packages don't show colors on Windows. (I know we can do it with ansicon, but I'd rather not.)
+This package makes it possible to handle ANSI color escape sequences on Windows.
+
+## Too Bad!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
+
+
+## So Good!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
+
+## Usage
+
+```go
+logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
+logrus.SetOutput(colorable.NewColorableStdout())
+
+logrus.Info("succeeded")
+logrus.Warn("not correct")
+logrus.Error("something error")
+logrus.Fatal("panic")
+```
+
+The code above also compiles on non-Windows OSs.
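+
+A dependency-free sketch (an editor's addition, not from the upstream README)
+that writes ANSI-colored text through the same writer:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/mattn/go-colorable"
+)
+
+func main() {
+	out := colorable.NewColorableStdout()
+	// On Windows the escape codes are translated to console attributes;
+	// elsewhere they pass through unchanged.
+	fmt.Fprintf(out, "\x1b[32mgreen\x1b[0m and plain\n")
+}
+```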
+
+## Installation
+
+```
+$ go get github.com/mattn/go-colorable
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/automation/vendor/github.com/mattn/go-colorable/colorable_others.go b/automation/vendor/github.com/mattn/go-colorable/colorable_others.go
new file mode 100644
index 0000000..a7fe19a
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-colorable/colorable_others.go
@@ -0,0 +1,27 @@
+// +build !windows
+
+package colorable
+
+import (
+	"io"
+	"os"
+)
+
+// NewColorable returns a new instance of Writer which handles escape sequences.
+func NewColorable(file *os.File) io.Writer {
+	if file == nil {
+		panic("nil passed instead of *os.File to NewColorable()")
+	}
+
+	return file
+}
+
+// NewColorableStdout returns a new instance of Writer which handles escape sequences for stdout.
+func NewColorableStdout() io.Writer {
+	return os.Stdout
+}
+
+// NewColorableStderr return new instance of Writer which handle escape sequence for stderr.
+func NewColorableStderr() io.Writer {
+	return os.Stderr
+}
diff --git a/automation/vendor/github.com/mattn/go-colorable/colorable_windows.go b/automation/vendor/github.com/mattn/go-colorable/colorable_windows.go
new file mode 100644
index 0000000..628ad90
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-colorable/colorable_windows.go
@@ -0,0 +1,820 @@
+package colorable
+
+import (
+	"bytes"
+	"io"
+	"math"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"github.com/mattn/go-isatty"
+)
+
+const (
+	foregroundBlue      = 0x1
+	foregroundGreen     = 0x2
+	foregroundRed       = 0x4
+	foregroundIntensity = 0x8
+	foregroundMask      = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
+	backgroundBlue      = 0x10
+	backgroundGreen     = 0x20
+	backgroundRed       = 0x40
+	backgroundIntensity = 0x80
+	backgroundMask      = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
+)
+
+type wchar uint16
+type short int16
+type dword uint32
+type word uint16
+
+type coord struct {
+	x short
+	y short
+}
+
+type smallRect struct {
+	left   short
+	top    short
+	right  short
+	bottom short
+}
+
+type consoleScreenBufferInfo struct {
+	size              coord
+	cursorPosition    coord
+	attributes        word
+	window            smallRect
+	maximumWindowSize coord
+}
+
+type consoleCursorInfo struct {
+	size    dword
+	visible int32
+}
+
+var (
+	kernel32                       = syscall.NewLazyDLL("kernel32.dll")
+	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+	procSetConsoleTextAttribute    = kernel32.NewProc("SetConsoleTextAttribute")
+	procSetConsoleCursorPosition   = kernel32.NewProc("SetConsoleCursorPosition")
+	procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+	procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
+	procGetConsoleCursorInfo       = kernel32.NewProc("GetConsoleCursorInfo")
+	procSetConsoleCursorInfo       = kernel32.NewProc("SetConsoleCursorInfo")
+)
+
+type Writer struct {
+	out     io.Writer
+	handle  syscall.Handle
+	lastbuf bytes.Buffer
+	oldattr word
+	oldpos  coord
+}
+
+// NewColorable returns a new instance of Writer which handles escape sequences from the given file.
+func NewColorable(file *os.File) io.Writer {
+	if file == nil {
+		panic("nil passed instead of *os.File to NewColorable()")
+	}
+
+	if isatty.IsTerminal(file.Fd()) {
+		var csbi consoleScreenBufferInfo
+		handle := syscall.Handle(file.Fd())
+		procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+		return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
+	} else {
+		return file
+	}
+}
+
+// NewColorableStdout returns a new instance of Writer which handles escape sequences for stdout.
+func NewColorableStdout() io.Writer {
+	return NewColorable(os.Stdout)
+}
+
+// NewColorableStderr returns a new instance of Writer which handles escape sequences for stderr.
+func NewColorableStderr() io.Writer {
+	return NewColorable(os.Stderr)
+}
+
+var color256 = map[int]int{
+	0:   0x000000,
+	1:   0x800000,
+	2:   0x008000,
+	3:   0x808000,
+	4:   0x000080,
+	5:   0x800080,
+	6:   0x008080,
+	7:   0xc0c0c0,
+	8:   0x808080,
+	9:   0xff0000,
+	10:  0x00ff00,
+	11:  0xffff00,
+	12:  0x0000ff,
+	13:  0xff00ff,
+	14:  0x00ffff,
+	15:  0xffffff,
+	16:  0x000000,
+	17:  0x00005f,
+	18:  0x000087,
+	19:  0x0000af,
+	20:  0x0000d7,
+	21:  0x0000ff,
+	22:  0x005f00,
+	23:  0x005f5f,
+	24:  0x005f87,
+	25:  0x005faf,
+	26:  0x005fd7,
+	27:  0x005fff,
+	28:  0x008700,
+	29:  0x00875f,
+	30:  0x008787,
+	31:  0x0087af,
+	32:  0x0087d7,
+	33:  0x0087ff,
+	34:  0x00af00,
+	35:  0x00af5f,
+	36:  0x00af87,
+	37:  0x00afaf,
+	38:  0x00afd7,
+	39:  0x00afff,
+	40:  0x00d700,
+	41:  0x00d75f,
+	42:  0x00d787,
+	43:  0x00d7af,
+	44:  0x00d7d7,
+	45:  0x00d7ff,
+	46:  0x00ff00,
+	47:  0x00ff5f,
+	48:  0x00ff87,
+	49:  0x00ffaf,
+	50:  0x00ffd7,
+	51:  0x00ffff,
+	52:  0x5f0000,
+	53:  0x5f005f,
+	54:  0x5f0087,
+	55:  0x5f00af,
+	56:  0x5f00d7,
+	57:  0x5f00ff,
+	58:  0x5f5f00,
+	59:  0x5f5f5f,
+	60:  0x5f5f87,
+	61:  0x5f5faf,
+	62:  0x5f5fd7,
+	63:  0x5f5fff,
+	64:  0x5f8700,
+	65:  0x5f875f,
+	66:  0x5f8787,
+	67:  0x5f87af,
+	68:  0x5f87d7,
+	69:  0x5f87ff,
+	70:  0x5faf00,
+	71:  0x5faf5f,
+	72:  0x5faf87,
+	73:  0x5fafaf,
+	74:  0x5fafd7,
+	75:  0x5fafff,
+	76:  0x5fd700,
+	77:  0x5fd75f,
+	78:  0x5fd787,
+	79:  0x5fd7af,
+	80:  0x5fd7d7,
+	81:  0x5fd7ff,
+	82:  0x5fff00,
+	83:  0x5fff5f,
+	84:  0x5fff87,
+	85:  0x5fffaf,
+	86:  0x5fffd7,
+	87:  0x5fffff,
+	88:  0x870000,
+	89:  0x87005f,
+	90:  0x870087,
+	91:  0x8700af,
+	92:  0x8700d7,
+	93:  0x8700ff,
+	94:  0x875f00,
+	95:  0x875f5f,
+	96:  0x875f87,
+	97:  0x875faf,
+	98:  0x875fd7,
+	99:  0x875fff,
+	100: 0x878700,
+	101: 0x87875f,
+	102: 0x878787,
+	103: 0x8787af,
+	104: 0x8787d7,
+	105: 0x8787ff,
+	106: 0x87af00,
+	107: 0x87af5f,
+	108: 0x87af87,
+	109: 0x87afaf,
+	110: 0x87afd7,
+	111: 0x87afff,
+	112: 0x87d700,
+	113: 0x87d75f,
+	114: 0x87d787,
+	115: 0x87d7af,
+	116: 0x87d7d7,
+	117: 0x87d7ff,
+	118: 0x87ff00,
+	119: 0x87ff5f,
+	120: 0x87ff87,
+	121: 0x87ffaf,
+	122: 0x87ffd7,
+	123: 0x87ffff,
+	124: 0xaf0000,
+	125: 0xaf005f,
+	126: 0xaf0087,
+	127: 0xaf00af,
+	128: 0xaf00d7,
+	129: 0xaf00ff,
+	130: 0xaf5f00,
+	131: 0xaf5f5f,
+	132: 0xaf5f87,
+	133: 0xaf5faf,
+	134: 0xaf5fd7,
+	135: 0xaf5fff,
+	136: 0xaf8700,
+	137: 0xaf875f,
+	138: 0xaf8787,
+	139: 0xaf87af,
+	140: 0xaf87d7,
+	141: 0xaf87ff,
+	142: 0xafaf00,
+	143: 0xafaf5f,
+	144: 0xafaf87,
+	145: 0xafafaf,
+	146: 0xafafd7,
+	147: 0xafafff,
+	148: 0xafd700,
+	149: 0xafd75f,
+	150: 0xafd787,
+	151: 0xafd7af,
+	152: 0xafd7d7,
+	153: 0xafd7ff,
+	154: 0xafff00,
+	155: 0xafff5f,
+	156: 0xafff87,
+	157: 0xafffaf,
+	158: 0xafffd7,
+	159: 0xafffff,
+	160: 0xd70000,
+	161: 0xd7005f,
+	162: 0xd70087,
+	163: 0xd700af,
+	164: 0xd700d7,
+	165: 0xd700ff,
+	166: 0xd75f00,
+	167: 0xd75f5f,
+	168: 0xd75f87,
+	169: 0xd75faf,
+	170: 0xd75fd7,
+	171: 0xd75fff,
+	172: 0xd78700,
+	173: 0xd7875f,
+	174: 0xd78787,
+	175: 0xd787af,
+	176: 0xd787d7,
+	177: 0xd787ff,
+	178: 0xd7af00,
+	179: 0xd7af5f,
+	180: 0xd7af87,
+	181: 0xd7afaf,
+	182: 0xd7afd7,
+	183: 0xd7afff,
+	184: 0xd7d700,
+	185: 0xd7d75f,
+	186: 0xd7d787,
+	187: 0xd7d7af,
+	188: 0xd7d7d7,
+	189: 0xd7d7ff,
+	190: 0xd7ff00,
+	191: 0xd7ff5f,
+	192: 0xd7ff87,
+	193: 0xd7ffaf,
+	194: 0xd7ffd7,
+	195: 0xd7ffff,
+	196: 0xff0000,
+	197: 0xff005f,
+	198: 0xff0087,
+	199: 0xff00af,
+	200: 0xff00d7,
+	201: 0xff00ff,
+	202: 0xff5f00,
+	203: 0xff5f5f,
+	204: 0xff5f87,
+	205: 0xff5faf,
+	206: 0xff5fd7,
+	207: 0xff5fff,
+	208: 0xff8700,
+	209: 0xff875f,
+	210: 0xff8787,
+	211: 0xff87af,
+	212: 0xff87d7,
+	213: 0xff87ff,
+	214: 0xffaf00,
+	215: 0xffaf5f,
+	216: 0xffaf87,
+	217: 0xffafaf,
+	218: 0xffafd7,
+	219: 0xffafff,
+	220: 0xffd700,
+	221: 0xffd75f,
+	222: 0xffd787,
+	223: 0xffd7af,
+	224: 0xffd7d7,
+	225: 0xffd7ff,
+	226: 0xffff00,
+	227: 0xffff5f,
+	228: 0xffff87,
+	229: 0xffffaf,
+	230: 0xffffd7,
+	231: 0xffffff,
+	232: 0x080808,
+	233: 0x121212,
+	234: 0x1c1c1c,
+	235: 0x262626,
+	236: 0x303030,
+	237: 0x3a3a3a,
+	238: 0x444444,
+	239: 0x4e4e4e,
+	240: 0x585858,
+	241: 0x626262,
+	242: 0x6c6c6c,
+	243: 0x767676,
+	244: 0x808080,
+	245: 0x8a8a8a,
+	246: 0x949494,
+	247: 0x9e9e9e,
+	248: 0xa8a8a8,
+	249: 0xb2b2b2,
+	250: 0xbcbcbc,
+	251: 0xc6c6c6,
+	252: 0xd0d0d0,
+	253: 0xdadada,
+	254: 0xe4e4e4,
+	255: 0xeeeeee,
+}
+
+// Write writes data to the console, translating ANSI escape sequences into console attribute calls.
+func (w *Writer) Write(data []byte) (n int, err error) {
+	var csbi consoleScreenBufferInfo
+	procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+
+	er := bytes.NewReader(data)
+	var bw [1]byte
+loop:
+	for {
+		r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+		if r1 == 0 {
+			break loop
+		}
+
+		c1, err := er.ReadByte()
+		if err != nil {
+			break loop
+		}
+		if c1 != 0x1b {
+			bw[0] = c1
+			w.out.Write(bw[:])
+			continue
+		}
+		c2, err := er.ReadByte()
+		if err != nil {
+			w.lastbuf.WriteByte(c1)
+			break loop
+		}
+		if c2 != 0x5b {
+			w.lastbuf.WriteByte(c1)
+			w.lastbuf.WriteByte(c2)
+			continue
+		}
+
+		var buf bytes.Buffer
+		var m byte
+		for {
+			c, err := er.ReadByte()
+			if err != nil {
+				w.lastbuf.WriteByte(c1)
+				w.lastbuf.WriteByte(c2)
+				w.lastbuf.Write(buf.Bytes())
+				break loop
+			}
+			if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+				m = c
+				break
+			}
+			buf.Write([]byte(string(c)))
+		}
+
+		var csbi consoleScreenBufferInfo
+		switch m {
+		case 'A':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.y -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'B':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.y += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'C':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'D':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'E':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = 0
+			csbi.cursorPosition.y += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'F':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = 0
+			csbi.cursorPosition.y -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'G':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = short(n - 1)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'H':
+			token := strings.Split(buf.String(), ";")
+			if len(token) != 2 {
+				continue
+			}
+			n1, err := strconv.Atoi(token[0])
+			if err != nil {
+				continue
+			}
+			n2, err := strconv.Atoi(token[1])
+			if err != nil {
+				continue
+			}
+			csbi.cursorPosition.x = short(n2 - 1)
+			csbi.cursorPosition.y = short(n1 - 1)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'J':
+			n, err := strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			var cursor coord
+			switch n {
+			case 0:
+				cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+			case 1:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top}
+			case 2:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top}
+			}
+			var count, written dword
+			count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
+			procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+		case 'K':
+			n, err := strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			var cursor coord
+			switch n {
+			case 0:
+				cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+			case 1:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+			case 2:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+			}
+			var count, written dword
+			count = dword(csbi.size.x - csbi.cursorPosition.x)
+			procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+		case 'm':
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			attr := csbi.attributes
+			cs := buf.String()
+			if cs == "" {
+				procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
+				continue
+			}
+			token := strings.Split(cs, ";")
+			for i := 0; i < len(token); i++ {
+				ns := token[i]
+				if n, err = strconv.Atoi(ns); err == nil {
+					switch {
+					case n == 0 || n == 100:
+						attr = w.oldattr
+					case 1 <= n && n <= 5:
+						attr |= foregroundIntensity
+					case n == 7:
+						attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+					case n == 22 || n == 25:
+						attr |= foregroundIntensity
+					case n == 27:
+						attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+					case 30 <= n && n <= 37:
+						attr &= backgroundMask
+						if (n-30)&1 != 0 {
+							attr |= foregroundRed
+						}
+						if (n-30)&2 != 0 {
+							attr |= foregroundGreen
+						}
+						if (n-30)&4 != 0 {
+							attr |= foregroundBlue
+						}
+					case n == 38: // set foreground color.
+						if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
+							if n256, err := strconv.Atoi(token[i+2]); err == nil {
+								if n256foreAttr == nil {
+									n256setup()
+								}
+								attr &= backgroundMask
+								attr |= n256foreAttr[n256]
+								i += 2
+							}
+						} else {
+							attr = attr & (w.oldattr & backgroundMask)
+						}
+					case n == 39: // reset foreground color.
+						attr &= backgroundMask
+						attr |= w.oldattr & foregroundMask
+					case 40 <= n && n <= 47:
+						attr &= foregroundMask
+						if (n-40)&1 != 0 {
+							attr |= backgroundRed
+						}
+						if (n-40)&2 != 0 {
+							attr |= backgroundGreen
+						}
+						if (n-40)&4 != 0 {
+							attr |= backgroundBlue
+						}
+					case n == 48: // set background color.
+						if i < len(token)-2 && token[i+1] == "5" {
+							if n256, err := strconv.Atoi(token[i+2]); err == nil {
+								if n256backAttr == nil {
+									n256setup()
+								}
+								attr &= foregroundMask
+								attr |= n256backAttr[n256]
+								i += 2
+							}
+						} else {
+							attr = attr & (w.oldattr & foregroundMask)
+						}
+					case n == 49: // reset background color.
+						attr &= foregroundMask
+						attr |= w.oldattr & backgroundMask
+					case 90 <= n && n <= 97:
+						attr = (attr & backgroundMask)
+						attr |= foregroundIntensity
+						if (n-90)&1 != 0 {
+							attr |= foregroundRed
+						}
+						if (n-90)&2 != 0 {
+							attr |= foregroundGreen
+						}
+						if (n-90)&4 != 0 {
+							attr |= foregroundBlue
+						}
+					case 100 <= n && n <= 107:
+						attr = (attr & foregroundMask)
+						attr |= backgroundIntensity
+						if (n-100)&1 != 0 {
+							attr |= backgroundRed
+						}
+						if (n-100)&2 != 0 {
+							attr |= backgroundGreen
+						}
+						if (n-100)&4 != 0 {
+							attr |= backgroundBlue
+						}
+					}
+					procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
+				}
+			}
+		case 'h':
+			cs := buf.String()
+			if cs == "?25" {
+				var ci consoleCursorInfo
+				procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+				ci.visible = 1
+				procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+			}
+		case 'l':
+			cs := buf.String()
+			if cs == "?25" {
+				var ci consoleCursorInfo
+				procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+				ci.visible = 0
+				procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+			}
+		case 's':
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			w.oldpos = csbi.cursorPosition
+		case 'u':
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos)))
+		}
+	}
+	return len(data) - w.lastbuf.Len(), nil
+}
+
+type consoleColor struct {
+	rgb       int
+	red       bool
+	green     bool
+	blue      bool
+	intensity bool
+}
+
+func (c consoleColor) foregroundAttr() (attr word) {
+	if c.red {
+		attr |= foregroundRed
+	}
+	if c.green {
+		attr |= foregroundGreen
+	}
+	if c.blue {
+		attr |= foregroundBlue
+	}
+	if c.intensity {
+		attr |= foregroundIntensity
+	}
+	return
+}
+
+func (c consoleColor) backgroundAttr() (attr word) {
+	if c.red {
+		attr |= backgroundRed
+	}
+	if c.green {
+		attr |= backgroundGreen
+	}
+	if c.blue {
+		attr |= backgroundBlue
+	}
+	if c.intensity {
+		attr |= backgroundIntensity
+	}
+	return
+}
+
+var color16 = []consoleColor{
+	consoleColor{0x000000, false, false, false, false},
+	consoleColor{0x000080, false, false, true, false},
+	consoleColor{0x008000, false, true, false, false},
+	consoleColor{0x008080, false, true, true, false},
+	consoleColor{0x800000, true, false, false, false},
+	consoleColor{0x800080, true, false, true, false},
+	consoleColor{0x808000, true, true, false, false},
+	consoleColor{0xc0c0c0, true, true, true, false},
+	consoleColor{0x808080, false, false, false, true},
+	consoleColor{0x0000ff, false, false, true, true},
+	consoleColor{0x00ff00, false, true, false, true},
+	consoleColor{0x00ffff, false, true, true, true},
+	consoleColor{0xff0000, true, false, false, true},
+	consoleColor{0xff00ff, true, false, true, true},
+	consoleColor{0xffff00, true, true, false, true},
+	consoleColor{0xffffff, true, true, true, true},
+}
+
+type hsv struct {
+	h, s, v float32
+}
+
+func (a hsv) dist(b hsv) float32 {
+	dh := a.h - b.h
+	switch {
+	case dh > 0.5:
+		dh = 1 - dh
+	case dh < -0.5:
+		dh = -1 - dh
+	}
+	ds := a.s - b.s
+	dv := a.v - b.v
+	return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
+}
+
+func toHSV(rgb int) hsv {
+	r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
+		float32((rgb&0x00FF00)>>8)/256.0,
+		float32(rgb&0x0000FF)/256.0
+	min, max := minmax3f(r, g, b)
+	h := max - min
+	if h > 0 {
+		if max == r {
+			h = (g - b) / h
+			if h < 0 {
+				h += 6
+			}
+		} else if max == g {
+			h = 2 + (b-r)/h
+		} else {
+			h = 4 + (r-g)/h
+		}
+	}
+	h /= 6.0
+	s := max - min
+	if max != 0 {
+		s /= max
+	}
+	v := max
+	return hsv{h: h, s: s, v: v}
+}
+
+type hsvTable []hsv
+
+func toHSVTable(rgbTable []consoleColor) hsvTable {
+	t := make(hsvTable, len(rgbTable))
+	for i, c := range rgbTable {
+		t[i] = toHSV(c.rgb)
+	}
+	return t
+}
+
+func (t hsvTable) find(rgb int) consoleColor {
+	hsv := toHSV(rgb)
+	n := 7
+	l := float32(5.0)
+	for i, p := range t {
+		d := hsv.dist(p)
+		if d < l {
+			l, n = d, i
+		}
+	}
+	return color16[n]
+}
+
+func minmax3f(a, b, c float32) (min, max float32) {
+	if a < b {
+		if b < c {
+			return a, c
+		} else if a < c {
+			return a, b
+		} else {
+			return c, b
+		}
+	} else {
+		if a < c {
+			return b, c
+		} else if b < c {
+			return b, a
+		} else {
+			return c, a
+		}
+	}
+}
+
+var n256foreAttr []word
+var n256backAttr []word
+
+func n256setup() {
+	n256foreAttr = make([]word, 256)
+	n256backAttr = make([]word, 256)
+	t := toHSVTable(color16)
+	for i, rgb := range color256 {
+		c := t.find(rgb)
+		n256foreAttr[i] = c.foregroundAttr()
+		n256backAttr[i] = c.backgroundAttr()
+	}
+}
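For orientation, a minimal sketch of how this vendored writer is typically used. `NewColorableStdout` is part of the go-colorable public API; on Windows the returned writer feeds the `Write` method above, which translates ANSI sequences into console API calls, while on other platforms it simply wraps `os.Stdout`. The escape sequence shown is just an illustrative SGR color code.

```go
package main

import (
	"fmt"

	"github.com/mattn/go-colorable"
)

func main() {
	// On Windows this writer converts \x1b[32m into a
	// SetConsoleTextAttribute call; elsewhere it wraps os.Stdout.
	out := colorable.NewColorableStdout()
	fmt.Fprintln(out, "\x1b[32mgreen\x1b[0m text")
}
```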
diff --git a/automation/vendor/github.com/mattn/go-colorable/noncolorable.go b/automation/vendor/github.com/mattn/go-colorable/noncolorable.go
new file mode 100644
index 0000000..ca588c7
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-colorable/noncolorable.go
@@ -0,0 +1,61 @@
+package colorable
+
+import (
+	"bytes"
+	"io"
+)
+
+// NonColorable holds a writer and removes escape sequences from its output.
+type NonColorable struct {
+	out     io.Writer
+	lastbuf bytes.Buffer
+}
+
+// NewNonColorable returns a new instance of a writer that removes escape sequences from its output.
+func NewNonColorable(w io.Writer) io.Writer {
+	return &NonColorable{out: w}
+}
+
+// Write writes data to the console.
+func (w *NonColorable) Write(data []byte) (n int, err error) {
+	er := bytes.NewReader(data)
+	var bw [1]byte
+loop:
+	for {
+		c1, err := er.ReadByte()
+		if err != nil {
+			break loop
+		}
+		if c1 != 0x1b {
+			bw[0] = c1
+			w.out.Write(bw[:])
+			continue
+		}
+		c2, err := er.ReadByte()
+		if err != nil {
+			w.lastbuf.WriteByte(c1)
+			break loop
+		}
+		if c2 != 0x5b {
+			w.lastbuf.WriteByte(c1)
+			w.lastbuf.WriteByte(c2)
+			continue
+		}
+
+		var buf bytes.Buffer
+		for {
+			c, err := er.ReadByte()
+			if err != nil {
+				w.lastbuf.WriteByte(c1)
+				w.lastbuf.WriteByte(c2)
+				w.lastbuf.Write(buf.Bytes())
+				break loop
+			}
+			if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+				break
+			}
+			buf.Write([]byte(string(c)))
+		}
+	}
+	return len(data) - w.lastbuf.Len(), nil
+}
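A companion sketch for `NewNonColorable`: wrapping any `io.Writer` strips the escape sequences instead of interpreting them, which suits plain log files. The sequence below is illustrative.

```go
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-colorable"
)

func main() {
	// The SGR sequences are dropped; only "plain text" is written.
	w := colorable.NewNonColorable(os.Stdout)
	fmt.Fprintln(w, "\x1b[31mplain\x1b[0m text")
}
```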
diff --git a/automation/vendor/github.com/mattn/go-isatty/LICENSE b/automation/vendor/github.com/mattn/go-isatty/LICENSE
new file mode 100644
index 0000000..65dc692
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
+
+MIT License (Expat)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/automation/vendor/github.com/mattn/go-isatty/README.md b/automation/vendor/github.com/mattn/go-isatty/README.md
new file mode 100644
index 0000000..74845de
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/README.md
@@ -0,0 +1,37 @@
+# go-isatty
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/mattn/go-isatty"
+	"os"
+)
+
+func main() {
+	if isatty.IsTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Terminal")
+	} else {
+		fmt.Println("Is Not Terminal")
+	}
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/automation/vendor/github.com/mattn/go-isatty/doc.go b/automation/vendor/github.com/mattn/go-isatty/doc.go
new file mode 100644
index 0000000..17d4f90
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/doc.go
@@ -0,0 +1,2 @@
+// Package isatty implements an interface to isatty.
+package isatty
diff --git a/automation/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/automation/vendor/github.com/mattn/go-isatty/isatty_appengine.go
new file mode 100644
index 0000000..83c5887
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/isatty_appengine.go
@@ -0,0 +1,9 @@
+// +build appengine
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is a terminal, which
+// is always false on App Engine classic, a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/automation/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/automation/vendor/github.com/mattn/go-isatty/isatty_bsd.go
new file mode 100644
index 0000000..42f2514
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -0,0 +1,18 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/automation/vendor/github.com/mattn/go-isatty/isatty_linux.go b/automation/vendor/github.com/mattn/go-isatty/isatty_linux.go
new file mode 100644
index 0000000..9d24bac
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/isatty_linux.go
@@ -0,0 +1,18 @@
+// +build linux
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/automation/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/automation/vendor/github.com/mattn/go-isatty/isatty_solaris.go
new file mode 100644
index 0000000..1f0c6bf
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/isatty_solaris.go
@@ -0,0 +1,16 @@
+// +build solaris
+// +build !appengine
+
+package isatty
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
+func IsTerminal(fd uintptr) bool {
+	var termio unix.Termio
+	err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
+	return err == nil
+}
diff --git a/automation/vendor/github.com/mattn/go-isatty/isatty_windows.go b/automation/vendor/github.com/mattn/go-isatty/isatty_windows.go
new file mode 100644
index 0000000..83c398b
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -0,0 +1,19 @@
+// +build windows
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
diff --git a/automation/vendor/golang.org/x/crypto/LICENSE b/automation/vendor/golang.org/x/crypto/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/automation/vendor/golang.org/x/crypto/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/automation/vendor/golang.org/x/crypto/PATENTS b/automation/vendor/golang.org/x/crypto/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/automation/vendor/golang.org/x/crypto/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go.  This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation.  If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/automation/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/automation/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
new file mode 100644
index 0000000..593f653
--- /dev/null
+++ b/automation/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
@@ -0,0 +1,77 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
+2898 / PKCS #5 v2.0.
+
+A key derivation function is useful when encrypting data based on a password
+or any other not-fully-random data. It uses a pseudorandom function to derive
+a secure encryption key based on the password.
+
+While v2.0 of the standard defines only one pseudorandom function to use,
+HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
+Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
+choose, you can pass the `New` functions from the different SHA packages to
+pbkdf2.Key.
+*/
+package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
+
+import (
+	"crypto/hmac"
+	"hash"
+)
+
+// Key derives a key from the password, salt and iteration count, returning a
+// []byte of length keylen that can be used as cryptographic key. The key is
+// derived based on the method described as PBKDF2 with the HMAC variant using
+// the supplied hash function.
+//
+// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you
+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
+// doing:
+//
+// 	dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
+//
+// Remember to get a good random salt. At least 8 bytes is recommended by the
+// RFC.
+//
+// Using a higher iteration count will increase the cost of an exhaustive
+// search but will also make derivation proportionally slower.
+func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
+	prf := hmac.New(h, password)
+	hashLen := prf.Size()
+	numBlocks := (keyLen + hashLen - 1) / hashLen
+
+	var buf [4]byte
+	dk := make([]byte, 0, numBlocks*hashLen)
+	U := make([]byte, hashLen)
+	for block := 1; block <= numBlocks; block++ {
+		// N.B.: || means concatenation, ^ means XOR
+		// for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
+		// U_1 = PRF(password, salt || uint(i))
+		prf.Reset()
+		prf.Write(salt)
+		buf[0] = byte(block >> 24)
+		buf[1] = byte(block >> 16)
+		buf[2] = byte(block >> 8)
+		buf[3] = byte(block)
+		prf.Write(buf[:4])
+		dk = prf.Sum(dk)
+		T := dk[len(dk)-hashLen:]
+		copy(U, T)
+
+		// U_n = PRF(password, U_(n-1))
+		for n := 2; n <= iter; n++ {
+			prf.Reset()
+			prf.Write(U)
+			U = U[:0]
+			U = prf.Sum(U)
+			for x := range U {
+				T[x] ^= U[x]
+			}
+		}
+	}
+	return dk[:keyLen]
+}
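A short usage sketch of `pbkdf2.Key` as documented above, here deriving a 32-byte key with HMAC-SHA-256; the iteration count and salt length are illustrative values, not recommendations made by this package.

```go
package main

import (
	"crypto/rand"
	"crypto/sha256"
	"fmt"

	"golang.org/x/crypto/pbkdf2"
)

func main() {
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}
	// 4096 iterations, 32-byte key, SHA-256 as the HMAC hash.
	key := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha256.New)
	fmt.Printf("%x\n", key)
}
```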
diff --git a/automation/vendor/gopkg.in/juju/names.v2/LICENSE b/automation/vendor/gopkg.in/juju/names.v2/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/gopkg.in/juju/names.v2/README.md b/automation/vendor/gopkg.in/juju/names.v2/README.md
new file mode 100644
index 0000000..ecacdc7
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/README.md
@@ -0,0 +1,4 @@
+juju/names
+============
+
+This package provides helpers for handling Juju entity names.
diff --git a/automation/vendor/gopkg.in/juju/names.v2/action.go b/automation/vendor/gopkg.in/juju/names.v2/action.go
new file mode 100644
index 0000000..3211881
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/action.go
@@ -0,0 +1,79 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+
+	"github.com/juju/errors"
+	"github.com/juju/utils"
+)
+
+const ActionTagKind = "action"
+
+type ActionTag struct {
+	// Tags that are serialized need to have fields exported.
+	ID utils.UUID
+}
+
+// NewActionTag returns the tag of an action with the given id (UUID).
+func NewActionTag(id string) ActionTag {
+	uuid, err := utils.UUIDFromString(id)
+	if err != nil {
+		panic(err)
+	}
+	return ActionTag{ID: uuid}
+}
+
+// ParseActionTag parses an action tag string.
+func ParseActionTag(actionTag string) (ActionTag, error) {
+	tag, err := ParseTag(actionTag)
+	if err != nil {
+		return ActionTag{}, err
+	}
+	at, ok := tag.(ActionTag)
+	if !ok {
+		return ActionTag{}, invalidTagError(actionTag, ActionTagKind)
+	}
+	return at, nil
+}
+
+func (t ActionTag) String() string { return t.Kind() + "-" + t.Id() }
+func (t ActionTag) Kind() string   { return ActionTagKind }
+func (t ActionTag) Id() string     { return t.ID.String() }
+
+// IsValidAction returns whether id is a valid action id (UUID).
+func IsValidAction(id string) bool {
+	return utils.IsValidUUIDString(id)
+}
+
+// ActionReceiverTag returns an ActionReceiver Tag from a
+// machine or unit name.
+func ActionReceiverTag(name string) (Tag, error) {
+	if IsValidUnit(name) {
+		return NewUnitTag(name), nil
+	}
+	if IsValidApplication(name) {
+		// TODO(jcw4) enable when leader elections complete
+		//return NewApplicationTag(name), nil
+	}
+	if IsValidMachine(name) {
+		return NewMachineTag(name), nil
+	}
+	return nil, fmt.Errorf("invalid actionreceiver name %q", name)
+}
+
+// ActionReceiverFromTag returns an ActionReceiver tag from
+// a machine or unit tag.
+func ActionReceiverFromTag(tag string) (Tag, error) {
+	unitTag, err := ParseUnitTag(tag)
+	if err == nil {
+		return unitTag, nil
+	}
+	machineTag, err := ParseMachineTag(tag)
+	if err == nil {
+		return machineTag, nil
+	}
+	return nil, errors.Errorf("invalid actionreceiver tag %q", tag)
+}
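For illustration, `ActionReceiverTag` resolves a bare unit or machine name to its tag; the unit name below is hypothetical.

```go
package main

import (
	"fmt"

	"gopkg.in/juju/names.v2"
)

func main() {
	// "mysql/0" is a valid unit name, so a UnitTag is returned.
	tag, err := names.ActionReceiverTag("mysql/0")
	if err != nil {
		panic(err)
	}
	fmt.Println(tag.String()) // unit-mysql-0
}
```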
diff --git a/automation/vendor/gopkg.in/juju/names.v2/application.go b/automation/vendor/gopkg.in/juju/names.v2/application.go
new file mode 100644
index 0000000..aa8985e
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/application.go
@@ -0,0 +1,48 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"regexp"
+)
+
+const ApplicationTagKind = "application"
+
+const (
+	ApplicationSnippet = "(?:[a-z][a-z0-9]*(?:-[a-z0-9]*[a-z][a-z0-9]*)*)"
+	NumberSnippet      = "(?:0|[1-9][0-9]*)"
+)
+
+var validApplication = regexp.MustCompile("^" + ApplicationSnippet + "$")
+
+// IsValidApplication returns whether name is a valid application name.
+func IsValidApplication(name string) bool {
+	return validApplication.MatchString(name)
+}
+
+type ApplicationTag struct {
+	Name string
+}
+
+func (t ApplicationTag) String() string { return t.Kind() + "-" + t.Id() }
+func (t ApplicationTag) Kind() string   { return ApplicationTagKind }
+func (t ApplicationTag) Id() string     { return t.Name }
+
+// NewApplicationTag returns the tag for the application with the given name.
+func NewApplicationTag(applicationName string) ApplicationTag {
+	return ApplicationTag{Name: applicationName}
+}
+
+// ParseApplicationTag parses an application tag string.
+func ParseApplicationTag(applicationTag string) (ApplicationTag, error) {
+	tag, err := ParseTag(applicationTag)
+	if err != nil {
+		return ApplicationTag{}, err
+	}
+	st, ok := tag.(ApplicationTag)
+	if !ok {
+		return ApplicationTag{}, invalidTagError(applicationTag, ApplicationTagKind)
+	}
+	return st, nil
+}
diff --git a/automation/vendor/gopkg.in/juju/names.v2/charm.go b/automation/vendor/gopkg.in/juju/names.v2/charm.go
new file mode 100644
index 0000000..3984426
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/charm.go
@@ -0,0 +1,105 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+)
+
+// CharmTagKind specifies charm tag kind
+const CharmTagKind = "charm"
+
+// A valid charm URL can be in either V1 or V3 format. (V2 is a
+// charmstore web URL like https://jujucharms.com/postgresql/105, but
+// that's not valid as a tag.)
+//
+// V1 is of the form:
+// schema:~user/series/name-revision
+// where
+//     schema    is optional and can be either "local" or "cs".
+//               When not supplied, "cs" is implied.
+//     user      is optional and is only applicable for "cs" schema
+//     series    is optional and is a valid series name
+//     name      is mandatory and is the name of the charm
+//     revision  is optional and can be -1 if revision is unset
+//
+// V3 is of the form
+// schema:user/name/series/revision
+// with the same fields and constraints as the V1 format.
+
+var (
+	// SeriesSnippet is a regular expression representing series
+	SeriesSnippet = "[a-z]+([a-z0-9]+)?"
+
+	// CharmNameSnippet is a regular expression representing charm name
+	CharmNameSnippet = "[a-z][a-z0-9]*(-[a-z0-9]*[a-z][a-z0-9]*)*"
+
+	localSchemaSnippet        = "local:"
+	v1CharmStoreSchemaSnippet = "cs:(~" + validUserNameSnippet + "/)?"
+	revisionSnippet           = "(-1|0|[1-9][0-9]*)"
+
+	validV1CharmRegEx = regexp.MustCompile("^(" +
+		localSchemaSnippet + "|" +
+		v1CharmStoreSchemaSnippet + ")?(" +
+		SeriesSnippet + "/)?" +
+		CharmNameSnippet + "(-" +
+		revisionSnippet + ")?$")
+
+	v3CharmStoreSchemaSnippet = "(cs:)?(" + validUserNameSnippet + "/)?"
+
+	validV3CharmRegEx = regexp.MustCompile("^(" +
+		localSchemaSnippet + "|" +
+		v3CharmStoreSchemaSnippet + ")" +
+		CharmNameSnippet + "(/" +
+		SeriesSnippet + ")?(/" +
+		revisionSnippet + ")?$")
+)
+
+// CharmTag represents tag for charm
+// using charm's URL
+type CharmTag struct {
+	url string
+}
+
+// String satisfies Tag interface.
+// Produces string representation of charm tag.
+func (t CharmTag) String() string { return t.Kind() + "-" + t.Id() }
+
+// Kind satisfies Tag interface.
+// Returns Charm tag kind.
+func (t CharmTag) Kind() string { return CharmTagKind }
+
+// Id satisfies Tag interface.
+// Returns charm URL.
+func (t CharmTag) Id() string { return t.url }
+
+// NewCharmTag returns the tag for the charm with the given url.
+// It will panic if the given charm url is not valid.
+func NewCharmTag(charmURL string) CharmTag {
+	if !IsValidCharm(charmURL) {
+		panic(fmt.Sprintf("%q is not a valid charm name", charmURL))
+	}
+	return CharmTag{url: charmURL}
+}
+
+var emptyTag = CharmTag{}
+
+// ParseCharmTag parses a charm tag string.
+func ParseCharmTag(charmTag string) (CharmTag, error) {
+	tag, err := ParseTag(charmTag)
+	if err != nil {
+		return emptyTag, err
+	}
+	ct, ok := tag.(CharmTag)
+	if !ok {
+		return emptyTag, invalidTagError(charmTag, CharmTagKind)
+	}
+	return ct, nil
+}
+
+// IsValidCharm returns whether name is a valid charm url.
+func IsValidCharm(url string) bool {
+	return validV1CharmRegEx.MatchString(url) || validV3CharmRegEx.MatchString(url)
+}
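To make the V1/V3 URL formats described in the comments above concrete, a small sketch probing `IsValidCharm` with hypothetical charm URLs:

```go
package main

import (
	"fmt"

	"gopkg.in/juju/names.v2"
)

func main() {
	for _, url := range []string{
		"cs:~user/trusty/wordpress-3", // V1: schema, user, series, revision
		"local:wordpress",             // V1: local schema, name only
		"cs:user/wordpress/trusty/3",  // V3: schema, user, name, series, revision
		"https://example.com/x",       // not a charm URL
	} {
		fmt.Println(url, names.IsValidCharm(url))
	}
}
```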
diff --git a/automation/vendor/gopkg.in/juju/names.v2/cloud.go b/automation/vendor/gopkg.in/juju/names.v2/cloud.go
new file mode 100644
index 0000000..78a3c8b
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/cloud.go
@@ -0,0 +1,51 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+)
+
+const CloudTagKind = "cloud"
+
+var (
+	cloudSnippet = "[a-zA-Z0-9][a-zA-Z0-9.-]*"
+	validCloud   = regexp.MustCompile("^" + cloudSnippet + "$")
+)
+
+type CloudTag struct {
+	id string
+}
+
+func (t CloudTag) String() string { return t.Kind() + "-" + t.id }
+func (t CloudTag) Kind() string   { return CloudTagKind }
+func (t CloudTag) Id() string     { return t.id }
+
+// NewCloudTag returns the tag for the cloud with the given ID.
+// It will panic if the given cloud ID is not valid.
+func NewCloudTag(id string) CloudTag {
+	if !IsValidCloud(id) {
+		panic(fmt.Sprintf("%q is not a valid cloud ID", id))
+	}
+	return CloudTag{id}
+}
+
+// ParseCloudTag parses a cloud tag string.
+func ParseCloudTag(cloudTag string) (CloudTag, error) {
+	tag, err := ParseTag(cloudTag)
+	if err != nil {
+		return CloudTag{}, err
+	}
+	dt, ok := tag.(CloudTag)
+	if !ok {
+		return CloudTag{}, invalidTagError(cloudTag, CloudTagKind)
+	}
+	return dt, nil
+}
+
+// IsValidCloud returns whether id is a valid cloud ID.
+func IsValidCloud(id string) bool {
+	return validCloud.MatchString(id)
+}
diff --git a/automation/vendor/gopkg.in/juju/names.v2/cloudcredential.go b/automation/vendor/gopkg.in/juju/names.v2/cloudcredential.go
new file mode 100644
index 0000000..1d7252d
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/cloudcredential.go
@@ -0,0 +1,120 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"net/url"
+	"regexp"
+	"strings"
+)
+
+const CloudCredentialTagKind = "cloudcred"
+
+var (
+	cloudCredentialNameSnippet = "[a-zA-Z][a-zA-Z0-9.@_-]*"
+	validCloudCredentialName   = regexp.MustCompile("^" + cloudCredentialNameSnippet + "$")
+	validCloudCredential       = regexp.MustCompile(
+		"^" +
+			"(" + cloudSnippet + ")" +
+			"/(" + validUserSnippet + ")" + // credential owner
+			"/(" + cloudCredentialNameSnippet + ")" +
+			"$",
+	)
+)
+
+type CloudCredentialTag struct {
+	cloud CloudTag
+	owner UserTag
+	name  string
+}
+
+// IsZero reports whether t is zero.
+func (t CloudCredentialTag) IsZero() bool {
+	return t == CloudCredentialTag{}
+}
+
+// Kind is part of the Tag interface.
+func (t CloudCredentialTag) Kind() string { return CloudCredentialTagKind }
+
+// Id implements Tag.Id. It returns the empty string
+// if t is zero.
+func (t CloudCredentialTag) Id() string {
+	if t.IsZero() {
+		return ""
+	}
+	return fmt.Sprintf("%s/%s/%s", t.cloud.Id(), t.owner.Id(), t.name)
+}
+
+func quoteCredentialSeparator(in string) string {
+	return strings.Replace(in, "_", `%5f`, -1)
+}
+
+// String implements Tag.String. It returns the empty
+// string if t is zero.
+func (t CloudCredentialTag) String() string {
+	if t.IsZero() {
+		return ""
+	}
+	return fmt.Sprintf("%s-%s_%s_%s", t.Kind(),
+		quoteCredentialSeparator(t.cloud.Id()),
+		quoteCredentialSeparator(t.owner.Id()),
+		quoteCredentialSeparator(t.name))
+}
+
+// Cloud returns the tag of the cloud to which the credential pertains.
+func (t CloudCredentialTag) Cloud() CloudTag {
+	return t.cloud
+}
+
+// Owner returns the tag of the user that owns the credential.
+func (t CloudCredentialTag) Owner() UserTag {
+	return t.owner
+}
+
+// Name returns the cloud credential name, excluding the
+// cloud and owner qualifiers.
+func (t CloudCredentialTag) Name() string {
+	return t.name
+}
+
+// NewCloudCredentialTag returns the tag for the cloud with the given ID.
+// It will panic if the given cloud ID is not valid.
+func NewCloudCredentialTag(id string) CloudCredentialTag {
+	parts := validCloudCredential.FindStringSubmatch(id)
+	if len(parts) != 4 {
+		panic(fmt.Sprintf("%q is not a valid cloud credential ID", id))
+	}
+	cloud := NewCloudTag(parts[1])
+	owner := NewUserTag(parts[2])
+	return CloudCredentialTag{cloud, owner, parts[3]}
+}
+
+// ParseCloudCredentialTag parses a cloud tag string.
+func ParseCloudCredentialTag(s string) (CloudCredentialTag, error) {
+	tag, err := ParseTag(s)
+	if err != nil {
+		return CloudCredentialTag{}, err
+	}
+	dt, ok := tag.(CloudCredentialTag)
+	if !ok {
+		return CloudCredentialTag{}, invalidTagError(s, CloudCredentialTagKind)
+	}
+	return dt, nil
+}
+
+// IsValidCloudCredential returns whether id is a valid cloud credential ID.
+func IsValidCloudCredential(id string) bool {
+	return validCloudCredential.MatchString(id)
+}
+
+// IsValidCloudCredentialName returns whether name is a valid cloud credential name.
+func IsValidCloudCredentialName(name string) bool {
+	return validCloudCredentialName.MatchString(name)
+}
+
+func cloudCredentialTagSuffixToId(s string) (string, error) {
+	s = strings.Replace(s, "_", "/", -1)
+	return url.QueryUnescape(s)
+}
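A sketch of the credential-tag round trip, showing the `_` quoting performed by `String`; the credential ID is hypothetical.

```go
package main

import (
	"fmt"

	"gopkg.in/juju/names.v2"
)

func main() {
	// IDs have the form cloud/owner/name; "_" in the name is
	// percent-quoted in the tag's string form.
	tag := names.NewCloudCredentialTag("aws/bob/my_cred")
	fmt.Println(tag.Id())     // aws/bob/my_cred
	fmt.Println(tag.String()) // cloudcred-aws_bob_my%5fcred
}
```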
diff --git a/automation/vendor/gopkg.in/juju/names.v2/controller.go b/automation/vendor/gopkg.in/juju/names.v2/controller.go
new file mode 100644
index 0000000..c48c326
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/controller.go
@@ -0,0 +1,56 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"regexp"
+)
+
+// ControllerTagKind indicates that a tag belongs to a controller.
+const ControllerTagKind = "controller"
+
+// ControllerTag represents a tag used to describe a controller.
+type ControllerTag struct {
+	uuid string
+}
+
+// Lowercase letters, digits and (non-leading) hyphens.
+var validControllerName = regexp.MustCompile(`^[a-z0-9]+[a-z0-9-]*$`)
+
+// NewControllerTag returns the tag of a controller with the given controller UUID.
+func NewControllerTag(uuid string) ControllerTag {
+	return ControllerTag{uuid: uuid}
+}
+
+// ParseControllerTag parses a controller tag string.
+func ParseControllerTag(controllerTag string) (ControllerTag, error) {
+	tag, err := ParseTag(controllerTag)
+	if err != nil {
+		return ControllerTag{}, err
+	}
+	et, ok := tag.(ControllerTag)
+	if !ok {
+		return ControllerTag{}, invalidTagError(controllerTag, ControllerTagKind)
+	}
+	return et, nil
+}
+
+// String implements Tag.
+func (t ControllerTag) String() string { return t.Kind() + "-" + t.Id() }
+
+// Kind implements Tag.
+func (t ControllerTag) Kind() string { return ControllerTagKind }
+
+// Id implements Tag.
+func (t ControllerTag) Id() string { return t.uuid }
+
+// IsValidController returns whether id is a valid controller UUID.
+func IsValidController(id string) bool {
+	return validUUID.MatchString(id)
+}
+
+// IsValidControllerName returns whether name is a valid string safe for a controller name.
+func IsValidControllerName(name string) bool {
+	return validControllerName.MatchString(name)
+}
diff --git a/automation/vendor/gopkg.in/juju/names.v2/environ.go b/automation/vendor/gopkg.in/juju/names.v2/environ.go
new file mode 100644
index 0000000..db02d00
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/environ.go
@@ -0,0 +1,38 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+// EnvironTagKind is DEPRECATED: model tags are used instead.
+const EnvironTagKind = "environment"
+
+type EnvironTag struct {
+	uuid string
+}
+
+// NewEnvironTag returns the tag of an environment with the given environment UUID.
+func NewEnvironTag(uuid string) EnvironTag {
+	return EnvironTag{uuid: uuid}
+}
+
+// ParseEnvironTag parses an environ tag string.
+func ParseEnvironTag(environTag string) (EnvironTag, error) {
+	tag, err := ParseTag(environTag)
+	if err != nil {
+		return EnvironTag{}, err
+	}
+	et, ok := tag.(EnvironTag)
+	if !ok {
+		return EnvironTag{}, invalidTagError(environTag, EnvironTagKind)
+	}
+	return et, nil
+}
+
+func (t EnvironTag) String() string { return t.Kind() + "-" + t.Id() }
+func (t EnvironTag) Kind() string   { return EnvironTagKind }
+func (t EnvironTag) Id() string     { return t.uuid }
+
+// IsValidEnvironment returns whether id is a valid environment UUID.
+func IsValidEnvironment(id string) bool {
+	return validUUID.MatchString(id)
+}
diff --git a/automation/vendor/gopkg.in/juju/names.v2/filesystem.go b/automation/vendor/gopkg.in/juju/names.v2/filesystem.go
new file mode 100644
index 0000000..44ecc96
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/filesystem.go
@@ -0,0 +1,76 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const FilesystemTagKind = "filesystem"
+
+// Filesystems may be bound to a machine, meaning that the filesystem cannot
+// exist without that machine. We encode this in the tag.
+var validFilesystem = regexp.MustCompile("^(" + MachineSnippet + "/)?" + NumberSnippet + "$")
+
+type FilesystemTag struct {
+	id string
+}
+
+func (t FilesystemTag) String() string { return t.Kind() + "-" + t.id }
+func (t FilesystemTag) Kind() string   { return FilesystemTagKind }
+func (t FilesystemTag) Id() string     { return filesystemTagSuffixToId(t.id) }
+
+// NewFilesystemTag returns the tag for the filesystem with the given name.
+// It will panic if the given filesystem name is not valid.
+func NewFilesystemTag(id string) FilesystemTag {
+	tag, ok := tagFromFilesystemId(id)
+	if !ok {
+		panic(fmt.Sprintf("%q is not a valid filesystem id", id))
+	}
+	return tag
+}
+
+// ParseFilesystemTag parses a filesystem tag string.
+func ParseFilesystemTag(filesystemTag string) (FilesystemTag, error) {
+	tag, err := ParseTag(filesystemTag)
+	if err != nil {
+		return FilesystemTag{}, err
+	}
+	fstag, ok := tag.(FilesystemTag)
+	if !ok {
+		return FilesystemTag{}, invalidTagError(filesystemTag, FilesystemTagKind)
+	}
+	return fstag, nil
+}
+
+// IsValidFilesystem returns whether id is a valid filesystem id.
+func IsValidFilesystem(id string) bool {
+	return validFilesystem.MatchString(id)
+}
+
+// FilesystemMachine returns the machine component of the filesystem
+// tag, and a boolean indicating whether or not there is a
+// machine component.
+func FilesystemMachine(tag FilesystemTag) (MachineTag, bool) {
+	id := tag.Id()
+	pos := strings.LastIndex(id, "/")
+	if pos == -1 {
+		return MachineTag{}, false
+	}
+	return NewMachineTag(id[:pos]), true
+}
+
+func tagFromFilesystemId(id string) (FilesystemTag, bool) {
+	if !IsValidFilesystem(id) {
+		return FilesystemTag{}, false
+	}
+	id = strings.Replace(id, "/", "-", -1)
+	return FilesystemTag{id}, true
+}
+
+func filesystemTagSuffixToId(s string) string {
+	return strings.Replace(s, "-", "/", -1)
+}
diff --git a/automation/vendor/gopkg.in/juju/names.v2/ipaddress.go b/automation/vendor/gopkg.in/juju/names.v2/ipaddress.go
new file mode 100644
index 0000000..b92a389
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/ipaddress.go
@@ -0,0 +1,46 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"github.com/juju/utils"
+)
+
+const IPAddressTagKind = "ipaddress"
+
+// IsValidIPAddress returns whether id is a valid IP address ID.
+// This simply checks whether it is a valid UUID.
+func IsValidIPAddress(id string) bool {
+	return utils.IsValidUUIDString(id)
+}
+
+type IPAddressTag struct {
+	id utils.UUID
+}
+
+func (t IPAddressTag) String() string { return t.Kind() + "-" + t.id.String() }
+func (t IPAddressTag) Kind() string   { return IPAddressTagKind }
+func (t IPAddressTag) Id() string     { return t.id.String() }
+
+// NewIPAddressTag returns the tag for the IP address with the given ID (UUID).
+func NewIPAddressTag(id string) IPAddressTag {
+	uuid, err := utils.UUIDFromString(id)
+	if err != nil {
+		panic(err)
+	}
+	return IPAddressTag{id: uuid}
+}
+
+// ParseIPAddressTag parses an IP address tag string.
+func ParseIPAddressTag(ipAddressTag string) (IPAddressTag, error) {
+	tag, err := ParseTag(ipAddressTag)
+	if err != nil {
+		return IPAddressTag{}, err
+	}
+	ipat, ok := tag.(IPAddressTag)
+	if !ok {
+		return IPAddressTag{}, invalidTagError(ipAddressTag, IPAddressTagKind)
+	}
+	return ipat, nil
+}
diff --git a/automation/vendor/gopkg.in/juju/names.v2/machine.go b/automation/vendor/gopkg.in/juju/names.v2/machine.go
new file mode 100644
index 0000000..867aa72
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/machine.go
@@ -0,0 +1,60 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"regexp"
+	"strings"
+)
+
+const MachineTagKind = "machine"
+
+const (
+	ContainerTypeSnippet = "[a-z]+"
+	ContainerSnippet     = "/" + ContainerTypeSnippet + "/" + NumberSnippet
+	MachineSnippet       = NumberSnippet + "(?:" + ContainerSnippet + ")*"
+)
+
+var validMachine = regexp.MustCompile("^" + MachineSnippet + "$")
+
+// IsValidMachine returns whether id is a valid machine id.
+func IsValidMachine(id string) bool {
+	return validMachine.MatchString(id)
+}
+
+// IsContainerMachine returns whether id is a valid container machine id.
+func IsContainerMachine(id string) bool {
+	return validMachine.MatchString(id) && strings.Contains(id, "/")
+}
+
+type MachineTag struct {
+	id string
+}
+
+func (t MachineTag) String() string { return t.Kind() + "-" + t.id }
+func (t MachineTag) Kind() string   { return MachineTagKind }
+func (t MachineTag) Id() string     { return machineTagSuffixToId(t.id) }
+
+// NewMachineTag returns the tag for the machine with the given id.
+func NewMachineTag(id string) MachineTag {
+	id = strings.Replace(id, "/", "-", -1)
+	return MachineTag{id: id}
+}
+
+// ParseMachineTag parses a machine tag string.
+func ParseMachineTag(machineTag string) (MachineTag, error) {
+	tag, err := ParseTag(machineTag)
+	if err != nil {
+		return MachineTag{}, err
+	}
+	mt, ok := tag.(MachineTag)
+	if !ok {
+		return MachineTag{}, invalidTagError(machineTag, MachineTagKind)
+	}
+	return mt, nil
+}
+
+func machineTagSuffixToId(s string) string {
+	return strings.Replace(s, "-", "/", -1)
+}
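A sketch of the machine-tag round trip, where `/` in container ids maps to `-` in the tag form and back:

```go
package main

import (
	"fmt"

	"gopkg.in/juju/names.v2"
)

func main() {
	tag := names.NewMachineTag("0/lxd/1")
	fmt.Println(tag.String())                        // machine-0-lxd-1
	fmt.Println(tag.Id())                            // 0/lxd/1
	fmt.Println(names.IsContainerMachine("0/lxd/1")) // true
}
```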
diff --git a/automation/vendor/gopkg.in/juju/names.v2/model.go b/automation/vendor/gopkg.in/juju/names.v2/model.go
new file mode 100644
index 0000000..60e0db2
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/model.go
@@ -0,0 +1,52 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"regexp"
+)
+
+const ModelTagKind = "model"
+
+// ModelTag represents a tag used to describe a model.
+type ModelTag struct {
+	uuid string
+}
+
+var validUUID = regexp.MustCompile(`[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}`)
+
+// Lowercase letters, digits and (non-leading) hyphens, as per LP:1568944 #5.
+var validModelName = regexp.MustCompile(`^[a-z0-9]+[a-z0-9-]*$`)
+
+// NewModelTag returns the tag of a model with the given model UUID.
+func NewModelTag(uuid string) ModelTag {
+	return ModelTag{uuid: uuid}
+}
+
+// ParseModelTag parses a model tag string.
+func ParseModelTag(modelTag string) (ModelTag, error) {
+	tag, err := ParseTag(modelTag)
+	if err != nil {
+		return ModelTag{}, err
+	}
+	et, ok := tag.(ModelTag)
+	if !ok {
+		return ModelTag{}, invalidTagError(modelTag, ModelTagKind)
+	}
+	return et, nil
+}
+
+func (t ModelTag) String() string { return t.Kind() + "-" + t.Id() }
+func (t ModelTag) Kind() string   { return ModelTagKind }
+func (t ModelTag) Id() string     { return t.uuid }
+
+// IsValidModel returns whether id is a valid model UUID.
+func IsValidModel(id string) bool {
+	return validUUID.MatchString(id)
+}
+
+// IsValidModelName returns whether name is a valid string safe for a model name.
+func IsValidModelName(name string) bool {
+	return validModelName.MatchString(name)
+}
diff --git a/automation/vendor/gopkg.in/juju/names.v2/payload.go b/automation/vendor/gopkg.in/juju/names.v2/payload.go
new file mode 100644
index 0000000..c911ffd
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/payload.go
@@ -0,0 +1,74 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"regexp"
+
+	"github.com/juju/utils"
+)
+
+const (
+	// PayloadTagKind is used as the prefix for the string
+	// representation of payload tags.
+	PayloadTagKind = "payload"
+
+	// This can be expanded later, as needed.
+	payloadClass = "([a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?)"
+)
+
+var validPayload = regexp.MustCompile("^" + payloadClass + "$")
+
+// IsValidPayload returns whether id is a valid Juju ID for
+// a charm payload. The ID must be alphanumeric with interior hyphens, starting with a letter.
+func IsValidPayload(id string) bool {
+	return validPayload.MatchString(id)
+}
+
+// For compatibility with Juju 1.25, UUIDs are also supported.
+func isValidPayload(id string) bool {
+	return IsValidPayload(id) || utils.IsValidUUIDString(id)
+}
+
+// PayloadTag represents a charm payload.
+type PayloadTag struct {
+	id string
+}
+
+// NewPayloadTag returns the tag for a charm's payload with the given id.
+func NewPayloadTag(id string) PayloadTag {
+	return PayloadTag{
+		id: id,
+	}
+}
+
+// ParsePayloadTag parses a payload tag string.
+// So ParsePayloadTag(tag.String()) === tag.
+func ParsePayloadTag(tag string) (PayloadTag, error) {
+	t, err := ParseTag(tag)
+	if err != nil {
+		return PayloadTag{}, err
+	}
+	pt, ok := t.(PayloadTag)
+	if !ok {
+		return PayloadTag{}, invalidTagError(tag, PayloadTagKind)
+	}
+	return pt, nil
+}
+
+// Kind implements Tag.
+func (t PayloadTag) Kind() string {
+	return PayloadTagKind
+}
+
+// Id implements Tag.Id. It always returns the same ID with which
+// it was created. So NewPayloadTag(x).Id() == x for all valid x.
+func (t PayloadTag) Id() string {
+	return t.id
+}
+
+// String implements Tag.
+func (t PayloadTag) String() string {
+	return tagString(t)
+}
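
Reviewer note: unlike machine or unit tags, payload tags apply no suffix encoding, so NewPayloadTag(x).Id() == x and parse/print round-trips exactly. A sketch with a hypothetical payload class:

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        tag := names.NewPayloadTag("web-metrics")
        fmt.Println(tag.String()) // "payload-web-metrics"

        parsed, err := names.ParsePayloadTag(tag.String())
        if err != nil {
            panic(err)
        }
        fmt.Println(parsed == tag) // true: the ID is stored verbatim
    }
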
diff --git a/automation/vendor/gopkg.in/juju/names.v2/relation.go b/automation/vendor/gopkg.in/juju/names.v2/relation.go
new file mode 100644
index 0000000..b61696c
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/relation.go
@@ -0,0 +1,67 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const RelationTagKind = "relation"
+
+const RelationSnippet = "[a-z][a-z0-9]*(?:[_-][a-z0-9]+)*"
+
+// Relation keys have the format "application1:relName1 application2:relName2",
+// except for peer relations, which have the format "application:relName".
+// Relation tags have the format "relation-application1.rel1#application2.rel2";
+// for peer relations, the format is "relation-application.rel".
+
+var (
+	validRelation     = regexp.MustCompile("^" + ApplicationSnippet + ":" + RelationSnippet + " " + ApplicationSnippet + ":" + RelationSnippet + "$")
+	validPeerRelation = regexp.MustCompile("^" + ApplicationSnippet + ":" + RelationSnippet + "$")
+)
+
+// IsValidRelation returns whether key is a valid relation key.
+func IsValidRelation(key string) bool {
+	return validRelation.MatchString(key) || validPeerRelation.MatchString(key)
+}
+
+type RelationTag struct {
+	key string
+}
+
+func (t RelationTag) String() string { return t.Kind() + "-" + t.key }
+func (t RelationTag) Kind() string   { return RelationTagKind }
+func (t RelationTag) Id() string     { return relationTagSuffixToKey(t.key) }
+
+// NewRelationTag returns the tag for the relation with the given key.
+func NewRelationTag(relationKey string) RelationTag {
+	if !IsValidRelation(relationKey) {
+		panic(fmt.Sprintf("%q is not a valid relation key", relationKey))
+	}
+	// Replace both ":" with "." and the " " with "#".
+	relationKey = strings.Replace(relationKey, ":", ".", 2)
+	relationKey = strings.Replace(relationKey, " ", "#", 1)
+	return RelationTag{key: relationKey}
+}
+
+// ParseRelationTag parses a relation tag string.
+func ParseRelationTag(relationTag string) (RelationTag, error) {
+	tag, err := ParseTag(relationTag)
+	if err != nil {
+		return RelationTag{}, err
+	}
+	rt, ok := tag.(RelationTag)
+	if !ok {
+		return RelationTag{}, invalidTagError(relationTag, RelationTagKind)
+	}
+	return rt, nil
+}
+
+func relationTagSuffixToKey(s string) string {
+	// Replace both "." with ":" and the "#" with " ".
+	s = strings.Replace(s, ".", ":", 2)
+	return strings.Replace(s, "#", " ", 1)
+}
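
Reviewer note: the comment block above defines two encodings, keys using ':' and ' ' and tags using '.' and '#'. A round-trip sketch with hypothetical application names:

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        key := "wordpress:db mysql:server"
        fmt.Println(names.IsValidRelation(key)) // true

        tag := names.NewRelationTag(key)
        fmt.Println(tag.String()) // "relation-wordpress.db#mysql.server"
        fmt.Println(tag.Id())     // "wordpress:db mysql:server"
    }
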
diff --git a/automation/vendor/gopkg.in/juju/names.v2/space.go b/automation/vendor/gopkg.in/juju/names.v2/space.go
new file mode 100644
index 0000000..049e1c7
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/space.go
@@ -0,0 +1,50 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+)
+
+const (
+	SpaceTagKind = "space"
+	SpaceSnippet = "(?:[a-z0-9]+(?:-[a-z0-9]+)*)"
+)
+
+var validSpace = regexp.MustCompile("^" + SpaceSnippet + "$")
+
+// IsValidSpace reports whether name is a valid space name.
+func IsValidSpace(name string) bool {
+	return validSpace.MatchString(name)
+}
+
+type SpaceTag struct {
+	name string
+}
+
+func (t SpaceTag) String() string { return t.Kind() + "-" + t.Id() }
+func (t SpaceTag) Kind() string   { return SpaceTagKind }
+func (t SpaceTag) Id() string     { return t.name }
+
+// NewSpaceTag returns the tag of a space with the given name.
+func NewSpaceTag(name string) SpaceTag {
+	if !IsValidSpace(name) {
+		panic(fmt.Sprintf("%q is not a valid space name", name))
+	}
+	return SpaceTag{name: name}
+}
+
+// ParseSpaceTag parses a space tag string.
+func ParseSpaceTag(spaceTag string) (SpaceTag, error) {
+	tag, err := ParseTag(spaceTag)
+	if err != nil {
+		return SpaceTag{}, err
+	}
+	nt, ok := tag.(SpaceTag)
+	if !ok {
+		return SpaceTag{}, invalidTagError(spaceTag, SpaceTagKind)
+	}
+	return nt, nil
+}
diff --git a/automation/vendor/gopkg.in/juju/names.v2/storage.go b/automation/vendor/gopkg.in/juju/names.v2/storage.go
new file mode 100644
index 0000000..2314f87
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/storage.go
@@ -0,0 +1,86 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const (
+	StorageTagKind = "storage"
+
+	// StorageNameSnippet is the regular expression that describes valid
+	// storage names (without the storage instance sequence number).
+	StorageNameSnippet = "(?:[a-z][a-z0-9]*(?:-[a-z0-9]*[a-z][a-z0-9]*)*)"
+)
+
+var validStorage = regexp.MustCompile("^(" + StorageNameSnippet + ")/" + NumberSnippet + "$")
+
+type StorageTag struct {
+	id string
+}
+
+func (t StorageTag) String() string { return t.Kind() + "-" + t.id }
+func (t StorageTag) Kind() string   { return StorageTagKind }
+func (t StorageTag) Id() string     { return storageTagSuffixToId(t.id) }
+
+// NewStorageTag returns the tag for the storage instance with the given ID.
+// It will panic if the given string is not a valid storage instance Id.
+func NewStorageTag(id string) StorageTag {
+	tag, ok := tagFromStorageId(id)
+	if !ok {
+		panic(fmt.Sprintf("%q is not a valid storage instance ID", id))
+	}
+	return tag
+}
+
+// ParseStorageTag parses a storage tag string.
+func ParseStorageTag(s string) (StorageTag, error) {
+	tag, err := ParseTag(s)
+	if err != nil {
+		return StorageTag{}, err
+	}
+	st, ok := tag.(StorageTag)
+	if !ok {
+		return StorageTag{}, invalidTagError(s, StorageTagKind)
+	}
+	return st, nil
+}
+
+// IsValidStorage returns whether id is a valid storage instance ID.
+func IsValidStorage(id string) bool {
+	return validStorage.MatchString(id)
+}
+
+// StorageName returns the storage name from a storage instance ID.
+// StorageName returns an error if "id" is not a valid storage
+// instance ID.
+func StorageName(id string) (string, error) {
+	s := validStorage.FindStringSubmatch(id)
+	if s == nil {
+		return "", fmt.Errorf("%q is not a valid storage instance ID", id)
+	}
+	return s[1], nil
+}
+
+func tagFromStorageId(id string) (StorageTag, bool) {
+	// replace only the last "/" with "-".
+	i := strings.LastIndex(id, "/")
+	if i <= 0 || !IsValidStorage(id) {
+		return StorageTag{}, false
+	}
+	id = id[:i] + "-" + id[i+1:]
+	return StorageTag{id}, true
+}
+
+func storageTagSuffixToId(s string) string {
+	// Replace only the last "-" with "/", as it is valid for storage
+	// names to contain hyphens.
+	if i := strings.LastIndex(s, "-"); i > 0 {
+		s = s[:i] + "/" + s[i+1:]
+	}
+	return s
+}
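
Reviewer note: because storage names may themselves contain hyphens, only the final "/" of an instance ID is rewritten to "-", and only the final "-" is rewritten back. A sketch with a hypothetical instance ID:

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        name, err := names.StorageName("os-disk/0")
        fmt.Println(name, err) // "os-disk" <nil>

        tag := names.NewStorageTag("os-disk/0")
        fmt.Println(tag.String()) // "storage-os-disk-0": only the last "/" changed
        fmt.Println(tag.Id())     // "os-disk/0": only the last "-" is restored
    }
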
diff --git a/automation/vendor/gopkg.in/juju/names.v2/subnet.go b/automation/vendor/gopkg.in/juju/names.v2/subnet.go
new file mode 100644
index 0000000..281eb0f
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/subnet.go
@@ -0,0 +1,49 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"net"
+)
+
+const SubnetTagKind = "subnet"
+
+// IsValidSubnet returns whether cidr is a valid subnet CIDR.
+func IsValidSubnet(cidr string) bool {
+	_, ipNet, err := net.ParseCIDR(cidr)
+	if err == nil && ipNet.String() == cidr {
+		return true
+	}
+	return false
+}
+
+type SubnetTag struct {
+	cidr string
+}
+
+func (t SubnetTag) String() string { return t.Kind() + "-" + t.cidr }
+func (t SubnetTag) Kind() string   { return SubnetTagKind }
+func (t SubnetTag) Id() string     { return t.cidr }
+
+// NewSubnetTag returns the tag for subnet with the given CIDR.
+func NewSubnetTag(cidr string) SubnetTag {
+	if !IsValidSubnet(cidr) {
+		panic(fmt.Sprintf("%s is not a valid subnet CIDR", cidr))
+	}
+	return SubnetTag{cidr: cidr}
+}
+
+// ParseSubnetTag parses a subnet tag string.
+func ParseSubnetTag(subnetTag string) (SubnetTag, error) {
+	tag, err := ParseTag(subnetTag)
+	if err != nil {
+		return SubnetTag{}, err
+	}
+	subt, ok := tag.(SubnetTag)
+	if !ok {
+		return SubnetTag{}, invalidTagError(subnetTag, SubnetTagKind)
+	}
+	return subt, nil
+}
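
Reviewer note: IsValidSubnet accepts only the canonical CIDR form; the string must survive a round trip through net.ParseCIDR unchanged. For example:

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        fmt.Println(names.IsValidSubnet("10.0.0.0/24")) // true
        // false: ParseCIDR canonicalizes this to "10.0.0.0/24",
        // which differs from the input string.
        fmt.Println(names.IsValidSubnet("10.0.0.1/24"))
    }
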
diff --git a/automation/vendor/gopkg.in/juju/names.v2/tag.go b/automation/vendor/gopkg.in/juju/names.v2/tag.go
new file mode 100644
index 0000000..9feae00
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/tag.go
@@ -0,0 +1,216 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/utils"
+)
+
+// A Tag tags things that are taggable. Its purpose is to uniquely
+// identify some resource and provide a consistent representation of
+// that identity in both a human-readable and a machine-friendly format.
+// The latter benefits use of the tag in over-the-wire transmission
+// (e.g. in HTTP RPC calls) and in filename paths. The human-readable
+// tag "name" is available through the Id method. The machine-friendly
+// representation is provided by the String method.
+//
+// The ParseTag function may be used to build a tag from the machine-
+// formatted string. In addition, each kind of tag has its own Parse* method.
+// Each kind also has a New* method (e.g. NewMachineTag) which produces
+// a tag from the human-readable tag "ID".
+//
+// In the context of juju, the API *must* use tags to represent the
+// various juju entities. This contrasts with user-facing code, where
+// tags *must not* be used. Internal to juju the use of tags is a
+// judgement call based on the situation.
+type Tag interface {
+	// Kind returns the kind of the tag.
+	// This method is for legacy compatibility, callers should
+	// use equality or type assertions to verify the Kind, or type
+	// of a Tag.
+	Kind() string
+
+	// Id returns an identifier for this Tag.
+	// The contents and format of the identifier are specific
+	// to the implementer of the Tag.
+	Id() string
+
+	fmt.Stringer // all Tags should be able to print themselves
+}
+
+// tagString returns the canonical string representation of a tag.
+// It round-trips with splitTag().
+func tagString(tag Tag) string {
+	return tag.Kind() + "-" + tag.Id()
+}
+
+// TagKind returns one of the *TagKind constants for the given tag, or
+// an error if none matches.
+func TagKind(tag string) (string, error) {
+	i := strings.Index(tag, "-")
+	if i <= 0 || !validKinds(tag[:i]) {
+		return "", fmt.Errorf("%q is not a valid tag", tag)
+	}
+	return tag[:i], nil
+}
+
+func validKinds(kind string) bool {
+	switch kind {
+	case UnitTagKind, MachineTagKind, ApplicationTagKind, EnvironTagKind, UserTagKind,
+		RelationTagKind, ActionTagKind, VolumeTagKind, CharmTagKind, StorageTagKind,
+		FilesystemTagKind, IPAddressTagKind, SpaceTagKind, SubnetTagKind,
+		PayloadTagKind, ModelTagKind, ControllerTagKind, CloudTagKind, CloudCredentialTagKind:
+		return true
+	}
+	return false
+}
+
+func splitTag(tag string) (string, string, error) {
+	kind, err := TagKind(tag)
+	if err != nil {
+		return "", "", err
+	}
+	return kind, tag[len(kind)+1:], nil
+}
+
+// ParseTag parses a string representation into a Tag.
+func ParseTag(tag string) (Tag, error) {
+	kind, id, err := splitTag(tag)
+	if err != nil {
+		return nil, invalidTagError(tag, "")
+	}
+	switch kind {
+	case UnitTagKind:
+		id = unitTagSuffixToId(id)
+		if !IsValidUnit(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewUnitTag(id), nil
+	case MachineTagKind:
+		id = machineTagSuffixToId(id)
+		if !IsValidMachine(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewMachineTag(id), nil
+	case ApplicationTagKind:
+		if !IsValidApplication(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewApplicationTag(id), nil
+	case UserTagKind:
+		if !IsValidUser(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewUserTag(id), nil
+	case EnvironTagKind:
+		if !IsValidEnvironment(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewEnvironTag(id), nil
+	case ModelTagKind:
+		if !IsValidModel(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewModelTag(id), nil
+	case ControllerTagKind:
+		if !IsValidController(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewControllerTag(id), nil
+
+	case RelationTagKind:
+		id = relationTagSuffixToKey(id)
+		if !IsValidRelation(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewRelationTag(id), nil
+	case ActionTagKind:
+		if !IsValidAction(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewActionTag(id), nil
+	case VolumeTagKind:
+		id = volumeTagSuffixToId(id)
+		if !IsValidVolume(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewVolumeTag(id), nil
+	case CharmTagKind:
+		if !IsValidCharm(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewCharmTag(id), nil
+	case StorageTagKind:
+		id = storageTagSuffixToId(id)
+		if !IsValidStorage(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewStorageTag(id), nil
+	case FilesystemTagKind:
+		id = filesystemTagSuffixToId(id)
+		if !IsValidFilesystem(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewFilesystemTag(id), nil
+	case IPAddressTagKind:
+		uuid, err := utils.UUIDFromString(id)
+		if err != nil {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewIPAddressTag(uuid.String()), nil
+	case SubnetTagKind:
+		if !IsValidSubnet(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewSubnetTag(id), nil
+	case SpaceTagKind:
+		if !IsValidSpace(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewSpaceTag(id), nil
+	case PayloadTagKind:
+		if !isValidPayload(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewPayloadTag(id), nil
+	case CloudTagKind:
+		if !IsValidCloud(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewCloudTag(id), nil
+	case CloudCredentialTagKind:
+		id, err = cloudCredentialTagSuffixToId(id)
+		if err != nil {
+			return nil, errors.Wrap(err, invalidTagError(tag, kind))
+		}
+		if !IsValidCloudCredential(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewCloudCredentialTag(id), nil
+	default:
+		return nil, invalidTagError(tag, "")
+	}
+}
+
+func invalidTagError(tag, kind string) error {
+	if kind != "" {
+		return fmt.Errorf("%q is not a valid %s tag", tag, kind)
+	}
+	return fmt.Errorf("%q is not a valid tag", tag)
+}
+
+// ReadableString returns a human-readable string from the tag passed in.
+// It currently supports unit and machine tags. Support for additional types
+// can be added in as needed.
+func ReadableString(tag Tag) string {
+	if tag == nil {
+		return ""
+	}
+
+	return tag.Kind() + " " + tag.Id()
+}
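
Reviewer note: ParseTag is the generic entry point. It splits on the first "-", dispatches on the kind, normalizes the suffix, and validates it before constructing the concrete tag; callers recover the concrete type with an assertion. A minimal sketch using hypothetical IDs:

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        tag, err := names.ParseTag("unit-mysql-0")
        if err != nil {
            panic(err)
        }
        fmt.Println(tag.Kind(), tag.Id()) // "unit mysql/0"

        if ut, ok := tag.(names.UnitTag); ok {
            fmt.Println(ut.String()) // "unit-mysql-0"
        }

        _, err = names.ParseTag("nosuchkind-42")
        fmt.Println(err) // "nosuchkind-42" is not a valid tag
    }
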
diff --git a/automation/vendor/gopkg.in/juju/names.v2/unit.go b/automation/vendor/gopkg.in/juju/names.v2/unit.go
new file mode 100644
index 0000000..3f3a026
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/unit.go
@@ -0,0 +1,79 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const UnitTagKind = "unit"
+
+var validUnit = regexp.MustCompile("^(" + ApplicationSnippet + ")/" + NumberSnippet + "$")
+
+type UnitTag struct {
+	name string
+}
+
+func (t UnitTag) String() string { return t.Kind() + "-" + t.name }
+func (t UnitTag) Kind() string   { return UnitTagKind }
+func (t UnitTag) Id() string     { return unitTagSuffixToId(t.name) }
+
+// NewUnitTag returns the tag for the unit with the given name.
+// It will panic if the given unit name is not valid.
+func NewUnitTag(unitName string) UnitTag {
+	tag, ok := tagFromUnitName(unitName)
+	if !ok {
+		panic(fmt.Sprintf("%q is not a valid unit name", unitName))
+	}
+	return tag
+}
+
+// ParseUnitTag parses a unit tag string.
+func ParseUnitTag(unitTag string) (UnitTag, error) {
+	tag, err := ParseTag(unitTag)
+	if err != nil {
+		return UnitTag{}, err
+	}
+	ut, ok := tag.(UnitTag)
+	if !ok {
+		return UnitTag{}, invalidTagError(unitTag, UnitTagKind)
+	}
+	return ut, nil
+}
+
+// IsValidUnit returns whether name is a valid unit name.
+func IsValidUnit(name string) bool {
+	return validUnit.MatchString(name)
+}
+
+// UnitApplication returns the name of the application that the unit is
+// associated with. It returns an error if unitName is not a valid unit name.
+func UnitApplication(unitName string) (string, error) {
+	s := validUnit.FindStringSubmatch(unitName)
+	if s == nil {
+		return "", fmt.Errorf("%q is not a valid unit name", unitName)
+	}
+	return s[1], nil
+}
+
+func tagFromUnitName(unitName string) (UnitTag, bool) {
+	// Replace only the last "/" with "-".
+	i := strings.LastIndex(unitName, "/")
+	if i <= 0 || !IsValidUnit(unitName) {
+		return UnitTag{}, false
+	}
+	unitName = unitName[:i] + "-" + unitName[i+1:]
+	return UnitTag{name: unitName}, true
+}
+
+func unitTagSuffixToId(s string) string {
+	// Replace only the last "-" with "/", as it is valid for application
+	// names to contain hyphens.
+	if i := strings.LastIndex(s, "-"); i > 0 {
+		s = s[:i] + "/" + s[i+1:]
+	}
+	return s
+}
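
Reviewer note: unit names are "application/number", and application names may contain hyphens, so the helpers here replace only the last separator. A sketch with a hypothetical unit:

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        app, err := names.UnitApplication("rabbitmq-server/2")
        fmt.Println(app, err) // "rabbitmq-server" <nil>

        tag := names.NewUnitTag("rabbitmq-server/2")
        fmt.Println(tag.String()) // "unit-rabbitmq-server-2"
        fmt.Println(tag.Id())     // "rabbitmq-server/2": last "-" maps back to "/"
    }
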
diff --git a/automation/vendor/gopkg.in/juju/names.v2/user.go b/automation/vendor/gopkg.in/juju/names.v2/user.go
new file mode 100644
index 0000000..332ee27
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/user.go
@@ -0,0 +1,130 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+)
+
+const (
+	UserTagKind     = "user"
+	LocalUserDomain = "local"
+)
+
+var (
+	// TODO this does not allow single character usernames or
+	// domains. Is that deliberate?
+	// https://github.com/juju/names/issues/54
+	validUserNameSnippet = "[a-zA-Z0-9][a-zA-Z0-9.+-]*[a-zA-Z0-9]"
+	validUserSnippet     = fmt.Sprintf("(?:%s(?:@%s)?)", validUserNameSnippet, validUserNameSnippet)
+	validName            = regexp.MustCompile(fmt.Sprintf("^(?P<name>%s)(?:@(?P<domain>%s))?$", validUserNameSnippet, validUserNameSnippet))
+	validUserName        = regexp.MustCompile("^" + validUserNameSnippet + "$")
+)
+
+// IsValidUser returns whether id is a valid user id.
+// Valid users may or may not be qualified with an
+// @domain suffix. Examples of valid users include
+// bob, bob@local, bob@somewhere-else, 0-a-f@123.
+func IsValidUser(id string) bool {
+	return validName.MatchString(id)
+}
+
+// IsValidUserName returns whether the given
+// name is a valid name part of a user. That is,
+// usernames with a domain suffix will return
+// false.
+func IsValidUserName(name string) bool {
+	return validUserName.MatchString(name)
+}
+
+// IsValidUserDomain returns whether the given user
+// domain is valid.
+func IsValidUserDomain(domain string) bool {
+	return validUserName.MatchString(domain)
+}
+
+// UserTag represents a user that may be stored locally
+// or associated with some external domain.
+type UserTag struct {
+	name   string
+	domain string
+}
+
+func (t UserTag) Kind() string   { return UserTagKind }
+func (t UserTag) String() string { return UserTagKind + "-" + t.Id() }
+
+// Id implements Tag.Id. Local users will always have
+// an Id value without any domain.
+func (t UserTag) Id() string {
+	if t.domain == "" || t.domain == LocalUserDomain {
+		return t.name
+	}
+	return t.name + "@" + t.domain
+}
+
+// Name returns the name part of the user name
+// without its associated domain.
+func (t UserTag) Name() string { return t.name }
+
+// IsLocal returns true if the tag represents a local user.
+// Users without an explicit domain are considered local.
+func (t UserTag) IsLocal() bool {
+	return t.Domain() == LocalUserDomain || t.Domain() == ""
+}
+
+// Domain returns the user domain. Users in the local database
+// are from LocalUserDomain. Other users are considered 'remote' users.
+func (t UserTag) Domain() string {
+	return t.domain
+}
+
+// WithDomain returns a copy of the user tag with the
+// domain changed to the given argument.
+// The domain must satisfy IsValidUserDomain
+// or this function will panic.
+func (t UserTag) WithDomain(domain string) UserTag {
+	if !IsValidUserDomain(domain) {
+		panic(fmt.Sprintf("invalid user domain %q", domain))
+	}
+	return UserTag{
+		name:   t.name,
+		domain: domain,
+	}
+}
+
+// NewUserTag returns the tag for the user with the given name.
+// It panics if the user name does not satisfy IsValidUser.
+func NewUserTag(userName string) UserTag {
+	parts := validName.FindStringSubmatch(userName)
+	if len(parts) != 3 {
+		panic(fmt.Sprintf("invalid user tag %q", userName))
+	}
+	domain := parts[2]
+	if domain == LocalUserDomain {
+		domain = ""
+	}
+	return UserTag{name: parts[1], domain: domain}
+}
+
+// NewLocalUserTag returns the tag for a local user with the given name.
+func NewLocalUserTag(name string) UserTag {
+	if !IsValidUserName(name) {
+		panic(fmt.Sprintf("invalid user name %q", name))
+	}
+	return UserTag{name: name}
+}
+
+// ParseUserTag parses a user tag string.
+func ParseUserTag(tag string) (UserTag, error) {
+	t, err := ParseTag(tag)
+	if err != nil {
+		return UserTag{}, err
+	}
+	ut, ok := t.(UserTag)
+	if !ok {
+		return UserTag{}, invalidTagError(tag, UserTagKind)
+	}
+	return ut, nil
+}
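
Reviewer note: user tags carry an optional domain, and "@local" is stripped so local users always have a bare Id. A sketch with hypothetical user names:

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        local := names.NewUserTag("bob")
        fmt.Println(local.IsLocal(), local.Id()) // true bob

        remote := names.NewUserTag("bob@somewhere-else")
        fmt.Println(remote.IsLocal(), remote.Id()) // false bob@somewhere-else

        fmt.Println(local.WithDomain("external").Id()) // bob@external
    }
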
diff --git a/automation/vendor/gopkg.in/juju/names.v2/volume.go b/automation/vendor/gopkg.in/juju/names.v2/volume.go
new file mode 100644
index 0000000..2c1fd1b
--- /dev/null
+++ b/automation/vendor/gopkg.in/juju/names.v2/volume.go
@@ -0,0 +1,76 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const VolumeTagKind = "volume"
+
+// Volumes may be bound to a machine, meaning that the volume cannot
+// exist without that machine. We encode this in the tag to allow
+var validVolume = regexp.MustCompile("^(" + MachineSnippet + "/)?" + NumberSnippet + "$")
+
+type VolumeTag struct {
+	id string
+}
+
+func (t VolumeTag) String() string { return t.Kind() + "-" + t.id }
+func (t VolumeTag) Kind() string   { return VolumeTagKind }
+func (t VolumeTag) Id() string     { return volumeTagSuffixToId(t.id) }
+
+// NewVolumeTag returns the tag for the volume with the given ID.
+// It will panic if the given volume ID is not valid.
+func NewVolumeTag(id string) VolumeTag {
+	tag, ok := tagFromVolumeId(id)
+	if !ok {
+		panic(fmt.Sprintf("%q is not a valid volume ID", id))
+	}
+	return tag
+}
+
+// ParseVolumeTag parses a volume tag string.
+func ParseVolumeTag(volumeTag string) (VolumeTag, error) {
+	tag, err := ParseTag(volumeTag)
+	if err != nil {
+		return VolumeTag{}, err
+	}
+	dt, ok := tag.(VolumeTag)
+	if !ok {
+		return VolumeTag{}, invalidTagError(volumeTag, VolumeTagKind)
+	}
+	return dt, nil
+}
+
+// IsValidVolume returns whether id is a valid volume ID.
+func IsValidVolume(id string) bool {
+	return validVolume.MatchString(id)
+}
+
+// VolumeMachine returns the machine component of the volume
+// tag, and a boolean indicating whether or not there is a
+// machine component.
+func VolumeMachine(tag VolumeTag) (MachineTag, bool) {
+	id := tag.Id()
+	pos := strings.LastIndex(id, "/")
+	if pos == -1 {
+		return MachineTag{}, false
+	}
+	return NewMachineTag(id[:pos]), true
+}
+
+func tagFromVolumeId(id string) (VolumeTag, bool) {
+	if !IsValidVolume(id) {
+		return VolumeTag{}, false
+	}
+	id = strings.Replace(id, "/", "-", -1)
+	return VolumeTag{id}, true
+}
+
+func volumeTagSuffixToId(s string) string {
+	return strings.Replace(s, "-", "/", -1)
+}
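
Reviewer note: a volume ID may carry a machine prefix ("0/5") to record the binding described above; VolumeMachine reports whether one is present. A short sketch:

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        bound := names.NewVolumeTag("0/5")
        if mtag, ok := names.VolumeMachine(bound); ok {
            fmt.Println(mtag.Id()) // "0": the volume is bound to machine 0
        }

        _, ok := names.VolumeMachine(names.NewVolumeTag("5"))
        fmt.Println(ok) // false: no machine component
    }
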
diff --git a/automation/vendor/gopkg.in/mgo.v2/LICENSE b/automation/vendor/gopkg.in/mgo.v2/LICENSE
new file mode 100644
index 0000000..770c767
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/LICENSE
@@ -0,0 +1,25 @@
+mgo - MongoDB driver for Go
+
+Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/automation/vendor/gopkg.in/mgo.v2/bson/LICENSE b/automation/vendor/gopkg.in/mgo.v2/bson/LICENSE
new file mode 100644
index 0000000..8903260
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/bson/LICENSE
@@ -0,0 +1,25 @@
+BSON library for Go
+
+Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/automation/vendor/gopkg.in/mgo.v2/bson/bson.go b/automation/vendor/gopkg.in/mgo.v2/bson/bson.go
new file mode 100644
index 0000000..7fb7f8c
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/bson/bson.go
@@ -0,0 +1,738 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package bson is an implementation of the BSON specification for Go:
+//
+//     http://bsonspec.org
+//
+// It was created as part of the mgo MongoDB driver for Go, but is standalone
+// and may be used on its own without the driver.
+package bson
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/rand"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// --------------------------------------------------------------------------
+// The public API.
+
+// A value implementing the bson.Getter interface will have its GetBSON
+// method called when the given value has to be marshalled, and the result
+// of this method will be marshaled in place of the actual object.
+//
+// If GetBSON returns a non-nil error, the marshalling procedure
+// will stop and error out with the provided value.
+type Getter interface {
+	GetBSON() (interface{}, error)
+}
+
+// A value implementing the bson.Setter interface will receive the BSON
+// value via the SetBSON method during unmarshaling, and the object
+// itself will not be changed as usual.
+//
+// If setting the value works, the method should return nil or alternatively
+// bson.SetZero to set the respective field to its zero value (nil for
+// pointer types). If SetBSON returns a value of type bson.TypeError, the
+// BSON value will be omitted from a map or slice being decoded and the
+// unmarshalling will continue. If it returns any other non-nil error, the
+// unmarshalling procedure will stop and error out with the provided value.
+//
+// This interface is generally useful in pointer receivers, since the method
+// will want to change the receiver. A type field that implements the Setter
+// interface doesn't have to be a pointer, though.
+//
+// Unlike the usual behavior, unmarshalling onto a value that implements a
+// Setter interface will NOT reset the value to its zero state. This allows
+// the value to decide by itself how to be unmarshalled.
+//
+// For example:
+//
+//     type MyString string
+//
+//     func (s *MyString) SetBSON(raw bson.Raw) error {
+//         return raw.Unmarshal(s)
+//     }
+//
+type Setter interface {
+	SetBSON(raw Raw) error
+}
+
+// SetZero may be returned from a SetBSON method to have the value set to
+// its respective zero value. When used in pointer values, this will set the
+// field to nil rather than to the pre-allocated value.
+var SetZero = errors.New("set to zero")
+
+// M is a convenient alias for a map[string]interface{} map, useful for
+// dealing with BSON in a native way.  For instance:
+//
+//     bson.M{"a": 1, "b": true}
+//
+// There's no special handling for this type in addition to what's done anyway
+// for an equivalent map type.  Elements in the map will be dumped in an
+// undefined order. See also the bson.D type for an ordered alternative.
+type M map[string]interface{}
+
+// D represents a BSON document containing ordered elements. For example:
+//
+//     bson.D{{"a", 1}, {"b", true}}
+//
+// In some situations, such as when creating indexes for MongoDB, the order in
+// which the elements are defined is important.  If the order is not important,
+// using a map is generally more comfortable. See bson.M and bson.RawD.
+type D []DocElem
+
+// DocElem is an element of the bson.D document representation.
+type DocElem struct {
+	Name  string
+	Value interface{}
+}
+
+// Map returns a map out of the ordered element name/value pairs in d.
+func (d D) Map() (m M) {
+	m = make(M, len(d))
+	for _, item := range d {
+		m[item.Name] = item.Value
+	}
+	return m
+}
+
+// The Raw type represents raw unprocessed BSON documents and elements.
+// Kind is the kind of element as defined per the BSON specification, and
+// Data is the raw unprocessed data for the respective element.
+// Using this type it is possible to unmarshal or marshal values partially.
+//
+// Relevant documentation:
+//
+//     http://bsonspec.org/#/specification
+//
+type Raw struct {
+	Kind byte
+	Data []byte
+}
+
+// RawD represents a BSON document containing raw unprocessed elements.
+// This low-level representation may be useful when lazily processing
+// documents of uncertain content, or when manipulating the raw content
+// documents in general.
+type RawD []RawDocElem
+
+// See the RawD type.
+type RawDocElem struct {
+	Name  string
+	Value Raw
+}
+
+// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
+// long. MongoDB objects by default have such a property set in their "_id"
+// property.
+//
+// http://www.mongodb.org/display/DOCS/Object+IDs
+type ObjectId string
+
+// ObjectIdHex returns an ObjectId from the provided hex representation.
+// Calling this function with an invalid hex representation will
+// cause a runtime panic. See the IsObjectIdHex function.
+func ObjectIdHex(s string) ObjectId {
+	d, err := hex.DecodeString(s)
+	if err != nil || len(d) != 12 {
+		panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s))
+	}
+	return ObjectId(d)
+}
+
+// IsObjectIdHex returns whether s is a valid hex representation of
+// an ObjectId. See the ObjectIdHex function.
+func IsObjectIdHex(s string) bool {
+	if len(s) != 24 {
+		return false
+	}
+	_, err := hex.DecodeString(s)
+	return err == nil
+}
+
+// objectIdCounter is atomically incremented when generating a new ObjectId
+// using NewObjectId() function. It's used as a counter part of an id.
+var objectIdCounter uint32 = readRandomUint32()
+
+// readRandomUint32 returns a random objectIdCounter.
+func readRandomUint32() uint32 {
+	var b [4]byte
+	_, err := io.ReadFull(rand.Reader, b[:])
+	if err != nil {
+		panic(fmt.Errorf("cannot read random object id: %v", err))
+	}
+	return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24))
+}
+
+// machineId stores machine id generated once and used in subsequent calls
+// to NewObjectId function.
+var machineId = readMachineId()
+var processId = os.Getpid()
+
+// readMachineId generates and returns a machine id.
+// If the hostname cannot be read it falls back to random bytes, panicking only if those cannot be read either.
+func readMachineId() []byte {
+	var sum [3]byte
+	id := sum[:]
+	hostname, err1 := os.Hostname()
+	if err1 != nil {
+		_, err2 := io.ReadFull(rand.Reader, id)
+		if err2 != nil {
+			panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
+		}
+		return id
+	}
+	hw := md5.New()
+	hw.Write([]byte(hostname))
+	copy(id, hw.Sum(nil))
+	return id
+}
+
+// NewObjectId returns a new unique ObjectId.
+func NewObjectId() ObjectId {
+	var b [12]byte
+	// Timestamp, 4 bytes, big endian
+	binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
+	// Machine, first 3 bytes of md5(hostname)
+	b[4] = machineId[0]
+	b[5] = machineId[1]
+	b[6] = machineId[2]
+	// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
+	b[7] = byte(processId >> 8)
+	b[8] = byte(processId)
+	// Increment, 3 bytes, big endian
+	i := atomic.AddUint32(&objectIdCounter, 1)
+	b[9] = byte(i >> 16)
+	b[10] = byte(i >> 8)
+	b[11] = byte(i)
+	return ObjectId(b[:])
+}
+
+// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
+// with the provided number of seconds from epoch UTC, and all other parts
+// filled with zeroes. It's not safe to insert a document with an id generated
+// by this method; it is useful only for queries to find documents with ids
+// generated before or after the specified timestamp.
+func NewObjectIdWithTime(t time.Time) ObjectId {
+	var b [12]byte
+	binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
+	return ObjectId(string(b[:]))
+}
+
+// String returns a hex string representation of the id.
+// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
+func (id ObjectId) String() string {
+	return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
+}
+
+// Hex returns a hex representation of the ObjectId.
+func (id ObjectId) Hex() string {
+	return hex.EncodeToString([]byte(id))
+}
+
+// MarshalJSON turns a bson.ObjectId into a json.Marshaller.
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
+}
+
+var nullBytes = []byte("null")
+
+// UnmarshalJSON turns *bson.ObjectId into a json.Unmarshaller.
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+	if len(data) > 0 && (data[0] == '{' || data[0] == 'O') {
+		var v struct {
+			Id json.RawMessage `json:"$oid"`
+			Func struct {
+				Id json.RawMessage
+			} `json:"$oidFunc"`
+		}
+		err := jdec(data, &v)
+		if err == nil {
+			if len(v.Id) > 0 {
+				data = []byte(v.Id)
+			} else {
+				data = []byte(v.Func.Id)
+			}
+		}
+	}
+	if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
+		*id = ""
+		return nil
+	}
+	if len(data) != 26 || data[0] != '"' || data[25] != '"' {
+		return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s", string(data)))
+	}
+	var buf [12]byte
+	_, err := hex.Decode(buf[:], data[1:25])
+	if err != nil {
+		return errors.New(fmt.Sprintf("invalid ObjectId in JSON: %s (%s)", string(data), err))
+	}
+	*id = ObjectId(string(buf[:]))
+	return nil
+}
+
+// MarshalText turns bson.ObjectId into an encoding.TextMarshaler.
+func (id ObjectId) MarshalText() ([]byte, error) {
+	return []byte(fmt.Sprintf("%x", string(id))), nil
+}
+
+// UnmarshalText turns *bson.ObjectId into an encoding.TextUnmarshaler.
+func (id *ObjectId) UnmarshalText(data []byte) error {
+	if len(data) == 1 && data[0] == ' ' || len(data) == 0 {
+		*id = ""
+		return nil
+	}
+	if len(data) != 24 {
+		return fmt.Errorf("invalid ObjectId: %s", data)
+	}
+	var buf [12]byte
+	_, err := hex.Decode(buf[:], data[:])
+	if err != nil {
+		return fmt.Errorf("invalid ObjectId: %s (%s)", data, err)
+	}
+	*id = ObjectId(string(buf[:]))
+	return nil
+}
+
+// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
+func (id ObjectId) Valid() bool {
+	return len(id) == 12
+}
+
+// byteSlice returns byte slice of id from start to end.
+// Calling this function with an invalid id will cause a runtime panic.
+func (id ObjectId) byteSlice(start, end int) []byte {
+	if len(id) != 12 {
+		panic(fmt.Sprintf("invalid ObjectId: %q", string(id)))
+	}
+	return []byte(string(id)[start:end])
+}
+
+// Time returns the timestamp part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Time() time.Time {
+	// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
+	secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
+	return time.Unix(secs, 0)
+}
+
+// Machine returns the 3-byte machine id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Machine() []byte {
+	return id.byteSlice(4, 7)
+}
+
+// Pid returns the process id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Pid() uint16 {
+	return binary.BigEndian.Uint16(id.byteSlice(7, 9))
+}
+
+// Counter returns the incrementing value part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Counter() int32 {
+	b := id.byteSlice(9, 12)
+	// Counter is stored as big-endian 3-byte value
+	return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
+}
+
+// The Symbol type is similar to a string and is used in languages with a
+// distinct symbol type.
+type Symbol string
+
+// Now returns the current time with millisecond precision. MongoDB stores
+// timestamps with the same precision, so a Time returned from this method
+// will not change after a roundtrip to the database. That's the only reason
+// why this function exists. Using the time.Now function also works fine
+// otherwise.
+func Now() time.Time {
+	return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
+}
+
+// MongoTimestamp is a special internal type used by MongoDB that for some
+// strange reason has its own datatype defined in BSON.
+type MongoTimestamp int64
+
+type orderKey int64
+
+// MaxKey is a special value that compares higher than all other possible BSON
+// values in a MongoDB database.
+var MaxKey = orderKey(1<<63 - 1)
+
+// MinKey is a special value that compares lower than all other possible BSON
+// values in a MongoDB database.
+var MinKey = orderKey(-1 << 63)
+
+type undefined struct{}
+
+// Undefined represents the undefined BSON value.
+var Undefined undefined
+
+// Binary is a representation for non-standard binary values.  Any kind should
+// work, but the following are known as of this writing:
+//
+//   0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
+//   0x01 - Function (!?)
+//   0x02 - Obsolete generic.
+//   0x03 - UUID
+//   0x05 - MD5
+//   0x80 - User defined.
+//
+type Binary struct {
+	Kind byte
+	Data []byte
+}
+
+// RegEx represents a regular expression.  The Options field may contain
+// individual characters defining the way in which the pattern should be
+// applied, and must be sorted. Valid options as of this writing are 'i' for
+// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
+// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
+// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
+// unicode. The value of the Options parameter is not verified before being
+// marshaled into the BSON format.
+type RegEx struct {
+	Pattern string
+	Options string
+}
+
+// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
+// will be marshaled as a mapping from identifiers to values that may be
+// used when evaluating the provided Code.
+type JavaScript struct {
+	Code  string
+	Scope interface{}
+}
+
+// DBPointer refers to a document id in a namespace.
+//
+// This type is deprecated in the BSON specification and should not be used
+// except for backwards compatibility with ancient applications.
+type DBPointer struct {
+	Namespace string
+	Id        ObjectId
+}
+
+const initialBufferSize = 64
+
+func handleErr(err *error) {
+	if r := recover(); r != nil {
+		if _, ok := r.(runtime.Error); ok {
+			panic(r)
+		} else if _, ok := r.(externalPanic); ok {
+			panic(r)
+		} else if s, ok := r.(string); ok {
+			*err = errors.New(s)
+		} else if e, ok := r.(error); ok {
+			*err = e
+		} else {
+			panic(r)
+		}
+	}
+}
+
+// Marshal serializes the in value, which may be a map or a struct value.
+// In the case of struct values, only exported fields will be serialized,
+// and the order of serialized fields will match that of the struct itself.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the marshalling behavior for
+// the field. The tag formats accepted are:
+//
+//     "[<key>][,<flag1>[,<flag2>]]"
+//
+//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty  Only include the field if it's not set to the zero
+//                value for the type or to empty slices or maps.
+//
+//     minsize    Marshal an int64 value as an int32, if that's feasible
+//                while preserving the numeric value.
+//
+//     inline     Inline the field, which must be a struct or a map,
+//                causing all of its fields or keys to be processed as if
+//                they were part of the outer struct. For maps, keys must
+//                not conflict with the bson keys of other struct fields.
+//
+// Some examples:
+//
+//     type T struct {
+//         A bool
+//         B int    "myb"
+//         C string "myc,omitempty"
+//         D string `bson:",omitempty" json:"jsonkey"`
+//         E int64  ",minsize"
+//         F int64  "myf,omitempty,minsize"
+//     }
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := &encoder{make([]byte, 0, initialBufferSize)}
+	e.addDoc(reflect.ValueOf(in))
+	return e.out, nil
+}
+
+// Unmarshal deserializes data from in into the out value.  The out value
+// must be a map, a pointer to a struct, or a pointer to a bson.D value.
+// In the case of struct values, only exported fields will be deserialized.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the marshalling behavior for
+// the field. The tag formats accepted are:
+//
+//     "[<key>][,<flag1>[,<flag2>]]"
+//
+//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported during unmarshal (see the
+// Marshal method for other flags):
+//
+//     inline     Inline the field, which must be a struct or a map.
+//                Inlined structs are handled as if its fields were part
+//                of the outer struct. An inlined map causes keys that do
+//                not match any other struct field to be inserted in the
+//                map rather than being discarded as usual.
+//
+// The target field or element types of out may not necessarily match
+// the BSON values of the provided data.  The following conversions are
+// made automatically:
+//
+// - Numeric types are converted if at least the integer part of the
+//   value would be preserved correctly
+// - Bools are converted to numeric types as 1 or 0
+// - Numeric types are converted to bools as true if not 0 or false otherwise
+// - Binary and string BSON data is converted to a string, array or byte slice
+//
+// If the value would not fit the type and cannot be converted, it's
+// silently skipped.
+//
+// Pointer values are initialized when necessary.
+func Unmarshal(in []byte, out interface{}) (err error) {
+	if raw, ok := out.(*Raw); ok {
+		raw.Kind = 3
+		raw.Data = in
+		return nil
+	}
+	defer handleErr(&err)
+	v := reflect.ValueOf(out)
+	switch v.Kind() {
+	case reflect.Ptr:
+		fallthrough
+	case reflect.Map:
+		d := newDecoder(in)
+		d.readDocTo(v)
+	case reflect.Struct:
+		return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
+	default:
+		return errors.New("Unmarshal needs a map or a pointer to a struct.")
+	}
+	return nil
+}
+
+// Unmarshal deserializes raw into the out value.  If the out value type
+// is not compatible with raw, a *bson.TypeError is returned.
+//
+// See the Unmarshal function documentation for more details on the
+// unmarshalling process.
+func (raw Raw) Unmarshal(out interface{}) (err error) {
+	defer handleErr(&err)
+	v := reflect.ValueOf(out)
+	switch v.Kind() {
+	case reflect.Ptr:
+		v = v.Elem()
+		fallthrough
+	case reflect.Map:
+		d := newDecoder(raw.Data)
+		good := d.readElemTo(v, raw.Kind)
+		if !good {
+			return &TypeError{v.Type(), raw.Kind}
+		}
+	case reflect.Struct:
+		return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
+	default:
+		return errors.New("Raw Unmarshal needs a map or a valid pointer.")
+	}
+	return nil
+}
+
+type TypeError struct {
+	Type reflect.Type
+	Kind byte
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+	InlineMap  int
+	Zero       reflect.Value
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	MinSize   bool
+	Inline    []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var structMapMutex sync.RWMutex
+
+type externalPanic string
+
+func (e externalPanic) String() string {
+	return string(e)
+}
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	structMapMutex.RLock()
+	sinfo, found := structMap[st]
+	structMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("bson")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "minsize":
+					info.MinSize = true
+				case "inline":
+					inline = true
+				default:
+					msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+					panic(externalPanic(msg))
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct:
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+					}
+					fieldsMap[finfo.Key] = finfo
+					fieldsList = append(fieldsList, finfo)
+				}
+			default:
+				panic("Option ,inline needs a struct value or map field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+	sinfo = &structInfo{
+		fieldsMap,
+		fieldsList,
+		inlineMap,
+		reflect.New(st).Elem(),
+	}
+	structMapMutex.Lock()
+	structMap[st] = sinfo
+	structMapMutex.Unlock()
+	return sinfo, nil
+}
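
Reviewer note: a minimal Marshal/Unmarshal round trip exercising the tag flags documented above. The Event struct and its field names are hypothetical, and the import path is the vendored gopkg.in/mgo.v2/bson:

    package main

    import (
        "fmt"

        "gopkg.in/mgo.v2/bson"
    )

    type Event struct {
        Id    bson.ObjectId `bson:"_id,omitempty"`
        Name  string        `bson:"name"`
        Tries int64         `bson:"tries,minsize,omitempty"`
    }

    func main() {
        in := Event{Id: bson.NewObjectId(), Name: "provision"}
        data, err := bson.Marshal(in)
        if err != nil {
            panic(err)
        }

        var out Event
        if err := bson.Unmarshal(data, &out); err != nil {
            panic(err)
        }
        // Tries was zero and tagged omitempty, so it was never serialized.
        fmt.Println(out.Name, out.Id.Valid()) // provision true
    }
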
diff --git a/automation/vendor/gopkg.in/mgo.v2/bson/decimal.go b/automation/vendor/gopkg.in/mgo.v2/bson/decimal.go
new file mode 100644
index 0000000..3d2f700
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/bson/decimal.go
@@ -0,0 +1,310 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package bson
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Decimal128 holds decimal128 BSON values.
+type Decimal128 struct {
+	h, l uint64
+}
+
+func (d Decimal128) String() string {
+	var pos int     // positive sign
+	var e int       // exponent
+	var h, l uint64 // significand high/low
+
+	if d.h>>63&1 == 0 {
+		pos = 1
+	}
+
+	switch d.h >> 58 & (1<<5 - 1) {
+	case 0x1F:
+		return "NaN"
+	case 0x1E:
+		return "-Inf"[pos:]
+	}
+
+	l = d.l
+	if d.h>>61&3 == 3 {
+		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
+		// Implicit 0b100 prefix in significand.
+		e = int(d.h>>47&(1<<14-1)) - 6176
+		//h = 4<<47 | d.h&(1<<47-1)
+		// Spec says all of these values are out of range.
+		h, l = 0, 0
+	} else {
+		// Bits: 1*sign 14*exponent 113*significand
+		e = int(d.h>>49&(1<<14-1)) - 6176
+		h = d.h & (1<<49 - 1)
+	}
+
+	// Would be handled by the logic below, but that's trivial and common.
+	if h == 0 && l == 0 && e == 0 {
+		return "-0"[pos:]
+	}
+
+	var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
+	var last = len(repr)
+	var i = len(repr)
+	var dot = len(repr) + e
+	var rem uint32
+Loop:
+	for d9 := 0; d9 < 5; d9++ {
+		h, l, rem = divmod(h, l, 1e9)
+		for d1 := 0; d1 < 9; d1++ {
+			// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
+			if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
+				e += len(repr) - i
+				i--
+				repr[i] = '.'
+				last = i - 1
+				dot = len(repr) // Unmark.
+			}
+			c := '0' + byte(rem%10)
+			rem /= 10
+			i--
+			repr[i] = c
+			// Handle "0E+3", "1E+3", etc.
+			if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
+				last = i
+				break Loop
+			}
+			if c != '0' {
+				last = i
+			}
+			// Break early; the loop would finish anyway, but there is no need to continue.
+			if dot > i && l == 0 && h == 0 && rem == 0 {
+				break Loop
+			}
+		}
+	}
+	repr[last-1] = '-'
+	last--
+
+	if e > 0 {
+		return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
+	}
+	if e < 0 {
+		return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
+	}
+	return string(repr[last+pos:])
+}
+
+func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
+	div64 := uint64(div)
+	a := h >> 32
+	aq := a / div64
+	ar := a % div64
+	b := ar<<32 + h&(1<<32-1)
+	bq := b / div64
+	br := b % div64
+	c := br<<32 + l>>32
+	cq := c / div64
+	cr := c % div64
+	d := cr<<32 + l&(1<<32-1)
+	dq := d / div64
+	dr := d % div64
+	return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
+}
+
+var dNaN = Decimal128{0x1F << 58, 0}
+var dPosInf = Decimal128{0x1E << 58, 0}
+var dNegInf = Decimal128{0x3E << 58, 0}
+
+func dErr(s string) (Decimal128, error) {
+	return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
+}
+
+func ParseDecimal128(s string) (Decimal128, error) {
+	orig := s
+	if s == "" {
+		return dErr(orig)
+	}
+	neg := s[0] == '-'
+	if neg || s[0] == '+' {
+		s = s[1:]
+	}
+
+	if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
+		if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
+			return dNaN, nil
+		}
+		if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
+			if neg {
+				return dNegInf, nil
+			}
+			return dPosInf, nil
+		}
+		return dErr(orig)
+	}
+
+	var h, l uint64
+	var e int
+
+	var add, ovr uint32
+	var mul uint32 = 1
+	var dot = -1
+	var digits = 0
+	var i = 0
+	for i < len(s) {
+		c := s[i]
+		if mul == 1e9 {
+			h, l, ovr = muladd(h, l, mul, add)
+			mul, add = 1, 0
+			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+				return dErr(orig)
+			}
+		}
+		if c >= '0' && c <= '9' {
+			i++
+			if c > '0' || digits > 0 {
+				digits++
+			}
+			if digits > 34 {
+				if c == '0' {
+					// Exact rounding.
+					e++
+					continue
+				}
+				return dErr(orig)
+			}
+			mul *= 10
+			add *= 10
+			add += uint32(c - '0')
+			continue
+		}
+		if c == '.' {
+			i++
+			if dot >= 0 || i == 1 && len(s) == 1 {
+				return dErr(orig)
+			}
+			if i == len(s) {
+				break
+			}
+			if s[i] < '0' || s[i] > '9' || e > 0 {
+				return dErr(orig)
+			}
+			dot = i
+			continue
+		}
+		break
+	}
+	if i == 0 {
+		return dErr(orig)
+	}
+	if mul > 1 {
+		h, l, ovr = muladd(h, l, mul, add)
+		if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+			return dErr(orig)
+		}
+	}
+	if dot >= 0 {
+		e += dot - i
+	}
+	if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
+		i++
+		eneg := s[i] == '-'
+		if eneg || s[i] == '+' {
+			i++
+			if i == len(s) {
+				return dErr(orig)
+			}
+		}
+		n := 0
+		for i < len(s) && n < 1e4 {
+			c := s[i]
+			i++
+			if c < '0' || c > '9' {
+				return dErr(orig)
+			}
+			n *= 10
+			n += int(c - '0')
+		}
+		if eneg {
+			n = -n
+		}
+		e += n
+		for e < -6176 {
+			// Subnormal.
+			var div uint32 = 1
+			for div < 1e9 && e < -6176 {
+				div *= 10
+				e++
+			}
+			var rem uint32
+			h, l, rem = divmod(h, l, div)
+			if rem > 0 {
+				return dErr(orig)
+			}
+		}
+		for e > 6111 {
+			// Clamped.
+			var mul uint32 = 1
+			for mul < 1e9 && e > 6111 {
+				mul *= 10
+				e--
+			}
+			h, l, ovr = muladd(h, l, mul, 0)
+			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+				return dErr(orig)
+			}
+		}
+		if e < -6176 || e > 6111 {
+			return dErr(orig)
+		}
+	}
+
+	if i < len(s) {
+		return dErr(orig)
+	}
+
+	h |= uint64(e+6176) & uint64(1<<14-1) << 49
+	if neg {
+		h |= 1 << 63
+	}
+	return Decimal128{h, l}, nil
+}
+
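+// muladd computes (h,l)*mul + add over 128 bits, returning the low
+// 128 bits of the result and, in overflow, any carry out of the top.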
+func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
+	mul64 := uint64(mul)
+	a := mul64 * (l & (1<<32 - 1))
+	b := a>>32 + mul64*(l>>32)
+	c := b>>32 + mul64*(h&(1<<32-1))
+	d := c>>32 + mul64*(h>>32)
+
+	a = a&(1<<32-1) + uint64(add)
+	b = b&(1<<32-1) + a>>32
+	c = c&(1<<32-1) + b>>32
+	d = d&(1<<32-1) + c>>32
+
+	return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
+}
diff --git a/automation/vendor/gopkg.in/mgo.v2/bson/decode.go b/automation/vendor/gopkg.in/mgo.v2/bson/decode.go
new file mode 100644
index 0000000..7c2d841
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/bson/decode.go
@@ -0,0 +1,849 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+)
+
+type decoder struct {
+	in      []byte
+	i       int
+	docType reflect.Type
+}
+
+var typeM = reflect.TypeOf(M{})
+
+func newDecoder(in []byte) *decoder {
+	return &decoder{in, 0, typeM}
+}
+
+// --------------------------------------------------------------------------
+// Some helper functions.
+
+func corrupted() {
+	panic("Document is corrupted")
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of documents.
+
+const (
+	setterUnknown = iota
+	setterNone
+	setterType
+	setterAddr
+)
+
+var setterStyles map[reflect.Type]int
+var setterIface reflect.Type
+var setterMutex sync.RWMutex
+
+func init() {
+	var iface Setter
+	setterIface = reflect.TypeOf(&iface).Elem()
+	setterStyles = make(map[reflect.Type]int)
+}
+
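+// setterStyle reports whether outt implements Setter directly
+// (setterType), through its pointer (setterAddr), or not at all
+// (setterNone), caching the answer per type.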
+func setterStyle(outt reflect.Type) int {
+	setterMutex.RLock()
+	style := setterStyles[outt]
+	setterMutex.RUnlock()
+	if style == setterUnknown {
+		setterMutex.Lock()
+		defer setterMutex.Unlock()
+		if outt.Implements(setterIface) {
+			setterStyles[outt] = setterType
+		} else if reflect.PtrTo(outt).Implements(setterIface) {
+			setterStyles[outt] = setterAddr
+		} else {
+			setterStyles[outt] = setterNone
+		}
+		style = setterStyles[outt]
+	}
+	return style
+}
+
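+// getSetter returns out as a Setter when its type, or the pointer to
+// it, implements the interface, allocating nil pointers as needed;
+// otherwise it returns nil.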
+func getSetter(outt reflect.Type, out reflect.Value) Setter {
+	style := setterStyle(outt)
+	if style == setterNone {
+		return nil
+	}
+	if style == setterAddr {
+		if !out.CanAddr() {
+			return nil
+		}
+		out = out.Addr()
+	} else if outt.Kind() == reflect.Ptr && out.IsNil() {
+		out.Set(reflect.New(outt.Elem()))
+	}
+	return out.Interface().(Setter)
+}
+
+func clearMap(m reflect.Value) {
+	var none reflect.Value
+	for _, k := range m.MapKeys() {
+		m.SetMapIndex(k, none)
+	}
+}
+
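+// readDocTo decodes the document at d.in[d.i:] into out, which may be
+// a map with string-like keys, a struct (including Raw), an interface,
+// or a []DocElem / []RawDocElem slice.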
+func (d *decoder) readDocTo(out reflect.Value) {
+	var elemType reflect.Type
+	outt := out.Type()
+	outk := outt.Kind()
+
+	for {
+		if outk == reflect.Ptr && out.IsNil() {
+			out.Set(reflect.New(outt.Elem()))
+		}
+		if setter := getSetter(outt, out); setter != nil {
+			var raw Raw
+			d.readDocTo(reflect.ValueOf(&raw))
+			err := setter.SetBSON(raw)
+			if _, ok := err.(*TypeError); err != nil && !ok {
+				panic(err)
+			}
+			return
+		}
+		if outk == reflect.Ptr {
+			out = out.Elem()
+			outt = out.Type()
+			outk = out.Kind()
+			continue
+		}
+		break
+	}
+
+	var fieldsMap map[string]fieldInfo
+	var inlineMap reflect.Value
+	start := d.i
+
+	origout := out
+	if outk == reflect.Interface {
+		if d.docType.Kind() == reflect.Map {
+			mv := reflect.MakeMap(d.docType)
+			out.Set(mv)
+			out = mv
+		} else {
+			dv := reflect.New(d.docType).Elem()
+			out.Set(dv)
+			out = dv
+		}
+		outt = out.Type()
+		outk = outt.Kind()
+	}
+
+	docType := d.docType
+	keyType := typeString
+	convertKey := false
+	switch outk {
+	case reflect.Map:
+		keyType = outt.Key()
+		if keyType.Kind() != reflect.String {
+			panic("BSON map must have string keys. Got: " + outt.String())
+		}
+		if keyType != typeString {
+			convertKey = true
+		}
+		elemType = outt.Elem()
+		if elemType == typeIface {
+			d.docType = outt
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(out.Type()))
+		} else if out.Len() > 0 {
+			clearMap(out)
+		}
+	case reflect.Struct:
+		if outt != typeRaw {
+			sinfo, err := getStructInfo(out.Type())
+			if err != nil {
+				panic(err)
+			}
+			fieldsMap = sinfo.FieldsMap
+			out.Set(sinfo.Zero)
+			if sinfo.InlineMap != -1 {
+				inlineMap = out.Field(sinfo.InlineMap)
+				if !inlineMap.IsNil() && inlineMap.Len() > 0 {
+					clearMap(inlineMap)
+				}
+				elemType = inlineMap.Type().Elem()
+				if elemType == typeIface {
+					d.docType = inlineMap.Type()
+				}
+			}
+		}
+	case reflect.Slice:
+		switch outt.Elem() {
+		case typeDocElem:
+			origout.Set(d.readDocElems(outt))
+			return
+		case typeRawDocElem:
+			origout.Set(d.readRawDocElems(outt))
+			return
+		}
+		fallthrough
+	default:
+		panic("Unsupported document type for unmarshalling: " + out.Type().String())
+	}
+
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		name := d.readCStr()
+		if d.i >= end {
+			corrupted()
+		}
+
+		switch outk {
+		case reflect.Map:
+			e := reflect.New(elemType).Elem()
+			if d.readElemTo(e, kind) {
+				k := reflect.ValueOf(name)
+				if convertKey {
+					k = k.Convert(keyType)
+				}
+				out.SetMapIndex(k, e)
+			}
+		case reflect.Struct:
+			if outt == typeRaw {
+				d.dropElem(kind)
+			} else {
+				if info, ok := fieldsMap[name]; ok {
+					if info.Inline == nil {
+						d.readElemTo(out.Field(info.Num), kind)
+					} else {
+						d.readElemTo(out.FieldByIndex(info.Inline), kind)
+					}
+				} else if inlineMap.IsValid() {
+					if inlineMap.IsNil() {
+						inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+					}
+					e := reflect.New(elemType).Elem()
+					if d.readElemTo(e, kind) {
+						inlineMap.SetMapIndex(reflect.ValueOf(name), e)
+					}
+				} else {
+					d.dropElem(kind)
+				}
+			}
+		case reflect.Slice:
+		}
+
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+	d.docType = docType
+
+	if outt == typeRaw {
+		out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
+	}
+}
+
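+// readArrayDocTo decodes a BSON array into the fixed-length Go array
+// in out, panicking if the element counts do not match.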
+func (d *decoder) readArrayDocTo(out reflect.Value) {
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	i := 0
+	l := out.Len()
+	for d.in[d.i] != '\x00' {
+		if i >= l {
+			panic("Length mismatch on array field")
+		}
+		kind := d.readByte()
+		for d.i < end && d.in[d.i] != '\x00' {
+			d.i++
+		}
+		if d.i >= end {
+			corrupted()
+		}
+		d.i++
+		d.readElemTo(out.Index(i), kind)
+		if d.i >= end {
+			corrupted()
+		}
+		i++
+	}
+	if i != l {
+		panic("Length mismatch on array field")
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+}
+
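+// readSliceDoc decodes a BSON array into a newly made slice with t's
+// element type, skipping elements that cannot be decoded.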
+func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
+	tmp := make([]reflect.Value, 0, 8)
+	elemType := t.Elem()
+	if elemType == typeRawDocElem {
+		d.dropElem(0x04)
+		return reflect.Zero(t).Interface()
+	}
+
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		for d.i < end && d.in[d.i] != '\x00' {
+			d.i++
+		}
+		if d.i >= end {
+			corrupted()
+		}
+		d.i++
+		e := reflect.New(elemType).Elem()
+		if d.readElemTo(e, kind) {
+			tmp = append(tmp, e)
+		}
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+
+	n := len(tmp)
+	slice := reflect.MakeSlice(t, n, n)
+	for i := 0; i != n; i++ {
+		slice.Index(i).Set(tmp[i])
+	}
+	return slice.Interface()
+}
+
+var typeSlice = reflect.TypeOf([]interface{}{})
+var typeIface = typeSlice.Elem()
+
+func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
+	docType := d.docType
+	d.docType = typ
+	slice := make([]DocElem, 0, 8)
+	d.readDocWith(func(kind byte, name string) {
+		e := DocElem{Name: name}
+		v := reflect.ValueOf(&e.Value)
+		if d.readElemTo(v.Elem(), kind) {
+			slice = append(slice, e)
+		}
+	})
+	slicev := reflect.New(typ).Elem()
+	slicev.Set(reflect.ValueOf(slice))
+	d.docType = docType
+	return slicev
+}
+
+func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
+	docType := d.docType
+	d.docType = typ
+	slice := make([]RawDocElem, 0, 8)
+	d.readDocWith(func(kind byte, name string) {
+		e := RawDocElem{Name: name}
+		v := reflect.ValueOf(&e.Value)
+		if d.readElemTo(v.Elem(), kind) {
+			slice = append(slice, e)
+		}
+	})
+	slicev := reflect.New(typ).Elem()
+	slicev.Set(reflect.ValueOf(slice))
+	d.docType = docType
+	return slicev
+}
+
+func (d *decoder) readDocWith(f func(kind byte, name string)) {
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		name := d.readCStr()
+		if d.i >= end {
+			corrupted()
+		}
+		f(kind, name)
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of individual elements within a document.
+
+var blackHole = settableValueOf(struct{}{})
+
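+// dropElem skips the next element of the given kind by decoding it
+// into a throwaway value.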
+func (d *decoder) dropElem(kind byte) {
+	d.readElemTo(blackHole, kind)
+}
+
+// Attempt to decode an element from the document and put it into out.
+// If the types are not compatible, the returned good value will be
+// false and out will be unchanged.
+func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
+
+	start := d.i
+
+	if kind == 0x03 {
+		// Delegate unmarshaling of documents.
+		outt := out.Type()
+		outk := out.Kind()
+		switch outk {
+		case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
+			d.readDocTo(out)
+			return true
+		}
+		if setterStyle(outt) != setterNone {
+			d.readDocTo(out)
+			return true
+		}
+		if outk == reflect.Slice {
+			switch outt.Elem() {
+			case typeDocElem:
+				out.Set(d.readDocElems(outt))
+			case typeRawDocElem:
+				out.Set(d.readRawDocElems(outt))
+			default:
+				d.readDocTo(blackHole)
+			}
+			return true
+		}
+		d.readDocTo(blackHole)
+		return true
+	}
+
+	var in interface{}
+
+	switch kind {
+	case 0x01: // Float64
+		in = d.readFloat64()
+	case 0x02: // UTF-8 string
+		in = d.readStr()
+	case 0x03: // Document
+		panic("Can't happen. Handled above.")
+	case 0x04: // Array
+		outt := out.Type()
+		if setterStyle(outt) != setterNone {
+			// Skip the value so its data is handed to the setter below.
+			d.dropElem(kind)
+			break
+		}
+		for outt.Kind() == reflect.Ptr {
+			outt = outt.Elem()
+		}
+		switch outt.Kind() {
+		case reflect.Array:
+			d.readArrayDocTo(out)
+			return true
+		case reflect.Slice:
+			in = d.readSliceDoc(outt)
+		default:
+			in = d.readSliceDoc(typeSlice)
+		}
+	case 0x05: // Binary
+		b := d.readBinary()
+		if b.Kind == 0x00 || b.Kind == 0x02 {
+			in = b.Data
+		} else {
+			in = b
+		}
+	case 0x06: // Undefined (obsolete, but still seen in the wild)
+		in = Undefined
+	case 0x07: // ObjectId
+		in = ObjectId(d.readBytes(12))
+	case 0x08: // Bool
+		in = d.readBool()
+	case 0x09: // UTC datetime
+		// BSON 0x09 is a UTC datetime, stored as milliseconds since the Unix epoch.
+		i := d.readInt64()
+		if i == -62135596800000 {
+			in = time.Time{} // In UTC for convenience.
+		} else {
+			in = time.Unix(i/1e3, i%1e3*1e6)
+		}
+	case 0x0A: // Nil
+		in = nil
+	case 0x0B: // RegEx
+		in = d.readRegEx()
+	case 0x0C:
+		in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
+	case 0x0D: // JavaScript without scope
+		in = JavaScript{Code: d.readStr()}
+	case 0x0E: // Symbol
+		in = Symbol(d.readStr())
+	case 0x0F: // JavaScript with scope
+		d.i += 4 // Skip length
+		js := JavaScript{d.readStr(), make(M)}
+		d.readDocTo(reflect.ValueOf(js.Scope))
+		in = js
+	case 0x10: // Int32
+		in = int(d.readInt32())
+	case 0x11: // Mongo-specific timestamp
+		in = MongoTimestamp(d.readInt64())
+	case 0x12: // Int64
+		in = d.readInt64()
+	case 0x13: // Decimal128
+		in = Decimal128{
+			l: uint64(d.readInt64()),
+			h: uint64(d.readInt64()),
+		}
+	case 0x7F: // Max key
+		in = MaxKey
+	case 0xFF: // Min key
+		in = MinKey
+	default:
+		panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
+	}
+
+	outt := out.Type()
+
+	if outt == typeRaw {
+		out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
+		return true
+	}
+
+	if setter := getSetter(outt, out); setter != nil {
+		err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
+		if err == SetZero {
+			out.Set(reflect.Zero(outt))
+			return true
+		}
+		if err == nil {
+			return true
+		}
+		if _, ok := err.(*TypeError); !ok {
+			panic(err)
+		}
+		return false
+	}
+
+	if in == nil {
+		out.Set(reflect.Zero(outt))
+		return true
+	}
+
+	outk := outt.Kind()
+
+	// Dereference and initialize pointer if necessary.
+	first := true
+	for outk == reflect.Ptr {
+		if !out.IsNil() {
+			out = out.Elem()
+		} else {
+			elem := reflect.New(outt.Elem())
+			if first {
+				// Only set if value is compatible.
+				first = false
+				defer func(out, elem reflect.Value) {
+					if good {
+						out.Set(elem)
+					}
+				}(out, elem)
+			} else {
+				out.Set(elem)
+			}
+			out = elem
+		}
+		outt = out.Type()
+		outk = outt.Kind()
+	}
+
+	inv := reflect.ValueOf(in)
+	if outt == inv.Type() {
+		out.Set(inv)
+		return true
+	}
+
+	switch outk {
+	case reflect.Interface:
+		out.Set(inv)
+		return true
+	case reflect.String:
+		switch inv.Kind() {
+		case reflect.String:
+			out.SetString(inv.String())
+			return true
+		case reflect.Slice:
+			if b, ok := in.([]byte); ok {
+				out.SetString(string(b))
+				return true
+			}
+		case reflect.Int, reflect.Int64:
+			if outt == typeJSONNumber {
+				out.SetString(strconv.FormatInt(inv.Int(), 10))
+				return true
+			}
+		case reflect.Float64:
+			if outt == typeJSONNumber {
+				out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
+				return true
+			}
+		}
+	case reflect.Slice, reflect.Array:
+		// Remember, array (0x04) slices are built with the correct
+		// element type. If we are here, this must be a cross-kind BSON
+		// conversion (e.g. a 0x02 string unmarshaled into a []byte).
+		if outt.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		switch inv.Kind() {
+		case reflect.String:
+			slice := []byte(inv.String())
+			out.Set(reflect.ValueOf(slice))
+			return true
+		case reflect.Slice:
+			switch outt.Kind() {
+			case reflect.Array:
+				reflect.Copy(out, inv)
+			case reflect.Slice:
+				out.SetBytes(inv.Bytes())
+			}
+			return true
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch inv.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetInt(inv.Int())
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetInt(int64(inv.Float()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetInt(1)
+			} else {
+				out.SetInt(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON.")
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch inv.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetUint(uint64(inv.Int()))
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetUint(uint64(inv.Float()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetUint(1)
+			} else {
+				out.SetUint(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON.")
+		}
+	case reflect.Float32, reflect.Float64:
+		switch inv.Kind() {
+		case reflect.Float32, reflect.Float64:
+			out.SetFloat(inv.Float())
+			return true
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetFloat(float64(inv.Int()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetFloat(1)
+			} else {
+				out.SetFloat(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON.")
+		}
+	case reflect.Bool:
+		switch inv.Kind() {
+		case reflect.Bool:
+			out.SetBool(inv.Bool())
+			return true
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetBool(inv.Int() != 0)
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetBool(inv.Float() != 0)
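+// String returns d in plain or scientific decimal notation, with the
+// special values rendered as "NaN", "Inf", and "-Inf".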
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON.")
+		}
+	case reflect.Struct:
+		if outt == typeURL && inv.Kind() == reflect.String {
+			u, err := url.Parse(inv.String())
+			if err != nil {
+				panic(err)
+			}
+			out.Set(reflect.ValueOf(u).Elem())
+			return true
+		}
+		if outt == typeBinary {
+			if b, ok := in.([]byte); ok {
+				out.Set(reflect.ValueOf(Binary{Data: b}))
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// --------------------------------------------------------------------------
+// Parsers of basic types.
+
+func (d *decoder) readRegEx() RegEx {
+	re := RegEx{}
+	re.Pattern = d.readCStr()
+	re.Options = d.readCStr()
+	return re
+}
+
+func (d *decoder) readBinary() Binary {
+	l := d.readInt32()
+	b := Binary{}
+	b.Kind = d.readByte()
+	b.Data = d.readBytes(l)
+	if b.Kind == 0x02 && len(b.Data) >= 4 {
+		// Weird obsolete format with redundant length.
+		b.Data = b.Data[4:]
+	}
+	return b
+}
+
+func (d *decoder) readStr() string {
+	l := d.readInt32()
+	b := d.readBytes(l - 1)
+	if d.readByte() != '\x00' {
+		corrupted()
+	}
+	return string(b)
+}
+
+func (d *decoder) readCStr() string {
+	start := d.i
+	end := start
+	l := len(d.in)
+	for ; end != l; end++ {
+		if d.in[end] == '\x00' {
+			break
+		}
+	}
+	d.i = end + 1
+	if d.i > l {
+		corrupted()
+	}
+	return string(d.in[start:end])
+}
+
+func (d *decoder) readBool() bool {
+	b := d.readByte()
+	if b == 0 {
+		return false
+	}
+	if b == 1 {
+		return true
+	}
+	panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
+}
+
+func (d *decoder) readFloat64() float64 {
+	return math.Float64frombits(uint64(d.readInt64()))
+}
+
+func (d *decoder) readInt32() int32 {
+	b := d.readBytes(4)
+	return int32((uint32(b[0]) << 0) |
+		(uint32(b[1]) << 8) |
+		(uint32(b[2]) << 16) |
+		(uint32(b[3]) << 24))
+}
+
+func (d *decoder) readInt64() int64 {
+	b := d.readBytes(8)
+	return int64((uint64(b[0]) << 0) |
+		(uint64(b[1]) << 8) |
+		(uint64(b[2]) << 16) |
+		(uint64(b[3]) << 24) |
+		(uint64(b[4]) << 32) |
+		(uint64(b[5]) << 40) |
+		(uint64(b[6]) << 48) |
+		(uint64(b[7]) << 56))
+}
+
+func (d *decoder) readByte() byte {
+	i := d.i
+	d.i++
+	if d.i > len(d.in) {
+		corrupted()
+	}
+	return d.in[i]
+}
+
+func (d *decoder) readBytes(length int32) []byte {
+	if length < 0 {
+		corrupted()
+	}
+	start := d.i
+	d.i += int(length)
+	if d.i < start || d.i > len(d.in) {
+		corrupted()
+	}
+	return d.in[start : start+int(length)]
+}
diff --git a/automation/vendor/gopkg.in/mgo.v2/bson/encode.go b/automation/vendor/gopkg.in/mgo.v2/bson/encode.go
new file mode 100644
index 0000000..add39e8
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/bson/encode.go
@@ -0,0 +1,514 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+// --------------------------------------------------------------------------
+// Some internal infrastructure.
+
+var (
+	typeBinary         = reflect.TypeOf(Binary{})
+	typeObjectId       = reflect.TypeOf(ObjectId(""))
+	typeDBPointer      = reflect.TypeOf(DBPointer{"", ObjectId("")})
+	typeSymbol         = reflect.TypeOf(Symbol(""))
+	typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
+	typeOrderKey       = reflect.TypeOf(MinKey)
+	typeDocElem        = reflect.TypeOf(DocElem{})
+	typeRawDocElem     = reflect.TypeOf(RawDocElem{})
+	typeRaw            = reflect.TypeOf(Raw{})
+	typeURL            = reflect.TypeOf(url.URL{})
+	typeTime           = reflect.TypeOf(time.Time{})
+	typeString         = reflect.TypeOf("")
+	typeJSONNumber     = reflect.TypeOf(json.Number(""))
+)
+
+const itoaCacheSize = 32
+
+var itoaCache []string
+
+func init() {
+	itoaCache = make([]string, itoaCacheSize)
+	for i := 0; i != itoaCacheSize; i++ {
+		itoaCache[i] = strconv.Itoa(i)
+	}
+}
+
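+// itoa is strconv.Itoa with a small cache for the indices most
+// commonly used as BSON array keys.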
+func itoa(i int) string {
+	if i < itoaCacheSize {
+		return itoaCache[i]
+	}
+	return strconv.Itoa(i)
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of the document value itself.
+
+type encoder struct {
+	out []byte
+}
+
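+// addDoc marshals v as a complete BSON document, resolving Getter
+// values and pointers first and copying Raw documents through verbatim.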
+func (e *encoder) addDoc(v reflect.Value) {
+	for {
+		if vi, ok := v.Interface().(Getter); ok {
+			getv, err := vi.GetBSON()
+			if err != nil {
+				panic(err)
+			}
+			v = reflect.ValueOf(getv)
+			continue
+		}
+		if v.Kind() == reflect.Ptr {
+			v = v.Elem()
+			continue
+		}
+		break
+	}
+
+	if v.Type() == typeRaw {
+		raw := v.Interface().(Raw)
+		if raw.Kind != 0x03 && raw.Kind != 0x00 {
+			panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
+		}
+		if len(raw.Data) == 0 {
+			panic("Attempted to marshal empty Raw document")
+		}
+		e.addBytes(raw.Data...)
+		return
+	}
+
+	start := e.reserveInt32()
+
+	switch v.Kind() {
+	case reflect.Map:
+		e.addMap(v)
+	case reflect.Struct:
+		e.addStruct(v)
+	case reflect.Array, reflect.Slice:
+		e.addSlice(v)
+	default:
+		panic("Can't marshal " + v.Type().String() + " as a BSON document")
+	}
+
+	e.addBytes(0)
+	e.setInt32(start, int32(len(e.out)-start))
+}
+
+func (e *encoder) addMap(v reflect.Value) {
+	for _, k := range v.MapKeys() {
+		e.addElem(k.String(), v.MapIndex(k), false)
+	}
+}
+
+func (e *encoder) addStruct(v reflect.Value) {
+	sinfo, err := getStructInfo(v.Type())
+	if err != nil {
+		panic(err)
+	}
+	var value reflect.Value
+	if sinfo.InlineMap >= 0 {
+		m := v.Field(sinfo.InlineMap)
+		if m.Len() > 0 {
+			for _, k := range m.MapKeys() {
+				ks := k.String()
+				if _, found := sinfo.FieldsMap[ks]; found {
+					panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
+				}
+				e.addElem(ks, m.MapIndex(k), false)
+			}
+		}
+	}
+	for _, info := range sinfo.FieldsList {
+		if info.Inline == nil {
+			value = v.Field(info.Num)
+		} else {
+			value = v.FieldByIndex(info.Inline)
+		}
+		if info.OmitEmpty && isZero(value) {
+			continue
+		}
+		e.addElem(info.Key, value, info.MinSize)
+	}
+}
+
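+// isZero reports whether v holds the zero value for its type, treating
+// time.Time specially and recursing into exported struct fields.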
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Ptr, reflect.Interface:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		if vt == typeTime {
+			return v.Interface().(time.Time).IsZero()
+		}
+		for i := 0; i < v.NumField(); i++ {
+			if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+func (e *encoder) addSlice(v reflect.Value) {
+	vi := v.Interface()
+	if d, ok := vi.(D); ok {
+		for _, elem := range d {
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	if d, ok := vi.(RawD); ok {
+		for _, elem := range d {
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	l := v.Len()
+	et := v.Type().Elem()
+	if et == typeDocElem {
+		for i := 0; i < l; i++ {
+			elem := v.Index(i).Interface().(DocElem)
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	if et == typeRawDocElem {
+		for i := 0; i < l; i++ {
+			elem := v.Index(i).Interface().(RawDocElem)
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	for i := 0; i < l; i++ {
+		e.addElem(itoa(i), v.Index(i), false)
+	}
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of elements in a document.
+
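+// addElemName writes an element header: the kind byte followed by the
+// element name as a NUL-terminated C string.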
+func (e *encoder) addElemName(kind byte, name string) {
+	e.addBytes(kind)
+	e.addBytes([]byte(name)...)
+	e.addBytes(0)
+}
+
+func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
+
+	if !v.IsValid() {
+		e.addElemName(0x0A, name)
+		return
+	}
+
+	if getter, ok := v.Interface().(Getter); ok {
+		getv, err := getter.GetBSON()
+		if err != nil {
+			panic(err)
+		}
+		e.addElem(name, reflect.ValueOf(getv), minSize)
+		return
+	}
+
+	switch v.Kind() {
+
+	case reflect.Interface:
+		e.addElem(name, v.Elem(), minSize)
+
+	case reflect.Ptr:
+		e.addElem(name, v.Elem(), minSize)
+
+	case reflect.String:
+		s := v.String()
+		switch v.Type() {
+		case typeObjectId:
+			if len(s) != 12 {
+				panic("ObjectIDs must be exactly 12 bytes long (got " +
+					strconv.Itoa(len(s)) + ")")
+			}
+			e.addElemName(0x07, name)
+			e.addBytes([]byte(s)...)
+		case typeSymbol:
+			e.addElemName(0x0E, name)
+			e.addStr(s)
+		case typeJSONNumber:
+			n := v.Interface().(json.Number)
+			if i, err := n.Int64(); err == nil {
+				e.addElemName(0x12, name)
+				e.addInt64(i)
+			} else if f, err := n.Float64(); err == nil {
+				e.addElemName(0x01, name)
+				e.addFloat64(f)
+			} else {
+				panic("failed to convert json.Number to a number: " + s)
+			}
+		default:
+			e.addElemName(0x02, name)
+			e.addStr(s)
+		}
+
+	case reflect.Float32, reflect.Float64:
+		e.addElemName(0x01, name)
+		e.addFloat64(v.Float())
+
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		u := v.Uint()
+		if int64(u) < 0 {
+			panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
+		} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
+			e.addElemName(0x10, name)
+			e.addInt32(int32(u))
+		} else {
+			e.addElemName(0x12, name)
+			e.addInt64(int64(u))
+		}
+
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch v.Type() {
+		case typeMongoTimestamp:
+			e.addElemName(0x11, name)
+			e.addInt64(v.Int())
+
+		case typeOrderKey:
+			if v.Int() == int64(MaxKey) {
+				e.addElemName(0x7F, name)
+			} else {
+				e.addElemName(0xFF, name)
+			}
+
+		default:
+			i := v.Int()
+			if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
+				// It fits into an int32, encode as such.
+				e.addElemName(0x10, name)
+				e.addInt32(int32(i))
+			} else {
+				e.addElemName(0x12, name)
+				e.addInt64(i)
+			}
+		}
+
+	case reflect.Bool:
+		e.addElemName(0x08, name)
+		if v.Bool() {
+			e.addBytes(1)
+		} else {
+			e.addBytes(0)
+		}
+
+	case reflect.Map:
+		e.addElemName(0x03, name)
+		e.addDoc(v)
+
+	case reflect.Slice:
+		vt := v.Type()
+		et := vt.Elem()
+		if et.Kind() == reflect.Uint8 {
+			e.addElemName(0x05, name)
+			e.addBinary(0x00, v.Bytes())
+		} else if et == typeDocElem || et == typeRawDocElem {
+			e.addElemName(0x03, name)
+			e.addDoc(v)
+		} else {
+			e.addElemName(0x04, name)
+			e.addDoc(v)
+		}
+
+	case reflect.Array:
+		et := v.Type().Elem()
+		if et.Kind() == reflect.Uint8 {
+			e.addElemName(0x05, name)
+			if v.CanAddr() {
+				e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte))
+			} else {
+				n := v.Len()
+				e.addInt32(int32(n))
+				e.addBytes(0x00)
+				for i := 0; i < n; i++ {
+					el := v.Index(i)
+					e.addBytes(byte(el.Uint()))
+				}
+			}
+		} else {
+			e.addElemName(0x04, name)
+			e.addDoc(v)
+		}
+
+	case reflect.Struct:
+		switch s := v.Interface().(type) {
+
+		case Raw:
+			kind := s.Kind
+			if kind == 0x00 {
+				kind = 0x03
+			}
+			if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
+				panic("Attempted to marshal empty Raw document")
+			}
+			e.addElemName(kind, name)
+			e.addBytes(s.Data...)
+
+		case Binary:
+			e.addElemName(0x05, name)
+			e.addBinary(s.Kind, s.Data)
+
+		case Decimal128:
+			e.addElemName(0x13, name)
+			e.addInt64(int64(s.l))
+			e.addInt64(int64(s.h))
+
+		case DBPointer:
+			e.addElemName(0x0C, name)
+			e.addStr(s.Namespace)
+			if len(s.Id) != 12 {
+				panic("ObjectIDs must be exactly 12 bytes long (got " +
+					strconv.Itoa(len(s.Id)) + ")")
+			}
+			e.addBytes([]byte(s.Id)...)
+
+		case RegEx:
+			e.addElemName(0x0B, name)
+			e.addCStr(s.Pattern)
+			e.addCStr(s.Options)
+
+		case JavaScript:
+			if s.Scope == nil {
+				e.addElemName(0x0D, name)
+				e.addStr(s.Code)
+			} else {
+				e.addElemName(0x0F, name)
+				start := e.reserveInt32()
+				e.addStr(s.Code)
+				e.addDoc(reflect.ValueOf(s.Scope))
+				e.setInt32(start, int32(len(e.out)-start))
+			}
+
+		case time.Time:
+			// BSON 0x09 stores a UTC datetime as milliseconds since the Unix epoch.
+			e.addElemName(0x09, name)
+			e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
+
+		case url.URL:
+			e.addElemName(0x02, name)
+			e.addStr(s.String())
+
+		case undefined:
+			e.addElemName(0x06, name)
+
+		default:
+			e.addElemName(0x03, name)
+			e.addDoc(v)
+		}
+
+	default:
+		panic("Can't marshal " + v.Type().String() + " in a BSON document")
+	}
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of base types.
+
+func (e *encoder) addBinary(subtype byte, v []byte) {
+	if subtype == 0x02 {
+		// Subtype 0x02 is the obsolete "old binary" format, which carries a redundant inner length prefix.
+		e.addInt32(int32(len(v) + 4))
+		e.addBytes(subtype)
+		e.addInt32(int32(len(v)))
+	} else {
+		e.addInt32(int32(len(v)))
+		e.addBytes(subtype)
+	}
+	e.addBytes(v...)
+}
+
+func (e *encoder) addStr(v string) {
+	e.addInt32(int32(len(v) + 1))
+	e.addCStr(v)
+}
+
+func (e *encoder) addCStr(v string) {
+	e.addBytes([]byte(v)...)
+	e.addBytes(0)
+}
+
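+// reserveInt32 appends four placeholder bytes for a length prefix and
+// returns their offset, to be patched later with setInt32.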
+func (e *encoder) reserveInt32() (pos int) {
+	pos = len(e.out)
+	e.addBytes(0, 0, 0, 0)
+	return pos
+}
+
+func (e *encoder) setInt32(pos int, v int32) {
+	e.out[pos+0] = byte(v)
+	e.out[pos+1] = byte(v >> 8)
+	e.out[pos+2] = byte(v >> 16)
+	e.out[pos+3] = byte(v >> 24)
+}
+
+func (e *encoder) addInt32(v int32) {
+	u := uint32(v)
+	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
+}
+
+func (e *encoder) addInt64(v int64) {
+	u := uint64(v)
+	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
+		byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
+}
+
+func (e *encoder) addFloat64(v float64) {
+	e.addInt64(int64(math.Float64bits(v)))
+}
+
+func (e *encoder) addBytes(v ...byte) {
+	e.out = append(e.out, v...)
+}
diff --git a/automation/vendor/gopkg.in/mgo.v2/bson/json.go b/automation/vendor/gopkg.in/mgo.v2/bson/json.go
new file mode 100644
index 0000000..09df826
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/bson/json.go
@@ -0,0 +1,380 @@
+package bson
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"gopkg.in/mgo.v2/internal/json"
+	"strconv"
+	"time"
+)
+
+// UnmarshalJSON unmarshals a JSON value that may hold non-standard
+// syntax as defined in BSON's extended JSON specification.
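+//
+// A minimal sketch (illustrative only):
+//
+//	var v M
+//	err := UnmarshalJSON([]byte(`{"when": {"$date": "2017-01-02T15:04:05.000Z"}}`), &v)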
+func UnmarshalJSON(data []byte, value interface{}) error {
+	d := json.NewDecoder(bytes.NewBuffer(data))
+	d.Extend(&jsonExt)
+	return d.Decode(value)
+}
+
+// MarshalJSON marshals a JSON value that may hold non-standard
+// syntax as defined in BSON's extended JSON specification.
+func MarshalJSON(value interface{}) ([]byte, error) {
+	var buf bytes.Buffer
+	e := json.NewEncoder(&buf)
+	e.Extend(&jsonExt)
+	err := e.Encode(value)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// jdec is used internally by the JSON decoding functions
+// so they can unmarshal function-call syntax without recursing
+// endlessly through the keyed-object handlers.
+func jdec(data []byte, value interface{}) error {
+	d := json.NewDecoder(bytes.NewBuffer(data))
+	d.Extend(&funcExt)
+	return d.Decode(value)
+}
+
+var jsonExt json.Extension
+var funcExt json.Extension
+
+// TODO
+// - Shell regular expressions ("/regexp/opts")
+
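+// init wires the extended-JSON handlers into jsonExt (keyed objects
+// such as $binary, $date, and $oid) and the shell-style function
+// forms (e.g. ObjectId(...), ISODate(...)) into funcExt.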
+func init() {
+	jsonExt.DecodeUnquotedKeys(true)
+	jsonExt.DecodeTrailingCommas(true)
+
+	funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary")
+	jsonExt.DecodeKeyed("$binary", jdecBinary)
+	jsonExt.DecodeKeyed("$binaryFunc", jdecBinary)
+	jsonExt.EncodeType([]byte(nil), jencBinarySlice)
+	jsonExt.EncodeType(Binary{}, jencBinaryType)
+
+	funcExt.DecodeFunc("ISODate", "$dateFunc", "S")
+	funcExt.DecodeFunc("new Date", "$dateFunc", "S")
+	jsonExt.DecodeKeyed("$date", jdecDate)
+	jsonExt.DecodeKeyed("$dateFunc", jdecDate)
+	jsonExt.EncodeType(time.Time{}, jencDate)
+
+	funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i")
+	jsonExt.DecodeKeyed("$timestamp", jdecTimestamp)
+	jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp)
+
+	funcExt.DecodeConst("undefined", Undefined)
+
+	jsonExt.DecodeKeyed("$regex", jdecRegEx)
+	jsonExt.EncodeType(RegEx{}, jencRegEx)
+
+	funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id")
+	jsonExt.DecodeKeyed("$oid", jdecObjectId)
+	jsonExt.DecodeKeyed("$oidFunc", jdecObjectId)
+	jsonExt.EncodeType(ObjectId(""), jencObjectId)
+
+	funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id")
+	jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef)
+
+	funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N")
+	jsonExt.DecodeKeyed("$numberLong", jdecNumberLong)
+	jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong)
+	jsonExt.EncodeType(int64(0), jencNumberLong)
+	jsonExt.EncodeType(int(0), jencInt)
+
+	funcExt.DecodeConst("MinKey", MinKey)
+	funcExt.DecodeConst("MaxKey", MaxKey)
+	jsonExt.DecodeKeyed("$minKey", jdecMinKey)
+	jsonExt.DecodeKeyed("$maxKey", jdecMaxKey)
+	jsonExt.EncodeType(orderKey(0), jencMinMaxKey)
+
+	jsonExt.DecodeKeyed("$undefined", jdecUndefined)
+	jsonExt.EncodeType(Undefined, jencUndefined)
+
+	jsonExt.Extend(&funcExt)
+}
+
+func fbytes(format string, args ...interface{}) []byte {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, format, args...)
+	return buf.Bytes()
+}
+
+func jdecBinary(data []byte) (interface{}, error) {
+	var v struct {
+		Binary []byte `json:"$binary"`
+		Type   string `json:"$type"`
+		Func   struct {
+			Binary []byte `json:"$binary"`
+			Type   int64  `json:"$type"`
+		} `json:"$binaryFunc"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+
+	var binData []byte
+	var binKind int64
+	if v.Type == "" && v.Binary == nil {
+		binData = v.Func.Binary
+		binKind = v.Func.Type
+	} else if v.Type == "" {
+		return v.Binary, nil
+	} else {
+		binData = v.Binary
+		binKind, err = strconv.ParseInt(v.Type, 0, 64)
+		if err != nil {
+			binKind = -1
+		}
+	}
+
+	if binKind == 0 {
+		return binData, nil
+	}
+	if binKind < 0 || binKind > 255 {
+		return nil, fmt.Errorf("invalid type in binary object: %s", data)
+	}
+
+	return Binary{Kind: byte(binKind), Data: binData}, nil
+}
+
+func jencBinarySlice(v interface{}) ([]byte, error) {
+	in := v.([]byte)
+	out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
+	base64.StdEncoding.Encode(out, in)
+	return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil
+}
+
+func jencBinaryType(v interface{}) ([]byte, error) {
+	in := v.(Binary)
+	out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data)))
+	base64.StdEncoding.Encode(out, in.Data)
+	return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil
+}
+
+const jdateFormat = "2006-01-02T15:04:05.999Z"
+
+func jdecDate(data []byte) (interface{}, error) {
+	var v struct {
+		S    string `json:"$date"`
+		Func struct {
+			S string
+		} `json:"$dateFunc"`
+	}
+	_ = jdec(data, &v)
+	if v.S == "" {
+		v.S = v.Func.S
+	}
+	if v.S != "" {
+		for _, format := range []string{jdateFormat, "2006-01-02"} {
+			t, err := time.Parse(format, v.S)
+			if err == nil {
+				return t, nil
+			}
+		}
+		return nil, fmt.Errorf("cannot parse date: %q", v.S)
+	}
+
+	var vn struct {
+		Date struct {
+			N int64 `json:"$numberLong,string"`
+		} `json:"$date"`
+		Func struct {
+			S int64
+		} `json:"$dateFunc"`
+	}
+	err := jdec(data, &vn)
+	if err != nil {
+		return nil, fmt.Errorf("cannot parse date: %q", data)
+	}
+	n := vn.Date.N
+	if n == 0 {
+		n = vn.Func.S
+	}
+	return time.Unix(n/1000, n%1000*1e6).UTC(), nil
+}
+
+func jencDate(v interface{}) ([]byte, error) {
+	t := v.(time.Time)
+	return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil
+}
+
+func jdecTimestamp(data []byte) (interface{}, error) {
+	var v struct {
+		Func struct {
+			T int32 `json:"t"`
+			I int32 `json:"i"`
+		} `json:"$timestamp"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil
+}
+
+func jencTimestamp(v interface{}) ([]byte, error) {
+	ts := uint64(v.(MongoTimestamp))
+	return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil
+}
+
+func jdecRegEx(data []byte) (interface{}, error) {
+	var v struct {
+		Regex   string `json:"$regex"`
+		Options string `json:"$options"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	return RegEx{v.Regex, v.Options}, nil
+}
+
+func jencRegEx(v interface{}) ([]byte, error) {
+	re := v.(RegEx)
+	type regex struct {
+		Regex   string `json:"$regex"`
+		Options string `json:"$options"`
+	}
+	return json.Marshal(regex{re.Pattern, re.Options})
+}
+
+func jdecObjectId(data []byte) (interface{}, error) {
+	var v struct {
+		Id   string `json:"$oid"`
+		Func struct {
+			Id string
+		} `json:"$oidFunc"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if v.Id == "" {
+		v.Id = v.Func.Id
+	}
+	return ObjectIdHex(v.Id), nil
+}
+
+func jencObjectId(v interface{}) ([]byte, error) {
+	return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil
+}
+
+func jdecDBRef(data []byte) (interface{}, error) {
+	// TODO Support unmarshaling $ref and $id into the input value.
+	var v struct {
+		Obj map[string]interface{} `json:"$dbrefFunc"`
+	}
+	// TODO Fix this. Must not be required.
+	v.Obj = make(map[string]interface{})
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	return v.Obj, nil
+}
+
+func jdecNumberLong(data []byte) (interface{}, error) {
+	var v struct {
+		N    int64 `json:"$numberLong,string"`
+		Func struct {
+			N int64 `json:",string"`
+		} `json:"$numberLongFunc"`
+	}
+	var vn struct {
+		N    int64 `json:"$numberLong"`
+		Func struct {
+			N int64
+		} `json:"$numberLongFunc"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		err = jdec(data, &vn)
+		v.N = vn.N
+		v.Func.N = vn.Func.N
+	}
+	if err != nil {
+		return nil, err
+	}
+	if v.N != 0 {
+		return v.N, nil
+	}
+	return v.Func.N, nil
+}
+
+func jencNumberLong(v interface{}) ([]byte, error) {
+	n := v.(int64)
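+	// Values up to 2^53 are representable exactly as a JSON double,
+	// so they are emitted unquoted; larger values use the quoted
+	// {"$numberLong":"..."} form to avoid precision loss.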
+	f := `{"$numberLong":"%d"}`
+	if n <= 1<<53 {
+		f = `{"$numberLong":%d}`
+	}
+	return fbytes(f, n), nil
+}
+
+func jencInt(v interface{}) ([]byte, error) {
+	n := v.(int)
+	f := `{"$numberLong":"%d"}`
+	if int64(n) <= 1<<53 {
+		f = `%d`
+	}
+	return fbytes(f, n), nil
+}
+
+func jdecMinKey(data []byte) (interface{}, error) {
+	var v struct {
+		N int64 `json:"$minKey"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if v.N != 1 {
+		return nil, fmt.Errorf("invalid $minKey object: %s", data)
+	}
+	return MinKey, nil
+}
+
+func jdecMaxKey(data []byte) (interface{}, error) {
+	var v struct {
+		N int64 `json:"$maxKey"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if v.N != 1 {
+		return nil, fmt.Errorf("invalid $maxKey object: %s", data)
+	}
+	return MaxKey, nil
+}
+
+func jencMinMaxKey(v interface{}) ([]byte, error) {
+	switch v.(orderKey) {
+	case MinKey:
+		return []byte(`{"$minKey":1}`), nil
+	case MaxKey:
+		return []byte(`{"$maxKey":1}`), nil
+	}
+	panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v))
+}
+
+func jdecUndefined(data []byte) (interface{}, error) {
+	var v struct {
+		B bool `json:"$undefined"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if !v.B {
+		return nil, fmt.Errorf("invalid $undefined object: %s", data)
+	}
+	return Undefined, nil
+}
+
+func jencUndefined(v interface{}) ([]byte, error) {
+	return []byte(`{"$undefined":true}`), nil
+}
diff --git a/automation/vendor/gopkg.in/mgo.v2/internal/json/LICENSE b/automation/vendor/gopkg.in/mgo.v2/internal/json/LICENSE
new file mode 100644
index 0000000..7448756
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/internal/json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/automation/vendor/gopkg.in/mgo.v2/internal/json/decode.go b/automation/vendor/gopkg.in/mgo.v2/internal/json/decode.go
new file mode 100644
index 0000000..ce7c7d2
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/internal/json/decode.go
@@ -0,0 +1,1685 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"reflect"
+	"runtime"
+	"strconv"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match.
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+//	bool, for JSON booleans
+//	float64, for JSON numbers
+//	string, for JSON strings
+//	[]interface{}, for JSON arrays
+//	map[string]interface{}, for JSON objects
+//	nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a map, Unmarshal first establishes a map to
+// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
+// reuses the existing map, keeping existing entries. Unmarshal then stores
+// key-value pairs from the JSON object into the map. The map's key type must
+// either be a string or implement encoding.TextUnmarshaler.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
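+// A minimal sketch (illustrative only):
+//
+//	var v struct {
+//		Name string `json:"name"`
+//	}
+//	err := Unmarshal([]byte(`{"name": "x"}`), &v)
+//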
+func Unmarshal(data []byte, v interface{}) error {
+	// Check for well-formedness.
+	// Avoids filling out half a data structure
+	// before discovering a JSON syntax error.
+	var d decodeState
+	err := checkValid(data, &d.scan)
+	if err != nil {
+		return err
+	}
+
+	d.init(data)
+	return d.unmarshal(v)
+}
+
+// Unmarshaler is the interface implemented by types
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+	UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+	Value  string       // description of JSON value - "bool", "array", "number -5"
+	Type   reflect.Type // type of Go value it could not be assigned to
+	Offset int64        // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+	return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+	Key   string
+	Type  reflect.Type
+	Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+	return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+	Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+	if e.Type == nil {
+		return "json: Unmarshal(nil)"
+	}
+
+	if e.Type.Kind() != reflect.Ptr {
+		return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+	}
+	return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if _, ok := r.(runtime.Error); ok {
+				panic(r)
+			}
+			err = r.(error)
+		}
+	}()
+
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
+		return &InvalidUnmarshalError{reflect.TypeOf(v)}
+	}
+
+	d.scan.reset()
+	// We decode rv not rv.Elem because the Unmarshaler interface
+	// test must be applied at the top level of the value.
+	d.value(rv)
+	return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+	return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+	return strconv.ParseInt(string(n), 10, 64)
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+	// This function implements the JSON numbers grammar.
+	// See https://tools.ietf.org/html/rfc7159#section-6
+	// and http://json.org/number.gif
+
+	if s == "" {
+		return false
+	}
+
+	// Optional -
+	if s[0] == '-' {
+		s = s[1:]
+		if s == "" {
+			return false
+		}
+	}
+
+	// Digits
+	switch {
+	default:
+		return false
+
+	case s[0] == '0':
+		s = s[1:]
+
+	case '1' <= s[0] && s[0] <= '9':
+		s = s[1:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+		}
+	}
+
+	// . followed by 1 or more digits.
+	if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+		s = s[2:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+		}
+	}
+
+	// e or E followed by an optional - or + and
+	// 1 or more digits.
+	if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+		s = s[1:]
+		if s[0] == '+' || s[0] == '-' {
+			s = s[1:]
+			if s == "" {
+				return false
+			}
+		}
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+		}
+	}
+
+	// Make sure we are at the end.
+	return s == ""
+}
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+	data       []byte
+	off        int // read offset in data
+	scan       scanner
+	nextscan   scanner // for calls to nextValue
+	savedError error
+	useNumber  bool
+	ext        Extension
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+	d.data = data
+	d.off = 0
+	d.savedError = nil
+	return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+	panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+	if d.savedError == nil {
+		d.savedError = err
+	}
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+	c := d.data[d.off]
+	item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+	if err != nil {
+		d.error(err)
+	}
+	d.off = len(d.data) - len(rest)
+
+	// Our scanner has seen the opening brace/bracket
+	// and thinks we're still in the middle of the object.
+	// Invent a closing brace/bracket to get it out.
+	if c == '{' {
+		d.scan.step(&d.scan, '}')
+	} else if c == '[' {
+		d.scan.step(&d.scan, ']')
+	} else {
+		// Was inside a function name. Get out of it.
+		d.scan.step(&d.scan, '(')
+		d.scan.step(&d.scan, ')')
+	}
+
+	return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+	var newOp int
+	for {
+		if d.off >= len(d.data) {
+			newOp = d.scan.eof()
+			d.off = len(d.data) + 1 // mark processed EOF with len+1
+		} else {
+			c := d.data[d.off]
+			d.off++
+			newOp = d.scan.step(&d.scan, c)
+		}
+		if newOp != op {
+			break
+		}
+	}
+	return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// It updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+	if !v.IsValid() {
+		_, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+		if err != nil {
+			d.error(err)
+		}
+		d.off = len(d.data) - len(rest)
+
+		// d.scan thinks we're still at the beginning of the item.
+		// Feed in an empty string - the shortest, simplest value -
+		// so that it knows we got to the end of the value.
+		if d.scan.redo {
+			// rewind.
+			d.scan.redo = false
+			d.scan.step = stateBeginValue
+		}
+		d.scan.step(&d.scan, '"')
+		d.scan.step(&d.scan, '"')
+
+		n := len(d.scan.parseState)
+		if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+			// d.scan thinks we just read an object key; finish the object
+			d.scan.step(&d.scan, ':')
+			d.scan.step(&d.scan, '"')
+			d.scan.step(&d.scan, '"')
+			d.scan.step(&d.scan, '}')
+		}
+
+		return
+	}
+
+	switch op := d.scanWhile(scanSkipSpace); op {
+	default:
+		d.error(errPhase)
+
+	case scanBeginArray:
+		d.array(v)
+
+	case scanBeginObject:
+		d.object(v)
+
+	case scanBeginLiteral:
+		d.literal(v)
+
+	case scanBeginName:
+		d.name(v)
+	}
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+	switch op := d.scanWhile(scanSkipSpace); op {
+	default:
+		d.error(errPhase)
+
+	case scanBeginArray:
+		d.array(reflect.Value{})
+
+	case scanBeginObject:
+		d.object(reflect.Value{})
+
+	case scanBeginName:
+		switch v := d.nameInterface().(type) {
+		case nil, string:
+			return v
+		}
+
+	case scanBeginLiteral:
+		switch v := d.literalInterface().(type) {
+		case nil, string:
+			return v
+		}
+	}
+	return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+	// If v is a named type and is addressable,
+	// start with its address, so that if the type has pointer methods,
+	// we find them.
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				v = e
+				continue
+			}
+		}
+
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+			break
+		}
+		if v.IsNil() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		if v.Type().NumMethod() > 0 {
+			if u, ok := v.Interface().(Unmarshaler); ok {
+				return u, nil, v
+			}
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+				return nil, u, v
+			}
+		}
+		v = v.Elem()
+	}
+	return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// The first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+	// Check for unmarshaler.
+	u, ut, pv := d.indirect(v, false)
+	if u != nil {
+		d.off--
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+		d.off--
+		d.next()
+		return
+	}
+
+	v = pv
+
+	// Check type of target.
+	switch v.Kind() {
+	case reflect.Interface:
+		if v.NumMethod() == 0 {
+			// Decoding into nil interface?  Switch to non-reflect code.
+			v.Set(reflect.ValueOf(d.arrayInterface()))
+			return
+		}
+		// Otherwise it's invalid.
+		fallthrough
+	default:
+		d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+		d.off--
+		d.next()
+		return
+	case reflect.Array:
+	case reflect.Slice:
+		break
+	}
+
+	i := 0
+	for {
+		// Look ahead for ] - can only happen on first iteration.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		// Get element of array, growing if necessary.
+		if v.Kind() == reflect.Slice {
+			// Grow slice if necessary
+			if i >= v.Cap() {
+				newcap := v.Cap() + v.Cap()/2
+				if newcap < 4 {
+					newcap = 4
+				}
+				newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+				reflect.Copy(newv, v)
+				v.Set(newv)
+			}
+			if i >= v.Len() {
+				v.SetLen(i + 1)
+			}
+		}
+
+		if i < v.Len() {
+			// Decode into element.
+			d.value(v.Index(i))
+		} else {
+			// Ran out of fixed array: skip.
+			d.value(reflect.Value{})
+		}
+		i++
+
+		// Next token must be , or ].
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			break
+		}
+		if op != scanArrayValue {
+			d.error(errPhase)
+		}
+	}
+
+	if i < v.Len() {
+		if v.Kind() == reflect.Array {
+			// Array. Zero the rest.
+			z := reflect.Zero(v.Type().Elem())
+			for ; i < v.Len(); i++ {
+				v.Index(i).Set(z)
+			}
+		} else {
+			v.SetLen(i)
+		}
+	}
+	if i == 0 && v.Kind() == reflect.Slice {
+		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+	}
+}
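+
+// Behavior sketch (ours, matching the semantics implemented above): a short
+// JSON array zero-fills the tail of a Go array, and surplus elements are
+// skipped rather than reported as an error:
+//
+//	var a [3]int
+//	_ = Unmarshal([]byte(`[1, 2]`), &a)       // a == [3]int{1, 2, 0}
+//	_ = Unmarshal([]byte(`[1, 2, 3, 4]`), &a) // a == [3]int{1, 2, 3}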
+
+var nullLiteral = []byte("null")
+var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// The first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+	// Check for unmarshaler.
+	u, ut, pv := d.indirect(v, false)
+	if d.storeKeyed(pv) {
+		return
+	}
+	if u != nil {
+		d.off--
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+		d.off--
+		d.next() // skip over { } in input
+		return
+	}
+	v = pv
+
+	// Decoding into nil interface?  Switch to non-reflect code.
+	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+		v.Set(reflect.ValueOf(d.objectInterface()))
+		return
+	}
+
+	// Check type of target:
+	//   struct or
+	//   map[string]T or map[encoding.TextUnmarshaler]T
+	switch v.Kind() {
+	case reflect.Map:
+		// Map key must either have string kind or be an encoding.TextUnmarshaler.
+		t := v.Type()
+		if t.Key().Kind() != reflect.String &&
+			!reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
+			d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+			d.off--
+			d.next() // skip over { } in input
+			return
+		}
+		if v.IsNil() {
+			v.Set(reflect.MakeMap(t))
+		}
+	case reflect.Struct:
+
+	default:
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+		d.off--
+		d.next() // skip over { } in input
+		return
+	}
+
+	var mapElem reflect.Value
+
+	empty := true
+	for {
+		// Read opening " of string key or closing }.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			if !empty && !d.ext.trailingCommas {
+				d.syntaxError("beginning of object key string")
+			}
+			break
+		}
+		empty = false
+		if op == scanBeginName {
+			if !d.ext.unquotedKeys {
+				d.syntaxError("beginning of object key string")
+			}
+		} else if op != scanBeginLiteral {
+			d.error(errPhase)
+		}
+		unquotedKey := op == scanBeginName
+
+		// Read key.
+		start := d.off - 1
+		op = d.scanWhile(scanContinue)
+		item := d.data[start : d.off-1]
+		var key []byte
+		if unquotedKey {
+			key = item
+			// TODO Fix code below to quote item when necessary.
+		} else {
+			var ok bool
+			key, ok = unquoteBytes(item)
+			if !ok {
+				d.error(errPhase)
+			}
+		}
+
+		// Figure out field corresponding to key.
+		var subv reflect.Value
+		destring := false // whether the value is wrapped in a string to be decoded first
+
+		if v.Kind() == reflect.Map {
+			elemType := v.Type().Elem()
+			if !mapElem.IsValid() {
+				mapElem = reflect.New(elemType).Elem()
+			} else {
+				mapElem.Set(reflect.Zero(elemType))
+			}
+			subv = mapElem
+		} else {
+			var f *field
+			fields := cachedTypeFields(v.Type())
+			for i := range fields {
+				ff := &fields[i]
+				if bytes.Equal(ff.nameBytes, key) {
+					f = ff
+					break
+				}
+				if f == nil && ff.equalFold(ff.nameBytes, key) {
+					f = ff
+				}
+			}
+			if f != nil {
+				subv = v
+				destring = f.quoted
+				for _, i := range f.index {
+					if subv.Kind() == reflect.Ptr {
+						if subv.IsNil() {
+							subv.Set(reflect.New(subv.Type().Elem()))
+						}
+						subv = subv.Elem()
+					}
+					subv = subv.Field(i)
+				}
+			}
+		}
+
+		// Read : before value.
+		if op == scanSkipSpace {
+			op = d.scanWhile(scanSkipSpace)
+		}
+		if op != scanObjectKey {
+			d.error(errPhase)
+		}
+
+		// Read value.
+		if destring {
+			switch qv := d.valueQuoted().(type) {
+			case nil:
+				d.literalStore(nullLiteral, subv, false)
+			case string:
+				d.literalStore([]byte(qv), subv, true)
+			default:
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+			}
+		} else {
+			d.value(subv)
+		}
+
+		// Write value back to map;
+		// if using struct, subv points into struct already.
+		if v.Kind() == reflect.Map {
+			kt := v.Type().Key()
+			var kv reflect.Value
+			switch {
+			case kt.Kind() == reflect.String:
+				kv = reflect.ValueOf(key).Convert(v.Type().Key())
+			case reflect.PtrTo(kt).Implements(textUnmarshalerType):
+				kv = reflect.New(v.Type().Key())
+				d.literalStore(item, kv, true)
+				kv = kv.Elem()
+			default:
+				panic("json: Unexpected key type") // should never occur
+			}
+			v.SetMapIndex(kv, subv)
+		}
+
+		// Next token must be , or }.
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			break
+		}
+		if op != scanObjectValue {
+			d.error(errPhase)
+		}
+	}
+}
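+
+// Worked example (ours) of the destring path above, i.e. the ",string"
+// struct tag option:
+//
+//	type T struct {
+//		N int `json:",string"`
+//	}
+//	var t T
+//	_ = Unmarshal([]byte(`{"N": "42"}`), &t) // t.N == 42
+//	// An unquoted 42 here would instead report an "invalid use of ,string" error.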
+
+// isNull returns whether there's a null literal at the provided offset.
+func (d *decodeState) isNull(off int) bool {
+	if off+4 >= len(d.data) || d.data[off] != 'n' || d.data[off+1] != 'u' || d.data[off+2] != 'l' || d.data[off+3] != 'l' {
+		return false
+	}
+	d.nextscan.reset()
+	for i, c := range d.data[off:] {
+		if i > 4 {
+			return false
+		}
+		switch d.nextscan.step(&d.nextscan, c) {
+		case scanContinue, scanBeginName:
+			continue
+		}
+		break
+	}
+	return true
+}
+
+// name consumes a const or function from d.data[d.off-1:], decoding into the value v.
+// The first byte of the function name has been read already.
+func (d *decodeState) name(v reflect.Value) {
+	if d.isNull(d.off-1) {
+		d.literal(v)
+		return
+	}
+
+	// Check for unmarshaler.
+	u, ut, pv := d.indirect(v, false)
+	if d.storeKeyed(pv) {
+		return
+	}
+	if u != nil {
+		d.off--
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+		d.off--
+		d.next() // skip over function in input
+		return
+	}
+	v = pv
+
+	// Decoding into nil interface?  Switch to non-reflect code.
+	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+		out := d.nameInterface()
+		if out == nil {
+			v.Set(reflect.Zero(v.Type()))
+		} else {
+			v.Set(reflect.ValueOf(out))
+		}
+		return
+	}
+
+	nameStart := d.off - 1
+
+	op := d.scanWhile(scanContinue)
+
+	name := d.data[nameStart : d.off-1]
+	if op != scanParam {
+		// Back up so the byte just read is consumed next.
+		d.off--
+		d.scan.undo(op)
+		if l, ok := d.convertLiteral(name); ok {
+			d.storeValue(v, l)
+			return
+		}
+		d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)})
+	}
+
+	funcName := string(name)
+	funcData := d.ext.funcs[funcName]
+	if funcData.key == "" {
+		d.error(fmt.Errorf("json: unknown function %q", funcName))
+	}
+
+	// Check type of target:
+	//   struct or
+	//   map[string]T or map[encoding.TextUnmarshaler]T
+	switch v.Kind() {
+	case reflect.Map:
+		// Map key must either have string kind or be an encoding.TextUnmarshaler.
+		t := v.Type()
+		if t.Key().Kind() != reflect.String &&
+			!reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
+			d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+			d.off--
+			d.next() // skip over { } in input
+			return
+		}
+		if v.IsNil() {
+			v.Set(reflect.MakeMap(t))
+		}
+	case reflect.Struct:
+
+	default:
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+		d.off--
+		d.next() // skip over { } in input
+		return
+	}
+
+	// TODO Fix case of func field as map.
+	//topv := v
+
+	// Figure out field corresponding to function.
+	key := []byte(funcData.key)
+	if v.Kind() == reflect.Map {
+		elemType := v.Type().Elem()
+		v = reflect.New(elemType).Elem()
+	} else {
+		var f *field
+		fields := cachedTypeFields(v.Type())
+		for i := range fields {
+			ff := &fields[i]
+			if bytes.Equal(ff.nameBytes, key) {
+				f = ff
+				break
+			}
+			if f == nil && ff.equalFold(ff.nameBytes, key) {
+				f = ff
+			}
+		}
+		if f != nil {
+			for _, i := range f.index {
+				if v.Kind() == reflect.Ptr {
+					if v.IsNil() {
+						v.Set(reflect.New(v.Type().Elem()))
+					}
+					v = v.Elem()
+				}
+				v = v.Field(i)
+			}
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					v.Set(reflect.New(v.Type().Elem()))
+				}
+				v = v.Elem()
+			}
+		}
+	}
+
+	// Check for unmarshaler on func field itself.
+	u, ut, pv = d.indirect(v, false)
+	if u != nil {
+		d.off = nameStart
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+
+	var mapElem reflect.Value
+
+	// Parse function arguments.
+	for i := 0; ; i++ {
+		// closing ) - can only happen on first iteration.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndParams {
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		if i >= len(funcData.args) {
+			d.error(fmt.Errorf("json: too many arguments for function %s", funcName))
+		}
+		key := []byte(funcData.args[i])
+
+		// Figure out field corresponding to key.
+		var subv reflect.Value
+		destring := false // whether the value is wrapped in a string to be decoded first
+
+		if v.Kind() == reflect.Map {
+			elemType := v.Type().Elem()
+			if !mapElem.IsValid() {
+				mapElem = reflect.New(elemType).Elem()
+			} else {
+				mapElem.Set(reflect.Zero(elemType))
+			}
+			subv = mapElem
+		} else {
+			var f *field
+			fields := cachedTypeFields(v.Type())
+			for i := range fields {
+				ff := &fields[i]
+				if bytes.Equal(ff.nameBytes, key) {
+					f = ff
+					break
+				}
+				if f == nil && ff.equalFold(ff.nameBytes, key) {
+					f = ff
+				}
+			}
+			if f != nil {
+				subv = v
+				destring = f.quoted
+				for _, i := range f.index {
+					if subv.Kind() == reflect.Ptr {
+						if subv.IsNil() {
+							subv.Set(reflect.New(subv.Type().Elem()))
+						}
+						subv = subv.Elem()
+					}
+					subv = subv.Field(i)
+				}
+			}
+		}
+
+		// Read value.
+		if destring {
+			switch qv := d.valueQuoted().(type) {
+			case nil:
+				d.literalStore(nullLiteral, subv, false)
+			case string:
+				d.literalStore([]byte(qv), subv, true)
+			default:
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+			}
+		} else {
+			d.value(subv)
+		}
+
+		// Write value back to map;
+		// if using struct, subv points into struct already.
+		if v.Kind() == reflect.Map {
+			kt := v.Type().Key()
+			var kv reflect.Value
+			switch {
+			case kt.Kind() == reflect.String:
+				kv = reflect.ValueOf(key).Convert(v.Type().Key())
+			case reflect.PtrTo(kt).Implements(textUnmarshalerType):
+				kv = reflect.New(v.Type().Key())
+				d.literalStore(key, kv, true)
+				kv = kv.Elem()
+			default:
+				panic("json: Unexpected key type") // should never occur
+			}
+			v.SetMapIndex(kv, subv)
+		}
+
+		// Next token must be , or ).
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndParams {
+			break
+		}
+		if op != scanParam {
+			d.error(errPhase)
+		}
+	}
+}
+
+// keyed attempts to decode an object or function using a keyed doc extension,
+// and returns the value and true on success, or nil and false otherwise.
+func (d *decodeState) keyed() (interface{}, bool) {
+	if len(d.ext.keyed) == 0 {
+		return nil, false
+	}
+
+	unquote := false
+
+	// Look-ahead first key to check for a keyed document extension.
+	d.nextscan.reset()
+	var start, end int
+	for i, c := range d.data[d.off-1:] {
+		switch op := d.nextscan.step(&d.nextscan, c); op {
+		case scanSkipSpace, scanContinue, scanBeginObject:
+			continue
+		case scanBeginLiteral, scanBeginName:
+			unquote = op == scanBeginLiteral
+			start = i
+			continue
+		}
+		end = i
+		break
+	}
+
+	name := d.data[d.off-1+start : d.off-1+end]
+
+	var key []byte
+	var ok bool
+	if unquote {
+		key, ok = unquoteBytes(name)
+		if !ok {
+			d.error(errPhase)
+		}
+	} else {
+		funcData, ok := d.ext.funcs[string(name)]
+		if !ok {
+			return nil, false
+		}
+		key = []byte(funcData.key)
+	}
+
+	decode, ok := d.ext.keyed[string(key)]
+	if !ok {
+		return nil, false
+	}
+
+	d.off--
+	out, err := decode(d.next())
+	if err != nil {
+		d.error(err)
+	}
+	return out, true
+}
+
+func (d *decodeState) storeKeyed(v reflect.Value) bool {
+	keyed, ok := d.keyed()
+	if !ok {
+		return false
+	}
+	d.storeValue(v, keyed)
+	return true
+}
+
+var (
+	trueBytes  = []byte("true")
+	falseBytes = []byte("false")
+	nullBytes  = []byte("null")
+)
+
+func (d *decodeState) storeValue(v reflect.Value, from interface{}) {
+	switch from {
+	case nil:
+		d.literalStore(nullBytes, v, false)
+		return
+	case true:
+		d.literalStore(trueBytes, v, false)
+		return
+	case false:
+		d.literalStore(falseBytes, v, false)
+		return
+	}
+	fromv := reflect.ValueOf(from)
+	for fromv.Kind() == reflect.Ptr && !fromv.IsNil() {
+		fromv = fromv.Elem()
+	}
+	fromt := fromv.Type()
+	for v.Kind() == reflect.Ptr && !v.IsNil() {
+		v = v.Elem()
+	}
+	vt := v.Type()
+	if fromt.AssignableTo(vt) {
+		v.Set(fromv)
+	} else if fromt.ConvertibleTo(vt) {
+		v.Set(fromv.Convert(vt))
+	} else {
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+	}
+}
+
+func (d *decodeState) convertLiteral(name []byte) (interface{}, bool) {
+	if len(name) == 0 {
+		return nil, false
+	}
+	switch name[0] {
+	case 't':
+		if bytes.Equal(name, trueBytes) {
+			return true, true
+		}
+	case 'f':
+		if bytes.Equal(name, falseBytes) {
+			return false, true
+		}
+	case 'n':
+		if bytes.Equal(name, nullBytes) {
+			return nil, true
+		}
+	}
+	if l, ok := d.ext.consts[string(name)]; ok {
+		return l, true
+	}
+	return nil, false
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+	// All bytes inside literal return scanContinue op code.
+	start := d.off - 1
+	op := d.scanWhile(scanContinue)
+
+	// Scan read one byte too far; back up.
+	d.off--
+	d.scan.undo(op)
+
+	d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64 or a Number
+// depending on the setting of d.useNumber.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+	if d.useNumber {
+		return Number(s), nil
+	}
+	f, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+	}
+	return f, nil
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+	// Check for unmarshaler.
+	if len(item) == 0 {
+		// Empty string given.
+		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+		return
+	}
+	wantptr := item[0] == 'n' // null
+	u, ut, pv := d.indirect(v, wantptr)
+	if u != nil {
+		err := u.UnmarshalJSON(item)
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		if item[0] != '"' {
+			if fromQuoted {
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+			}
+			return
+		}
+		s, ok := unquoteBytes(item)
+		if !ok {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		err := ut.UnmarshalText(s)
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+
+	v = pv
+
+	switch c := item[0]; c {
+	case 'n': // null
+		switch v.Kind() {
+		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+			v.Set(reflect.Zero(v.Type()))
+			// otherwise, ignore null for primitives/string
+		}
+	case 't', 'f': // true, false
+		value := c == 't'
+		switch v.Kind() {
+		default:
+			if fromQuoted {
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+			}
+		case reflect.Bool:
+			v.SetBool(value)
+		case reflect.Interface:
+			if v.NumMethod() == 0 {
+				v.Set(reflect.ValueOf(value))
+			} else {
+				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+			}
+		}
+
+	case '"': // string
+		s, ok := unquoteBytes(item)
+		if !ok {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		switch v.Kind() {
+		default:
+			d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+		case reflect.Slice:
+			if v.Type().Elem().Kind() != reflect.Uint8 {
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+				break
+			}
+			b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+			n, err := base64.StdEncoding.Decode(b, s)
+			if err != nil {
+				d.saveError(err)
+				break
+			}
+			v.SetBytes(b[:n])
+		case reflect.String:
+			v.SetString(string(s))
+		case reflect.Interface:
+			if v.NumMethod() == 0 {
+				v.Set(reflect.ValueOf(string(s)))
+			} else {
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+			}
+		}
+
+	default: // number
+		if c != '-' && (c < '0' || c > '9') {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		s := string(item)
+		switch v.Kind() {
+		default:
+			if v.Kind() == reflect.String && v.Type() == numberType {
+				v.SetString(s)
+				if !isValidNumber(s) {
+					d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+				}
+				break
+			}
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+			}
+		case reflect.Interface:
+			n, err := d.convertNumber(s)
+			if err != nil {
+				d.saveError(err)
+				break
+			}
+			if v.NumMethod() != 0 {
+				d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+				break
+			}
+			v.Set(reflect.ValueOf(n))
+
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			n, err := strconv.ParseInt(s, 10, 64)
+			if err != nil || v.OverflowInt(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+				break
+			}
+			v.SetInt(n)
+
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			n, err := strconv.ParseUint(s, 10, 64)
+			if err != nil || v.OverflowUint(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+				break
+			}
+			v.SetUint(n)
+
+		case reflect.Float32, reflect.Float64:
+			n, err := strconv.ParseFloat(s, v.Type().Bits())
+			if err != nil || v.OverflowFloat(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+				break
+			}
+			v.SetFloat(n)
+		}
+	}
+}
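+
+// One subtlety of the 'n' case above (our note): JSON null decoded into a
+// value that cannot hold nil is silently ignored rather than an error:
+//
+//	n := 7
+//	_ = Unmarshal([]byte(`null`), &n) // n is still 7, err is nil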
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+	switch d.scanWhile(scanSkipSpace) {
+	default:
+		d.error(errPhase)
+		panic("unreachable")
+	case scanBeginArray:
+		return d.arrayInterface()
+	case scanBeginObject:
+		return d.objectInterface()
+	case scanBeginLiteral:
+		return d.literalInterface()
+	case scanBeginName:
+		return d.nameInterface()
+	}
+}
+
+func (d *decodeState) syntaxError(expected string) {
+	msg := fmt.Sprintf("invalid character '%c' looking for %s", d.data[d.off-1], expected)
+	d.error(&SyntaxError{msg, int64(d.off)})
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+	var v = make([]interface{}, 0)
+	for {
+		// Look ahead for ] - can only happen on first iteration.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			if len(v) > 0 && !d.ext.trailingCommas {
+				d.syntaxError("beginning of value")
+			}
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		v = append(v, d.valueInterface())
+
+		// Next token must be , or ].
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			break
+		}
+		if op != scanArrayValue {
+			d.error(errPhase)
+		}
+	}
+	return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() interface{} {
+	v, ok := d.keyed()
+	if ok {
+		return v
+	}
+
+	m := make(map[string]interface{})
+	for {
+		// Read opening " of string key or closing }.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			if len(m) > 0 && !d.ext.trailingCommas {
+				d.syntaxError("beginning of object key string")
+			}
+			break
+		}
+		if op == scanBeginName {
+			if !d.ext.unquotedKeys {
+				d.syntaxError("beginning of object key string")
+			}
+		} else if op != scanBeginLiteral {
+			d.error(errPhase)
+		}
+		unquotedKey := op == scanBeginName
+
+		// Read string key.
+		start := d.off - 1
+		op = d.scanWhile(scanContinue)
+		item := d.data[start : d.off-1]
+		var key string
+		if unquotedKey {
+			key = string(item)
+		} else {
+			var ok bool
+			key, ok = unquote(item)
+			if !ok {
+				d.error(errPhase)
+			}
+		}
+
+		// Read : before value.
+		if op == scanSkipSpace {
+			op = d.scanWhile(scanSkipSpace)
+		}
+		if op != scanObjectKey {
+			d.error(errPhase)
+		}
+
+		// Read value.
+		m[key] = d.valueInterface()
+
+		// Next token must be , or }.
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			break
+		}
+		if op != scanObjectValue {
+			d.error(errPhase)
+		}
+	}
+	return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+	// All bytes inside literal return scanContinue op code.
+	start := d.off - 1
+	op := d.scanWhile(scanContinue)
+
+	// Scan read one byte too far; back up.
+	d.off--
+	d.scan.undo(op)
+	item := d.data[start:d.off]
+
+	switch c := item[0]; c {
+	case 'n': // null
+		return nil
+
+	case 't', 'f': // true, false
+		return c == 't'
+
+	case '"': // string
+		s, ok := unquote(item)
+		if !ok {
+			d.error(errPhase)
+		}
+		return s
+
+	default: // number
+		if c != '-' && (c < '0' || c > '9') {
+			d.error(errPhase)
+		}
+		n, err := d.convertNumber(string(item))
+		if err != nil {
+			d.saveError(err)
+		}
+		return n
+	}
+}
+
+// nameInterface is like name but returns map[string]interface{}.
+func (d *decodeState) nameInterface() interface{} {
+	v, ok := d.keyed()
+	if ok {
+		return v
+	}
+
+	nameStart := d.off - 1
+
+	op := d.scanWhile(scanContinue)
+
+	name := d.data[nameStart : d.off-1]
+	if op != scanParam {
+		// Back up so the byte just read is consumed next.
+		d.off--
+		d.scan.undo(op)
+		if l, ok := d.convertLiteral(name); ok {
+			return l
+		}
+		d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)})
+	}
+
+	funcName := string(name)
+	funcData := d.ext.funcs[funcName]
+	if funcData.key == "" {
+		d.error(fmt.Errorf("json: unknown function %q", funcName))
+	}
+
+	m := make(map[string]interface{})
+	for i := 0; ; i++ {
+		// Look ahead for ) - can only happen on first iteration.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndParams {
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		if i >= len(funcData.args) {
+			d.error(fmt.Errorf("json: too many arguments for function %s", funcName))
+		}
+		m[funcData.args[i]] = d.valueInterface()
+
+		// Next token must be , or ).
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndParams {
+			break
+		}
+		if op != scanParam {
+			d.error(errPhase)
+		}
+	}
+	return map[string]interface{}{funcData.key: m}
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+		return -1
+	}
+	r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+	if err != nil {
+		return -1
+	}
+	return rune(r)
+}
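+
+// For instance (our note), getu4([]byte(`\u00e9`)) returns 0x00E9 ('é'),
+// while input that is too short or not of the form \uXXXX returns -1.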
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules differ from Go string literals, so we cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+	s, ok = unquoteBytes(s)
+	t = string(s)
+	return
+}
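+
+// Examples (ours) of the JSON-specific rules handled below: \/ and \uXXXX
+// escapes are legal here even though Go string literals reject them:
+//
+//	unquote([]byte(`"a\/b"`))        // "a/b", true
+//	unquote([]byte(`"\u00e9"`))      // "é", true
+//	unquote([]byte(`"unterminated`)) // "", false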
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+		return
+	}
+	s = s[1 : len(s)-1]
+
+	// Check for unusual characters. If there are none,
+	// then no unquoting is needed, so return a slice of the
+	// original bytes.
+	r := 0
+	for r < len(s) {
+		c := s[r]
+		if c == '\\' || c == '"' || c < ' ' {
+			break
+		}
+		if c < utf8.RuneSelf {
+			r++
+			continue
+		}
+		rr, size := utf8.DecodeRune(s[r:])
+		if rr == utf8.RuneError && size == 1 {
+			break
+		}
+		r += size
+	}
+	if r == len(s) {
+		return s, true
+	}
+
+	b := make([]byte, len(s)+2*utf8.UTFMax)
+	w := copy(b, s[0:r])
+	for r < len(s) {
+		// Out of room?  Can only happen if s is full of
+		// malformed UTF-8 and we're replacing each
+		// byte with RuneError.
+		if w >= len(b)-2*utf8.UTFMax {
+			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+			copy(nb, b[0:w])
+			b = nb
+		}
+		switch c := s[r]; {
+		case c == '\\':
+			r++
+			if r >= len(s) {
+				return
+			}
+			switch s[r] {
+			default:
+				return
+			case '"', '\\', '/', '\'':
+				b[w] = s[r]
+				r++
+				w++
+			case 'b':
+				b[w] = '\b'
+				r++
+				w++
+			case 'f':
+				b[w] = '\f'
+				r++
+				w++
+			case 'n':
+				b[w] = '\n'
+				r++
+				w++
+			case 'r':
+				b[w] = '\r'
+				r++
+				w++
+			case 't':
+				b[w] = '\t'
+				r++
+				w++
+			case 'u':
+				r--
+				rr := getu4(s[r:])
+				if rr < 0 {
+					return
+				}
+				r += 6
+				if utf16.IsSurrogate(rr) {
+					rr1 := getu4(s[r:])
+					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+						// A valid pair; consume.
+						r += 6
+						w += utf8.EncodeRune(b[w:], dec)
+						break
+					}
+					// Invalid surrogate; fall back to replacement rune.
+					rr = unicode.ReplacementChar
+				}
+				w += utf8.EncodeRune(b[w:], rr)
+			}
+
+		// Quote, control characters are invalid.
+		case c == '"', c < ' ':
+			return
+
+		// ASCII
+		case c < utf8.RuneSelf:
+			b[w] = c
+			r++
+			w++
+
+		// Coerce to well-formed UTF-8.
+		default:
+			rr, size := utf8.DecodeRune(s[r:])
+			r += size
+			w += utf8.EncodeRune(b[w:], rr)
+		}
+	}
+	return b[0:w], true
+}
diff --git a/automation/vendor/gopkg.in/mgo.v2/internal/json/encode.go b/automation/vendor/gopkg.in/mgo.v2/internal/json/encode.go
new file mode 100644
index 0000000..67a0f00
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/internal/json/encode.go
@@ -0,0 +1,1256 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON as defined in
+// RFC 4627. The mapping between JSON and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"reflect"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+// This escaping can be disabled using an Encoder with DisableHTMLEscaping.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON value.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+//   - the field's tag is "-", or
+//   - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+//   // Field is ignored by this package.
+//   Field int `json:"-"`
+//
+//   // Field appears in JSON as key "myName".
+//   Field int `json:"myName"`
+//
+//   // Field appears in JSON as key "myName" and
+//   // the field is omitted from the object if its value is empty,
+//   // as defined above.
+//   Field int `json:"myName,omitempty"`
+//
+//   // Field appears in JSON as key "Field" (the default), but
+//   // the field is skipped if empty.
+//   // Note the leading comma.
+//   Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+//    Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects. The map's key type must either be a string
+// or implement encoding.TextMarshaler.  The map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON value.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON value.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+	e := &encodeState{}
+	err := e.marshal(v, encOpts{escapeHTML: true})
+	if err != nil {
+		return nil, err
+	}
+	return e.Bytes(), nil
+}
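+
+// A compact illustration (ours) of the field rules documented above:
+//
+//	type Point struct {
+//		X int `json:"x"`
+//		Y int `json:"y,omitempty"`
+//		Z int `json:"-"`
+//	}
+//	b, _ := Marshal(Point{X: 1})
+//	// string(b) == `{"x":1}`: Y is empty and omitted; Z is always skipped.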
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	b, err := Marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	var buf bytes.Buffer
+	err = Indent(&buf, b, prefix, indent)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
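+
+// E.g. (ours): MarshalIndent(Point{X: 1, Y: 2}, "", "\t") puts each member
+// on its own line, indented with one tab per nesting level; a non-empty
+// prefix would additionally begin every line after the first.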
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML
+// escaping within <script> tags, so an alternative JSON encoding must
+// be used.
+func HTMLEscape(dst *bytes.Buffer, src []byte) {
+	// The characters can only appear in string literals,
+	// so just scan the string one byte at a time.
+	start := 0
+	for i, c := range src {
+		if c == '<' || c == '>' || c == '&' {
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			dst.WriteString(`\u00`)
+			dst.WriteByte(hex[c>>4])
+			dst.WriteByte(hex[c&0xF])
+			start = i + 1
+		}
+		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			dst.WriteString(`\u202`)
+			dst.WriteByte(hex[src[i+2]&0xF])
+			start = i + 3
+		}
+	}
+	if start < len(src) {
+		dst.Write(src[start:])
+	}
+}
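+
+// For example (ours):
+//
+//	var buf bytes.Buffer
+//	HTMLEscape(&buf, []byte(`{"t":"<b>&</b>"}`))
+//	// buf.String() == `{"t":"\u003cb\u003e\u0026\u003c/b\u003e"}`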
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid JSON.
+type Marshaler interface {
+	MarshalJSON() ([]byte, error)
+}
+
+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.
+type UnsupportedTypeError struct {
+	Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+	return "json: unsupported type: " + e.Type.String()
+}
+
+type UnsupportedValueError struct {
+	Value reflect.Value
+	Str   string
+}
+
+func (e *UnsupportedValueError) Error() string {
+	return "json: unsupported value: " + e.Str
+}
+
+// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
+// attempting to encode a string value with invalid UTF-8 sequences.
+// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
+// replacing invalid bytes with the Unicode replacement rune U+FFFD.
+// This error is no longer generated but is kept for backwards compatibility
+// with programs that might mention it.
+type InvalidUTF8Error struct {
+	S string // the whole string value that caused the error
+}
+
+func (e *InvalidUTF8Error) Error() string {
+	return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
+}
+
+type MarshalerError struct {
+	Type reflect.Type
+	Err  error
+}
+
+func (e *MarshalerError) Error() string {
+	return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
+}
+
+var hex = "0123456789abcdef"
+
+// An encodeState encodes JSON into a bytes.Buffer.
+type encodeState struct {
+	bytes.Buffer // accumulated output
+	scratch      [64]byte
+	ext          Extension
+}
+
+var encodeStatePool sync.Pool
+
+func newEncodeState() *encodeState {
+	if v := encodeStatePool.Get(); v != nil {
+		e := v.(*encodeState)
+		e.Reset()
+		return e
+	}
+	return new(encodeState)
+}
+
+func (e *encodeState) marshal(v interface{}, opts encOpts) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if _, ok := r.(runtime.Error); ok {
+				panic(r)
+			}
+			if s, ok := r.(string); ok {
+				panic(s)
+			}
+			err = r.(error)
+		}
+	}()
+	e.reflectValue(reflect.ValueOf(v), opts)
+	return nil
+}
+
+func (e *encodeState) error(err error) {
+	panic(err)
+}
+
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
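+
+// Note (ours): these are the values the "omitempty" tag option treats as
+// empty. Struct kinds are not covered above, so a zero struct field is
+// still encoded even with omitempty.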
+
+func (e *encodeState) reflectValue(v reflect.Value, opts encOpts) {
+	valueEncoder(v)(e, v, opts)
+}
+
+type encOpts struct {
+	// quoted causes primitive fields to be encoded inside JSON strings.
+	quoted bool
+	// escapeHTML causes '<', '>', and '&' to be escaped in JSON strings.
+	escapeHTML bool
+}
+
+type encoderFunc func(e *encodeState, v reflect.Value, opts encOpts)
+
+var encoderCache struct {
+	sync.RWMutex
+	m map[reflect.Type]encoderFunc
+}
+
+func valueEncoder(v reflect.Value) encoderFunc {
+	if !v.IsValid() {
+		return invalidValueEncoder
+	}
+	return typeEncoder(v.Type())
+}
+
+func typeEncoder(t reflect.Type) encoderFunc {
+	encoderCache.RLock()
+	f := encoderCache.m[t]
+	encoderCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// To deal with recursive types, populate the map with an
+	// indirect func before we build it. This type waits on the
+	// real func (f) to be ready and then calls it. This indirect
+	// func is only used for recursive types.
+	encoderCache.Lock()
+	if encoderCache.m == nil {
+		encoderCache.m = make(map[reflect.Type]encoderFunc)
+	}
+	var wg sync.WaitGroup
+	wg.Add(1)
+	encoderCache.m[t] = func(e *encodeState, v reflect.Value, opts encOpts) {
+		wg.Wait()
+		f(e, v, opts)
+	}
+	encoderCache.Unlock()
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	innerf := newTypeEncoder(t, true)
+	f = func(e *encodeState, v reflect.Value, opts encOpts) {
+		encode, ok := e.ext.encode[v.Type()]
+		if !ok {
+			innerf(e, v, opts)
+			return
+		}
+
+		b, err := encode(v.Interface())
+		if err == nil {
+			// copy JSON into buffer, checking validity.
+			err = compact(&e.Buffer, b, opts.escapeHTML)
+		}
+		if err != nil {
+			e.error(&MarshalerError{v.Type(), err})
+		}
+	}
+	wg.Done()
+	encoderCache.Lock()
+	encoderCache.m[t] = f
+	encoderCache.Unlock()
+	return f
+}
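+
+// The wait-group indirection above is what makes self-referential types
+// work (our example):
+//
+//	type Node struct {
+//		Value int   `json:"value"`
+//		Next  *Node `json:"next,omitempty"`
+//	}
+//
+// Building the Node encoder requires the *Node encoder, which needs the
+// Node encoder again; the placeholder breaks that cycle.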
+
+var (
+	marshalerType     = reflect.TypeOf(new(Marshaler)).Elem()
+	textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+)
+
+// newTypeEncoder constructs an encoderFunc for a type.
+// The returned encoder only checks CanAddr when allowAddr is true.
+func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
+	if t.Implements(marshalerType) {
+		return marshalerEncoder
+	}
+	if t.Kind() != reflect.Ptr && allowAddr {
+		if reflect.PtrTo(t).Implements(marshalerType) {
+			return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
+		}
+	}
+
+	if t.Implements(textMarshalerType) {
+		return textMarshalerEncoder
+	}
+	if t.Kind() != reflect.Ptr && allowAddr {
+		if reflect.PtrTo(t).Implements(textMarshalerType) {
+			return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
+		}
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		return boolEncoder
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return intEncoder
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return uintEncoder
+	case reflect.Float32:
+		return float32Encoder
+	case reflect.Float64:
+		return float64Encoder
+	case reflect.String:
+		return stringEncoder
+	case reflect.Interface:
+		return interfaceEncoder
+	case reflect.Struct:
+		return newStructEncoder(t)
+	case reflect.Map:
+		return newMapEncoder(t)
+	case reflect.Slice:
+		return newSliceEncoder(t)
+	case reflect.Array:
+		return newArrayEncoder(t)
+	case reflect.Ptr:
+		return newPtrEncoder(t)
+	default:
+		return unsupportedTypeEncoder
+	}
+}
+
+func invalidValueEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+	e.WriteString("null")
+}
+
+func marshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.Kind() == reflect.Ptr && v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := v.Interface().(Marshaler)
+	b, err := m.MarshalJSON()
+	if err == nil {
+		// copy JSON into buffer, checking validity.
+		err = compact(&e.Buffer, b, opts.escapeHTML)
+	}
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+}
+
+func addrMarshalerEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+	va := v.Addr()
+	if va.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := va.Interface().(Marshaler)
+	b, err := m.MarshalJSON()
+	if err == nil {
+		// copy JSON into buffer, checking validity.
+		err = compact(&e.Buffer, b, true)
+	}
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+}
+
+func textMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.Kind() == reflect.Ptr && v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := v.Interface().(encoding.TextMarshaler)
+	b, err := m.MarshalText()
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+	e.stringBytes(b, opts.escapeHTML)
+}
+
+func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	va := v.Addr()
+	if va.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := va.Interface().(encoding.TextMarshaler)
+	b, err := m.MarshalText()
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+	e.stringBytes(b, opts.escapeHTML)
+}
+
+func boolEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+	if v.Bool() {
+		e.WriteString("true")
+	} else {
+		e.WriteString("false")
+	}
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+}
+
+func intEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+	e.Write(b)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+}
+
+func uintEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+	e.Write(b)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+}
+
+type floatEncoder int // number of bits
+
+func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	f := v.Float()
+	if math.IsInf(f, 0) || math.IsNaN(f) {
+		e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
+	}
+	b := strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+	e.Write(b)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+}
+
+var (
+	float32Encoder = (floatEncoder(32)).encode
+	float64Encoder = (floatEncoder(64)).encode
+)
+
+func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.Type() == numberType {
+		numStr := v.String()
+		// In Go 1.5 the empty string encoded to "0". The empty string is not a
+		// valid number literal, but we keep that behavior for compatibility and
+		// check validity after substituting the zero value.
+		if numStr == "" {
+			numStr = "0" // Number's zero-val
+		}
+		if !isValidNumber(numStr) {
+			e.error(fmt.Errorf("json: invalid number literal %q", numStr))
+		}
+		e.WriteString(numStr)
+		return
+	}
+	if opts.quoted {
+		sb, err := Marshal(v.String())
+		if err != nil {
+			e.error(err)
+		}
+		e.string(string(sb), opts.escapeHTML)
+	} else {
+		e.string(v.String(), opts.escapeHTML)
+	}
+}
+
+func interfaceEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	e.reflectValue(v.Elem(), opts)
+}
+
+func unsupportedTypeEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+	e.error(&UnsupportedTypeError{v.Type()})
+}
+
+type structEncoder struct {
+	fields    []field
+	fieldEncs []encoderFunc
+}
+
+func (se *structEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	e.WriteByte('{')
+	first := true
+	for i, f := range se.fields {
+		fv := fieldByIndex(v, f.index)
+		if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
+			continue
+		}
+		if first {
+			first = false
+		} else {
+			e.WriteByte(',')
+		}
+		e.string(f.name, opts.escapeHTML)
+		e.WriteByte(':')
+		opts.quoted = f.quoted
+		se.fieldEncs[i](e, fv, opts)
+	}
+	e.WriteByte('}')
+}
+
+func newStructEncoder(t reflect.Type) encoderFunc {
+	fields := cachedTypeFields(t)
+	se := &structEncoder{
+		fields:    fields,
+		fieldEncs: make([]encoderFunc, len(fields)),
+	}
+	for i, f := range fields {
+		se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
+	}
+	return se.encode
+}
+
+type mapEncoder struct {
+	elemEnc encoderFunc
+}
+
+func (me *mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	e.WriteByte('{')
+
+	// Extract and sort the keys.
+	keys := v.MapKeys()
+	sv := make([]reflectWithString, len(keys))
+	for i, v := range keys {
+		sv[i].v = v
+		if err := sv[i].resolve(); err != nil {
+			e.error(&MarshalerError{v.Type(), err})
+		}
+	}
+	sort.Sort(byString(sv))
+
+	for i, kv := range sv {
+		if i > 0 {
+			e.WriteByte(',')
+		}
+		e.string(kv.s, opts.escapeHTML)
+		e.WriteByte(':')
+		me.elemEnc(e, v.MapIndex(kv.v), opts)
+	}
+	e.WriteByte('}')
+}
+
+func newMapEncoder(t reflect.Type) encoderFunc {
+	if t.Key().Kind() != reflect.String && !t.Key().Implements(textMarshalerType) {
+		return unsupportedTypeEncoder
+	}
+	me := &mapEncoder{typeEncoder(t.Elem())}
+	return me.encode
+}
+
+func encodeByteSlice(e *encodeState, v reflect.Value, _ encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	s := v.Bytes()
+	e.WriteByte('"')
+	if len(s) < 1024 {
+		// for small buffers, using Encode directly is much faster.
+		dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+		base64.StdEncoding.Encode(dst, s)
+		e.Write(dst)
+	} else {
+		// for large buffers, avoid unnecessary extra temporary
+		// buffer space.
+		enc := base64.NewEncoder(base64.StdEncoding, e)
+		enc.Write(s)
+		enc.Close()
+	}
+	e.WriteByte('"')
+}
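+
+// So (our example) Marshal([]byte("hi")) yields `"aGk="`, the base64 of
+// the raw bytes, rather than a JSON array of numbers.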
+
+// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
+type sliceEncoder struct {
+	arrayEnc encoderFunc
+}
+
+func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	se.arrayEnc(e, v, opts)
+}
+
+func newSliceEncoder(t reflect.Type) encoderFunc {
+	// Byte slices get special treatment; arrays don't.
+	if t.Elem().Kind() == reflect.Uint8 &&
+		!t.Elem().Implements(marshalerType) &&
+		!t.Elem().Implements(textMarshalerType) {
+		return encodeByteSlice
+	}
+	enc := &sliceEncoder{newArrayEncoder(t)}
+	return enc.encode
+}
+
+type arrayEncoder struct {
+	elemEnc encoderFunc
+}
+
+func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	e.WriteByte('[')
+	n := v.Len()
+	for i := 0; i < n; i++ {
+		if i > 0 {
+			e.WriteByte(',')
+		}
+		ae.elemEnc(e, v.Index(i), opts)
+	}
+	e.WriteByte(']')
+}
+
+func newArrayEncoder(t reflect.Type) encoderFunc {
+	enc := &arrayEncoder{typeEncoder(t.Elem())}
+	return enc.encode
+}
+
+type ptrEncoder struct {
+	elemEnc encoderFunc
+}
+
+func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	pe.elemEnc(e, v.Elem(), opts)
+}
+
+func newPtrEncoder(t reflect.Type) encoderFunc {
+	enc := &ptrEncoder{typeEncoder(t.Elem())}
+	return enc.encode
+}
+
+type condAddrEncoder struct {
+	canAddrEnc, elseEnc encoderFunc
+}
+
+func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.CanAddr() {
+		ce.canAddrEnc(e, v, opts)
+	} else {
+		ce.elseEnc(e, v, opts)
+	}
+}
+
+// newCondAddrEncoder returns an encoder that checks whether its value
+// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
+	enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+	return enc.encode
+}
+
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
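+
+// E.g. (ours): isValidTag("my-name.v2") is true, while an empty tag or one
+// containing a quote or backslash is rejected.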
+
+func fieldByIndex(v reflect.Value, index []int) reflect.Value {
+	for _, i := range index {
+		if v.Kind() == reflect.Ptr {
+			if v.IsNil() {
+				return reflect.Value{}
+			}
+			v = v.Elem()
+		}
+		v = v.Field(i)
+	}
+	return v
+}
+
+func typeByIndex(t reflect.Type, index []int) reflect.Type {
+	for _, i := range index {
+		if t.Kind() == reflect.Ptr {
+			t = t.Elem()
+		}
+		t = t.Field(i).Type
+	}
+	return t
+}
+
+type reflectWithString struct {
+	v reflect.Value
+	s string
+}
+
+func (w *reflectWithString) resolve() error {
+	if w.v.Kind() == reflect.String {
+		w.s = w.v.String()
+		return nil
+	}
+	buf, err := w.v.Interface().(encoding.TextMarshaler).MarshalText()
+	w.s = string(buf)
+	return err
+}
+
+// byString is a slice of reflectWithString where the reflect.Value is either
+// a string or an encoding.TextMarshaler.
+// It implements the methods to sort by string.
+type byString []reflectWithString
+
+func (sv byString) Len() int           { return len(sv) }
+func (sv byString) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
+func (sv byString) Less(i, j int) bool { return sv[i].s < sv[j].s }
+
+// NOTE: keep in sync with stringBytes below.
+func (e *encodeState) string(s string, escapeHTML bool) int {
+	len0 := e.Len()
+	e.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' &&
+				(!escapeHTML || b != '<' && b != '>' && b != '&') {
+				i++
+				continue
+			}
+			if start < i {
+				e.WriteString(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				e.WriteByte('\\')
+				e.WriteByte(b)
+			case '\n':
+				e.WriteByte('\\')
+				e.WriteByte('n')
+			case '\r':
+				e.WriteByte('\\')
+				e.WriteByte('r')
+			case '\t':
+				e.WriteByte('\\')
+				e.WriteByte('t')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				e.WriteString(`\u00`)
+				e.WriteByte(hex[b>>4])
+				e.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				e.WriteString(s[start:i])
+			}
+			e.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				e.WriteString(s[start:i])
+			}
+			e.WriteString(`\u202`)
+			e.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		e.WriteString(s[start:])
+	}
+	e.WriteByte('"')
+	return e.Len() - len0
+}
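+
+// For example, with escapeHTML set, the input `a<b` is written as "a\u003cb",
+// and a newline byte is written as the two-character escape \n.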
+
+// NOTE: keep in sync with string above.
+func (e *encodeState) stringBytes(s []byte, escapeHTML bool) int {
+	len0 := e.Len()
+	e.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' &&
+				(!escapeHTML || b != '<' && b != '>' && b != '&') {
+				i++
+				continue
+			}
+			if start < i {
+				e.Write(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				e.WriteByte('\\')
+				e.WriteByte(b)
+			case '\n':
+				e.WriteByte('\\')
+				e.WriteByte('n')
+			case '\r':
+				e.WriteByte('\\')
+				e.WriteByte('r')
+			case '\t':
+				e.WriteByte('\\')
+				e.WriteByte('t')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				e.WriteString(`\u00`)
+				e.WriteByte(hex[b>>4])
+				e.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRune(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				e.Write(s[start:i])
+			}
+			e.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				e.Write(s[start:i])
+			}
+			e.WriteString(`\u202`)
+			e.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		e.Write(s[start:])
+	}
+	e.WriteByte('"')
+	return e.Len() - len0
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+	name      string
+	nameBytes []byte                 // []byte(name)
+	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+	tag       bool
+	index     []int
+	typ       reflect.Type
+	omitEmpty bool
+	quoted    bool
+}
+
+func fillField(f field) field {
+	f.nameBytes = []byte(f.name)
+	f.equalFold = foldFunc(f.nameBytes)
+	return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+	// Anonymous fields to explore at the current level and the next.
+	current := []field{}
+	next := []field{{typ: t}}
+
+	// Count of queued names for current level and the next.
+	count := map[reflect.Type]int{}
+	nextCount := map[reflect.Type]int{}
+
+	// Types already visited at an earlier level.
+	visited := map[reflect.Type]bool{}
+
+	// Fields found.
+	var fields []field
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if visited[f.typ] {
+				continue
+			}
+			visited[f.typ] = true
+
+			// Scan f.typ for fields to include.
+			for i := 0; i < f.typ.NumField(); i++ {
+				sf := f.typ.Field(i)
+				if sf.PkgPath != "" && !sf.Anonymous { // unexported
+					continue
+				}
+				tag := sf.Tag.Get("json")
+				if tag == "-" {
+					continue
+				}
+				name, opts := parseTag(tag)
+				if !isValidTag(name) {
+					name = ""
+				}
+				index := make([]int, len(f.index)+1)
+				copy(index, f.index)
+				index[len(f.index)] = i
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					// Follow pointer.
+					ft = ft.Elem()
+				}
+
+				// Only strings, floats, integers, and booleans can be quoted.
+				quoted := false
+				if opts.Contains("string") {
+					switch ft.Kind() {
+					case reflect.Bool,
+						reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+						reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+						reflect.Float32, reflect.Float64,
+						reflect.String:
+						quoted = true
+					}
+				}
+
+				// Record found field and index sequence.
+				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+					tagged := name != ""
+					if name == "" {
+						name = sf.Name
+					}
+					fields = append(fields, fillField(field{
+						name:      name,
+						tag:       tagged,
+						index:     index,
+						typ:       ft,
+						omitEmpty: opts.Contains("omitempty"),
+						quoted:    quoted,
+					}))
+					if count[f.typ] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, fields[len(fields)-1])
+					}
+					continue
+				}
+
+				// Record new anonymous struct to explore in next round.
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+				}
+			}
+		}
+	}
+
+	sort.Sort(byName(fields))
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with JSON tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
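+
+// For example, given
+//
+//	type Inner struct{ X int }
+//	type Outer struct {
+//		Inner
+//		X int `json:"X"`
+//	}
+//
+// the top-level Outer.X (index length 1) dominates the promoted Inner.X
+// (index length 2), so only one "X" key is marshaled.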
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
diff --git a/automation/vendor/gopkg.in/mgo.v2/internal/json/extension.go b/automation/vendor/gopkg.in/mgo.v2/internal/json/extension.go
new file mode 100644
index 0000000..1c8fd45
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/internal/json/extension.go
@@ -0,0 +1,95 @@
+package json
+
+import (
+	"reflect"
+)
+
+// Extension holds a set of additional rules to be used when unmarshaling
+// strict JSON or JSON-like content.
+type Extension struct {
+	funcs  map[string]funcExt
+	consts map[string]interface{}
+	keyed  map[string]func([]byte) (interface{}, error)
+	encode map[reflect.Type]func(v interface{}) ([]byte, error)
+
+	unquotedKeys   bool
+	trailingCommas bool
+}
+
+type funcExt struct {
+	key  string
+	args []string
+}
+
+// Extend changes the decoder behavior to consider the provided extension.
+func (dec *Decoder) Extend(ext *Extension) { dec.d.ext = *ext }
+
+// Extend changes the encoder behavior to consider the provided extension.
+func (enc *Encoder) Extend(ext *Extension) { enc.ext = *ext }
+
+// Extend merges into e the rules defined in ext.
+func (e *Extension) Extend(ext *Extension) {
+	for name, fext := range ext.funcs {
+		e.DecodeFunc(name, fext.key, fext.args...)
+	}
+	for name, value := range ext.consts {
+		e.DecodeConst(name, value)
+	}
+	for key, decode := range ext.keyed {
+		e.DecodeKeyed(key, decode)
+	}
+	for typ, encode := range ext.encode {
+		if e.encode == nil {
+			e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
+		}
+		e.encode[typ] = encode
+	}
+}
+
+// DecodeFunc defines a function call that may be observed inside JSON content.
+// A function with the provided name will be unmarshaled as the document
+// {key: {args[0]: ..., args[N]: ...}}.
+func (e *Extension) DecodeFunc(name string, key string, args ...string) {
+	if e.funcs == nil {
+		e.funcs = make(map[string]funcExt)
+	}
+	e.funcs[name] = funcExt{key, args}
+}
+
+// DecodeConst defines a constant name that may be observed inside JSON content
+// and will be decoded with the provided value.
+func (e *Extension) DecodeConst(name string, value interface{}) {
+	if e.consts == nil {
+		e.consts = make(map[string]interface{})
+	}
+	e.consts[name] = value
+}
+
+// DecodeKeyed defines a key that, when observed as the first element inside
+// a JSON document, triggers the decoding of that document via the provided
+// decode function.
+func (e *Extension) DecodeKeyed(key string, decode func(data []byte) (interface{}, error)) {
+	if e.keyed == nil {
+		e.keyed = make(map[string]func([]byte) (interface{}, error))
+	}
+	e.keyed[key] = decode
+}
+
+// DecodeUnquotedKeys defines whether to accept map keys that are unquoted strings.
+func (e *Extension) DecodeUnquotedKeys(accept bool) {
+	e.unquotedKeys = accept
+}
+
+// DecodeTrailingCommas defines whether to accept trailing commas in maps and arrays.
+func (e *Extension) DecodeTrailingCommas(accept bool) {
+	e.trailingCommas = accept
+}
+
+// EncodeType registers a function to encode values with the same type as the
+// provided sample.
+func (e *Extension) EncodeType(sample interface{}, encode func(v interface{}) ([]byte, error)) {
+	if e.encode == nil {
+		e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
+	}
+	e.encode[reflect.TypeOf(sample)] = encode
+}
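+
+// A minimal usage sketch (r stands in for any io.Reader; the names here are
+// illustrative only):
+//
+//	var ext Extension
+//	ext.DecodeFunc("ISODate", "$dateFunc", "S")
+//	dec := NewDecoder(r)
+//	dec.Extend(&ext)
+//
+// With this, input such as {"when": ISODate("2017-01-01")} decodes the
+// function call as the document {"$dateFunc": {"S": "2017-01-01"}}.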
diff --git a/automation/vendor/gopkg.in/mgo.v2/internal/json/fold.go b/automation/vendor/gopkg.in/mgo.v2/internal/json/fold.go
new file mode 100644
index 0000000..9e17012
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/internal/json/fold.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"unicode/utf8"
+)
+
+const (
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
+	kelvin       = '\u212a'
+	smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+//  * S maps to s and to U+017F 'ſ' Latin small letter long s
+//  * k maps to K and to U+212A 'K' Kelvin sign
+// See https://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
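+
+// For instance, foldFunc([]byte("ID")) returns simpleLetterEqualFold (ASCII
+// letters only, none special), foldFunc([]byte("max_size")) returns
+// equalFoldRight (contains 's'), and foldFunc([]byte("tür")) falls back to
+// bytes.EqualFold.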
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			if sb != tb {
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+		tr, size := utf8.DecodeRune(t)
+		switch sb {
+		case 's', 'S':
+			if tr != smallLongEss {
+				return false
+			}
+		case 'k', 'K':
+			if tr != kelvin {
+				return false
+			}
+		default:
+			return false
+		}
+		t = t[size:]
+	}
+	if len(t) > 0 {
+		return false
+	}
+	return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, sb := range s {
+		tb := t[i]
+		if sb == tb {
+			continue
+		}
+		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+			if sb&caseMask != tb&caseMask {
+				return false
+			}
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, b := range s {
+		if b&caseMask != t[i]&caseMask {
+			return false
+		}
+	}
+	return true
+}
diff --git a/automation/vendor/gopkg.in/mgo.v2/internal/json/indent.go b/automation/vendor/gopkg.in/mgo.v2/internal/json/indent.go
new file mode 100644
index 0000000..fba1954
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/internal/json/indent.go
@@ -0,0 +1,141 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import "bytes"
+
+// Compact appends to dst the JSON-encoded src with
+// insignificant space characters elided.
+func Compact(dst *bytes.Buffer, src []byte) error {
+	return compact(dst, src, false)
+}
+
+func compact(dst *bytes.Buffer, src []byte, escape bool) error {
+	origLen := dst.Len()
+	var scan scanner
+	scan.reset()
+	start := 0
+	for i, c := range src {
+		if escape && (c == '<' || c == '>' || c == '&') {
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			dst.WriteString(`\u00`)
+			dst.WriteByte(hex[c>>4])
+			dst.WriteByte(hex[c&0xF])
+			start = i + 1
+		}
+		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			dst.WriteString(`\u202`)
+			dst.WriteByte(hex[src[i+2]&0xF])
+			start = i + 3
+		}
+		v := scan.step(&scan, c)
+		if v >= scanSkipSpace {
+			if v == scanError {
+				break
+			}
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			start = i + 1
+		}
+	}
+	if scan.eof() == scanError {
+		dst.Truncate(origLen)
+		return scan.err
+	}
+	if start < len(src) {
+		dst.Write(src[start:])
+	}
+	return nil
+}
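+
+// For example, Compact(&buf, []byte(`{ "a" : 1 }`)) appends {"a":1} to buf.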
+
+func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
+	dst.WriteByte('\n')
+	dst.WriteString(prefix)
+	for i := 0; i < depth; i++ {
+		dst.WriteString(indent)
+	}
+}
+
+// Indent appends to dst an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst does not begin with the prefix nor
+// any indentation, to make it easier to embed inside other formatted JSON data.
+// Although leading space characters (space, tab, carriage return, newline)
+// at the beginning of src are dropped, trailing space characters
+// at the end of src are preserved and copied to dst.
+// For example, if src has no trailing spaces, neither will dst;
+// if src ends in a trailing newline, so will dst.
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+	origLen := dst.Len()
+	var scan scanner
+	scan.reset()
+	needIndent := false
+	depth := 0
+	for _, c := range src {
+		scan.bytes++
+		v := scan.step(&scan, c)
+		if v == scanSkipSpace {
+			continue
+		}
+		if v == scanError {
+			break
+		}
+		if needIndent && v != scanEndObject && v != scanEndArray {
+			needIndent = false
+			depth++
+			newline(dst, prefix, indent, depth)
+		}
+
+		// Emit semantically uninteresting bytes
+		// (in particular, punctuation in strings) unmodified.
+		if v == scanContinue {
+			dst.WriteByte(c)
+			continue
+		}
+
+		// Add spacing around real punctuation.
+		switch c {
+		case '{', '[':
+			// delay indent so that empty object and array are formatted as {} and [].
+			needIndent = true
+			dst.WriteByte(c)
+
+		case ',':
+			dst.WriteByte(c)
+			newline(dst, prefix, indent, depth)
+
+		case ':':
+			dst.WriteByte(c)
+			dst.WriteByte(' ')
+
+		case '}', ']':
+			if needIndent {
+				// suppress indent in empty object/array
+				needIndent = false
+			} else {
+				depth--
+				newline(dst, prefix, indent, depth)
+			}
+			dst.WriteByte(c)
+
+		default:
+			dst.WriteByte(c)
+		}
+	}
+	if scan.eof() == scanError {
+		dst.Truncate(origLen)
+		return scan.err
+	}
+	return nil
+}
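+
+// For example, indenting {"a":1} with prefix "" and indent "  " appends:
+//
+//	{
+//	  "a": 1
+//	}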
diff --git a/automation/vendor/gopkg.in/mgo.v2/internal/json/scanner.go b/automation/vendor/gopkg.in/mgo.v2/internal/json/scanner.go
new file mode 100644
index 0000000..9708043
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/internal/json/scanner.go
@@ -0,0 +1,697 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+// JSON value parser state machine.
+// Just about at the limit of what is reasonable to write by hand.
+// Some parts are a bit tedious, but overall it nicely factors out the
+// otherwise common code from the multiple scanning functions
+// in this package (Compact, Indent, checkValid, nextValue, etc).
+//
+// This file starts with two simple examples using the scanner
+// before diving into the scanner itself.
+
+import "strconv"
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *scanner) error {
+	scan.reset()
+	for _, c := range data {
+		scan.bytes++
+		if scan.step(scan, c) == scanError {
+			return scan.err
+		}
+	}
+	if scan.eof() == scanError {
+		return scan.err
+	}
+	return nil
+}
+
+// nextValue splits data after the next whole JSON value,
+// returning that value and the bytes that follow it as separate slices.
+// scan is passed in for use by nextValue to avoid an allocation.
+func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
+	scan.reset()
+	for i, c := range data {
+		v := scan.step(scan, c)
+		if v >= scanEndObject {
+			switch v {
+			// probe the scanner with a space to determine whether we will
+			// get scanEnd on the next character. Otherwise, if the next character
+			// is not a space, scanEndTop allocates a needless error.
+			case scanEndObject, scanEndArray, scanEndParams:
+				if scan.step(scan, ' ') == scanEnd {
+					return data[:i+1], data[i+1:], nil
+				}
+			case scanError:
+				return nil, nil, scan.err
+			case scanEnd:
+				return data[:i], data[i:], nil
+			}
+		}
+	}
+	if scan.eof() == scanError {
+		return nil, nil, scan.err
+	}
+	return data, nil, nil
+}
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError struct {
+	msg    string // description of error
+	Offset int64  // error occurred after reading Offset bytes
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// A scanner is a JSON scanning state machine.
+// Callers call scan.reset() and then pass bytes in one at a time
+// by calling scan.step(&scan, c) for each byte.
+// The return value, referred to as an opcode, tells the
+// caller about significant parsing events like beginning
+// and ending literals, objects, and arrays, so that the
+// caller can follow along if it wishes.
+// The return value scanEnd indicates that a single top-level
+// JSON value has been completed, *before* the byte that
+// just got passed in.  (The indication must be delayed in order
+// to recognize the end of numbers: is 123 a whole value or
+// the beginning of 12345e+6?).
+type scanner struct {
+	// The step is a func to be called to execute the next transition.
+	// Also tried using an integer constant and a single func
+	// with a switch, but using the func directly was 10% faster
+	// on a 64-bit Mac Mini, and it's nicer to read.
+	step func(*scanner, byte) int
+
+	// Reached end of top-level value.
+	endTop bool
+
+	// Stack of what we're in the middle of - array values, object keys, object values.
+	parseState []int
+
+	// Error that happened, if any.
+	err error
+
+	// 1-byte redo (see undo method)
+	redo      bool
+	redoCode  int
+	redoState func(*scanner, byte) int
+
+	// total bytes consumed, updated by decoder.Decode
+	bytes int64
+}
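+
+// A typical driving loop, as in checkValid above (a sketch):
+//
+//	var s scanner
+//	s.reset()
+//	for _, c := range []byte(`{"a":1}`) {
+//		if s.step(&s, c) == scanError {
+//			return s.err
+//		}
+//	}
+//	// s.eof() then reports whether the value was complete.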
+
+// These values are returned by the state transition functions
+// assigned to scanner.step and the method scanner.eof.
+// They give details about the current state of the scan that
+// callers might be interested to know about.
+// It is okay to ignore the return value of any particular
+// call to scanner.step: if one call returns scanError,
+// every subsequent call will return scanError too.
+const (
+	// Continue.
+	scanContinue     = iota // uninteresting byte
+	scanBeginLiteral        // end implied by next result != scanContinue
+	scanBeginObject         // begin object
+	scanObjectKey           // just finished object key (string)
+	scanObjectValue         // just finished non-last object value
+	scanEndObject           // end object (implies scanObjectValue if possible)
+	scanBeginArray          // begin array
+	scanArrayValue          // just finished array value
+	scanEndArray            // end array (implies scanArrayValue if possible)
+	scanBeginName           // begin function call
+	scanParam               // begin function argument
+	scanEndParams           // end function call
+	scanSkipSpace           // space byte; can skip; known to be last "continue" result
+
+	// Stop.
+	scanEnd   // top-level value ended *before* this byte; known to be first "stop" result
+	scanError // hit an error, scanner.err.
+)
+
+// These values are stored in the parseState stack.
+// They give the current state of a composite value
+// being scanned. If the parser is inside a nested value
+// the parseState describes the nested state, outermost at entry 0.
+const (
+	parseObjectKey   = iota // parsing object key (before colon)
+	parseObjectValue        // parsing object value (after colon)
+	parseArrayValue         // parsing array value
+	parseName               // parsing unquoted name
+	parseParam              // parsing function argument value
+)
+
+// reset prepares the scanner for use.
+// It must be called before calling s.step.
+func (s *scanner) reset() {
+	s.step = stateBeginValue
+	s.parseState = s.parseState[0:0]
+	s.err = nil
+	s.redo = false
+	s.endTop = false
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() int {
+	if s.err != nil {
+		return scanError
+	}
+	if s.endTop {
+		return scanEnd
+	}
+	s.step(s, ' ')
+	if s.endTop {
+		return scanEnd
+	}
+	if s.err == nil {
+		s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
+	}
+	return scanError
+}
+
+// pushParseState pushes a new parse state p onto the parse stack.
+func (s *scanner) pushParseState(p int) {
+	s.parseState = append(s.parseState, p)
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+	n := len(s.parseState) - 1
+	s.parseState = s.parseState[0:n]
+	s.redo = false
+	if n == 0 {
+		s.step = stateEndTop
+		s.endTop = true
+	} else {
+		s.step = stateEndValue
+	}
+}
+
+func isSpace(c byte) bool {
+	return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == ']' {
+		return stateEndValue(s, c)
+	}
+	return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	switch c {
+	case '{':
+		s.step = stateBeginStringOrEmpty
+		s.pushParseState(parseObjectKey)
+		return scanBeginObject
+	case '[':
+		s.step = stateBeginValueOrEmpty
+		s.pushParseState(parseArrayValue)
+		return scanBeginArray
+	case '"':
+		s.step = stateInString
+		return scanBeginLiteral
+	case '-':
+		s.step = stateNeg
+		return scanBeginLiteral
+	case '0': // beginning of 0.123
+		s.step = state0
+		return scanBeginLiteral
+	case 'n':
+		s.step = stateNew0
+		return scanBeginName
+	}
+	if '1' <= c && c <= '9' { // beginning of 1234.5
+		s.step = state1
+		return scanBeginLiteral
+	}
+	if isName(c) {
+		s.step = stateName
+		return scanBeginName
+	}
+	return s.error(c, "looking for beginning of value")
+}
+
+func isName(c byte) bool {
+	return c == '$' || c == '_' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9'
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == '}' {
+		n := len(s.parseState)
+		s.parseState[n-1] = parseObjectValue
+		return stateEndValue(s, c)
+	}
+	return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == '"' {
+		s.step = stateInString
+		return scanBeginLiteral
+	}
+	if isName(c) {
+		s.step = stateName
+		return scanBeginName
+	}
+	return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c byte) int {
+	n := len(s.parseState)
+	if n == 0 {
+		// Completed top-level before the current byte.
+		s.step = stateEndTop
+		s.endTop = true
+		return stateEndTop(s, c)
+	}
+	if c <= ' ' && isSpace(c) {
+		s.step = stateEndValue
+		return scanSkipSpace
+	}
+	ps := s.parseState[n-1]
+	switch ps {
+	case parseObjectKey:
+		if c == ':' {
+			s.parseState[n-1] = parseObjectValue
+			s.step = stateBeginValue
+			return scanObjectKey
+		}
+		return s.error(c, "after object key")
+	case parseObjectValue:
+		if c == ',' {
+			s.parseState[n-1] = parseObjectKey
+			s.step = stateBeginStringOrEmpty
+			return scanObjectValue
+		}
+		if c == '}' {
+			s.popParseState()
+			return scanEndObject
+		}
+		return s.error(c, "after object key:value pair")
+	case parseArrayValue:
+		if c == ',' {
+			s.step = stateBeginValueOrEmpty
+			return scanArrayValue
+		}
+		if c == ']' {
+			s.popParseState()
+			return scanEndArray
+		}
+		return s.error(c, "after array element")
+	case parseParam:
+		if c == ',' {
+			s.step = stateBeginValue
+			return scanParam
+		}
+		if c == ')' {
+			s.popParseState()
+			return scanEndParams
+		}
+		return s.error(c, "after array element")
+	}
+	return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c byte) int {
+	if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+		// Complain about non-space byte on next call.
+		s.error(c, "after top-level value")
+	}
+	return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c byte) int {
+	if c == '"' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	if c == '\\' {
+		s.step = stateInStringEsc
+		return scanContinue
+	}
+	if c < 0x20 {
+		return s.error(c, "in string literal")
+	}
+	return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c byte) int {
+	switch c {
+	case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+		s.step = stateInString
+		return scanContinue
+	case 'u':
+		s.step = stateInStringEscU
+		return scanContinue
+	}
+	return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInStringEscU1
+		return scanContinue
+	}
+	// not a hex digit
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInStringEscU12
+		return scanContinue
+	}
+	// not a hex digit
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInStringEscU123
+		return scanContinue
+	}
+	// not a hex digit
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInString
+		return scanContinue
+	}
+	// not a hex digit
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *scanner, c byte) int {
+	if c == '0' {
+		s.step = state0
+		return scanContinue
+	}
+	if '1' <= c && c <= '9' {
+		s.step = state1
+		return scanContinue
+	}
+	return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		s.step = state1
+		return scanContinue
+	}
+	return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c byte) int {
+	if c == '.' {
+		s.step = stateDot
+		return scanContinue
+	}
+	if c == 'e' || c == 'E' {
+		s.step = stateE
+		return scanContinue
+	}
+	return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		s.step = stateDot0
+		return scanContinue
+	}
+	return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		return scanContinue
+	}
+	if c == 'e' || c == 'E' {
+		s.step = stateE
+		return scanContinue
+	}
+	return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c byte) int {
+	if c == '+' || c == '-' {
+		s.step = stateESign
+		return scanContinue
+	}
+	return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		s.step = stateE0
+		return scanContinue
+	}
+	return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		return scanContinue
+	}
+	return stateEndValue(s, c)
+}
+
+// stateNew0 is the state after reading `n`.
+func stateNew0(s *scanner, c byte) int {
+	if c == 'e' {
+		s.step = stateNew1
+		return scanContinue
+	}
+	s.step = stateName
+	return stateName(s, c)
+}
+
+// stateNew1 is the state after reading `ne`.
+func stateNew1(s *scanner, c byte) int {
+	if c == 'w' {
+		s.step = stateNew2
+		return scanContinue
+	}
+	s.step = stateName
+	return stateName(s, c)
+}
+
+// stateNew2 is the state after reading `new`.
+func stateNew2(s *scanner, c byte) int {
+	s.step = stateName
+	if c == ' ' {
+		return scanContinue
+	}
+	return stateName(s, c)
+}
+
+// stateName is the state while reading an unquoted function name.
+func stateName(s *scanner, c byte) int {
+	if isName(c) {
+		return scanContinue
+	}
+	if c == '(' {
+		s.step = stateParamOrEmpty
+		s.pushParseState(parseParam)
+		return scanParam
+	}
+	return stateEndValue(s, c)
+}
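+
+// The name states above extend the standard library scanner: they allow
+// unquoted identifiers and calls such as ObjectId("...") or new Date(0) to
+// scan as scanBeginName, scanParam, and scanEndParams events.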
+
+// stateParamOrEmpty is the state after reading `(`.
+func stateParamOrEmpty(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == ')' {
+		return stateEndValue(s, c)
+	}
+	return stateBeginValue(s, c)
+}
+
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c byte) int {
+	if c == 'r' {
+		s.step = stateTr
+		return scanContinue
+	}
+	return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c byte) int {
+	if c == 'u' {
+		s.step = stateTru
+		return scanContinue
+	}
+	return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c byte) int {
+	if c == 'e' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c byte) int {
+	if c == 'a' {
+		s.step = stateFa
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c byte) int {
+	if c == 'l' {
+		s.step = stateFal
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c byte) int {
+	if c == 's' {
+		s.step = stateFals
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c byte) int {
+	if c == 'e' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c byte) int {
+	if c == 'u' {
+		s.step = stateNu
+		return scanContinue
+	}
+	return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c byte) int {
+	if c == 'l' {
+		s.step = stateNul
+		return scanContinue
+	}
+	return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c byte) int {
+	if c == 'l' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c byte) int {
+	return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c byte, context string) int {
+	s.step = stateError
+	s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
+	return scanError
+}
+
+// quoteChar formats c as a quoted character literal
+func quoteChar(c byte) string {
+	// special cases - different from quoted strings
+	if c == '\'' {
+		return `'\''`
+	}
+	if c == '"' {
+		return `'"'`
+	}
+
+	// use quoted string with different quotation marks
+	s := strconv.Quote(string(c))
+	return "'" + s[1:len(s)-1] + "'"
+}
+
+// undo causes the scanner to return scanCode from the next state transition.
+// This gives callers a simple 1-byte undo mechanism.
+func (s *scanner) undo(scanCode int) {
+	if s.redo {
+		panic("json: invalid use of scanner")
+	}
+	s.redoCode = scanCode
+	s.redoState = s.step
+	s.step = stateRedo
+	s.redo = true
+}
+
+// stateRedo helps implement the scanner's 1-byte undo.
+func stateRedo(s *scanner, c byte) int {
+	s.redo = false
+	s.step = s.redoState
+	return s.redoCode
+}
diff --git a/automation/vendor/gopkg.in/mgo.v2/internal/json/stream.go b/automation/vendor/gopkg.in/mgo.v2/internal/json/stream.go
new file mode 100644
index 0000000..e023702
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/internal/json/stream.go
@@ -0,0 +1,510 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"errors"
+	"io"
+)
+
+// A Decoder reads and decodes JSON values from an input stream.
+type Decoder struct {
+	r     io.Reader
+	buf   []byte
+	d     decodeState
+	scanp int // start of unread data in buf
+	scan  scanner
+	err   error
+
+	tokenState int
+	tokenStack []int
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may
+// read data from r beyond the JSON values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r: r}
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
+
+// Decode reads the next JSON-encoded value from its
+// input and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about
+// the conversion of JSON into a Go value.
+func (dec *Decoder) Decode(v interface{}) error {
+	if dec.err != nil {
+		return dec.err
+	}
+
+	if err := dec.tokenPrepareForDecode(); err != nil {
+		return err
+	}
+
+	if !dec.tokenValueAllowed() {
+		return &SyntaxError{msg: "not at beginning of value"}
+	}
+
+	// Read whole value into buffer.
+	n, err := dec.readValue()
+	if err != nil {
+		return err
+	}
+	dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
+	dec.scanp += n
+
+	// Don't save err from unmarshal into dec.err:
+	// the connection is still usable since we read a complete JSON
+	// object from it before the error happened.
+	err = dec.d.unmarshal(v)
+
+	// fixup token streaming state
+	dec.tokenValueEnd()
+
+	return err
+}
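+
+// For example, reading a stream of JSON documents one at a time (r stands in
+// for any io.Reader):
+//
+//	dec := NewDecoder(r)
+//	for {
+//		var v map[string]interface{}
+//		if err := dec.Decode(&v); err != nil {
+//			break // io.EOF once the stream is exhausted
+//		}
+//	}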
+
+// Buffered returns a reader of the data remaining in the Decoder's
+// buffer. The reader is valid until the next call to Decode.
+func (dec *Decoder) Buffered() io.Reader {
+	return bytes.NewReader(dec.buf[dec.scanp:])
+}
+
+// readValue reads a JSON value into dec.buf.
+// It returns the length of the encoding.
+func (dec *Decoder) readValue() (int, error) {
+	dec.scan.reset()
+
+	scanp := dec.scanp
+	var err error
+Input:
+	for {
+		// Look in the buffer for a new value.
+		for i, c := range dec.buf[scanp:] {
+			dec.scan.bytes++
+			v := dec.scan.step(&dec.scan, c)
+			if v == scanEnd {
+				scanp += i
+				break Input
+			}
+			// scanEnd is delayed one byte.
+			// We might block trying to get that byte from src,
+			// so instead invent a space byte.
+			if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
+				scanp += i + 1
+				break Input
+			}
+			if v == scanError {
+				dec.err = dec.scan.err
+				return 0, dec.scan.err
+			}
+		}
+		scanp = len(dec.buf)
+
+		// Did the last read have an error?
+		// Delayed until now to allow buffer scan.
+		if err != nil {
+			if err == io.EOF {
+				if dec.scan.step(&dec.scan, ' ') == scanEnd {
+					break Input
+				}
+				if nonSpace(dec.buf) {
+					err = io.ErrUnexpectedEOF
+				}
+			}
+			dec.err = err
+			return 0, err
+		}
+
+		n := scanp - dec.scanp
+		err = dec.refill()
+		scanp = dec.scanp + n
+	}
+	return scanp - dec.scanp, nil
+}
+
+func (dec *Decoder) refill() error {
+	// Make room to read more into the buffer.
+	// First slide down data already consumed.
+	if dec.scanp > 0 {
+		n := copy(dec.buf, dec.buf[dec.scanp:])
+		dec.buf = dec.buf[:n]
+		dec.scanp = 0
+	}
+
+	// Grow buffer if not large enough.
+	const minRead = 512
+	if cap(dec.buf)-len(dec.buf) < minRead {
+		newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
+		copy(newBuf, dec.buf)
+		dec.buf = newBuf
+	}
+
+	// Read. Delay error for next iteration (after scan).
+	n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
+	dec.buf = dec.buf[0 : len(dec.buf)+n]
+
+	return err
+}
+
+func nonSpace(b []byte) bool {
+	for _, c := range b {
+		if !isSpace(c) {
+			return true
+		}
+	}
+	return false
+}
+
+// An Encoder writes JSON values to an output stream.
+type Encoder struct {
+	w          io.Writer
+	err        error
+	escapeHTML bool
+
+	indentBuf    *bytes.Buffer
+	indentPrefix string
+	indentValue  string
+
+	ext Extension
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{w: w, escapeHTML: true}
+}
+
+// Encode writes the JSON encoding of v to the stream,
+// followed by a newline character.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values to JSON.
+func (enc *Encoder) Encode(v interface{}) error {
+	if enc.err != nil {
+		return enc.err
+	}
+	e := newEncodeState()
+	e.ext = enc.ext
+	err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
+	if err != nil {
+		return err
+	}
+
+	// Terminate each value with a newline.
+	// This makes the output look a little nicer
+	// when debugging, and some kind of space
+	// is required if the encoded value was a number,
+	// so that the reader knows there aren't more
+	// digits coming.
+	e.WriteByte('\n')
+
+	b := e.Bytes()
+	if enc.indentBuf != nil {
+		enc.indentBuf.Reset()
+		err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue)
+		if err != nil {
+			return err
+		}
+		b = enc.indentBuf.Bytes()
+	}
+	if _, err = enc.w.Write(b); err != nil {
+		enc.err = err
+	}
+	encodeStatePool.Put(e)
+	return err
+}
+
+// Indent sets the encoder to format each encoded value with Indent.
+func (enc *Encoder) Indent(prefix, indent string) {
+	enc.indentBuf = new(bytes.Buffer)
+	enc.indentPrefix = prefix
+	enc.indentValue = indent
+}
+
+// DisableHTMLEscaping causes the encoder not to escape angle brackets
+// ("<" and ">") or ampersands ("&") in JSON strings.
+func (enc *Encoder) DisableHTMLEscaping() {
+	enc.escapeHTML = false
+}
+
+// RawMessage is a raw encoded JSON value.
+// It implements Marshaler and Unmarshaler and can
+// be used to delay JSON decoding or precompute a JSON encoding.
+type RawMessage []byte
+
+// MarshalJSON returns *m as the JSON encoding of m.
+func (m *RawMessage) MarshalJSON() ([]byte, error) {
+	return *m, nil
+}
+
+// UnmarshalJSON sets *m to a copy of data.
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
+	if m == nil {
+		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
+	}
+	*m = append((*m)[0:0], data...)
+	return nil
+}
+
+var _ Marshaler = (*RawMessage)(nil)
+var _ Unmarshaler = (*RawMessage)(nil)
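+
+// For example, RawMessage can defer decoding of one field until a sibling
+// field has been examined:
+//
+//	var msg struct {
+//		Type string
+//		Data RawMessage
+//	}
+//	// After Unmarshal, msg.Data still holds the raw bytes of the "Data"
+//	// value, to be decoded once msg.Type is known.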
+
+// A Token holds a value of one of these types:
+//
+//	Delim, for the four JSON delimiters [ ] { }
+//	bool, for JSON booleans
+//	float64, for JSON numbers
+//	Number, for JSON numbers
+//	string, for JSON string literals
+//	nil, for JSON null
+//
+type Token interface{}
+
+const (
+	tokenTopValue = iota
+	tokenArrayStart
+	tokenArrayValue
+	tokenArrayComma
+	tokenObjectStart
+	tokenObjectKey
+	tokenObjectColon
+	tokenObjectValue
+	tokenObjectComma
+)
+
+// tokenPrepareForDecode advances the token state from a separator state to a value state.
+func (dec *Decoder) tokenPrepareForDecode() error {
+	// Note: Not calling peek before switch, to avoid
+	// putting peek into the standard Decode path.
+	// peek is only called when using the Token API.
+	switch dec.tokenState {
+	case tokenArrayComma:
+		c, err := dec.peek()
+		if err != nil {
+			return err
+		}
+		if c != ',' {
+			return &SyntaxError{"expected comma after array element", 0}
+		}
+		dec.scanp++
+		dec.tokenState = tokenArrayValue
+	case tokenObjectColon:
+		c, err := dec.peek()
+		if err != nil {
+			return err
+		}
+		if c != ':' {
+			return &SyntaxError{"expected colon after object key", 0}
+		}
+		dec.scanp++
+		dec.tokenState = tokenObjectValue
+	}
+	return nil
+}
+
+func (dec *Decoder) tokenValueAllowed() bool {
+	switch dec.tokenState {
+	case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
+		return true
+	}
+	return false
+}
+
+func (dec *Decoder) tokenValueEnd() {
+	switch dec.tokenState {
+	case tokenArrayStart, tokenArrayValue:
+		dec.tokenState = tokenArrayComma
+	case tokenObjectValue:
+		dec.tokenState = tokenObjectComma
+	}
+}
+
+// A Delim is a JSON array or object delimiter, one of [ ] { or }.
+type Delim rune
+
+func (d Delim) String() string {
+	return string(d)
+}
+
+// Token returns the next JSON token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Token guarantees that the delimiters [ ] { } it returns are
+// properly nested and matched: if Token encounters an unexpected
+// delimiter in the input, it will return an error.
+//
+// The input stream consists of basic JSON values—bool, string,
+// number, and null—along with delimiters [ ] { } of type Delim
+// to mark the start and end of arrays and objects.
+// Commas and colons are elided.
+func (dec *Decoder) Token() (Token, error) {
+	for {
+		c, err := dec.peek()
+		if err != nil {
+			return nil, err
+		}
+		switch c {
+		case '[':
+			if !dec.tokenValueAllowed() {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+			dec.tokenState = tokenArrayStart
+			return Delim('['), nil
+
+		case ']':
+			if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+			dec.tokenValueEnd()
+			return Delim(']'), nil
+
+		case '{':
+			if !dec.tokenValueAllowed() {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+			dec.tokenState = tokenObjectStart
+			return Delim('{'), nil
+
+		case '}':
+			if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+			dec.tokenValueEnd()
+			return Delim('}'), nil
+
+		case ':':
+			if dec.tokenState != tokenObjectColon {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenState = tokenObjectValue
+			continue
+
+		case ',':
+			if dec.tokenState == tokenArrayComma {
+				dec.scanp++
+				dec.tokenState = tokenArrayValue
+				continue
+			}
+			if dec.tokenState == tokenObjectComma {
+				dec.scanp++
+				dec.tokenState = tokenObjectKey
+				continue
+			}
+			return dec.tokenError(c)
+
+		case '"':
+			if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
+				var x string
+				old := dec.tokenState
+				dec.tokenState = tokenTopValue
+				err := dec.Decode(&x)
+				dec.tokenState = old
+				if err != nil {
+					clearOffset(err)
+					return nil, err
+				}
+				dec.tokenState = tokenObjectColon
+				return x, nil
+			}
+			fallthrough
+
+		default:
+			if !dec.tokenValueAllowed() {
+				return dec.tokenError(c)
+			}
+			var x interface{}
+			if err := dec.Decode(&x); err != nil {
+				clearOffset(err)
+				return nil, err
+			}
+			return x, nil
+		}
+	}
+}
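+
+// A typical streaming loop over tokens (r stands in for any io.Reader):
+//
+//	dec := NewDecoder(r)
+//	for {
+//		t, err := dec.Token()
+//		if err == io.EOF {
+//			break
+//		}
+//		// t is a Delim, bool, float64, Number, string, or nil.
+//	}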
+
+func clearOffset(err error) {
+	if s, ok := err.(*SyntaxError); ok {
+		s.Offset = 0
+	}
+}
+
+func (dec *Decoder) tokenError(c byte) (Token, error) {
+	var context string
+	switch dec.tokenState {
+	case tokenTopValue:
+		context = " looking for beginning of value"
+	case tokenArrayStart, tokenArrayValue, tokenObjectValue:
+		context = " looking for beginning of value"
+	case tokenArrayComma:
+		context = " after array element"
+	case tokenObjectKey:
+		context = " looking for beginning of object key string"
+	case tokenObjectColon:
+		context = " after object key"
+	case tokenObjectComma:
+		context = " after object key:value pair"
+	}
+	return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
+}
+
+// More reports whether there is another element in the
+// current array or object being parsed.
+func (dec *Decoder) More() bool {
+	c, err := dec.peek()
+	return err == nil && c != ']' && c != '}'
+}
+
+func (dec *Decoder) peek() (byte, error) {
+	var err error
+	for {
+		for i := dec.scanp; i < len(dec.buf); i++ {
+			c := dec.buf[i]
+			if isSpace(c) {
+				continue
+			}
+			dec.scanp = i
+			return c, nil
+		}
+		// buffer has been scanned, now report any error
+		if err != nil {
+			return 0, err
+		}
+		err = dec.refill()
+	}
+}
+
+/*
+TODO
+
+// EncodeToken writes the given JSON token to the stream.
+// It returns an error if the delimiters [ ] { } are not properly used.
+//
+// EncodeToken does not call Flush, because usually it is part of
+// a larger operation such as Encode, and those will call Flush when finished.
+// Callers that create an Encoder and then invoke EncodeToken directly,
+// without using Encode, need to call Flush when finished to ensure that
+// the JSON is written to the underlying writer.
+func (e *Encoder) EncodeToken(t Token) error  {
+	...
+}
+
+*/
diff --git a/automation/vendor/gopkg.in/mgo.v2/internal/json/tags.go b/automation/vendor/gopkg.in/mgo.v2/internal/json/tags.go
new file mode 100644
index 0000000..c38fd51
--- /dev/null
+++ b/automation/vendor/gopkg.in/mgo.v2/internal/json/tags.go
@@ -0,0 +1,44 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"strings"
+)
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx], tagOptions(tag[idx+1:])
+	}
+	return tag, tagOptions("")
+}
+
+// Contains reports whether the comma-separated list of options contains a
+// particular optionName flag. optionName must be surrounded by a string
+// boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}
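+
+// For example, parseTag("name,omitempty,string") yields the name "name" and
+// options for which Contains("omitempty") and Contains("string") are true.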
diff --git a/automation/vendor/gopkg.in/yaml.v2/LICENSE b/automation/vendor/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 0000000..866d74a
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/automation/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/automation/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 0000000..8da58fb
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+    apic.go
+    emitterc.go
+    parserc.go
+    readerc.go
+    scannerc.go
+    writerc.go
+    yamlh.go
+    yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/automation/vendor/gopkg.in/yaml.v2/README.md b/automation/vendor/gopkg.in/yaml.v2/README.md
new file mode 100644
index 0000000..1884de6
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/README.md
@@ -0,0 +1,131 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
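+For instance, anchors and map merging mean a document like the following
+(an illustrative snippet, not taken from the package docs) decodes
+`development` with both the inherited defaults and its own keys:
+
+```yaml
+defaults: &defaults
+  adapter: postgres
+  host: localhost
+
+development:
+  <<: *defaults
+  database: dev_db
+```
+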
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+    go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+  * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+        "fmt"
+        "log"
+
+        "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+`
+
+type T struct {
+        A string
+        B struct {
+                RenamedC int   `yaml:"c"`
+                D        []int `yaml:",flow"`
+        }
+}
+
+func main() {
+        t := T{}
+
+        err := yaml.Unmarshal([]byte(data), &t)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- t:\n%v\n\n", t)
+
+        d, err := yaml.Marshal(&t)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- t dump:\n%s\n\n", string(d))
+
+        m := make(map[interface{}]interface{})
+
+        err = yaml.Unmarshal([]byte(data), &m)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- m:\n%v\n\n", m)
+
+        d, err = yaml.Marshal(&m)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+  c: 2
+  d:
+  - 3
+  - 4
+```
+
diff --git a/automation/vendor/gopkg.in/yaml.v2/apic.go b/automation/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 0000000..95ec014
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,742 @@
+package yaml
+
+import (
+	"io"
+	"os"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+	// Check if we can move the queue at the beginning of the buffer.
+	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+		if parser.tokens_head != len(parser.tokens) {
+			copy(parser.tokens, parser.tokens[parser.tokens_head:])
+		}
+		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+		parser.tokens_head = 0
+	}
+	parser.tokens = append(parser.tokens, *token)
+	if pos < 0 {
+		return
+	}
+	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+	parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+	*parser = yaml_parser_t{
+		raw_buffer: make([]byte, 0, input_raw_buffer_size),
+		buffer:     make([]byte, 0, input_buffer_size),
+	}
+	return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+	*parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	if parser.input_pos == len(parser.input) {
+		return 0, io.EOF
+	}
+	n = copy(buffer, parser.input[parser.input_pos:])
+	parser.input_pos += n
+	return n, nil
+}
+
+// File read handler.
+func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	return parser.input_file.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_string_read_handler
+	parser.input = input
+	parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_file_read_handler
+	parser.input_file = file
+}
+
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+	if parser.encoding != yaml_ANY_ENCODING {
+		panic("must set the encoding only once")
+	}
+	parser.encoding = encoding
+}
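+
+// A minimal in-memory parse setup using the helpers above (a sketch
+// mirroring what decode.go's newParser does):
+//
+//	var parser yaml_parser_t
+//	yaml_parser_initialize(&parser)
+//	yaml_parser_set_input_string(&parser, []byte("a: 1"))
+//	// ... drive yaml_parser_parse(&parser, &event) until STREAM-END ...
+//	yaml_parser_delete(&parser)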
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+	*emitter = yaml_emitter_t{
+		buffer:     make([]byte, output_buffer_size),
+		raw_buffer: make([]byte, 0, output_raw_buffer_size),
+		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
+		events:     make([]yaml_event_t, 0, initial_queue_size),
+	}
+	return true
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+	*emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+	return nil
+}
+
+// File write handler.
+func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	_, err := emitter.output_file.Write(buffer)
+	return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_string_write_handler
+	emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_file_write_handler
+	emitter.output_file = file
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+	if emitter.encoding != yaml_ANY_ENCODING {
+		panic("must set the output encoding only once")
+	}
+	emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+	emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+	if indent < 2 || indent > 9 {
+		indent = 2
+	}
+	emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+	if width < 0 {
+		width = -1
+	}
+	emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+	emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+	emitter.line_break = line_break
+}
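+
+// Likewise, a minimal emitter setup targeting an in-memory buffer
+// (a sketch using only the setters above):
+//
+//	var emitter yaml_emitter_t
+//	var out []byte
+//	yaml_emitter_initialize(&emitter)
+//	yaml_emitter_set_output_string(&emitter, &out)
+//	yaml_emitter_set_unicode(&emitter, true)
+//	// ... feed events through yaml_emitter_emit(&emitter, &event) ...
+//	yaml_emitter_delete(&emitter)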
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+//    assert(token);  // Non-NULL token object expected.
+//
+//    switch (token.type)
+//    {
+//        case YAML_TAG_DIRECTIVE_TOKEN:
+//            yaml_free(token.data.tag_directive.handle);
+//            yaml_free(token.data.tag_directive.prefix);
+//            break;
+//
+//        case YAML_ALIAS_TOKEN:
+//            yaml_free(token.data.alias.value);
+//            break;
+//
+//        case YAML_ANCHOR_TOKEN:
+//            yaml_free(token.data.anchor.value);
+//            break;
+//
+//        case YAML_TAG_TOKEN:
+//            yaml_free(token.data.tag.handle);
+//            yaml_free(token.data.tag.suffix);
+//            break;
+//
+//        case YAML_SCALAR_TOKEN:
+//            yaml_free(token.data.scalar.value);
+//            break;
+//
+//        default:
+//            break;
+//    }
+//
+//    memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+//    yaml_char_t *end = start+length;
+//    yaml_char_t *pointer = start;
+//
+//    while (pointer < end) {
+//        unsigned char octet;
+//        unsigned int width;
+//        unsigned int value;
+//        size_t k;
+//
+//        octet = pointer[0];
+//        width = (octet & 0x80) == 0x00 ? 1 :
+//                (octet & 0xE0) == 0xC0 ? 2 :
+//                (octet & 0xF0) == 0xE0 ? 3 :
+//                (octet & 0xF8) == 0xF0 ? 4 : 0;
+//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+//        if (!width) return 0;
+//        if (pointer+width > end) return 0;
+//        for (k = 1; k < width; k ++) {
+//            octet = pointer[k];
+//            if ((octet & 0xC0) != 0x80) return 0;
+//            value = (value << 6) + (octet & 0x3F);
+//        }
+//        if (!((width == 1) ||
+//            (width == 2 && value >= 0x80) ||
+//            (width == 3 && value >= 0x800) ||
+//            (width == 4 && value >= 0x10000))) return 0;
+//
+//        pointer += width;
+//    }
+//
+//    return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+	*event = yaml_event_t{
+		typ:      yaml_STREAM_START_EVENT,
+		encoding: encoding,
+	}
+	return true
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+	*event = yaml_event_t{
+		typ: yaml_STREAM_END_EVENT,
+	}
+	return true
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
+	tag_directives []yaml_tag_directive_t, implicit bool) bool {
+	*event = yaml_event_t{
+		typ:               yaml_DOCUMENT_START_EVENT,
+		version_directive: version_directive,
+		tag_directives:    tag_directives,
+		implicit:          implicit,
+	}
+	return true
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+	*event = yaml_event_t{
+		typ:      yaml_DOCUMENT_END_EVENT,
+		implicit: implicit,
+	}
+	return true
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    anchor_copy *yaml_char_t = NULL
+//
+//    assert(event) // Non-NULL event object is expected.
+//    assert(anchor) // Non-NULL anchor is expected.
+//
+//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+//    anchor_copy = yaml_strdup(anchor)
+//    if (!anchor_copy)
+//        return 0
+//
+//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+//    return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+	*event = yaml_event_t{
+		typ:             yaml_SCALAR_EVENT,
+		anchor:          anchor,
+		tag:             tag,
+		value:           value,
+		implicit:        plain_implicit,
+		quoted_implicit: quoted_implicit,
+		style:           yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+	*event = yaml_event_t{
+		typ:      yaml_SEQUENCE_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+	*event = yaml_event_t{
+		typ: yaml_SEQUENCE_END_EVENT,
+	}
+	return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+	*event = yaml_event_t{
+		typ:      yaml_MAPPING_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+	return true
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+	*event = yaml_event_t{
+		typ: yaml_MAPPING_END_EVENT,
+	}
+	return true
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+	*event = yaml_event_t{}
+}
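+
+// For a small document such as "a: 1", the initializers above are driven
+// in the standard YAML event order (shown for reference): STREAM-START,
+// DOCUMENT-START, MAPPING-START, SCALAR("a"), SCALAR("1"), MAPPING-END,
+// DOCUMENT-END, STREAM-END.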
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+//        version_directive *yaml_version_directive_t,
+//        tag_directives_start *yaml_tag_directive_t,
+//        tag_directives_end *yaml_tag_directive_t,
+//        start_implicit int, end_implicit int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    struct {
+//        start *yaml_node_t
+//        end *yaml_node_t
+//        top *yaml_node_t
+//    } nodes = { NULL, NULL, NULL }
+//    version_directive_copy *yaml_version_directive_t = NULL
+//    struct {
+//        start *yaml_tag_directive_t
+//        end *yaml_tag_directive_t
+//        top *yaml_tag_directive_t
+//    } tag_directives_copy = { NULL, NULL, NULL }
+//    value yaml_tag_directive_t = { NULL, NULL }
+//    mark yaml_mark_t = { 0, 0, 0 }
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert((tag_directives_start && tag_directives_end) ||
+//            (tag_directives_start == tag_directives_end))
+//                            // Valid tag directives are expected.
+//
+//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+//    if (version_directive) {
+//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+//        if (!version_directive_copy) goto error
+//        version_directive_copy.major = version_directive.major
+//        version_directive_copy.minor = version_directive.minor
+//    }
+//
+//    if (tag_directives_start != tag_directives_end) {
+//        tag_directive *yaml_tag_directive_t
+//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+//            goto error
+//        for (tag_directive = tag_directives_start
+//                tag_directive != tag_directives_end; tag_directive ++) {
+//            assert(tag_directive.handle)
+//            assert(tag_directive.prefix)
+//            if (!yaml_check_utf8(tag_directive.handle,
+//                        strlen((char *)tag_directive.handle)))
+//                goto error
+//            if (!yaml_check_utf8(tag_directive.prefix,
+//                        strlen((char *)tag_directive.prefix)))
+//                goto error
+//            value.handle = yaml_strdup(tag_directive.handle)
+//            value.prefix = yaml_strdup(tag_directive.prefix)
+//            if (!value.handle || !value.prefix) goto error
+//            if (!PUSH(&context, tag_directives_copy, value))
+//                goto error
+//            value.handle = NULL
+//            value.prefix = NULL
+//        }
+//    }
+//
+//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+//            tag_directives_copy.start, tag_directives_copy.top,
+//            start_implicit, end_implicit, mark, mark)
+//
+//    return 1
+//
+//error:
+//    STACK_DEL(&context, nodes)
+//    yaml_free(version_directive_copy)
+//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
+//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+//        yaml_free(value.handle)
+//        yaml_free(value.prefix)
+//    }
+//    STACK_DEL(&context, tag_directives_copy)
+//    yaml_free(value.handle)
+//    yaml_free(value.prefix)
+//
+//    return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    tag_directive *yaml_tag_directive_t
+//
+//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    while (!STACK_EMPTY(&context, document.nodes)) {
+//        node yaml_node_t = POP(&context, document.nodes)
+//        yaml_free(node.tag)
+//        switch (node.type) {
+//            case YAML_SCALAR_NODE:
+//                yaml_free(node.data.scalar.value)
+//                break
+//            case YAML_SEQUENCE_NODE:
+//                STACK_DEL(&context, node.data.sequence.items)
+//                break
+//            case YAML_MAPPING_NODE:
+//                STACK_DEL(&context, node.data.mapping.pairs)
+//                break
+//            default:
+//                assert(0) // Should not happen.
+//        }
+//    }
+//    STACK_DEL(&context, document.nodes)
+//
+//    yaml_free(document.version_directive)
+//    for (tag_directive = document.tag_directives.start
+//            tag_directive != document.tag_directives.end
+//            tag_directive++) {
+//        yaml_free(tag_directive.handle)
+//        yaml_free(tag_directive.prefix)
+//    }
+//    yaml_free(document.tag_directives.start)
+//
+//    memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+//        return document.nodes.start + index - 1
+//    }
+//    return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (document.nodes.top != document.nodes.start) {
+//        return document.nodes.start
+//    }
+//    return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+//        tag *yaml_char_t, value *yaml_char_t, length int,
+//        style yaml_scalar_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    value_copy *yaml_char_t = NULL
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert(value) // Non-NULL value is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (length < 0) {
+//        length = strlen((char *)value)
+//    }
+//
+//    if (!yaml_check_utf8(value, length)) goto error
+//    value_copy = yaml_malloc(length+1)
+//    if (!value_copy) goto error
+//    memcpy(value_copy, value, length)
+//    value_copy[length] = '\0'
+//
+//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    yaml_free(tag_copy)
+//    yaml_free(value_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_item_t
+//        end *yaml_node_item_t
+//        top *yaml_node_item_t
+//    } items = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, items)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_pair_t
+//        end *yaml_node_pair_t
+//        top *yaml_node_pair_t
+//    } pairs = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, pairs)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+//        sequence int, item int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(sequence > 0
+//            && document.nodes.start + sequence <= document.nodes.top)
+//                            // Valid sequence id is required.
+//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+//                            // A sequence node is required.
+//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+//                            // Valid item id is required.
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[sequence-1].data.sequence.items, item))
+//        return 0
+//
+//    return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+//        mapping int, key int, value int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    pair yaml_node_pair_t
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(mapping > 0
+//            && document.nodes.start + mapping <= document.nodes.top)
+//                            // Valid mapping id is required.
+//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+//                            // A mapping node is required.
+//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+//                            // Valid key id is required.
+//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+//                            // Valid value id is required.
+//
+//    pair.key = key
+//    pair.value = value
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[mapping-1].data.mapping.pairs, pair))
+//        return 0
+//
+//    return 1
+//}
+//
+//
diff --git a/automation/vendor/gopkg.in/yaml.v2/decode.go b/automation/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 0000000..b13ab9f
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,683 @@
+package yaml
+
+import (
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+const (
+	documentNode = 1 << iota
+	mappingNode
+	sequenceNode
+	scalarNode
+	aliasNode
+)
+
+type node struct {
+	kind         int
+	line, column int
+	tag          string
+	value        string
+	implicit     bool
+	children     []*node
+	anchors      map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+	parser yaml_parser_t
+	event  yaml_event_t
+	doc    *node
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML emitter")
+	}
+
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+
+	yaml_parser_set_input_string(&p.parser, b)
+
+	p.skip()
+	if p.event.typ != yaml_STREAM_START_EVENT {
+		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+	}
+	p.skip()
+	return &p
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+func (p *parser) skip() {
+	if p.event.typ != yaml_NO_EVENT {
+		if p.event.typ == yaml_STREAM_END_EVENT {
+			failf("attempted to go past the end of stream; corrupted value?")
+		}
+		yaml_event_delete(&p.event)
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	switch p.event.typ {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+		return nil
+	default:
+		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+	}
+	panic("unreachable")
+}
+
+func (p *parser) node(kind int) *node {
+	return &node{
+		kind:   kind,
+		line:   p.event.start_mark.line,
+		column: p.event.start_mark.column,
+	}
+}
+
+func (p *parser) document() *node {
+	n := p.node(documentNode)
+	n.anchors = make(map[string]*node)
+	p.doc = n
+	p.skip()
+	n.children = append(n.children, p.parse())
+	if p.event.typ != yaml_DOCUMENT_END_EVENT {
+		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
+	}
+	p.skip()
+	return n
+}
+
+func (p *parser) alias() *node {
+	n := p.node(aliasNode)
+	n.value = string(p.event.anchor)
+	p.skip()
+	return n
+}
+
+func (p *parser) scalar() *node {
+	n := p.node(scalarNode)
+	n.value = string(p.event.value)
+	n.tag = string(p.event.tag)
+	n.implicit = p.event.implicit
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	return n
+}
+
+func (p *parser) sequence() *node {
+	n := p.node(sequenceNode)
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	for p.event.typ != yaml_SEQUENCE_END_EVENT {
+		n.children = append(n.children, p.parse())
+	}
+	p.skip()
+	return n
+}
+
+func (p *parser) mapping() *node {
+	n := p.node(mappingNode)
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	for p.event.typ != yaml_MAPPING_END_EVENT {
+		n.children = append(n.children, p.parse(), p.parse())
+	}
+	p.skip()
+	return n
+}
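+
+// Note that a mapping node's children alternate key, value, key, value,
+// which is why the decoder below walks mapping children in steps of two.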
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+	doc     *node
+	aliases map[string]bool
+	mapType reflect.Type
+	terrors []string
+}
+
+var (
+	mapItemType    = reflect.TypeOf(MapItem{})
+	durationType   = reflect.TypeOf(time.Duration(0))
+	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+	ifaceType      = defaultMapType.Elem()
+)
+
+func newDecoder() *decoder {
+	d := &decoder{mapType: defaultMapType}
+	d.aliases = make(map[string]bool)
+	return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+	if n.tag != "" {
+		tag = n.tag
+	}
+	value := n.value
+	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+		if len(value) > 10 {
+			value = " `" + value[:7] + "...`"
+		} else {
+			value = " `" + value + "`"
+		}
+	}
+	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+	terrlen := len(d.terrors)
+	err := u.UnmarshalYAML(func(v interface{}) (err error) {
+		defer handleErr(&err)
+		d.unmarshal(n, reflect.ValueOf(v))
+		if len(d.terrors) > terrlen {
+			issues := d.terrors[terrlen:]
+			d.terrors = d.terrors[:terrlen]
+			return &TypeError{issues}
+		}
+		return nil
+	})
+	if e, ok := err.(*TypeError); ok {
+		d.terrors = append(d.terrors, e.Errors...)
+		return false
+	}
+	if err != nil {
+		fail(err)
+	}
+	return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// the value's types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) {
+		return out, false, false
+	}
+	again := true
+	for again {
+		again = false
+		if out.Kind() == reflect.Ptr {
+			if out.IsNil() {
+				out.Set(reflect.New(out.Type().Elem()))
+			}
+			out = out.Elem()
+			again = true
+		}
+		if out.CanAddr() {
+			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+				good = d.callUnmarshaler(n, u)
+				return out, true, good
+			}
+		}
+	}
+	return out, false, false
+}
+
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+	switch n.kind {
+	case documentNode:
+		return d.document(n, out)
+	case aliasNode:
+		return d.alias(n, out)
+	}
+	out, unmarshaled, good := d.prepare(n, out)
+	if unmarshaled {
+		return good
+	}
+	switch n.kind {
+	case scalarNode:
+		good = d.scalar(n, out)
+	case mappingNode:
+		good = d.mapping(n, out)
+	case sequenceNode:
+		good = d.sequence(n, out)
+	default:
+		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+	}
+	return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+	if len(n.children) == 1 {
+		d.doc = n
+		d.unmarshal(n.children[0], out)
+		return true
+	}
+	return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+	an, ok := d.doc.anchors[n.value]
+	if !ok {
+		failf("unknown anchor '%s' referenced", n.value)
+	}
+	if d.aliases[n.value] {
+		failf("anchor '%s' value contains itself", n.value)
+	}
+	d.aliases[n.value] = true
+	good = d.unmarshal(an, out)
+	delete(d.aliases, n.value)
+	return good
+}
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+	for _, k := range out.MapKeys() {
+		out.SetMapIndex(k, zeroValue)
+	}
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+	var tag string
+	var resolved interface{}
+	if n.tag == "" && !n.implicit {
+		tag = yaml_STR_TAG
+		resolved = n.value
+	} else {
+		tag, resolved = resolve(n.tag, n.value)
+		if tag == yaml_BINARY_TAG {
+			data, err := base64.StdEncoding.DecodeString(resolved.(string))
+			if err != nil {
+				failf("!!binary value contains invalid base64 data")
+			}
+			resolved = string(data)
+		}
+	}
+	if resolved == nil {
+		if out.Kind() == reflect.Map && !out.CanAddr() {
+			resetMap(out)
+		} else {
+			out.Set(reflect.Zero(out.Type()))
+		}
+		return true
+	}
+	if s, ok := resolved.(string); ok && out.CanAddr() {
+		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
+			err := u.UnmarshalText([]byte(s))
+			if err != nil {
+				fail(err)
+			}
+			return true
+		}
+	}
+	switch out.Kind() {
+	case reflect.String:
+		if tag == yaml_BINARY_TAG {
+			out.SetString(resolved.(string))
+			good = true
+		} else if resolved != nil {
+			out.SetString(n.value)
+			good = true
+		}
+	case reflect.Interface:
+		if resolved == nil {
+			out.Set(reflect.Zero(out.Type()))
+		} else {
+			out.Set(reflect.ValueOf(resolved))
+		}
+		good = true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch resolved := resolved.(type) {
+		case int:
+			if !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case int64:
+			if !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				good = true
+			}
+		case uint64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case float64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					good = true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			good = true
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			good = true
+		case int64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case float64:
+			out.SetFloat(resolved)
+			good = true
+		}
+	case reflect.Ptr:
+		if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+			elem := reflect.New(out.Type().Elem())
+			elem.Elem().Set(reflect.ValueOf(resolved))
+			out.Set(elem)
+			good = true
+		}
+	}
+	if !good {
+		d.terror(n, tag, out)
+	}
+	return good
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+	l := len(n.children)
+
+	var iface reflect.Value
+	switch out.Kind() {
+	case reflect.Slice:
+		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Interface:
+		// No type hints. Will have to use a generic sequence.
+		iface = out
+		out = settableValueOf(make([]interface{}, l))
+	default:
+		d.terror(n, yaml_SEQ_TAG, out)
+		return false
+	}
+	et := out.Type().Elem()
+
+	j := 0
+	for i := 0; i < l; i++ {
+		e := reflect.New(et).Elem()
+		if ok := d.unmarshal(n.children[i], e); ok {
+			out.Index(j).Set(e)
+			j++
+		}
+	}
+	out.Set(out.Slice(0, j))
+	if iface.IsValid() {
+		iface.Set(out)
+	}
+	return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+	switch out.Kind() {
+	case reflect.Struct:
+		return d.mappingStruct(n, out)
+	case reflect.Slice:
+		return d.mappingSlice(n, out)
+	case reflect.Map:
+		// okay
+	case reflect.Interface:
+		if d.mapType.Kind() == reflect.Map {
+			iface := out
+			out = reflect.MakeMap(d.mapType)
+			iface.Set(out)
+		} else {
+			slicev := reflect.New(d.mapType).Elem()
+			if !d.mappingSlice(n, slicev) {
+				return false
+			}
+			out.Set(slicev)
+			return true
+		}
+	default:
+		d.terror(n, yaml_MAP_TAG, out)
+		return false
+	}
+	outt := out.Type()
+	kt := outt.Key()
+	et := outt.Elem()
+
+	mapType := d.mapType
+	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+		d.mapType = outt
+	}
+
+	if out.IsNil() {
+		out.Set(reflect.MakeMap(outt))
+	}
+	l := len(n.children)
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.children[i]) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		k := reflect.New(kt).Elem()
+		if d.unmarshal(n.children[i], k) {
+			kkind := k.Kind()
+			if kkind == reflect.Interface {
+				kkind = k.Elem().Kind()
+			}
+			if kkind == reflect.Map || kkind == reflect.Slice {
+				failf("invalid map key: %#v", k.Interface())
+			}
+			e := reflect.New(et).Elem()
+			if d.unmarshal(n.children[i+1], e) {
+				out.SetMapIndex(k, e)
+			}
+		}
+	}
+	d.mapType = mapType
+	return true
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+	outt := out.Type()
+	if outt.Elem() != mapItemType {
+		d.terror(n, yaml_MAP_TAG, out)
+		return false
+	}
+
+	mapType := d.mapType
+	d.mapType = outt
+
+	var slice []MapItem
+	var l = len(n.children)
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.children[i]) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		item := MapItem{}
+		k := reflect.ValueOf(&item.Key).Elem()
+		if d.unmarshal(n.children[i], k) {
+			v := reflect.ValueOf(&item.Value).Elem()
+			if d.unmarshal(n.children[i+1], v) {
+				slice = append(slice, item)
+			}
+		}
+	}
+	out.Set(reflect.ValueOf(slice))
+	d.mapType = mapType
+	return true
+}
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+	sinfo, err := getStructInfo(out.Type())
+	if err != nil {
+		panic(err)
+	}
+	name := settableValueOf("")
+	l := len(n.children)
+
+	var inlineMap reflect.Value
+	var elemType reflect.Type
+	if sinfo.InlineMap != -1 {
+		inlineMap = out.Field(sinfo.InlineMap)
+		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+		elemType = inlineMap.Type().Elem()
+	}
+
+	for i := 0; i < l; i += 2 {
+		ni := n.children[i]
+		if isMerge(ni) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		if !d.unmarshal(ni, name) {
+			continue
+		}
+		if info, ok := sinfo.FieldsMap[name.String()]; ok {
+			var field reflect.Value
+			if info.Inline == nil {
+				field = out.Field(info.Num)
+			} else {
+				field = out.FieldByIndex(info.Inline)
+			}
+			d.unmarshal(n.children[i+1], field)
+		} else if sinfo.InlineMap != -1 {
+			if inlineMap.IsNil() {
+				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+			}
+			value := reflect.New(elemType).Elem()
+			d.unmarshal(n.children[i+1], value)
+			inlineMap.SetMapIndex(name, value)
+		}
+	}
+	return true
+}
+
+func failWantMap() {
+	failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+	switch n.kind {
+	case mappingNode:
+		d.unmarshal(n, out)
+	case aliasNode:
+		an, ok := d.doc.anchors[n.value]
+		if ok && an.kind != mappingNode {
+			failWantMap()
+		}
+		d.unmarshal(n, out)
+	case sequenceNode:
+		// Step backwards as earlier nodes take precedence.
+		for i := len(n.children) - 1; i >= 0; i-- {
+			ni := n.children[i]
+			if ni.kind == aliasNode {
+				an, ok := d.doc.anchors[ni.value]
+				if ok && an.kind != mappingNode {
+					failWantMap()
+				}
+			} else if ni.kind != mappingNode {
+				failWantMap()
+			}
+			d.unmarshal(ni, out)
+		}
+	default:
+		failWantMap()
+	}
+}
+
+func isMerge(n *node) bool {
+	return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
+}
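+
+// For reference, the "<<" merge convention handled above means that in an
+// illustrative document such as
+//
+//	base: &base {a: 1, b: 2}
+//	derived:
+//	  <<: *base
+//	  b: 3
+//
+// "derived" decodes to map[a:1 b:3], the explicit b taking precedence
+// over the merged value.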
diff --git a/automation/vendor/gopkg.in/yaml.v2/emitterc.go b/automation/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 0000000..2befd55
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+	"bytes"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) {
+		return yaml_emitter_flush(emitter)
+	}
+	return true
+}
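+
+// The "+5" headroom in flush and the writers below appears to reserve
+// space for the widest single write, such as a 4-byte UTF-8 sequence,
+// with a byte to spare (a margin inherited from libyaml).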
+
+// Put a character to the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.buffer[emitter.buffer_pos] = value
+	emitter.buffer_pos++
+	emitter.column++
+	return true
+}
+
+// Put a line break to the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	switch emitter.line_break {
+	case yaml_CR_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\r'
+		emitter.buffer_pos += 1
+	case yaml_LN_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\n'
+		emitter.buffer_pos += 1
+	case yaml_CRLN_BREAK:
+		emitter.buffer[emitter.buffer_pos+0] = '\r'
+		emitter.buffer[emitter.buffer_pos+1] = '\n'
+		emitter.buffer_pos += 2
+	default:
+		panic("unknown line break setting")
+	}
+	emitter.column = 0
+	emitter.line++
+	return true
+}
+
+// Copy a character from a string into buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	p := emitter.buffer_pos
+	w := width(s[*i])
+	switch w {
+	case 4:
+		emitter.buffer[p+3] = s[*i+3]
+		fallthrough
+	case 3:
+		emitter.buffer[p+2] = s[*i+2]
+		fallthrough
+	case 2:
+		emitter.buffer[p+1] = s[*i+1]
+		fallthrough
+	case 1:
+		emitter.buffer[p+0] = s[*i+0]
+	default:
+		panic("unknown character width")
+	}
+	emitter.column++
+	emitter.buffer_pos += w
+	*i += w
+	return true
+}
+
+// Write a whole string into buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+	for i := 0; i < len(s); {
+		if !write(emitter, s, &i) {
+			return false
+		}
+	}
+	return true
+}
+
+// Copy a line break character from a string into buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if s[*i] == '\n' {
+		if !put_break(emitter) {
+			return false
+		}
+		*i++
+	} else {
+		if !write(emitter, s, i) {
+			return false
+		}
+		emitter.column = 0
+		emitter.line++
+	}
+	return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_EMITTER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.events = append(emitter.events, *event)
+	for !yaml_emitter_need_more_events(emitter) {
+		event := &emitter.events[emitter.events_head]
+		if !yaml_emitter_analyze_event(emitter, event) {
+			return false
+		}
+		if !yaml_emitter_state_machine(emitter, event) {
+			return false
+		}
+		yaml_event_delete(event)
+		emitter.events_head++
+	}
+	return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+//  - 1 event for DOCUMENT-START
+//  - 2 events for SEQUENCE-START
+//  - 3 events for MAPPING-START
+//
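+// This lookahead gives the emitter enough context for layout decisions
+// up front, such as whether a document or collection is empty and whether
+// a mapping key is simple enough to emit inline (the apparent rationale
+// inherited from libyaml).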
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+	if emitter.events_head == len(emitter.events) {
+		return true
+	}
+	var accumulate int
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_DOCUMENT_START_EVENT:
+		accumulate = 1
+	case yaml_SEQUENCE_START_EVENT:
+		accumulate = 2
+	case yaml_MAPPING_START_EVENT:
+		accumulate = 3
+	default:
+		return false
+	}
+	if len(emitter.events)-emitter.events_head > accumulate {
+		return false
+	}
+	var level int
+	for i := emitter.events_head; i < len(emitter.events); i++ {
+		switch emitter.events[i].typ {
+		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+			level++
+		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+			level--
+		}
+		if level == 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+		}
+	}
+
+	// [Go] Do we actually need to copy this given garbage collection
+	// and the lack of deallocating destructors?
+	tag_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(tag_copy.handle, value.handle)
+	copy(tag_copy.prefix, value.prefix)
+	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+	return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+	emitter.indents = append(emitter.indents, emitter.indent)
+	if emitter.indent < 0 {
+		if flow {
+			emitter.indent = emitter.best_indent
+		} else {
+			emitter.indent = 0
+		}
+	} else if !indentless {
+		emitter.indent += emitter.best_indent
+	}
+	return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	switch emitter.state {
+	case yaml_EMIT_STREAM_START_STATE:
+		return yaml_emitter_emit_stream_start(emitter, event)
+
+	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, true)
+
+	case yaml_EMIT_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, false)
+
+	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+		return yaml_emitter_emit_document_content(emitter, event)
+
+	case yaml_EMIT_DOCUMENT_END_STATE:
+		return yaml_emitter_emit_document_end(emitter, event)
+
+	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_END_STATE:
+		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+	}
+	panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_STREAM_START_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+	}
+	if emitter.encoding == yaml_ANY_ENCODING {
+		emitter.encoding = event.encoding
+		if emitter.encoding == yaml_ANY_ENCODING {
+			emitter.encoding = yaml_UTF8_ENCODING
+		}
+	}
+	if emitter.best_indent < 2 || emitter.best_indent > 9 {
+		emitter.best_indent = 2
+	}
+	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+		emitter.best_width = 80
+	}
+	if emitter.best_width < 0 {
+		emitter.best_width = 1<<31 - 1
+	}
+	if emitter.line_break == yaml_ANY_BREAK {
+		emitter.line_break = yaml_LN_BREAK
+	}
+
+	emitter.indent = -1
+	emitter.line = 0
+	emitter.column = 0
+	emitter.whitespace = true
+	emitter.indention = true
+
+	if emitter.encoding != yaml_UTF8_ENCODING {
+		if !yaml_emitter_write_bom(emitter) {
+			return false
+		}
+	}
+	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+	return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+	if event.typ == yaml_DOCUMENT_START_EVENT {
+
+		if event.version_directive != nil {
+			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(event.tag_directives); i++ {
+			tag_directive := &event.tag_directives[i]
+			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+				return false
+			}
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(default_tag_directives); i++ {
+			tag_directive := &default_tag_directives[i]
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+				return false
+			}
+		}
+
+		implicit := event.implicit
+		if !first || emitter.canonical {
+			implicit = false
+		}
+
+		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if event.version_directive != nil {
+			implicit = false
+			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if len(event.tag_directives) > 0 {
+			implicit = false
+			for i := 0; i < len(event.tag_directives); i++ {
+				tag_directive := &event.tag_directives[i]
+				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+					return false
+				}
+				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+					return false
+				}
+				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+					return false
+				}
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		if yaml_emitter_check_empty_document(emitter) {
+			implicit = false
+		}
+		if !implicit {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+				return false
+			}
+			if emitter.canonical {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+		return true
+	}
+
+	if event.typ == yaml_STREAM_END_EVENT {
+		if emitter.open_ended {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_flush(emitter) {
+			return false
+		}
+		emitter.state = yaml_EMIT_END_STATE
+		return true
+	}
+
+	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_DOCUMENT_END_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !event.implicit {
+		// [Go] Allocate the slice elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	if !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+	emitter.tag_directives = emitter.tag_directives[:0]
+	return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if emitter.canonical || emitter.column > emitter.best_width {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+	root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+	emitter.root_context = root
+	emitter.sequence_context = sequence
+	emitter.mapping_context = mapping
+	emitter.simple_key_context = simple_key
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		return yaml_emitter_emit_alias(emitter, event)
+	case yaml_SCALAR_EVENT:
+		return yaml_emitter_emit_scalar(emitter, event)
+	case yaml_SEQUENCE_START_EVENT:
+		return yaml_emitter_emit_sequence_start(emitter, event)
+	case yaml_MAPPING_START_EVENT:
+		return yaml_emitter_emit_mapping_start(emitter, event)
+	default:
+		return yaml_emitter_set_emitter_error(emitter,
+			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+	}
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_select_scalar_style(emitter, event) {
+		return false
+	}
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if !yaml_emitter_increase_indent(emitter, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_scalar(emitter) {
+		return false
+	}
+	emitter.indent = emitter.indents[len(emitter.indents)-1]
+	emitter.indents = emitter.indents[:len(emitter.indents)-1]
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+		yaml_emitter_check_empty_sequence(emitter) {
+		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+	}
+	return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+		yaml_emitter_check_empty_mapping(emitter) {
+		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+	}
+	return true
+}
+
+// Check if the document content is an empty scalar.
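+// Like the C libyaml it mirrors, this port stubs the check out, so empty
+// documents are never given special treatment.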
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+	return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
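+// Mirroring libyaml, anything longer than 128 bytes is rejected so that
+// simple keys stay comfortably on a single line.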
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+	case yaml_SCALAR_EVENT:
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+	case yaml_SEQUENCE_START_EVENT:
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	case yaml_MAPPING_START_EVENT:
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	default:
+		return false
+	}
+	return length <= 128
+}
+
+// Determine an acceptable scalar style.
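+// Styles are only ever downgraded: plain falls back to single-quoted,
+// single-quoted to double-quoted, and the block styles to double-quoted,
+// since double quoting can represent any value in any context.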
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+	}
+
+	style := event.scalar_style()
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	c := []byte{'&'}
+	if emitter.anchor_data.alias {
+		c[0] = '*'
+	}
+	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+		return true
+	}
+	if len(emitter.tag_data.handle) > 0 {
+		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+			return false
+		}
+		if len(emitter.tag_data.suffix) > 0 {
+			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+				return false
+			}
+		}
+	} else {
+		// [Go] Allocate these slices elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+	handle := tag_directive.handle
+	prefix := tag_directive.prefix
+	if len(handle) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+	}
+	if handle[0] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+	}
+	if handle[len(handle)-1] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+	}
+	for i := 1; i < len(handle)-1; i += width(handle[i]) {
+		if !is_alpha(handle, i) {
+			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+		}
+	}
+	if len(prefix) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+	}
+	return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+	if len(anchor) == 0 {
+		problem := "anchor value must not be empty"
+		if alias {
+			problem = "alias value must not be empty"
+		}
+		return yaml_emitter_set_emitter_error(emitter, problem)
+	}
+	for i := 0; i < len(anchor); i += width(anchor[i]) {
+		if !is_alpha(anchor, i) {
+			problem := "anchor value must contain alphanumerical characters only"
+			if alias {
+				problem = "alias value must contain alphanumerical characters only"
+			}
+			return yaml_emitter_set_emitter_error(emitter, problem)
+		}
+	}
+	emitter.anchor_data.anchor = anchor
+	emitter.anchor_data.alias = alias
+	return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+	if len(tag) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+	}
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		tag_directive := &emitter.tag_directives[i]
+		if bytes.HasPrefix(tag, tag_directive.prefix) {
+			emitter.tag_data.handle = tag_directive.handle
+			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+			return true
+		}
+	}
+	emitter.tag_data.suffix = tag
+	return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	var (
+		block_indicators   = false
+		flow_indicators    = false
+		line_breaks        = false
+		special_characters = false
+
+		leading_space  = false
+		leading_break  = false
+		trailing_space = false
+		trailing_break = false
+		break_space    = false
+		space_break    = false
+
+		preceded_by_whitespace = false
+		followed_by_whitespace  = false
+		previous_space          = false
+		previous_break          = false
+	)
+
+	emitter.scalar_data.value = value
+
+	if len(value) == 0 {
+		emitter.scalar_data.multiline = false
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = true
+		emitter.scalar_data.single_quoted_allowed = true
+		emitter.scalar_data.block_allowed = false
+		return true
+	}
+
+	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+		block_indicators = true
+		flow_indicators = true
+	}
+
+	preceded_by_whitespace = true
+	for i, w := 0, 0; i < len(value); i += w {
+		w = width(value[i])
+		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+		if i == 0 {
+			switch value[i] {
+			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+				flow_indicators = true
+				block_indicators = true
+			case '?', ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '-':
+				if followed_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		} else {
+			switch value[i] {
+			case ',', '?', '[', ']', '{', '}':
+				flow_indicators = true
+			case ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '#':
+			if preceded_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		}
+
+		if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+			special_characters = true
+		}
+		if is_space(value, i) {
+			if i == 0 {
+				leading_space = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_space = true
+			}
+			if previous_break {
+				break_space = true
+			}
+			previous_space = true
+			previous_break = false
+		} else if is_break(value, i) {
+			line_breaks = true
+			if i == 0 {
+				leading_break = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_break = true
+			}
+			if previous_space {
+				space_break = true
+			}
+			previous_space = false
+			previous_break = true
+		} else {
+			previous_space = false
+			previous_break = false
+		}
+
+		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+		preceded_by_whitespace = is_blankz(value, i)
+	}
+
+	emitter.scalar_data.multiline = line_breaks
+	emitter.scalar_data.flow_plain_allowed = true
+	emitter.scalar_data.block_plain_allowed = true
+	emitter.scalar_data.single_quoted_allowed = true
+	emitter.scalar_data.block_allowed = true
+
+	if leading_space || leading_break || trailing_space || trailing_break {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if trailing_space {
+		emitter.scalar_data.block_allowed = false
+	}
+	if break_space {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+	}
+	if space_break || special_characters {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+		emitter.scalar_data.block_allowed = false
+	}
+	if line_breaks {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if flow_indicators {
+		emitter.scalar_data.flow_plain_allowed = false
+	}
+	if block_indicators {
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	emitter.anchor_data.anchor = nil
+	emitter.tag_data.handle = nil
+	emitter.tag_data.suffix = nil
+	emitter.scalar_data.value = nil
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+			return false
+		}
+
+	case yaml_SCALAR_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+		if !yaml_emitter_analyze_scalar(emitter, event.value) {
+			return false
+		}
+
+	case yaml_SEQUENCE_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+
+	case yaml_MAPPING_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+	if !flush(emitter) {
+		return false
+	}
+	pos := emitter.buffer_pos
+	emitter.buffer[pos+0] = '\xEF'
+	emitter.buffer[pos+1] = '\xBB'
+	emitter.buffer[pos+2] = '\xBF'
+	emitter.buffer_pos += 3
+	return true
+}
+
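+// Move to a fresh line unless the cursor already sits at the start of a
+// clean line at or before the indent column, then pad with spaces up to the
+// indent.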
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+	indent := emitter.indent
+	if indent < 0 {
+		indent = 0
+	}
+	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+		if !put_break(emitter) {
+			return false
+		}
+	}
+	for emitter.column < indent {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	emitter.whitespace = true
+	emitter.indention = true
+	return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, indicator) {
+		return false
+	}
+	emitter.whitespace = is_whitespace
+	emitter.indention = (emitter.indention && is_indention)
+	emitter.open_ended = false
+	return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	for i := 0; i < len(value); {
+		var must_write bool
+		switch value[i] {
+		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+			must_write = true
+		default:
+			must_write = is_alpha(value, i)
+		}
+		if must_write {
+			if !write(emitter, value, &i) {
+				return false
+			}
+		} else {
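+			// Bytes outside the URI character set are percent-encoded, one
+			// %XX escape per UTF-8 octet, so 'é' (0xC3 0xA9) becomes %C3%A9.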
+			w := width(value[i])
+			for k := 0; k < w; k++ {
+				octet := value[i]
+				i++
+				if !put(emitter, '%') {
+					return false
+				}
+
+				c := octet >> 4
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+
+				c = octet & 0x0f
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+			}
+		}
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
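+// Write the value unquoted, folding at spaces once the column passes
+// best_width; folding is disabled (allow_breaks is false) inside simple
+// keys, where the value must stay on one line.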
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+
+	emitter.whitespace = false
+	emitter.indention = false
+	if emitter.root_context {
+		emitter.open_ended = true
+	}
+
+	return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+		return false
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if value[i] == '\'' {
+				if !put(emitter, '\'') {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	spaces := false
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+		return false
+	}
+
+	for i := 0; i < len(value); {
+		if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+			is_bom(value, i) || is_break(value, i) ||
+			value[i] == '"' || value[i] == '\\' {
+
+			octet := value[i]
+
+			var w int
+			var v rune
+			switch {
+			case octet&0x80 == 0x00:
+				w, v = 1, rune(octet&0x7F)
+			case octet&0xE0 == 0xC0:
+				w, v = 2, rune(octet&0x1F)
+			case octet&0xF0 == 0xE0:
+				w, v = 3, rune(octet&0x0F)
+			case octet&0xF8 == 0xF0:
+				w, v = 4, rune(octet&0x07)
+			}
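+			// Fold any UTF-8 continuation bytes into v so the escape below
+			// covers the full code point rather than a single octet.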
+			for k := 1; k < w; k++ {
+				octet = value[i+k]
+				v = (v << 6) + (rune(octet) & 0x3F)
+			}
+			i += w
+
+			if !put(emitter, '\\') {
+				return false
+			}
+
+			var ok bool
+			switch v {
+			case 0x00:
+				ok = put(emitter, '0')
+			case 0x07:
+				ok = put(emitter, 'a')
+			case 0x08:
+				ok = put(emitter, 'b')
+			case 0x09:
+				ok = put(emitter, 't')
+			case 0x0A:
+				ok = put(emitter, 'n')
+			case 0x0b:
+				ok = put(emitter, 'v')
+			case 0x0c:
+				ok = put(emitter, 'f')
+			case 0x0d:
+				ok = put(emitter, 'r')
+			case 0x1b:
+				ok = put(emitter, 'e')
+			case 0x22:
+				ok = put(emitter, '"')
+			case 0x5c:
+				ok = put(emitter, '\\')
+			case 0x85:
+				ok = put(emitter, 'N')
+			case 0xA0:
+				ok = put(emitter, '_')
+			case 0x2028:
+				ok = put(emitter, 'L')
+			case 0x2029:
+				ok = put(emitter, 'P')
+			default:
+				if v <= 0xFF {
+					ok = put(emitter, 'x')
+					w = 2
+				} else if v <= 0xFFFF {
+					ok = put(emitter, 'u')
+					w = 4
+				} else {
+					ok = put(emitter, 'U')
+					w = 8
+				}
+				for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+					digit := byte((v >> uint(k)) & 0x0F)
+					if digit < 10 {
+						ok = put(emitter, digit+'0')
+					} else {
+						ok = put(emitter, digit+'A'-10)
+					}
+				}
+			}
+			if !ok {
+				return false
+			}
+			spaces = false
+		} else if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				if is_space(value, i+1) {
+					if !put(emitter, '\\') {
+						return false
+					}
+				}
+				i += width(value[i])
+			} else if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = true
+		} else {
+			if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+	if is_space(value, 0) || is_break(value, 0) {
+		indent_hint := []byte{'0' + byte(emitter.best_indent)}
+		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+			return false
+		}
+	}
+
+	emitter.open_ended = false
+
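+	// Choose the chomping indicator: '-' strips the final break when the
+	// content does not end in one, '+' keeps every trailing break (needed
+	// when the content ends in more than one, or is a lone break), and no
+	// indicator leaves the default clipping, which keeps exactly one break.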
+	var chomp_hint [1]byte
+	if len(value) == 0 {
+		chomp_hint[0] = '-'
+	} else {
+		i := len(value) - 1
+		for value[i]&0xC0 == 0x80 {
+			i--
+		}
+		if !is_break(value, i) {
+			chomp_hint[0] = '-'
+		} else if i == 0 {
+			chomp_hint[0] = '+'
+			emitter.open_ended = true
+		} else {
+			i--
+			for value[i]&0xC0 == 0x80 {
+				i--
+			}
+			if is_break(value, i) {
+				chomp_hint[0] = '+'
+				emitter.open_ended = true
+			}
+		}
+	}
+	if chomp_hint[0] != 0 {
+		if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+	if !put_break(emitter) {
+		return false
+	}
+	emitter.indention = true
+	emitter.whitespace = true
+	breaks := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+
+	return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+
+	if !put_break(emitter) {
+		return false
+	}
+	emitter.indention = true
+	emitter.whitespace = true
+
+	breaks := true
+	leading_spaces := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			if !breaks && !leading_spaces && value[i] == '\n' {
+				k := 0
+				for is_break(value, k) {
+					k += width(value[k])
+				}
+				if !is_blankz(value, k) {
+					if !put_break(emitter) {
+						return false
+					}
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				leading_spaces = is_blank(value, i)
+			}
+			if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+	return true
+}
diff --git a/automation/vendor/gopkg.in/yaml.v2/encode.go b/automation/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 0000000..84f8499
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,306 @@
+package yaml
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type encoder struct {
+	emitter yaml_emitter_t
+	event   yaml_event_t
+	out     []byte
+	flow    bool
+}
+
+func newEncoder() (e *encoder) {
+	e = &encoder{}
+	e.must(yaml_emitter_initialize(&e.emitter))
+	yaml_emitter_set_output_string(&e.emitter, &e.out)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
+	e.emit()
+	e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
+	e.emit()
+	return e
+}
+
+func (e *encoder) finish() {
+	e.must(yaml_document_end_event_initialize(&e.event, true))
+	e.emit()
+	e.emitter.open_ended = false
+	e.must(yaml_stream_end_event_initialize(&e.event))
+	e.emit()
+}
+
+func (e *encoder) destroy() {
+	yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+	// This will internally delete the e.event value.
+	if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
+		e.must(false)
+	}
+}
+
+func (e *encoder) must(ok bool) {
+	if !ok {
+		msg := e.emitter.problem
+		if msg == "" {
+			msg = "unknown problem generating YAML content"
+		}
+		failf("%s", msg)
+	}
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+	if !in.IsValid() {
+		e.nilv()
+		return
+	}
+	iface := in.Interface()
+	if m, ok := iface.(Marshaler); ok {
+		v, err := m.MarshalYAML()
+		if err != nil {
+			fail(err)
+		}
+		if v == nil {
+			e.nilv()
+			return
+		}
+		in = reflect.ValueOf(v)
+	} else if m, ok := iface.(encoding.TextMarshaler); ok {
+		text, err := m.MarshalText()
+		if err != nil {
+			fail(err)
+		}
+		in = reflect.ValueOf(string(text))
+	}
+	switch in.Kind() {
+	case reflect.Interface:
+		if in.IsNil() {
+			e.nilv()
+		} else {
+			e.marshal(tag, in.Elem())
+		}
+	case reflect.Map:
+		e.mapv(tag, in)
+	case reflect.Ptr:
+		if in.IsNil() {
+			e.nilv()
+		} else {
+			e.marshal(tag, in.Elem())
+		}
+	case reflect.Struct:
+		e.structv(tag, in)
+	case reflect.Slice:
+		if in.Type().Elem() == mapItemType {
+			e.itemsv(tag, in)
+		} else {
+			e.slicev(tag, in)
+		}
+	case reflect.String:
+		e.stringv(tag, in)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if in.Type() == durationType {
+			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+		} else {
+			e.intv(tag, in)
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		e.uintv(tag, in)
+	case reflect.Float32, reflect.Float64:
+		e.floatv(tag, in)
+	case reflect.Bool:
+		e.boolv(tag, in)
+	default:
+		panic("cannot marshal type: " + in.Type().String())
+	}
+}
+
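+// mapv emits a Go map as a YAML mapping, sorting the keys so that the
+// generated output is deterministic across runs.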
+func (e *encoder) mapv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		keys := keyList(in.MapKeys())
+		sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+		for _, item := range slice {
+			e.marshal("", reflect.ValueOf(item.Key))
+			e.marshal("", reflect.ValueOf(item.Value))
+		}
+	})
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = in.FieldByIndex(info.Inline)
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
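+// mappingv emits MAPPING-START, runs f to emit the key/value pairs, and
+// closes with MAPPING-END; a pending e.flow request selects flow style for
+// this one mapping and is reset immediately.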
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	f()
+	e.must(yaml_mapping_end_event_initialize(&e.event))
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but these should be marshalled quoted for
+// the time being for compatibility with other parsers.
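+//
+// Under YAML 1.1, for example, the unquoted scalar 1:20 is read as the
+// sexagesimal integer 80, so matching strings must be emitted quoted to
+// round-trip as strings.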
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	rtag, rs := resolve("", s)
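+	// resolve reports how a plain rendering of s would be re-read: invalid
+	// UTF-8 resolves to !!binary, and anything that would come back as a
+	// non-string (number, bool, null, ...) forces double quoting below.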
+	if rtag == yaml_BINARY_TAG {
+		if tag == "" || tag == yaml_STR_TAG {
+			tag = rtag
+			s = rs.(string)
+		} else if tag == yaml_BINARY_TAG {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		} else {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+	}
+	if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	} else if strings.Contains(s, "\n") {
+		style = yaml_LITERAL_SCALAR_STYLE
+	} else {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// FIXME: Handle 64 bits here.
+	s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+	implicit := tag == ""
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.emit()
+}
diff --git a/automation/vendor/gopkg.in/yaml.v2/parserc.go b/automation/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 0000000..0a7037a
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1096 @@
+package yaml
+
+import (
+	"bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream               ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document    ::= block_node DOCUMENT-END*
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          | properties (block_content | indentless_block_sequence)?
+//                          | block_content
+//                          | indentless_block_sequence
+// block_node           ::= ALIAS
+//                          | properties block_content?
+//                          | block_content
+// flow_node            ::= ALIAS
+//                          | properties flow_content?
+//                          | flow_content
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content        ::= block_collection | flow_collection | SCALAR
+// flow_content         ::= flow_collection | SCALAR
+// block_collection     ::= block_sequence | block_mapping
+// flow_collection      ::= flow_sequence | flow_mapping
+// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                          BLOCK-END
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                          flow_sequence_entry?
+//                          FLOW-SEQUENCE-END
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                          flow_mapping_entry?
+//                          FLOW-MAPPING-END
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
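+//
+// As an example, the single-document stream "a: 1" is delivered as the event
+// sequence STREAM-START, DOCUMENT-START (implicit), MAPPING-START,
+// SCALAR("a"), SCALAR("1"), MAPPING-END, DOCUMENT-END (implicit), STREAM-END.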
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+		return &parser.tokens[parser.tokens_head]
+	}
+	return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+	parser.token_available = false
+	parser.tokens_parsed++
+	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+	parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+	// Erase the event object.
+	*event = yaml_event_t{}
+
+	// No events after the end of the stream or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+		return true
+	}
+
+	// Generate the next event.
+	return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+	//trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+	switch parser.state {
+	case yaml_PARSE_STREAM_START_STATE:
+		return yaml_parser_parse_stream_start(parser, event)
+
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, true)
+
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, false)
+
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return yaml_parser_parse_document_content(parser, event)
+
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return yaml_parser_parse_document_end(parser, event)
+
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, true, false)
+
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return yaml_parser_parse_node(parser, event, true, true)
+
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, false, false)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_block_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+	default:
+		panic("invalid parser state")
+	}
+}
+
+// Parse the production:
+// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                                                    ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+		token.typ == yaml_DOCUMENT_START_TOKEN ||
+		token.typ == yaml_DOCUMENT_END_TOKEN ||
+		token.typ == yaml_STREAM_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		return yaml_parser_process_empty_scalar(parser, event,
+			token.start_mark)
+	}
+	return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                                     *************
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	implicit := true
+	if token.typ == yaml_DOCUMENT_END_TOKEN {
+		end_mark = token.end_mark
+		skip_token(parser)
+		implicit = false
+	}
+
+	parser.tag_directives = parser.tag_directives[:0]
+
+	parser.state = yaml_PARSE_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_DOCUMENT_END_EVENT,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		implicit:   implicit,
+	}
+	return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          *****
+//                          | properties (block_content | indentless_block_sequence)?
+//                            **********  *
+//                          | block_content | indentless_block_sequence
+//                            *
+// block_node           ::= ALIAS
+//                          *****
+//                          | properties block_content?
+//                            ********** *
+//                          | block_content
+//                            *
+// flow_node            ::= ALIAS
+//                          *****
+//                          | properties flow_content?
+//                            ********** *
+//                          | flow_content
+//                            *
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+//                          *************************
+// block_content        ::= block_collection | flow_collection | SCALAR
+//                                                               ******
+// flow_content         ::= flow_collection | SCALAR
+//                                            ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_ALIAS_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		*event = yaml_event_t{
+			typ:        yaml_ALIAS_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+			anchor:     token.value,
+		}
+		skip_token(parser)
+		return true
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	var tag_token bool
+	var tag_handle, tag_suffix, anchor []byte
+	var tag_mark yaml_mark_t
+	if token.typ == yaml_ANCHOR_TOKEN {
+		anchor = token.value
+		start_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_TAG_TOKEN {
+			tag_token = true
+			tag_handle = token.value
+			tag_suffix = token.suffix
+			tag_mark = token.start_mark
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	} else if token.typ == yaml_TAG_TOKEN {
+		tag_token = true
+		tag_handle = token.value
+		tag_suffix = token.suffix
+		start_mark = token.start_mark
+		tag_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_ANCHOR_TOKEN {
+			anchor = token.value
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	var tag []byte
+	if tag_token {
+		if len(tag_handle) == 0 {
+			tag = tag_suffix
+			tag_suffix = nil
+		} else {
+			for i := range parser.tag_directives {
+				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+					tag = append(tag, tag_suffix...)
+					break
+				}
+			}
+			if len(tag) == 0 {
+				yaml_parser_set_parser_error_context(parser,
+					"while parsing a node", start_mark,
+					"found undefined tag handle", tag_mark)
+				return false
+			}
+		}
+	}
+
+	implicit := len(tag) == 0
+	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_SCALAR_TOKEN {
+		var plain_implicit, quoted_implicit bool
+		end_mark = token.end_mark
+		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+			plain_implicit = true
+		} else if len(tag) == 0 {
+			quoted_implicit = true
+		}
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			value:           token.value,
+			implicit:        plain_implicit,
+			quoted_implicit: quoted_implicit,
+			style:           yaml_style_t(token.style),
+		}
+		skip_token(parser)
+		return true
+	}
+	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+		// [Go] Some of the events below can be merged as they differ only in style.
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+		}
+		return true
+	}
+	if len(anchor) > 0 || len(tag) > 0 {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			implicit:        implicit,
+			quoted_implicit: false,
+			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+		}
+		return true
+	}
+
+	context := "while parsing a flow node"
+	if block {
+		context = "while parsing a block node"
+	}
+	yaml_parser_set_parser_error_context(parser, context, start_mark,
+		"did not find expected node content", token.start_mark)
+	return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+//                    ********************  *********** *             *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		if token == nil {
+			// peek_token returns nil on a scan error.
+			return false
+		}
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	}
+	if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block collection", context_mark,
+		"did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+//                           *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+			token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		}
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          *******************
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                            *** *
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//
+//                          BLOCK-END
+//                          *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		if token == nil {
+			// peek_token returns nil on a scan error.
+			return false
+		}
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_KEY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	} else if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block mapping", context_mark,
+		"did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//
+//                          ((KEY block_node_or_indentless_sequence?)?
+//
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                           ***** *
+//                          BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		}
+		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          *******************
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                           *                   **********
+//                          flow_sequence_entry?
+//                          *
+//                          FLOW-SEQUENCE-END
+//                          *****************
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		if token == nil {
+			// peek_token returns nil on a scan error.
+			return false
+		}
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow sequence", context_mark,
+					"did not find expected ',' or ']'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+			*event = yaml_event_t{
+				typ:        yaml_MAPPING_START_EVENT,
+				start_mark: token.start_mark,
+				end_mark:   token.end_mark,
+				implicit:   true,
+				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+			}
+			skip_token(parser)
+			return true
+		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+
+	skip_token(parser)
+	return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                      *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_VALUE_TOKEN &&
+		token.typ != yaml_FLOW_ENTRY_TOKEN &&
+		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+		return yaml_parser_parse_node(parser, event, false, false)
+	}
+	mark := token.end_mark
+	skip_token(parser)
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+	return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                      ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                                      *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          ******************
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                           *                  **********
+//                          flow_mapping_entry?
+//                          ******************
+//                          FLOW-MAPPING-END
+//                          ****************
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *           *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		if token == nil {
+			// peek_token returns nil on a scan error.
+			return false
+		}
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow mapping", context_mark,
+					"did not find expected ',' or '}'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+			if token.typ != yaml_VALUE_TOKEN &&
+				token.typ != yaml_FLOW_ENTRY_TOKEN &&
+				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+				return yaml_parser_parse_node(parser, event, false, false)
+			} else {
+				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+			}
+		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                   *                  ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if empty {
+		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+	*event = yaml_event_t{
+		typ:        yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		value:      nil, // Empty
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	return true
+}
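+
+// For example (illustrative): for the mapping "key:" with no value, the
+// block-mapping states call this to synthesize the missing node as a
+// zero-length plain scalar, which later resolves to null.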
+
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
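+
+// As a hedged illustration (added comment, not upstream): when
+// yaml_parser_parse_node resolves a tag, it replaces a registered handle
+// with its prefix and appends the suffix, so with the defaults above the
+// shorthand "!!str" (handle "!!", suffix "str") expands to:
+//
+//	"tag:yaml.org,2002:" + "str" == "tag:yaml.org,2002:str"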
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+	version_directive_ref **yaml_version_directive_t,
+	tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+	var version_directive *yaml_version_directive_t
+	var tag_directives []yaml_tag_directive_t
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+			if version_directive != nil {
+				yaml_parser_set_parser_error(parser,
+					"found duplicate %YAML directive", token.start_mark)
+				return false
+			}
+			if token.major != 1 || token.minor != 1 {
+				yaml_parser_set_parser_error(parser,
+					"found incompatible YAML document", token.start_mark)
+				return false
+			}
+			version_directive = &yaml_version_directive_t{
+				major: token.major,
+				minor: token.minor,
+			}
+		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+			value := yaml_tag_directive_t{
+				handle: token.value,
+				prefix: token.prefix,
+			}
+			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+				return false
+			}
+			tag_directives = append(tag_directives, value)
+		}
+
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+	}
+
+	for i := range default_tag_directives {
+		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+			return false
+		}
+	}
+
+	if version_directive_ref != nil {
+		*version_directive_ref = version_directive
+	}
+	if tag_directives_ref != nil {
+		*tag_directives_ref = tag_directives
+	}
+	return true
+}
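+
+// Illustrative sketch (added comment; the "!e!" handle is hypothetical):
+// given the prologue
+//
+//	%YAML 1.1
+//	%TAG !e! tag:example.com,2017:
+//	---
+//
+// this function yields version_directive = &{major: 1, minor: 1} and appends
+// {handle: "!e!", prefix: "tag:example.com,2017:"} to the directives, while
+// "%YAML 1.2" would fail with "found incompatible YAML document".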
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+	for i := range parser.tag_directives {
+		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+		}
+	}
+
+	// [Go] I suspect the copy is unnecessary. This was likely done
+	// because there was no way to track ownership of the data.
+	value_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(value_copy.handle, value.handle)
+	copy(value_copy.prefix, value.prefix)
+	parser.tag_directives = append(parser.tag_directives, value_copy)
+	return true
+}
diff --git a/automation/vendor/gopkg.in/yaml.v2/readerc.go b/automation/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 0000000..f450791
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,394 @@
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return false.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we had enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
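+
+// For example (illustrative): an input beginning with the bytes 0xFF 0xFE is
+// detected as UTF-16LE and the two BOM bytes are consumed (raw_buffer_pos and
+// offset both advance by 2), while an input with no recognizable BOM falls
+// through to the UTF-8 default without consuming anything.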
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character.  Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				//    Char. number range |        UTF-8 octet sequence
+				//      (hexadecimal)    |              (binary)
+				//   --------------------+------------------------------------
+				//   0000 0000-0000 007F | 0xxxxxxx
+				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
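+				//
+				// Worked example (added for clarity): the two-byte
+				// sequence 0xC3 0xA9 ("é") has leading octet
+				// 0xC3&0xE0 == 0xC0, so width = 2, and decodes to
+				// (0xC3&0x1F)<<6 + (0xA9&0x3F) = 0x03<<6 + 0x29 = 0xE9.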
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
+				if width > raw_unread {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-8 octet sequence",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Decode the leading octet.
+				switch {
+				case octet&0x80 == 0x00:
+					value = rune(octet & 0x7F)
+				case octet&0xE0 == 0xC0:
+					value = rune(octet & 0x1F)
+				case octet&0xF0 == 0xE0:
+					value = rune(octet & 0x0F)
+				case octet&0xF8 == 0xF0:
+					value = rune(octet & 0x07)
+				default:
+					value = 0
+				}
+
+				// Check and decode the trailing octets.
+				for k := 1; k < width; k++ {
+					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+					// Check if the octet is valid.
+					if (octet & 0xC0) != 0x80 {
+						return yaml_parser_set_reader_error(parser,
+							"invalid trailing UTF-8 octet",
+							parser.offset+k, int(octet))
+					}
+
+					// Decode the octet.
+					value = (value << 6) + rune(octet&0x3F)
+				}
+
+				// Check the length of the sequence against the value.
+				switch {
+				case width == 1:
+				case width == 2 && value >= 0x80:
+				case width == 3 && value >= 0x800:
+				case width == 4 && value >= 0x10000:
+				default:
+					return yaml_parser_set_reader_error(parser,
+						"invalid length of a UTF-8 sequence",
+						parser.offset, -1)
+				}
+
+				// Check the range of the value.
+				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+					return yaml_parser_set_reader_error(parser,
+						"invalid Unicode character",
+						parser.offset, int(value))
+				}
+
+			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+				var low, high int
+				if parser.encoding == yaml_UTF16LE_ENCODING {
+					low, high = 0, 1
+				} else {
+					low, high = 1, 0
+				}
+
+				// The UTF-16 encoding is not as simple as one might
+				// naively think.  Check RFC 2781
+				// (http://www.ietf.org/rfc/rfc2781.txt).
+				//
+				// Normally, two subsequent bytes describe a Unicode
+				// character.  However a special technique (called a
+				// surrogate pair) is used for specifying character
+				// values larger than 0xFFFF.
+				//
+				// A surrogate pair consists of two pseudo-characters:
+				//      high surrogate area (0xD800-0xDBFF)
+				//      low surrogate area (0xDC00-0xDFFF)
+				//
+				// The following formulas are used for decoding
+				// and encoding characters using surrogate pairs:
+				//
+				//  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
+				//  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
+				//  W1 = 110110yyyyyyyyyy
+				//  W2 = 110111xxxxxxxxxx
+				//
+				// where U is the character value, W1 is the high surrogate
+				// area, W2 is the low surrogate area.
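+				//
+				// Worked example (added for clarity): U+1F600 has
+				// U' = 0x1F600 - 0x10000 = 0x0F600, so
+				// W1 = 0xD800 + (0x0F600 >> 10)  = 0xD83D and
+				// W2 = 0xDC00 + (0x0F600 & 0x3FF) = 0xDE00,
+				// which the code below reassembles as
+				// 0x10000 + ((W1 & 0x3FF) << 10) + (W2 & 0x3FF).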
+
+				// Check for incomplete UTF-16 character.
+				if raw_unread < 2 {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-16 character",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Get the character.
+				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+				// Check for unexpected low surrogate area.
+				if value&0xFC00 == 0xDC00 {
+					return yaml_parser_set_reader_error(parser,
+						"unexpected low surrogate area",
+						parser.offset, int(value))
+				}
+
+				// Check for a high surrogate area.
+				if value&0xFC00 == 0xD800 {
+					width = 4
+
+					// Check for incomplete surrogate pair.
+					if raw_unread < 4 {
+						if parser.eof {
+							return yaml_parser_set_reader_error(parser,
+								"incomplete UTF-16 surrogate pair",
+								parser.offset, -1)
+						}
+						break inner
+					}
+
+					// Get the next character.
+					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+					// Check for a low surrogate area.
+					if value2&0xFC00 != 0xDC00 {
+						return yaml_parser_set_reader_error(parser,
+							"expected low surrogate area",
+							parser.offset+2, int(value2))
+					}
+
+					// Generate the value of the surrogate pair.
+					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+				} else {
+					width = 2
+				}
+
+			default:
+				panic("impossible")
+			}
+
+			// Check if the character is in the allowed range:
+			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
+			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
+			//      | [#x10000-#x10FFFF]                        (32 bit)
+			switch {
+			case value == 0x09:
+			case value == 0x0A:
+			case value == 0x0D:
+			case value >= 0x20 && value <= 0x7E:
+			case value == 0x85:
+			case value >= 0xA0 && value <= 0xD7FF:
+			case value >= 0xE000 && value <= 0xFFFD:
+			case value >= 0x10000 && value <= 0x10FFFF:
+			default:
+				return yaml_parser_set_reader_error(parser,
+					"control characters are not allowed",
+					parser.offset, int(value))
+			}
+
+			// Move the raw pointers.
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/automation/vendor/gopkg.in/yaml.v2/resolve.go b/automation/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 0000000..93a8632
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,203 @@
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", yaml_MERGE_TAG, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+	// TODO This can easily be made faster and produce less garbage.
+	if strings.HasPrefix(tag, longTagPrefix) {
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
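+
+// For example (illustrative): shortTag("tag:yaml.org,2002:str") returns
+// "!!str", longTag("!!str") returns "tag:yaml.org,2002:str", and any tag
+// without the corresponding prefix is returned unchanged.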
+
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+		return true
+	}
+	return false
+}
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+			return
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+		// Handle things we can lookup in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			floatv, err := strconv.ParseFloat(plain, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt(plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, -int(intv)
+					} else {
+						return yaml_INT_TAG, -intv
+					}
+				}
+			}
+			// XXX Handle timestamps here.
+
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	if tag == yaml_BINARY_TAG {
+		return yaml_BINARY_TAG, in
+	}
+	if utf8.ValidString(in) {
+		return yaml_STR_TAG, in
+	}
+	return yaml_BINARY_TAG, encodeBase64(in)
+}
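+
+// As an illustrative sketch (added comment, not upstream), plain scalars
+// resolve under the tables above as follows:
+//
+//	resolve("", "true")   // yaml_BOOL_TAG,  true
+//	resolve("", "42")     // yaml_INT_TAG,   42
+//	resolve("", "0b1010") // yaml_INT_TAG,   10
+//	resolve("", "3.14")   // yaml_FLOAT_TAG, 3.14
+//	resolve("", "~")      // yaml_NULL_TAG,  nil
+//	resolve("", "hello")  // yaml_STR_TAG,   "hello"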
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
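+// For example (illustrative): a 100-byte input encodes to 136 base64
+// characters, emitted as a 70-character line and a 66-character line, each
+// followed by '\n'.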
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
diff --git a/automation/vendor/gopkg.in/yaml.v2/scannerc.go b/automation/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 0000000..2580800
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html).  We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated.  The Parser, by contrast, is
+// a straightforward implementation of a recursive-descent parser (or LL(1)
+// parser, as it is usually called).
+//
+// Only two aspects of Scanning might be called "clever"; the rest is quite
+// straightforward.  These are "block collection start" and "simple keys",
+// both of which are explained in detail below.
+//
+// Here the Scanning step is explained and implemented.  We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !   !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+//      1. An implicit document:
+//
+//          'a scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          STREAM-END
+//
+//      2. An explicit document:
+//
+//          ---
+//          'a scalar'
+//          ...
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-END
+//          STREAM-END
+//
+//      3. Several documents in a stream:
+//
+//          'a scalar'
+//          ---
+//          'another scalar'
+//          ---
+//          'yet another scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("another scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("yet another scalar",single-quoted)
+//          STREAM-END
+//
+// We have already introduced the SCALAR token above.  The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+//      1. A recursive sequence:
+//
+//          &A [ *A ]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          ANCHOR("A")
+//          FLOW-SEQUENCE-START
+//          ALIAS("A")
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A tagged scalar:
+//
+//          !!float "3.14"  # A good approximation.
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          TAG("!!","float")
+//          SCALAR("3.14",double-quoted)
+//          STREAM-END
+//
+//      3. Various scalar styles:
+//
+//          --- # Implicit empty plain scalars do not produce tokens.
+//          --- a plain scalar
+//          --- 'a single-quoted scalar'
+//          --- "a double-quoted scalar"
+//          --- |-
+//            a literal scalar
+//          --- >-
+//            a folded
+//            scalar
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          DOCUMENT-START
+//          SCALAR("a plain scalar",plain)
+//          DOCUMENT-START
+//          SCALAR("a single-quoted scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("a double-quoted scalar",double-quoted)
+//          DOCUMENT-START
+//          SCALAR("a literal scalar",literal)
+//          DOCUMENT-START
+//          SCALAR("a folded scalar",folded)
+//          STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly.  FLOW-ENTRY represents the ',' indicator.  Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+//      1. A flow sequence:
+//
+//          [item 1, item 2, item 3]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-SEQUENCE-START
+//          SCALAR("item 1",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 2",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 3",plain)
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A flow mapping:
+//
+//          {
+//              a simple key: a value,  # Note that the KEY token is produced.
+//              ? a complex key: another value,
+//          }
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          FLOW-ENTRY
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          FLOW-ENTRY
+//          FLOW-MAPPING-END
+//          STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator.  Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote an
+// indentation increase that precedes a block collection (cf. the INDENT token
+// in Python).  The token BLOCK-END denotes an indentation decrease that ends
+// a block collection (cf. the DEDENT token in Python).  However, YAML has
+// some syntax peculiarities that make detecting these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+//      1. Block sequences:
+//
+//          - item 1
+//          - item 2
+//          -
+//            - item 3.1
+//            - item 3.2
+//          -
+//            key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 3.1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 3.2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Block mappings:
+//
+//          a simple key: a value   # The KEY token is produced here.
+//          ? a complex key
+//          : another value
+//          a mapping:
+//            key 1: value 1
+//            key 2: value 2
+//          a sequence:
+//            - item 1
+//            - item 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          KEY
+//          SCALAR("a mapping",plain)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML does not always require a new block collection to start on a new line.
+// If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line.  The following examples
+// illustrate this case:
+//
+//      1. Collections in a sequence:
+//
+//          - - item 1
+//            - item 2
+//          - key 1: value 1
+//            key 2: value 2
+//          - ? complex key
+//            : complex value
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("complex key")
+//          VALUE
+//          SCALAR("complex value")
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Collections in a mapping:
+//
+//          ? a sequence
+//          : - item 1
+//            - item 2
+//          ? a mapping
+//          : key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping.  In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+//      key:
+//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
+//      - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key",plain)
+//      VALUE
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+	if is_crlf(parser.buffer, parser.buffer_pos) {
+		parser.mark.index += 2
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread -= 2
+		parser.buffer_pos += 2
+	} else if is_break(parser.buffer, parser.buffer_pos) {
+		parser.mark.index++
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread--
+		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+	}
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+	w := width(parser.buffer[parser.buffer_pos])
+	if w == 0 {
+		panic("invalid character sequence")
+	}
+	if len(s) == 0 {
+		s = make([]byte, 0, 32)
+	}
+	if w == 1 && len(s)+w <= cap(s) {
+		s = s[:len(s)+1]
+		s[len(s)-1] = parser.buffer[parser.buffer_pos]
+		parser.buffer_pos++
+	} else {
+		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+		parser.buffer_pos += w
+	}
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+	buf := parser.buffer
+	pos := parser.buffer_pos
+	switch {
+	case buf[pos] == '\r' && buf[pos+1] == '\n':
+		// CR LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+		parser.mark.index++
+		parser.unread--
+	case buf[pos] == '\r' || buf[pos] == '\n':
+		// CR|LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 1
+	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+		// NEL . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+		// LS|PS . LS|PS
+		s = append(s, buf[parser.buffer_pos:pos+3]...)
+		parser.buffer_pos += 3
+	default:
+		return s
+	}
+	parser.mark.index++
+	parser.mark.column = 0
+	parser.mark.line++
+	parser.unread--
+	return s
+}
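+
+// For example (illustrative): read_line normalizes CR LF ("\r\n"), lone CR,
+// lone LF, and NEL (0xC2 0x85) to a single '\n' in the output, while LS/PS
+// (0xE2 0x80 0xA8/0xA9) are copied through unchanged; in every case the mark
+// moves to column 0 of the next line.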
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Erase the token object.
+	*token = yaml_token_t{} // [Go] Is this necessary?
+
+	// No tokens after STREAM-END or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+		return true
+	}
+
+	// Ensure that the tokens queue contains enough tokens.
+	if !parser.token_available {
+		if !yaml_parser_fetch_more_tokens(parser) {
+			return false
+		}
+	}
+
+	// Fetch the next token from the queue.
+	*token = parser.tokens[parser.tokens_head]
+	parser.tokens_head++
+	parser.tokens_parsed++
+	parser.token_available = false
+
+	if token.typ == yaml_STREAM_END_TOKEN {
+		parser.stream_end_produced = true
+	}
+	return true
+}
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+	parser.error = yaml_SCANNER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = parser.mark
+	return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+	context := "while parsing a tag"
+	if directive {
+		context = "while parsing a %TAG directive"
+	}
+	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+	pargs := append([]interface{}{"+++"}, args...)
+	fmt.Println(pargs...)
+	pargs = append([]interface{}{"---"}, args...)
+	return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+	// While we need more tokens to fetch, do it.
+	for {
+		// Check if we really need to fetch more tokens.
+		need_more_tokens := false
+
+		if parser.tokens_head == len(parser.tokens) {
+			// Queue is empty.
+			need_more_tokens = true
+		} else {
+			// Check if any potential simple key may occupy the head position.
+			if !yaml_parser_stale_simple_keys(parser) {
+				return false
+			}
+
+			for i := range parser.simple_keys {
+				simple_key := &parser.simple_keys[i]
+				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+					need_more_tokens = true
+					break
+				}
+			}
+		}
+
+		// We are finished.
+		if !need_more_tokens {
+			break
+		}
+		// Fetch the next token.
+		if !yaml_parser_fetch_next_token(parser) {
+			return false
+		}
+	}
+
+	parser.token_available = true
+	return true
+}
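+
+// The simple-key check above is what forces lookahead: for input such as
+// "key: value", the KEY token for "key" cannot be emitted until the ':' is
+// seen, so while a saved simple key may still occupy the head position the
+// scanner must keep fetching rather than hand out tokens prematurely.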
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+	// Ensure that the buffer is initialized.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check if we just started scanning.  Fetch STREAM-START then.
+	if !parser.stream_start_produced {
+		return yaml_parser_fetch_stream_start(parser)
+	}
+
+	// Eat whitespaces and comments until we reach the next token.
+	if !yaml_parser_scan_to_next_token(parser) {
+		return false
+	}
+
+	// Remove obsolete potential simple keys.
+	if !yaml_parser_stale_simple_keys(parser) {
+		return false
+	}
+
+	// Check the indentation level against the current column.
+	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+		return false
+	}
+
+	// Ensure that the buffer contains at least 4 characters.  4 is the length
+	// of the longest indicators ('--- ' and '... ').
+	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+		return false
+	}
+
+	// Is it the end of the stream?
+	if is_z(parser.buffer, parser.buffer_pos) {
+		return yaml_parser_fetch_stream_end(parser)
+	}
+
+	// Is it a directive?
+	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+		return yaml_parser_fetch_directive(parser)
+	}
+
+	buf := parser.buffer
+	pos := parser.buffer_pos
+
+	// Is it the document start indicator?
+	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+	}
+
+	// Is it the document end indicator?
+	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+	}
+
+	// Is it the flow sequence start indicator?
+	if buf[pos] == '[' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+	}
+
+	// Is it the flow mapping start indicator?
+	if parser.buffer[parser.buffer_pos] == '{' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+	}
+
+	// Is it the flow sequence end indicator?
+	if parser.buffer[parser.buffer_pos] == ']' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_SEQUENCE_END_TOKEN)
+	}
+
+	// Is it the flow mapping end indicator?
+	if parser.buffer[parser.buffer_pos] == '}' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_MAPPING_END_TOKEN)
+	}
+
+	// Is it the flow entry indicator?
+	if parser.buffer[parser.buffer_pos] == ',' {
+		return yaml_parser_fetch_flow_entry(parser)
+	}
+
+	// Is it the block entry indicator?
+	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+		return yaml_parser_fetch_block_entry(parser)
+	}
+
+	// Is it the key indicator?
+	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_key(parser)
+	}
+
+	// Is it the value indicator?
+	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_value(parser)
+	}
+
+	// Is it an alias?
+	if parser.buffer[parser.buffer_pos] == '*' {
+		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+	}
+
+	// Is it an anchor?
+	if parser.buffer[parser.buffer_pos] == '&' {
+		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+	}
+
+	// Is it a tag?
+	if parser.buffer[parser.buffer_pos] == '!' {
+		return yaml_parser_fetch_tag(parser)
+	}
+
+	// Is it a literal scalar?
+	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, true)
+	}
+
+	// Is it a folded scalar?
+	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, false)
+	}
+
+	// Is it a single-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '\'' {
+		return yaml_parser_fetch_flow_scalar(parser, true)
+	}
+
+	// Is it a double-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '"' {
+		return yaml_parser_fetch_flow_scalar(parser, false)
+	}
+
+	// Is it a plain scalar?
+	//
+// A plain scalar may start with any non-blank character except
+	//
+	//      '-', '?', ':', ',', '[', ']', '{', '}',
+	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
+	//      '%', '@', '`'.
+	//
+	// In the block context (and, for the '-' indicator, in the flow context
+	// too), it may also start with the characters
+	//
+	//      '-', '?', ':'
+	//
+	// if it is followed by a non-space character.
+	//
+	// The last rule is more restrictive than the specification requires.
+	// [Go] Make this logic more reasonable.
+	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+		(parser.flow_level == 0 &&
+			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_plain_scalar(parser)
+	}
+
+	// If we don't determine the token type so far, it is an error.
+	return yaml_parser_set_scanner_error(parser,
+		"while scanning for the next token", parser.mark,
+		"found character that cannot start any token")
+}
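+
+// For example, the input "- foo" dispatches first to
+// yaml_parser_fetch_block_entry ('-' followed by a blank) and then, on the
+// next call, to yaml_parser_fetch_plain_scalar, yielding, after STREAM-START,
+// the sequence
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("foo",plain)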
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+	// Check for a potential simple key for each flow level.
+	for i := range parser.simple_keys {
+		simple_key := &parser.simple_keys[i]
+
+		// The specification requires that a simple key
+		//
+		//  - is limited to a single line,
+		//  - is shorter than 1024 characters.
+		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+			// Check if the potential simple key to be removed is required.
+			if simple_key.required {
+				return yaml_parser_set_scanner_error(parser,
+					"while scanning a simple key", simple_key.mark,
+					"could not find expected ':'")
+			}
+			simple_key.possible = false
+		}
+	}
+	return true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	// A simple key is required only when it is the first token in the current
+	// line.  Therefore it is always allowed.  But we add a check anyway.
+	if required && !parser.simple_key_allowed {
+		panic("should not happen")
+	}
+
+	// If the current position may start a simple key, save it.
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		}
+		simple_key.mark = parser.mark
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+	}
+	return true
+}
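+
+// The token_number arithmetic above records the queue position a KEY token
+// would take if this key is later confirmed by a ':'.  For example, with
+// tokens_parsed == 3 and two tokens still queued, token_number is 5, which
+// lets yaml_parser_fetch_value insert the KEY token at offset
+// token_number - tokens_parsed within the queue.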
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level.  In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less or equal to the column.  For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
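+
+// For example, if parser.indent is 2 with [-1, 0] on the indents stack and a
+// token appears at column 0, one BLOCK-END is appended (2 > 0) and 0 is
+// popped into parser.indent, after which 0 > 0 fails and unrolling stops.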
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report about it because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if simple_key.possible {
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_VALUE_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
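+
+// For example, "key: value" in the block context produces the token stream
+// shown in the overview at the top of this file: the saved simple key becomes
+// a KEY token inserted back at its recorded queue position, preceded by
+// BLOCK-MAPPING-START via yaml_parser_roll_indent, the VALUE token is
+// appended here, and the scalars arrive as separate SCALAR(...,plain) tokens.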
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// An anchor or an alias could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow an anchor or an alias.
+	parser.simple_key_allowed = false
+
+	// Create the ALIAS or ANCHOR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_anchor(parser, &token, typ) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+	// A tag could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a tag.
+	parser.simple_key_allowed = false
+
+	// Create the TAG token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_tag(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+	// Remove any potential simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// A simple key may follow a block scalar.
+	parser.simple_key_allowed = true
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A quoted scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a flow scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	// Loop until the next token is found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		//  - in the flow context
+		//  - in the block context, but not at the beginning of the line or
+		//  after '-', '?', or ':' (complex value).
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Eat a comment until a line break.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			for !is_breakz(parser.buffer, parser.buffer_pos) {
+				skip(parser)
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+			}
+		}
+
+		// If it is a line break, eat it.
+		if is_break(parser.buffer, parser.buffer_pos) {
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+			skip_line(parser)
+
+			// In the block context, a new line may start a simple key.
+			if parser.flow_level == 0 {
+				parser.simple_key_allowed = true
+			}
+		} else {
+			break // We have found a token.
+		}
+	}
+
+	return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//      %YAML    1.1    # a comment \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Eat '%'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the directive name.
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	// Is it a YAML directive?
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long.
+		length++
+		if length > max_number_length {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the number was present.
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+	*number = value
+	return true
+}
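+
+// For example, scanning "%YAML 1.1" yields major == 1 and minor == 1, while
+// a component longer than max_number_length digits (e.g. "%YAML 1.123") is
+// rejected with "found extremely long version number".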
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a handle.
+	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+		return false
+	}
+
+	// Expect a whitespace.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace")
+		return false
+	}
+
+	// Eat whitespaces.
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a prefix.
+	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+		return false
+	}
+
+	// Expect a whitespace or line break.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	*handle = handle_value
+	*prefix = prefix_value
+	return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+	var s []byte
+
+	// Eat the indicator character.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the value.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	end_mark := parser.mark
+
+	// Check if the length of the anchor is greater than 0 and it is followed
+	// by a whitespace character or one of the indicators:
+	//
+	//      '?', ':', ',', ']', '}', '%', '@', '`'.
+
+	if len(s) == 0 ||
+		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+			parser.buffer[parser.buffer_pos] == '`') {
+		context := "while scanning an alias"
+		if typ == yaml_ANCHOR_TOKEN {
+			context = "while scanning an anchor"
+		}
+		yaml_parser_set_scanner_error(parser, context, start_mark,
+			"did not find expected alphabetic or numeric character")
+		return false
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+	}
+
+	return true
+}
+
+// Scan a TAG token.
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+	var handle, suffix []byte
+
+	start_mark := parser.mark
+
+	// Check if the tag is in the canonical form.
+	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+		return false
+	}
+
+	if parser.buffer[parser.buffer_pos+1] == '<' {
+		// Keep the handle as ''
+
+		// Eat '!<'
+		skip(parser)
+		skip(parser)
+
+		// Consume the tag value.
+		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+			return false
+		}
+
+		// Check for '>' and eat it.
+		if parser.buffer[parser.buffer_pos] != '>' {
+			yaml_parser_set_scanner_error(parser, "while scanning a tag",
+				start_mark, "did not find the expected '>'")
+			return false
+		}
+
+		skip(parser)
+	} else {
+		// The tag has either the '!suffix' or the '!handle!suffix' form.
+
+		// First, try to scan a handle.
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		// Check if it is, indeed, a handle.
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			// Scan the suffix now.
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			// It wasn't a handle after all.  Scan the rest of the tag.
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			// Set the handle to '!'.
+			handle = []byte{'!'}
+
+			// A special case: the '!' tag.  Set the handle to '' and the
+			// suffix to '!'.
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+		}
+	}
+
+	// Check the character which ends the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+	return true
+}
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+	// Check the initial '!' character.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	var s []byte
+
+	// Copy the '!' character.
+	s = read(parser, s)
+
+	// Copy all subsequent alphabetical and numerical characters.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the trailing character is '!' and copy it.
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
+		// directive, it's an error.  If it's a tag token, it must be a part of
+		// the URI.
+		if directive && string(s) != "!" {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+	return true
+}
+
+// Scan a tag URI.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+	var s []byte
+
+	// Copy the head if needed.
+	//
+	// Note that we don't copy the leading '!' character.
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	// Scan the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// The set of characters that may appear in URI is as follows:
+	//
+	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	//      '%'.
+	// [Go] Convert this into more reasonable logic.
+	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+		parser.buffer[parser.buffer_pos] == '%' {
+		// Check if it is a URI-escape sequence.
+		if parser.buffer[parser.buffer_pos] == '%' {
+			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the tag is non-empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+	*uri = s
+	return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+	// Decode the required number of characters.
+	w := 1024
+	for w > 0 {
+		// Check for a URI-escaped octet.
+		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+			return false
+		}
+
+		if !(parser.buffer[parser.buffer_pos] == '%' &&
+			is_hex(parser.buffer, parser.buffer_pos+1) &&
+			is_hex(parser.buffer, parser.buffer_pos+2)) {
+			return yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find URI escaped octet")
+		}
+
+		// Get the octet.
+		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+		// If it is the leading octet, determine the length of the UTF-8 sequence.
+		if w == 1024 {
+			w = width(octet)
+			if w == 0 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect leading UTF-8 octet")
+			}
+		} else {
+			// Check if the trailing octet is correct.
+			if octet&0xC0 != 0x80 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect trailing UTF-8 octet")
+			}
+		}
+
+		// Copy the octet and move the pointers.
+		*s = append(*s, octet)
+		skip(parser)
+		skip(parser)
+		skip(parser)
+		w--
+	}
+	return true
+}
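+
+// For example, the escape sequence "%C3%A9" decodes to the two-byte UTF-8
+// encoding of 'é': the leading octet 0xC3 sets w to width(0xC3) == 2, the
+// trailing octet 0xA9 passes the 10xxxxxx check, and both bytes are appended
+// to s.  The initial w of 1024 is only a sentinel marking the leading octet.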
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+	// Eat the indicator '|' or '>'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the additional block scalar indicators.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check for a chomping indicator.
+	var chomping, increment int
+	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+		// Set the chomping method and eat the indicator.
+		if parser.buffer[parser.buffer_pos] == '+' {
+			chomping = +1
+		} else {
+			chomping = -1
+		}
+		skip(parser)
+
+		// Check for an indentation indicator.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if is_digit(parser.buffer, parser.buffer_pos) {
+			// Check that the indentation is greater than 0.
+			if parser.buffer[parser.buffer_pos] == '0' {
+				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+					start_mark, "found an indentation indicator equal to 0")
+				return false
+			}
+
+			// Get the indentation level and eat the indicator.
+			increment = as_digit(parser.buffer, parser.buffer_pos)
+			skip(parser)
+		}
+
+	} else if is_digit(parser.buffer, parser.buffer_pos) {
+		// Do the same as above, but in the opposite order.
+
+		if parser.buffer[parser.buffer_pos] == '0' {
+			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found an indentation indicator equal to 0")
+			return false
+		}
+		increment = as_digit(parser.buffer, parser.buffer_pos)
+		skip(parser)
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+			if parser.buffer[parser.buffer_pos] == '+' {
+				chomping = +1
+			} else {
+				chomping = -1
+			}
+			skip(parser)
+		}
+	}
+
+	// Eat whitespaces and comments to the end of the line.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	end_mark := parser.mark
+
+	// Set the indentation level if it was specified.
+	var indent int
+	if increment > 0 {
+		if parser.indent >= 0 {
+			indent = parser.indent + increment
+		} else {
+			indent = increment
+		}
+	}
+
+	// Scan the leading line breaks and determine the indentation level if needed.
+	var s, leading_break, trailing_breaks []byte
+	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+		return false
+	}
+
+	// Scan the block scalar content.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var leading_blank, trailing_blank bool
+	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+		// We are at the beginning of a non-empty line.
+
+		// Is it a trailing whitespace?
+		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Check if we need to fold the leading line break.
+		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+			// Do we need to join the lines by space?
+			if len(trailing_breaks) == 0 {
+				s = append(s, ' ')
+			}
+		} else {
+			s = append(s, leading_break...)
+		}
+		leading_break = leading_break[:0]
+
+		// Append the remaining line breaks.
+		s = append(s, trailing_breaks...)
+		trailing_breaks = trailing_breaks[:0]
+
+		// Is it a leading whitespace?
+		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Consume the current line.
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			s = read(parser, s)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+
+		leading_break = read_line(parser, leading_break)
+
+		// Eat the following indentation spaces and line breaks.
+		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+			return false
+		}
+	}
+
+	// Chomp the tail.
+	if chomping != -1 {
+		s = append(s, leading_break...)
+	}
+	if chomping == 1 {
+		s = append(s, trailing_breaks...)
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_LITERAL_SCALAR_STYLE,
+	}
+	if !literal {
+		token.style = yaml_FOLDED_SCALAR_STYLE
+	}
+	return true
+}
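+
+// The chomping variable implements the YAML chomping indicators: '-'
+// (chomping == -1) drops the final line break and any trailing empty lines,
+// '+' (chomping == +1) keeps all of them, and the default (chomping == 0,
+// "clip") keeps only the final line break, matching the
+// leading_break/trailing_breaks appends above.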
+
+// Scan indentation spaces and line breaks for a block scalar.  Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+	*end_mark = parser.mark
+
+	// Eat the indentation spaces and line breaks.
+	max_indent := 0
+	for {
+		// Eat the indentation spaces.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexdecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is the first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Join the whitespaces or fold line breaks.
+		if leading_blanks {
+			// Do we need to fold line breaks?
+			if len(leading_break) > 0 && leading_break[0] == '\n' {
+				if len(trailing_breaks) == 0 {
+					s = append(s, ' ')
+				} else {
+					s = append(s, trailing_breaks...)
+				}
+			} else {
+				s = append(s, leading_break...)
+				s = append(s, trailing_breaks...)
+			}
+			trailing_breaks = trailing_breaks[:0]
+			leading_break = leading_break[:0]
+		} else {
+			s = append(s, whitespaces...)
+			whitespaces = whitespaces[:0]
+		}
+	}
+
+	// Eat the right quote.
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
+	}
+	if !single {
+		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	return true
+}
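
The byte arithmetic in the branches above is ordinary UTF-8 encoding of the decoded escape value. A minimal standalone sketch (hypothetical, not part of the vendored package) checks that arithmetic against Go's standard encoder:

package main

import (
	"fmt"
	"unicode/utf8"
)

// encodeUTF8 mirrors the branch chain above: 1 byte up to U+007F,
// 2 bytes up to U+07FF, 3 bytes up to U+FFFF, and 4 bytes beyond.
func encodeUTF8(value rune) []byte {
	var s []byte
	switch {
	case value <= 0x7F:
		s = append(s, byte(value))
	case value <= 0x7FF:
		s = append(s, byte(0xC0+(value>>6)), byte(0x80+(value&0x3F)))
	case value <= 0xFFFF:
		s = append(s, byte(0xE0+(value>>12)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	default:
		s = append(s, byte(0xF0+(value>>18)), byte(0x80+((value>>12)&0x3F)), byte(0x80+((value>>6)&0x3F)), byte(0x80+(value&0x3F)))
	}
	return s
}

func main() {
	for _, r := range []rune{'A', 'é', '€', 0x1F600} {
		buf := make([]byte, utf8.UTFMax)
		n := utf8.EncodeRune(buf, r)
		fmt.Printf("%U manual=% X stdlib=% X\n", r, encodeUTF8(r), buf[:n])
	}
}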
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	var leading_blanks bool
+	var indent = parser.indent + 1
+
+	start_mark := parser.mark
+	end_mark := parser.mark
+
+	// Consume the content of the plain scalar.
+	for {
+		// Check for a document indicator.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			break
+		}
+
+		// Check for a comment.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			break
+		}
+
+		// Consume non-blank characters.
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+			// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
+			if parser.flow_level > 0 &&
+				parser.buffer[parser.buffer_pos] == ':' &&
+				!is_blankz(parser.buffer, parser.buffer_pos+1) {
+				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+					start_mark, "found unexpected ':'")
+				return false
+			}
+
+			// Check for indicators that may end a plain scalar.
+			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+				(parser.flow_level > 0 &&
+					(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+						parser.buffer[parser.buffer_pos] == '}')) {
+				break
+			}
+
+			// Check if we need to join whitespaces and breaks.
+			if leading_blanks || len(whitespaces) > 0 {
+				if leading_blanks {
+					// Do we need to fold line breaks?
+					if leading_break[0] == '\n' {
+						if len(trailing_breaks) == 0 {
+							s = append(s, ' ')
+						} else {
+							s = append(s, trailing_breaks...)
+						}
+					} else {
+						s = append(s, leading_break...)
+						s = append(s, trailing_breaks...)
+					}
+					trailing_breaks = trailing_breaks[:0]
+					leading_break = leading_break[:0]
+					leading_blanks = false
+				} else {
+					s = append(s, whitespaces...)
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for a tab character that abuses indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
+	if leading_blanks {
+		parser.simple_key_allowed = true
+	}
+	return true
+}
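
One observable effect of the fold logic above is that a single line break inside a plain scalar becomes a space. A small usage sketch against the public API (input values are illustrative):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	// A two-line plain scalar: the scanner folds the single line
	// break between the two words into one space.
	var m map[string]string
	if err := yaml.Unmarshal([]byte("a: word1\n word2\n"), &m); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", m["a"]) // "word1 word2"
}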
diff --git a/automation/vendor/gopkg.in/yaml.v2/sorter.go b/automation/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 0000000..5958822
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,104 @@
+package yaml
+
+import (
+	"reflect"
+	"unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int      { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+	a := l[i]
+	b := l[j]
+	ak := a.Kind()
+	bk := b.Kind()
+	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+		a = a.Elem()
+		ak = a.Kind()
+	}
+	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+		b = b.Elem()
+		bk = b.Kind()
+	}
+	af, aok := keyFloat(a)
+	bf, bok := keyFloat(b)
+	if aok && bok {
+		if af != bf {
+			return af < bf
+		}
+		if ak != bk {
+			return ak < bk
+		}
+		return numLess(a, b)
+	}
+	if ak != reflect.String || bk != reflect.String {
+		return ak < bk
+	}
+	ar, br := []rune(a.String()), []rune(b.String())
+	for i := 0; i < len(ar) && i < len(br); i++ {
+		if ar[i] == br[i] {
+			continue
+		}
+		al := unicode.IsLetter(ar[i])
+		bl := unicode.IsLetter(br[i])
+		if al && bl {
+			return ar[i] < br[i]
+		}
+		if al || bl {
+			return bl
+		}
+		var ai, bi int
+		var an, bn int64
+		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+			an = an*10 + int64(ar[ai]-'0')
+		}
+		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+			bn = bn*10 + int64(br[bi]-'0')
+		}
+		if an != bn {
+			return an < bn
+		}
+		if ai != bi {
+			return ai < bi
+		}
+		return ar[i] < br[i]
+	}
+	return len(ar) < len(br)
+}
+
+// keyFloat returns a float value for v if it is a number/bool
+// and whether it is a number/bool or not.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return float64(v.Int()), true
+	case reflect.Float32, reflect.Float64:
+		return v.Float(), true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return float64(v.Uint()), true
+	case reflect.Bool:
+		if v.Bool() {
+			return 1, true
+		}
+		return 0, true
+	}
+	return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must necessarily have the same kind.
+func numLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return a.Int() < b.Int()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	}
+	panic("not a number")
+}
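
The comparator above yields a natural ordering in which embedded numbers compare by value, so "item10" sorts after "item2" rather than between "item1" and "item2". Since the package's encoder routes plain map keys through keyList, the effect is visible from the public API; a sketch with illustrative keys:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	m := map[string]int{"item10": 3, "item1": 1, "item2": 2}
	out, err := yaml.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// item1: 1
	// item2: 2
	// item10: 3
}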
diff --git a/automation/vendor/gopkg.in/yaml.v2/writerc.go b/automation/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 0000000..190362f
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,89 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_WRITER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	// If the output encoding is UTF-8, we don't need to recode the buffer.
+	if emitter.encoding == yaml_UTF8_ENCODING {
+		if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+			return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+		}
+		emitter.buffer_pos = 0
+		return true
+	}
+
+	// Recode the buffer into the raw buffer.
+	var low, high int
+	if emitter.encoding == yaml_UTF16LE_ENCODING {
+		low, high = 0, 1
+	} else {
+		high, low = 1, 0
+	}
+
+	pos := 0
+	for pos < emitter.buffer_pos {
+		// See the "reader.c" code for more details on UTF-8 encoding.  Note
+		// that we assume that the buffer contains a valid UTF-8 sequence.
+
+		// Read the next UTF-8 character.
+		octet := emitter.buffer[pos]
+
+		var w int
+		var value rune
+		switch {
+		case octet&0x80 == 0x00:
+			w, value = 1, rune(octet&0x7F)
+		case octet&0xE0 == 0xC0:
+			w, value = 2, rune(octet&0x1F)
+		case octet&0xF0 == 0xE0:
+			w, value = 3, rune(octet&0x0F)
+		case octet&0xF8 == 0xF0:
+			w, value = 4, rune(octet&0x07)
+		}
+		for k := 1; k < w; k++ {
+			octet = emitter.buffer[pos+k]
+			value = (value << 6) + (rune(octet) & 0x3F)
+		}
+		pos += w
+
+		// Write the character.
+		if value < 0x10000 {
+			var b [2]byte
+			b[high] = byte(value >> 8)
+			b[low] = byte(value & 0xFF)
+			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
+		} else {
+			// Write the character using a surrogate pair (check "reader.c").
+			var b [4]byte
+			value -= 0x10000
+			b[high] = byte(0xD8 + (value >> 18))
+			b[low] = byte((value >> 10) & 0xFF)
+			b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
+			b[low+2] = byte(value & 0xFF)
+			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
+		}
+	}
+
+	// Write the raw buffer.
+	if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	emitter.raw_buffer = emitter.raw_buffer[:0]
+	return true
+}
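
The surrogate-pair arithmetic above can be sanity-checked against the standard library; a small standalone sketch (hypothetical, not part of the patch):

package main

import (
	"fmt"
	"unicode/utf16"
)

func main() {
	// Same math as yaml_emitter_flush, expressed on 16-bit units:
	// subtract 0x10000, then split the remaining 20 bits across a
	// 0xD800-based high surrogate and a 0xDC00-based low surrogate.
	r := rune(0x1F600)
	v := r - 0x10000
	hi := rune(0xD800 + (v >> 10))
	lo := rune(0xDC00 + (v & 0x3FF))
	h, l := utf16.EncodeRune(r)
	fmt.Printf("manual: %04X %04X  stdlib: %04X %04X\n", hi, lo, h, l)
}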
diff --git a/automation/vendor/gopkg.in/yaml.v2/yaml.go b/automation/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 0000000..36d6b88
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,346 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
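
A short usage sketch of MapSlice (values are illustrative): unlike a plain map, whose keys are sorted on output, a MapSlice marshals in declaration order:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	ms := yaml.MapSlice{
		{Key: "zebra", Value: 1},
		{Key: "apple", Value: 2},
	}
	out, err := yaml.Marshal(ms)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Output (declaration order preserved):
	// zebra: 1
	// apple: 2
}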
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to a type
+// mismatch, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Does not apply to zero valued structs.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int "a,omitempty"
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshal("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
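
As the doc comment says, decoding continues past type mismatches; a sketch of the resulting partial decode (input is illustrative):

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type T struct {
	A int
	B int
}

func main() {
	var t T
	// "a" cannot decode into an int, but "b" is still set; the failure
	// is reported through *yaml.TypeError rather than aborting.
	err := yaml.Unmarshal([]byte("a: banana\nb: 2"), &t)
	if terr, ok := err.(*yaml.TypeError); ok {
		fmt.Println(len(terr.Errors), "error(s); b =", t.B) // 1 error(s); b = 2
	}
}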
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+
+	// Inline holds the field index if the field is part of an inlined struct.
+	Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	fieldMapMutex.RLock()
+	sinfo, found := structMap[st]
+	fieldMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("yaml")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "flow":
+					info.Flow = true
+				case "inline":
+					inline = true
+				default:
+					return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct:
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+					}
+					fieldsMap[finfo.Key] = finfo
+					fieldsList = append(fieldsList, finfo)
+				}
+			default:
+				//return nil, errors.New("Option ,inline needs a struct value or map field")
+				return nil, errors.New("Option ,inline needs a struct value field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+
+	sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+
+	fieldMapMutex.Lock()
+	structMap[st] = sinfo
+	fieldMapMutex.Unlock()
+	return sinfo, nil
+}
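
A sketch of the tag handling that getStructInfo caches (the types here are hypothetical): a renamed key, an omitempty flag, and an inlined struct whose fields are promoted into the outer mapping:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type Meta struct {
	Owner string `yaml:"owner"`
}

type Doc struct {
	Name string `yaml:"name"`
	Note string `yaml:"note,omitempty"`
	Meta Meta   `yaml:",inline"`
}

func main() {
	var d Doc
	// "owner" is a field of Meta but appears at the top level
	// because Meta is tagged ,inline.
	if err := yaml.Unmarshal([]byte("name: demo\nowner: onlab\n"), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Name, d.Meta.Owner) // demo onlab
}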
+
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
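
isZero is what backs the omitempty flag during marshalling; a minimal sketch of the visible behavior:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	v := struct {
		S string `yaml:"s,omitempty"` // zero string: omitted
		N int    `yaml:"n"`           // no omitempty: always emitted
	}{}
	out, err := yaml.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", string(out)) // "n: 0\n"
}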
diff --git a/automation/vendor/gopkg.in/yaml.v2/yamlh.go b/automation/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 0000000..d60a6b6
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,716 @@
+package yaml
+
+import (
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT   // A STREAM-START event.
+	yaml_STREAM_END_EVENT     // A STREAM-END event.
+	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+	yaml_ALIAS_EVENT          // An ALIAS event.
+	yaml_SCALAR_EVENT         // A SCALAR event.
+	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+	// The event type.
+	typ yaml_event_type_t
+
+	// The start and end of the event.
+	start_mark, end_mark yaml_mark_t
+
+	// The document encoding (for yaml_STREAM_START_EVENT).
+	encoding yaml_encoding_t
+
+	// The version directive (for yaml_DOCUMENT_START_EVENT).
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+	tag_directives []yaml_tag_directive_t
+
+	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+	anchor []byte
+
+	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	tag []byte
+
+	// The scalar value (for yaml_SCALAR_EVENT).
+	value []byte
+
+	// Is the document start/end indicator implicit, or the tag optional?
+	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+	implicit bool
+
+	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+	quoted_implicit bool
+
+	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
+	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
+	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
+	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
+	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
+	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+	// Not in original libyaml.
+	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
+
+	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
+	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+	// An empty node.
+	yaml_NO_NODE yaml_node_type_t = iota
+
+	yaml_SCALAR_NODE   // A scalar node.
+	yaml_SEQUENCE_NODE // A sequence node.
+	yaml_MAPPING_NODE  // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+	key   int // The key of the element.
+	value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+	typ yaml_node_type_t // The node type.
+	tag []byte           // The node tag.
+
+	// The node data.
+
+	// The scalar parameters (for yaml_SCALAR_NODE).
+	scalar struct {
+		value  []byte              // The scalar value.
+		length int                 // The length of the scalar value.
+		style  yaml_scalar_style_t // The scalar style.
+	}
+
+	// The sequence parameters (for YAML_SEQUENCE_NODE).
+	sequence struct {
+		items_data []yaml_node_item_t    // The stack of sequence items.
+		style      yaml_sequence_style_t // The sequence style.
+	}
+
+	// The mapping parameters (for yaml_MAPPING_NODE).
+	mapping struct {
+		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
+		pairs_start *yaml_node_pair_t    // The beginning of the stack.
+		pairs_end   *yaml_node_pair_t    // The end of the stack.
+		pairs_top   *yaml_node_pair_t    // The top of the stack.
+		style       yaml_mapping_style_t // The mapping style.
+	}
+
+	start_mark yaml_mark_t // The beginning of the node.
+	end_mark   yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write no more than size bytes to the buffer.
+// The number of written bytes should be set to the size_read variable.
+//
+// [in,out]   data        A pointer to an application data specified by
+//                        yaml_parser_set_input().
+// [out]      buffer      The buffer to write the data from the source.
+// [in]       size        The size of the buffer.
+// [out]      size_read   The actual number of bytes read from the source.
+//
+// On success, the handler should return 1.  If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_file io.Reader // File input data.
+	input      []byte    // String input data.
+	input_pos  int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The input encoding.
+
+	offset int         // The offset of the current position (in bytes).
+	mark   yaml_mark_t // The mark of the current position.
+
+	// Scanner stuff
+
+	stream_start_produced bool // Have we started to scan the input stream?
+	stream_end_produced   bool // Have we reached the end of the input stream?
+
+	flow_level int // The number of unclosed '[' and '{' indicators.
+
+	tokens          []yaml_token_t // The tokens queue.
+	tokens_head     int            // The head of the tokens queue.
+	tokens_parsed   int            // The number of tokens fetched from the queue.
+	token_available bool           // Does the tokens queue contain a token ready for dequeueing.
+
+	indent  int   // The current indentation level.
+	indents []int // The indentation levels stack.
+
+	simple_key_allowed bool                // May a simple key occur at the current position?
+	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+
+	// Parser stuff
+
+	state          yaml_parser_state_t    // The current parser state.
+	states         []yaml_parser_state_t  // The parser states stack.
+	marks          []yaml_mark_t          // The stack of marks.
+	tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+	// Dumper stuff
+
+	aliases []yaml_alias_data_t // The alias data.
+
+	document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write size bytes of the
+// buffer to the output.
+//
+// [in,out]   data        A pointer to an application data specified by
+//                        yaml_emitter_set_output().
+// [in]       buffer      The buffer with bytes to be written.
+// [in]       size        The size of the buffer.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal. Manage the structure using the yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_file   io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // If the output is in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // If the last character was a whitespace?
+	indention  bool // If the last character was an indentation character (' ', '-', '?', ':')?
+	open_ended bool // If an explicit document end is required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // If the stream was already opened?
+	closed bool // If the stream was already closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // If the node has been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/automation/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/automation/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 0000000..8110ce3
--- /dev/null
+++ b/automation/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the start of the buffer can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+	return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+	return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+	//return is_space(b, i) || is_tab(b, i)
+	return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+	return (b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+	return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+	//return is_break(b, i) || is_z(b, i)
+	return (        // is_break:
+	b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		// is_z:
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+	//return is_space(b, i) || is_breakz(b, i)
+	return ( // is_space:
+	b[i] == ' ' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+	//return is_blank(b, i) || is_breakz(b, i)
+	return ( // is_blank:
+	b[i] == ' ' || b[i] == '\t' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+	// Don't replace these by a switch without first
+	// confirming that it is being inlined.
+	if b&0x80 == 0x00 {
+		return 1
+	}
+	if b&0xE0 == 0xC0 {
+		return 2
+	}
+	if b&0xF0 == 0xE0 {
+		return 3
+	}
+	if b&0xF8 == 0xF0 {
+		return 4
+	}
+	return 0
+}
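
width reports the byte length of a UTF-8 sequence from its leading byte (0 for a continuation or invalid byte). A quick standalone check against the standard library (hypothetical, not part of the package):

package main

import (
	"fmt"
	"unicode/utf8"
)

// width duplicates the vendored helper above for the check.
func width(b byte) int {
	if b&0x80 == 0x00 {
		return 1
	}
	if b&0xE0 == 0xC0 {
		return 2
	}
	if b&0xF0 == 0xE0 {
		return 3
	}
	if b&0xF8 == 0xF0 {
		return 4
	}
	return 0
}

func main() {
	for _, r := range []rune{'A', 'é', '€', 0x1F600} {
		buf := make([]byte, utf8.UTFMax)
		n := utf8.EncodeRune(buf, r)
		fmt.Printf("%c %v\n", r, width(buf[0]) == n) // true in every case
	}
}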
diff --git a/automation/vendor/vendor.json b/automation/vendor/vendor.json
new file mode 100644
index 0000000..6b13c25
--- /dev/null
+++ b/automation/vendor/vendor.json
@@ -0,0 +1,127 @@
+{
+	"comment": "",
+	"ignore": "test",
+	"package": [
+		{
+			"checksumSHA1": "dGXnnR7ZhsrZNnEqFimk6q7YCqs=",
+			"path": "github.com/Sirupsen/logrus",
+			"revision": "61e43dc76f7ee59a82bdf3d71033dc12bea4c77d",
+			"revisionTime": "2017-01-13T01:19:11Z"
+		},
+		{
+			"checksumSHA1": "b4Bd+o7lqSpiJr8eYHrfn6Y5iPE=",
+			"path": "github.com/juju/ansiterm",
+			"revision": "35c59b9e0fe275705a71bf5d58ee293f27efbbc4",
+			"revisionTime": "2016-11-07T20:46:39Z"
+		},
+		{
+			"checksumSHA1": "mqeBN4W37/KmjWBQT4xqIQGjGYE=",
+			"path": "github.com/juju/ansiterm/tabwriter",
+			"revision": "35c59b9e0fe275705a71bf5d58ee293f27efbbc4",
+			"revisionTime": "2016-11-07T20:46:39Z"
+		},
+		{
+			"checksumSHA1": "KIX/3RadQkfl4ZxCmOQ01vAGLEI=",
+			"path": "github.com/juju/errors",
+			"revision": "6f54ff6318409d31ff16261533ce2c8381a4fd5d",
+			"revisionTime": "2016-08-09T03:08:48Z"
+		},
+		{
+			"checksumSHA1": "kFWNEYI94c34qJ3twdktZxjLQPE=",
+			"path": "github.com/juju/gomaasapi",
+			"revision": "f0300c96aa5e0deb3cc599ca8a8949097b955e60",
+			"revisionTime": "2016-11-02T12:07:14Z"
+		},
+		{
+			"checksumSHA1": "WttBhWbKX60YUiSiyF+mQKpfdVo=",
+			"path": "github.com/juju/loggo",
+			"revision": "3b7ece48644d35850f4ced4c2cbc2cf8413f58e0",
+			"revisionTime": "2016-08-18T02:57:24Z"
+		},
+		{
+			"checksumSHA1": "WsQOT3cgol8O3aenURnzUokuIAA=",
+			"path": "github.com/juju/schema",
+			"revision": "e4e05803c9a103fdfa880476044100ac17e54830",
+			"revisionTime": "2016-09-16T14:28:50Z"
+		},
+		{
+			"checksumSHA1": "uBuPqeE5+azM1/N7YtTU2IUXuTc=",
+			"path": "github.com/juju/utils",
+			"revision": "e5eb4d99a51c3ded8e4e8bdfe527260ceeec997d",
+			"revisionTime": "2017-01-16T11:32:47Z"
+		},
+		{
+			"checksumSHA1": "CEj9efal05UefJa0E6s1Uskv5XU=",
+			"path": "github.com/juju/utils/clock",
+			"revision": "e5eb4d99a51c3ded8e4e8bdfe527260ceeec997d",
+			"revisionTime": "2017-01-16T11:32:47Z"
+		},
+		{
+			"checksumSHA1": "i9VUhzmNRF4jDTwpDuAgoAru+k0=",
+			"path": "github.com/juju/utils/set",
+			"revision": "e5eb4d99a51c3ded8e4e8bdfe527260ceeec997d",
+			"revisionTime": "2017-01-16T11:32:47Z"
+		},
+		{
+			"checksumSHA1": "DqG7AvEBxxWLRzQPgLM69dUpkL0=",
+			"path": "github.com/juju/version",
+			"revision": "1f41e27e54f21acccf9b2dddae063a782a8a7ceb",
+			"revisionTime": "2016-10-31T05:19:06Z"
+		},
+		{
+			"checksumSHA1": "pNria08/hqW7nnWb0erRBMdJU3M=",
+			"path": "github.com/kelseyhightower/envconfig",
+			"revision": "4069f29f08928c54bcb1fdf632b31b1bb54b4fdb",
+			"revisionTime": "2017-01-13T19:16:37Z"
+		},
+		{
+			"checksumSHA1": "0nXta8PKYGYicKWeydn0A7ELKcs=",
+			"path": "github.com/lunixbochs/vtclean",
+			"revision": "4fbf7632a2c6d3fbdb9931439bdbbeded02cbe36",
+			"revisionTime": "2016-01-25T03:51:06Z"
+		},
+		{
+			"checksumSHA1": "I4njd26dG5hxFT2nawuByM4pxzY=",
+			"path": "github.com/mattn/go-colorable",
+			"revision": "d228849504861217f796da67fae4f6e347643f15",
+			"revisionTime": "2016-11-03T16:00:40Z"
+		},
+		{
+			"checksumSHA1": "xZuhljnmBysJPta/lMyYmJdujCg=",
+			"path": "github.com/mattn/go-isatty",
+			"revision": "30a891c33c7cde7b02a981314b4228ec99380cca",
+			"revisionTime": "2016-11-23T14:36:37Z"
+		},
+		{
+			"checksumSHA1": "1MGpGDQqnUoRpv7VEcQrXOBydXE=",
+			"path": "golang.org/x/crypto/pbkdf2",
+			"revision": "b8a2a83acfe6e6770b75de42d5ff4c67596675c0",
+			"revisionTime": "2017-01-13T19:21:00Z"
+		},
+		{
+			"checksumSHA1": "TuN733jTVL7T0QVzNx6Tn6BAgrg=",
+			"path": "gopkg.in/juju/names.v2",
+			"revision": "0847c26d322a121e52614f969fb82eae2820c715",
+			"revisionTime": "2016-11-02T13:43:03Z"
+		},
+		{
+			"checksumSHA1": "YsB2DChSV9HxdzHaKATllAUKWSI=",
+			"path": "gopkg.in/mgo.v2/bson",
+			"revision": "3f83fa5005286a7fe593b055f0d7771a7dce4655",
+			"revisionTime": "2016-08-18T02:01:20Z"
+		},
+		{
+			"checksumSHA1": "XQsrqoNT1U0KzLxOFcAZVvqhLfk=",
+			"path": "gopkg.in/mgo.v2/internal/json",
+			"revision": "3f83fa5005286a7fe593b055f0d7771a7dce4655",
+			"revisionTime": "2016-08-18T02:01:20Z"
+		},
+		{
+			"checksumSHA1": "12GqsW8PiRPnezDDy0v4brZrndM=",
+			"path": "gopkg.in/yaml.v2",
+			"revision": "a5b47d31c556af34a302ce5d659e6fea44d90de0",
+			"revisionTime": "2016-09-28T15:37:09Z"
+		}
+	],
+	"rootPath": "gerrit.opencord.org/maas/automation"
+}
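The switch from `Godeps/Godeps.json` to a checked-in `vendor/` tree with a `vendor.json` manifest relies on Go 1.7's native vendor resolution: imports are satisfied from `automation/vendor/` directly, which is what lets the build-stage Dockerfile run a single `go build` with no `godep restore` step. A minimal sketch of a service entry point resolving its dependencies that way; the `AUTOMATION` prefix and `Port` option are illustrative, not taken from the real service:

```go
package main

// With a vendor/ tree beside this file, Go 1.7 resolves both imports
// from vendor/ automatically; no GOPATH manipulation is required.
import (
	"github.com/Sirupsen/logrus"
	"github.com/kelseyhightower/envconfig"
)

// spec is a hypothetical configuration block, for illustration only.
type spec struct {
	Port int `default:"4242"`
}

func main() {
	var s spec
	if err := envconfig.Process("AUTOMATION", &s); err != nil {
		logrus.Fatalf("unable to parse configuration: %s", err)
	}
	logrus.Infof("configured port: %d", s.Port)
}
```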
diff --git a/build.gradle b/build.gradle
index 4af4504..bd559e3 100644
--- a/build.gradle
+++ b/build.gradle
@@ -136,12 +136,13 @@
 }
 
 task buildSwitchqImage(type: Exec) {
-    commandLine "docker", 'build', '--label', 'org.label-schema.build-date=' + getBuildTimestamp(), '--label', 'org.label-schema.vcs-ref=' + getCommitHash(), '--label', 'org.label-schema.vcs-ref-date=' + getCommitDate(), '--label', 'org.label-schema.version=' + getBranchName(), '-t', 'cord-maas-switchq', './switchq'
+    workingDir 'switchq'
+    commandLine 'make', 'build', 'package'
 }
 
 task tagSwitchqImage(type: Exec) {
    dependsOn buildSwitchqImage
-   commandLine "docker", 'tag', 'cord-maas-switchq', "$targetReg/cord-maas-switchq:$targetTag"
+   commandLine "docker", 'tag', 'cord-maas-switchq:candidate', "$targetReg/cord-maas-switchq:$targetTag"
 }
 
 task publishSwitchqImage(type: Exec) {
@@ -152,12 +153,13 @@
 // IP Allocator Image
 
 task buildAllocationImage(type: Exec) {
-    commandLine "docker", 'build', '--label', 'org.label-schema.build-date=' + getBuildTimestamp(), '--label', 'org.label-schema.vcs-ref=' + getCommitHash(), '--label', 'org.label-schema.vcs-ref-date=' + getCommitDate(), '--label', 'org.label-schema.version=' + getBranchName(), '-t', 'cord-ip-allocator', './ip-allocator'
+    workingDir 'ip-allocator'
+    commandLine 'make', 'build', 'package'
 }
 
 task tagAllocationImage(type: Exec) {
    dependsOn buildAllocationImage
-   commandLine "docker", 'tag', 'cord-ip-allocator', "$targetReg/cord-ip-allocator:$targetTag"
+   commandLine "docker", 'tag', 'cord-ip-allocator:candidate', "$targetReg/cord-ip-allocator:$targetTag"
 }
 
 task publishAllocationImage(type: Exec) {
@@ -168,12 +170,13 @@
 // Provisioner Image
 
 task buildProvisionerImage(type: Exec) {
-    commandLine "docker", 'build', '--label', 'org.label-schema.build-date=' + getBuildTimestamp(), '--label', 'org.label-schema.vcs-ref=' + getCommitHash(), '--label', 'org.label-schema.vcs-ref-date=' + getCommitDate(), '--label', 'org.label-schema.version=' + getBranchName(), '-t', 'cord-provisioner', './provisioner'
+    workingDir 'provisioner'
+    commandLine 'make', 'build', 'package'
 }
 
 task tagProvisionerImage(type: Exec) {
    dependsOn buildProvisionerImage
-   commandLine "docker", 'tag', 'cord-provisioner', "$targetReg/cord-provisioner:$targetTag"
+   commandLine "docker", 'tag', 'cord-provisioner:candidate', "$targetReg/cord-provisioner:$targetTag"
 }
 
 task publishProvisionerImage(type: Exec) {
@@ -184,12 +187,13 @@
 // Config Generator Image
 
 task buildConfigGeneratorImage(type: Exec) {
-    commandLine "docker", 'build', '--label', 'org.label-schema.build-date=' + getBuildTimestamp(), '--label', 'org.label-schema.vcs-ref=' + getCommitHash(), '--label', 'org.label-schema.vcs-ref-date=' + getCommitDate(), '--label', 'org.label-schema.version=' + getBranchName(), '-t', 'config-generator', './config-generator'
+    workingDir 'config-generator'
+    commandLine 'make', 'build', 'package'
 }
 
 task tagConfigGeneratorImage(type: Exec) {
    dependsOn buildConfigGeneratorImage
-   commandLine "docker", 'tag', 'config-generator', "$targetReg/config-generator:$targetTag"
+   commandLine "docker", 'tag', 'config-generator:candidate', "$targetReg/config-generator:$targetTag"
 }
 
 task publishConfigGeneratorImage(type: Exec) {
@@ -200,12 +204,13 @@
 // Automation Image
 
 task buildAutomationImage(type: Exec) {
-    commandLine "docker", 'build', '--label', 'org.label-schema.build-date=' + getBuildTimestamp(), '--label', 'org.label-schema.vcs-ref=' + getCommitHash(), '--label', 'org.label-schema.vcs-ref-date=' + getCommitDate(), '--label', 'org.label-schema.version=' + getBranchName(), '-t', "cord-maas-automation", "-f", "./automation/Dockerfile", "./automation"
+    workingDir 'automation'
+    commandLine 'make', 'build', 'package'
 }
 
 task tagAutomationImage(type: Exec) {
     dependsOn buildAutomationImage
-    commandLine "docker", 'tag', 'cord-maas-automation', "$targetReg/cord-maas-automation:$targetTag"
+    commandLine "docker", 'tag', 'cord-maas-automation:candidate', "$targetReg/cord-maas-automation:$targetTag"
 }
 
 task publishAutomationImage(type: Exec) {
@@ -216,12 +221,13 @@
 // DHCP Harvester Images
 
 task buildHarvesterImage(type: Exec) {
-    commandLine "docker", 'build', '--label', 'org.label-schema.build-date=' + getBuildTimestamp(), '--label', 'org.label-schema.vcs-ref=' + getCommitHash(), '--label', 'org.label-schema.vcs-ref-date=' + getCommitDate(), '--label', 'org.label-schema.version=' + getBranchName(), '-t', "cord-dhcp-harvester", "./harvester"
+    workingDir 'harvester'
+    commandLine 'make', 'build', 'package'
 }
 
 task tagHarvesterImage(type: Exec) {
     dependsOn buildHarvesterImage
-    commandLine "docker", 'tag', 'cord-dhcp-harvester', "$targetReg/cord-dhcp-harvester:$targetTag"
+    commandLine "docker", 'tag', 'cord-dhcp-harvester:candidate', "$targetReg/cord-dhcp-harvester:$targetTag"
 }
 
 task publishHarvesterImage(type: Exec) {
@@ -344,7 +350,7 @@
             .p(config.seedServer.virtualbox_support, "virtualbox_support")
             .p(config.seedServer.power_helper_user, "power_helper_user")
             .p(config.seedServer.power_helper_host, "power_helper_host")
-            .p(config.seedServer.port, "ansible_ssh_port")
+            .p(config.seedServer.ansible_ssh_port, "ansible_ssh_port")
     }
 
     if (config.passwords) {
@@ -405,7 +411,7 @@
             .p(config.seedServer.virtualbox_support, "virtualbox_support")
             .p(config.seedServer.power_helper_user, "power_helper_user")
             .p(config.seedServer.power_helper_host, "power_helper_host")
-            .p(config.seedServer.port, "ansible_ssh_port")
+            .p(config.seedServer.ansible_ssh_port, "ansible_ssh_port")
     }
 
     if (config.passwords) {
diff --git a/config-generator/Dockerfile b/config-generator/Dockerfile
index 917c8ab..58dc80a 100644
--- a/config-generator/Dockerfile
+++ b/config-generator/Dockerfile
@@ -11,27 +11,17 @@
 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ## See the License for the specific language governing permissions and
 ## limitations under the License.
-FROM golang:alpine
-
+FROM golang:1.7-alpine
 MAINTAINER Open Networking Laboratory <info@onlab.us>
 
-RUN apk --update add git
+RUN mkdir /service
 
-# install godep
 WORKDIR /go
-RUN go get github.com/tools/godep
 ADD . /go/src/gerrit.opencord.org/maas/config-generator
-
-# restore dependancies
-WORKDIR /go/src/gerrit.opencord.org/maas/config-generator
-RUN /go/bin/godep restore || true
-
-# generate the binary
-WORKDIR /go/
-RUN go install gerrit.opencord.org/maas/config-generator
+RUN go build -o /service/entry-point gerrit.opencord.org/maas/config-generator
 
 # copy templates to the image
-COPY netconfig.tpl /go/
+COPY netconfig.tpl /service/netconfig.tpl
 
 EXPOSE 1337
 
@@ -41,4 +31,5 @@
       org.label-schema.vendor="Open Networking Laboratory" \
       org.label-schema.schema-version="1.0"
 
-ENTRYPOINT ["/go/bin/config-generator"]
+WORKDIR /service
+ENTRYPOINT ["/service/entry-point"]
diff --git a/config-generator/Dockerfile.release b/config-generator/Dockerfile.release
new file mode 100644
index 0000000..fcfdabc
--- /dev/null
+++ b/config-generator/Dockerfile.release
@@ -0,0 +1,32 @@
+## Copyright 2017 Open Networking Laboratory
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+FROM alpine:3.5
+MAINTAINER Open Networking Laboratory <info@onlab.us>
+
+RUN mkdir /service
+ADD entry-point /service/entry-point
+
+# copy templates to the image
+COPY netconfig.tpl /service
+
+EXPOSE 1337
+
+LABEL org.label-schema.name="generator" \
+      org.label-schema.description="Provides generation of the fabric configuration" \
+      org.label-schema.vcs-url="https://gerrit.opencord.org/maas" \
+      org.label-schema.vendor="Open Networking Laboratory" \
+      org.label-schema.schema-version="1.0"
+
+WORKDIR /service
+ENTRYPOINT ["/service/entry-point"]
diff --git a/config-generator/Godeps/Godeps.json b/config-generator/Godeps/Godeps.json
deleted file mode 100644
index e7fbfc7..0000000
--- a/config-generator/Godeps/Godeps.json
+++ /dev/null
@@ -1,27 +0,0 @@
-{
-	"ImportPath": "gerrit.opencord.org/maas/config-generator",
-	"GoVersion": "go1.7",
-	"GodepVersion": "v75",
-	"Deps": [
-		{
-			"ImportPath": "github.com/Sirupsen/logrus",
-			"Comment": "v0.10.0-19-gf3cfb45",
-			"Rev": "f3cfb454f4c209e6668c95216c4744b8fddb2356"
-		},
-		{
-			"ImportPath": "github.com/gorilla/context",
-			"Comment": "v1.1-4-gaed02d1",
-			"Rev": "aed02d124ae4a0e94fea4541c8effd05bf0c8296"
-		},
-		{
-			"ImportPath": "github.com/gorilla/mux",
-			"Comment": "v1.1-9-gbd09be0",
-			"Rev": "bd09be08ed4377796d312df0a45314e11b8f5dc1"
-		},
-		{
-			"ImportPath": "github.com/kelseyhightower/envconfig",
-			"Comment": "1.1.0-17-g91921eb",
-			"Rev": "91921eb4cf999321cdbeebdba5a03555800d493b"
-		}
-	]
-}
diff --git a/config-generator/Godeps/Readme b/config-generator/Godeps/Readme
deleted file mode 100644
index 4cdaa53..0000000
--- a/config-generator/Godeps/Readme
+++ /dev/null
@@ -1,5 +0,0 @@
-This directory tree is generated automatically by godep.
-
-Please do not edit.
-
-See https://github.com/tools/godep for more information.
diff --git a/config-generator/Makefile b/config-generator/Makefile
index f06879f..0b2bde8 100644
--- a/config-generator/Makefile
+++ b/config-generator/Makefile
@@ -1,35 +1,38 @@
+IMAGE_NAME=config-generator
+BINARY=entry-point
+BUILD_TAG=build
+PACKAGE_TAG=candidate
 
-default: image
+.PHONY: help
+help:
+	@echo "build     - create the binary"
+	@echo "package   - package the binary into a docker container"
+	@echo "clean     - remove tempory files and build artifacts"
+	@echo "help      - this message"
 
-ansible:
-	ansible-playbook ./setup.yml -v -vvvv -u gunjan -i ./host.yml --ask-pass
+BUILD_DATE=$(shell date -u +%Y-%m-%dT%TZ)
+VCS_REF=$(shell git log --pretty=format:%H -n 1)
+VCS_REF_DATE=$(shell git log --pretty=format:%cd --date=format:%FT%T%z -n 1)
+BRANCHES=$(shell repo --color=never --no-pager branches 2>/dev/null | wc -l)
+STATUS=$(shell repo --color=never --no-pager status . | tail -n +2 | wc -l)
+MODIFIED=$(shell test $(BRANCHES) -eq 0 && test $(STATUS) -eq 0 || echo "[modified]")
+BRANCH=$(shell repo --color=never --no-pager info -l -o | grep 'Manifest branch:' | awk '{print $$NF}')
+VERSION=$(BRANCH)$(MODIFIED)
 
-get:
-	goimports -w .
-	go get -t -d -v ./...
-
-src:
-	docker run --rm -it -v "$GOPATH":/gopath -v "$(pwd)":/app -e "GOPATH=/gopath" -w /app golang:1.6.1 sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o app'
-
-fmt:
-	gofmt -w .
-	#TODO: go lint, go vet
-test:
-	go test -v -race ./...
-	go test -cover -v ./...
-
-image:
-	docker build -t config-gen .
-
-run:
-	docker run --rm -p 1337:1337 -it config-gen
-
+.PHONY: build
 build:
-	GOOS=linux GOARCH=amd64 go build -o app
+	docker build -t $(IMAGE_NAME):$(BUILD_TAG) .
 
-depsave:
-	godep save
+.PHONY: package
+package:
+	$(eval BUILD_ID := $(shell docker create $(IMAGE_NAME):$(BUILD_TAG)))
+	$(eval BINDIR := $(shell mktemp -d))
+	docker cp $(BUILD_ID):/service/$(BINARY) $(BINDIR)/$(BINARY)
+	cp Dockerfile.release netconfig.tpl $(BINDIR)
+	docker build -f $(BINDIR)/Dockerfile.release -t $(IMAGE_NAME):$(PACKAGE_TAG) --label org.label-schema.build-date=$(BUILD_DATE) --label org.label-schema.vcs-ref=$(VCS_REF) --label org.label-schema.vcs-ref-date=$(VCS_REF_DATE) --label org.label-schema.version=$(VERSION) $(BINDIR)
+	docker rm -f $(BUILD_ID)
+	rm -r $(BINDIR)
 
-depupdate:
-	go get -t -v ./...
-	godep update
+.PHONY: clean
+clean:
+	@echo ""
diff --git a/config-generator/configGen.go b/config-generator/configGen.go
index c23ce53..063e46e 100644
--- a/config-generator/configGen.go
+++ b/config-generator/configGen.go
@@ -14,8 +14,10 @@
 package main
 
 import (
+	"flag"
 	"fmt"
 	"net/http"
+	"os"
 
 	"github.com/Sirupsen/logrus"
 
@@ -23,29 +25,43 @@
 	"github.com/kelseyhightower/envconfig"
 )
 
+const appName = "CONFIGGEN"
+
 type Config struct {
-	Port       int    `default:"1337"`
-	Listen     string `default:"0.0.0.0"`
-	Controller string `default:"http://%s:%s@127.0.0.1:8181"`
-	Username   string `default:"karaf"`
-	Password   string `default:"karaf"`
-	LogLevel   string `default:"warning" envconfig:"LOG_LEVEL"`
-	LogFormat  string `default:"text" envconfig:"LOG_FORMAT"`
+	Port       int    `default:"1337" desc:"port on which to listen for requests"`
+	Listen     string `default:"0.0.0.0" desc:"IP address on which to listen for requests"`
+	Controller string `default:"http://%s:%s@127.0.0.1:8181" desc:"connection string with which to connect to ONOS"`
+	Username   string `default:"karaf" desc:"username with which to connect to ONOS"`
+	Password   string `default:"karaf" desc:"password with which to connect to ONOS"`
+	LogLevel   string `default:"warning" envconfig:"LOG_LEVEL" desc:"detail level for logging"`
+	LogFormat  string `default:"text" envconfig:"LOG_FORMAT" desc:"log output format, text or json"`
 
 	connect string
 }
 
-var c Config
 var log = logrus.New()
+var appFlags = flag.NewFlagSet("", flag.ContinueOnError)
 
 func main() {
 
-	err := envconfig.Process("CONFIGGEN", &c)
+	config := Config{}
+	appFlags.Usage = func() {
+		envconfig.Usage(appName, &config)
+	}
+	if err := appFlags.Parse(os.Args[1:]); err != nil {
+		if err != flag.ErrHelp {
+			os.Exit(1)
+		} else {
+			return
+		}
+	}
+
+	err := envconfig.Process("CONFIGGEN", &config)
 	if err != nil {
 		log.Fatalf("[ERROR] Unable to parse configuration options : %s", err)
 	}
 
-	switch c.LogFormat {
+	switch config.LogFormat {
 	case "json":
 		log.Formatter = &logrus.JSONFormatter{}
 	default:
@@ -55,7 +71,7 @@
 		}
 	}
 
-	level, err := logrus.ParseLevel(c.LogLevel)
+	level, err := logrus.ParseLevel(config.LogLevel)
 	if err != nil {
 		level = logrus.WarnLevel
 	}
@@ -69,15 +85,15 @@
         PASSWORD:   %s
         LOG_LEVEL:  %s
         LOG_FORMAT: %s`,
-		c.Listen, c.Port, c.Controller,
-		c.Username, c.Password,
-		c.LogLevel, c.LogFormat)
+		config.Listen, config.Port, config.Controller,
+		config.Username, config.Password,
+		config.LogLevel, config.LogFormat)
 
 	router := mux.NewRouter()
-	router.HandleFunc("/config/", c.configGenHandler).Methods("POST")
+	router.HandleFunc("/config/", config.configGenHandler).Methods("POST")
 	http.Handle("/", router)
 
-	c.connect = fmt.Sprintf(c.Controller, c.Username, c.Password)
+	config.connect = fmt.Sprintf(config.Controller, config.Username, config.Password)
 
-	panic(http.ListenAndServe(fmt.Sprintf(":%d", c.Port), nil))
+	panic(http.ListenAndServe(fmt.Sprintf(":%d", config.Port), nil))
 }
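The `desc` struct tags and the `flag.ErrHelp` handling added above give the service a `-h`/`--help` mode that prints envconfig-generated usage text instead of starting the server. A minimal sketch of the same pattern, with a hypothetical `DEMO` prefix and placeholder fields rather than the real configuration:

```go
package main

import (
	"flag"
	"fmt"
	"os"

	"github.com/kelseyhightower/envconfig"
)

// Config is a placeholder; the desc tags feed envconfig.Usage.
type Config struct {
	Port     int    `default:"1337" desc:"port on which to listen"`
	LogLevel string `default:"warning" envconfig:"LOG_LEVEL" desc:"detail level for logging"`
}

func main() {
	flags := flag.NewFlagSet("", flag.ContinueOnError)
	flags.Usage = func() {
		envconfig.Usage("DEMO", &Config{}) // print generated usage on -h/--help
	}
	if err := flags.Parse(os.Args[1:]); err != nil {
		if err == flag.ErrHelp {
			return // usage was already printed
		}
		os.Exit(1)
	}

	var c Config
	if err := envconfig.Process("DEMO", &c); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("port=%d level=%s\n", c.Port, c.LogLevel)
}
```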
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/.gitignore b/config-generator/vendor/github.com/Sirupsen/logrus/.gitignore
deleted file mode 100644
index 66be63a..0000000
--- a/config-generator/vendor/github.com/Sirupsen/logrus/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-logrus
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/.travis.yml b/config-generator/vendor/github.com/Sirupsen/logrus/.travis.yml
deleted file mode 100644
index dee4eb2..0000000
--- a/config-generator/vendor/github.com/Sirupsen/logrus/.travis.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-language: go
-go:
-  - 1.3
-  - 1.4
-  - 1.5
-  - 1.6
-  - tip
-install:
-  - go get -t ./...
-script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./...
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/README.md b/config-generator/vendor/github.com/Sirupsen/logrus/README.md
index 6f04c8a..206c746 100644
--- a/config-generator/vendor/github.com/Sirupsen/logrus/README.md
+++ b/config-generator/vendor/github.com/Sirupsen/logrus/README.md
@@ -1,5 +1,11 @@
 # Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
 
+**Seeing weird case-sensitive problems?** See [this
+issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
+This change has been reverted. I apologize for causing this. I greatly
+underestimated the impact this change would have. Logrus strives for stability
+and backwards compatibility, and this change failed to provide that.
+
 Logrus is a structured logger for Go (golang), completely API compatible with
 the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
 yet stable (pre 1.0). Logrus itself is completely stable and has been used in
@@ -81,8 +87,8 @@
   // Log as JSON instead of the default ASCII formatter.
   log.SetFormatter(&log.JSONFormatter{})
 
-  // Output to stderr instead of stdout, could also be a file.
-  log.SetOutput(os.Stderr)
+  // Output to stdout instead of the default stderr, could also be a file.
+  log.SetOutput(os.Stdout)
 
   // Only log the warning severity or above.
   log.SetLevel(log.WarnLevel)
@@ -228,7 +234,14 @@
 | [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
 | [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
 | [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
 | [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
+
 
 #### Level logging
 
@@ -306,14 +319,10 @@
     field to `true`.  To force no colored output even if there is a TTY  set the
     `DisableColors` field to `true`
 * `logrus.JSONFormatter`. Logs fields as JSON.
-* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
-
-    ```go
-      logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
-    ```
 
 Third party logging formatters:
 
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
 * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
 * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
 
@@ -369,6 +378,7 @@
 | Tool | Description |
 | ---- | ----------- |
 |[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs! It simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
 
 #### Testing
 
@@ -388,3 +398,36 @@
 hook.Reset()
 assert.Nil(hook.LastEntry())
 ```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default the Logger is protected by a mutex for concurrent writes; this mutex is also held when calling hooks and writing the log.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable it.
+
+Situations when locking is not needed include:
+
+* You have no hooks registered, or hooks calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+     (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/alt_exit.go b/config-generator/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..b4c9e84
--- /dev/null
+++ b/config-generator/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://bitbucket.org/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to gracefully shutdown. An example usecase could be
+// closing database connections, or sending a alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/entry.go b/config-generator/vendor/github.com/Sirupsen/logrus/entry.go
index 89e966e..4edbe7a 100644
--- a/config-generator/vendor/github.com/Sirupsen/logrus/entry.go
+++ b/config-generator/vendor/github.com/Sirupsen/logrus/entry.go
@@ -3,11 +3,21 @@
 import (
 	"bytes"
 	"fmt"
-	"io"
 	"os"
+	"sync"
 	"time"
 )
 
+var bufferPool *sync.Pool
+
+func init() {
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+}
+
 // Defines the key when adding errors using WithError.
 var ErrorKey = "error"
 
@@ -29,6 +39,9 @@
 
 	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
 	Message string
+
+	// When the formatter is called in entry.log(), a Buffer may be set on the entry
+	Buffer *bytes.Buffer
 }
 
 func NewEntry(logger *Logger) *Entry {
@@ -39,21 +52,15 @@
 	}
 }
 
-// Returns a reader for the entry, which is a proxy to the formatter.
-func (entry *Entry) Reader() (*bytes.Buffer, error) {
-	serialized, err := entry.Logger.Formatter.Format(entry)
-	return bytes.NewBuffer(serialized), err
-}
-
 // Returns the string representation from the reader and ultimately the
 // formatter.
 func (entry *Entry) String() (string, error) {
-	reader, err := entry.Reader()
+	serialized, err := entry.Logger.Formatter.Format(entry)
 	if err != nil {
 		return "", err
 	}
-
-	return reader.String(), err
+	str := string(serialized)
+	return str, nil
 }
 
 // Add an error as single field (using the key defined in ErrorKey) to the Entry.
@@ -81,6 +88,7 @@
 // This function is not declared with a pointer value because otherwise
 // race conditions will occur when using multiple goroutines
 func (entry Entry) log(level Level, msg string) {
+	var buffer *bytes.Buffer
 	entry.Time = time.Now()
 	entry.Level = level
 	entry.Message = msg
@@ -90,20 +98,23 @@
 		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
 		entry.Logger.mu.Unlock()
 	}
-
-	reader, err := entry.Reader()
+	buffer = bufferPool.Get().(*bytes.Buffer)
+	buffer.Reset()
+	defer bufferPool.Put(buffer)
+	entry.Buffer = buffer
+	serialized, err := entry.Logger.Formatter.Format(&entry)
+	entry.Buffer = nil
 	if err != nil {
 		entry.Logger.mu.Lock()
 		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
 		entry.Logger.mu.Unlock()
-	}
-
-	entry.Logger.mu.Lock()
-	defer entry.Logger.mu.Unlock()
-
-	_, err = io.Copy(entry.Logger.Out, reader)
-	if err != nil {
-		fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+	} else {
+		entry.Logger.mu.Lock()
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+		entry.Logger.mu.Unlock()
 	}
 
 	// To avoid Entry#log() returning a value that only would make sense for
@@ -150,7 +161,7 @@
 	if entry.Logger.Level >= FatalLevel {
 		entry.log(FatalLevel, fmt.Sprint(args...))
 	}
-	os.Exit(1)
+	Exit(1)
 }
 
 func (entry *Entry) Panic(args ...interface{}) {
@@ -198,7 +209,7 @@
 	if entry.Logger.Level >= FatalLevel {
 		entry.Fatal(fmt.Sprintf(format, args...))
 	}
-	os.Exit(1)
+	Exit(1)
 }
 
 func (entry *Entry) Panicf(format string, args ...interface{}) {
@@ -245,7 +256,7 @@
 	if entry.Logger.Level >= FatalLevel {
 		entry.Fatal(entry.sprintlnn(args...))
 	}
-	os.Exit(1)
+	Exit(1)
 }
 
 func (entry *Entry) Panicln(args ...interface{}) {
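The rewritten `entry.log()` borrows a `bytes.Buffer` from a `sync.Pool` instead of allocating one per log call, trading a mandatory `Reset()` on reuse for fewer allocations on the hot path. A self-contained sketch of that pattern:

```go
package main

import (
	"bytes"
	"fmt"
	"sync"
)

// bufferPool hands out reusable *bytes.Buffer values so a hot path,
// such as formatting a log entry, avoids one allocation per call.
var bufferPool = sync.Pool{
	New: func() interface{} { return new(bytes.Buffer) },
}

func format(msg string) string {
	buf := bufferPool.Get().(*bytes.Buffer)
	buf.Reset()               // a pooled buffer may contain stale bytes
	defer bufferPool.Put(buf) // return it for reuse once we are done
	fmt.Fprintf(buf, "msg=%q", msg)
	return buf.String() // copy out before the buffer can be reused
}

func main() {
	fmt.Println(format("hello"))
}
```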
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/json_formatter.go b/config-generator/vendor/github.com/Sirupsen/logrus/json_formatter.go
index 2ad6dc5..266554e 100644
--- a/config-generator/vendor/github.com/Sirupsen/logrus/json_formatter.go
+++ b/config-generator/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -5,9 +5,40 @@
 	"fmt"
 )
 
+type fieldKey string
+type FieldMap map[fieldKey]string
+
+const (
+	FieldKeyMsg   = "msg"
+	FieldKeyLevel = "level"
+	FieldKeyTime  = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
 type JSONFormatter struct {
 	// TimestampFormat sets the format used for marshaling timestamps.
 	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// FieldMap allows users to customize the names of keys for various fields.
+	// As an example:
+	// formatter := &JSONFormatter{
+	//   	FieldMap: FieldMap{
+	// 		 FieldKeyTime: "@timestamp",
+	// 		 FieldKeyLevel: "@level",
+	// 		 FieldKeyLevel: "@message",
+	//    },
+	// }
+	FieldMap FieldMap
 }
 
 func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
@@ -29,9 +60,11 @@
 		timestampFormat = DefaultTimestampFormat
 	}
 
-	data["time"] = entry.Time.Format(timestampFormat)
-	data["msg"] = entry.Message
-	data["level"] = entry.Level.String()
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
 
 	serialized, err := json.Marshal(data)
 	if err != nil {
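The new `FieldMap` option lets callers rename the reserved `time`, `level`, and `msg` keys in JSON output, for example to match a Logstash-style schema. A minimal usage sketch against this vendored revision:

```go
package main

import "github.com/Sirupsen/logrus"

func main() {
	log := logrus.New()
	log.Formatter = &logrus.JSONFormatter{
		FieldMap: logrus.FieldMap{
			logrus.FieldKeyTime:  "@timestamp",
			logrus.FieldKeyLevel: "@level",
			logrus.FieldKeyMsg:   "@message",
		},
	}
	// Emits: {"@level":"info","@message":"fields renamed","@timestamp":"..."}
	log.Info("fields renamed")
}
```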
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/logger.go b/config-generator/vendor/github.com/Sirupsen/logrus/logger.go
index 2fdb231..b769f3d 100644
--- a/config-generator/vendor/github.com/Sirupsen/logrus/logger.go
+++ b/config-generator/vendor/github.com/Sirupsen/logrus/logger.go
@@ -26,8 +26,31 @@
 	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
 	// logged. `logrus.Debug` is useful in
 	Level Level
-	// Used to sync writing to the log.
-	mu sync.Mutex
+	// Used to sync writing to the log. Locking is enabled by default.
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+}
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
 }
 
 // Creates a new logger. Configuration should be set by changing `Formatter`,
@@ -51,162 +74,235 @@
 	}
 }
 
-// Adds a field to the log entry, note that you it doesn't log until you call
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry, note that it doesn't log until you call
 // Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
 // If you want multiple fields, use `WithFields`.
 func (logger *Logger) WithField(key string, value interface{}) *Entry {
-	return NewEntry(logger).WithField(key, value)
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
 }
 
 // Adds a struct of fields to the log entry. All it does is call `WithField` for
 // each `Field`.
 func (logger *Logger) WithFields(fields Fields) *Entry {
-	return NewEntry(logger).WithFields(fields)
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
 }
 
 // Add an error as single field to the log entry.  All it does is call
 // `WithError` for the given `error`.
 func (logger *Logger) WithError(err error) *Entry {
-	return NewEntry(logger).WithError(err)
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
 }
 
 func (logger *Logger) Debugf(format string, args ...interface{}) {
 	if logger.Level >= DebugLevel {
-		NewEntry(logger).Debugf(format, args...)
+		entry := logger.newEntry()
+		entry.Debugf(format, args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Infof(format string, args ...interface{}) {
 	if logger.Level >= InfoLevel {
-		NewEntry(logger).Infof(format, args...)
+		entry := logger.newEntry()
+		entry.Infof(format, args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Printf(format string, args ...interface{}) {
-	NewEntry(logger).Printf(format, args...)
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
+	logger.releaseEntry(entry)
 }
 
 func (logger *Logger) Warnf(format string, args ...interface{}) {
 	if logger.Level >= WarnLevel {
-		NewEntry(logger).Warnf(format, args...)
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Warningf(format string, args ...interface{}) {
 	if logger.Level >= WarnLevel {
-		NewEntry(logger).Warnf(format, args...)
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Errorf(format string, args ...interface{}) {
 	if logger.Level >= ErrorLevel {
-		NewEntry(logger).Errorf(format, args...)
+		entry := logger.newEntry()
+		entry.Errorf(format, args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Fatalf(format string, args ...interface{}) {
 	if logger.Level >= FatalLevel {
-		NewEntry(logger).Fatalf(format, args...)
+		entry := logger.newEntry()
+		entry.Fatalf(format, args...)
+		logger.releaseEntry(entry)
 	}
-	os.Exit(1)
+	Exit(1)
 }
 
 func (logger *Logger) Panicf(format string, args ...interface{}) {
 	if logger.Level >= PanicLevel {
-		NewEntry(logger).Panicf(format, args...)
+		entry := logger.newEntry()
+		entry.Panicf(format, args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Debug(args ...interface{}) {
 	if logger.Level >= DebugLevel {
-		NewEntry(logger).Debug(args...)
+		entry := logger.newEntry()
+		entry.Debug(args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Info(args ...interface{}) {
 	if logger.Level >= InfoLevel {
-		NewEntry(logger).Info(args...)
+		entry := logger.newEntry()
+		entry.Info(args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Print(args ...interface{}) {
-	NewEntry(logger).Info(args...)
+	entry := logger.newEntry()
+	entry.Info(args...)
+	logger.releaseEntry(entry)
 }
 
 func (logger *Logger) Warn(args ...interface{}) {
 	if logger.Level >= WarnLevel {
-		NewEntry(logger).Warn(args...)
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Warning(args ...interface{}) {
 	if logger.Level >= WarnLevel {
-		NewEntry(logger).Warn(args...)
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Error(args ...interface{}) {
 	if logger.Level >= ErrorLevel {
-		NewEntry(logger).Error(args...)
+		entry := logger.newEntry()
+		entry.Error(args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Fatal(args ...interface{}) {
 	if logger.Level >= FatalLevel {
-		NewEntry(logger).Fatal(args...)
+		entry := logger.newEntry()
+		entry.Fatal(args...)
+		logger.releaseEntry(entry)
 	}
-	os.Exit(1)
+	Exit(1)
 }
 
 func (logger *Logger) Panic(args ...interface{}) {
 	if logger.Level >= PanicLevel {
-		NewEntry(logger).Panic(args...)
+		entry := logger.newEntry()
+		entry.Panic(args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Debugln(args ...interface{}) {
 	if logger.Level >= DebugLevel {
-		NewEntry(logger).Debugln(args...)
+		entry := logger.newEntry()
+		entry.Debugln(args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Infoln(args ...interface{}) {
 	if logger.Level >= InfoLevel {
-		NewEntry(logger).Infoln(args...)
+		entry := logger.newEntry()
+		entry.Infoln(args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Println(args ...interface{}) {
-	NewEntry(logger).Println(args...)
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
 }
 
 func (logger *Logger) Warnln(args ...interface{}) {
 	if logger.Level >= WarnLevel {
-		NewEntry(logger).Warnln(args...)
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Warningln(args ...interface{}) {
 	if logger.Level >= WarnLevel {
-		NewEntry(logger).Warnln(args...)
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Errorln(args ...interface{}) {
 	if logger.Level >= ErrorLevel {
-		NewEntry(logger).Errorln(args...)
+		entry := logger.newEntry()
+		entry.Errorln(args...)
+		logger.releaseEntry(entry)
 	}
 }
 
 func (logger *Logger) Fatalln(args ...interface{}) {
 	if logger.Level >= FatalLevel {
-		NewEntry(logger).Fatalln(args...)
+		entry := logger.newEntry()
+		entry.Fatalln(args...)
+		logger.releaseEntry(entry)
 	}
-	os.Exit(1)
+	Exit(1)
 }
 
 func (logger *Logger) Panicln(args ...interface{}) {
 	if logger.Level >= PanicLevel {
-		NewEntry(logger).Panicln(args...)
+		entry := logger.newEntry()
+		entry.Panicln(args...)
+		logger.releaseEntry(entry)
 	}
 }
+
+// When a file is opened in append mode, it's safe to
+// write to it concurrently (for messages under 4k on Linux).
+// In these cases the user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
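`SetNoLock()` disables the logger's internal mutex for callers whose output destination is already safe for concurrent writes. A usage sketch, assuming the O_APPEND small-write guarantee described in the README above:

```go
package main

import (
	"os"

	"github.com/Sirupsen/logrus"
)

func main() {
	// An O_APPEND file gives atomic appends for small writes on Linux,
	// so the logger's own mutex is redundant and can be disabled.
	f, err := os.OpenFile("service.log", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
	if err != nil {
		logrus.Fatal(err)
	}
	defer f.Close()

	log := logrus.New()
	log.Out = f
	log.SetNoLock() // safe only because writes to f are already atomic
	log.Info("lock-free logging")
}
```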
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/config-generator/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
new file mode 100644
index 0000000..1960169
--- /dev/null
+++ b/config-generator/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package logrus
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	return true
+}
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/config-generator/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
index 71f8d67..5f6be4d 100644
--- a/config-generator/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
+++ b/config-generator/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -1,4 +1,5 @@
 // +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
 
 package logrus
 
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/config-generator/vendor/github.com/Sirupsen/logrus/terminal_linux.go
index a2c0b40..308160c 100644
--- a/config-generator/vendor/github.com/Sirupsen/logrus/terminal_linux.go
+++ b/config-generator/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -3,6 +3,8 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+// +build !appengine
+
 package logrus
 
 import "syscall"
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/config-generator/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
index b343b3a..329038f 100644
--- a/config-generator/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
+++ b/config-generator/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -4,6 +4,7 @@
 // license that can be found in the LICENSE file.
 
 // +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
 
 package logrus
 
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/config-generator/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
index 3e70bf7..a3c6f6e 100644
--- a/config-generator/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
+++ b/config-generator/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -1,4 +1,4 @@
-// +build solaris
+// +build solaris,!appengine
 
 package logrus
 
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/config-generator/vendor/github.com/Sirupsen/logrus/terminal_windows.go
index 0146845..3727e8a 100644
--- a/config-generator/vendor/github.com/Sirupsen/logrus/terminal_windows.go
+++ b/config-generator/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -3,7 +3,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// +build windows
+// +build windows,!appengine
 
 package logrus
 
diff --git a/config-generator/vendor/github.com/Sirupsen/logrus/text_formatter.go b/config-generator/vendor/github.com/Sirupsen/logrus/text_formatter.go
index 6afd0e0..076de5d 100644
--- a/config-generator/vendor/github.com/Sirupsen/logrus/text_formatter.go
+++ b/config-generator/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -28,10 +28,6 @@
 	isTerminal = IsTerminal()
 }
 
-func miniTS() int {
-	return int(time.Since(baseTimestamp) / time.Second)
-}
-
 type TextFormatter struct {
 	// Set to true to bypass checking for a TTY before outputting colors.
 	ForceColors bool
@@ -57,7 +53,8 @@
 }
 
 func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
-	var keys []string = make([]string, 0, len(entry.Data))
+	var b *bytes.Buffer
+	keys := make([]string, 0, len(entry.Data))
 	for k := range entry.Data {
 		keys = append(keys, k)
 	}
@@ -65,8 +62,11 @@
 	if !f.DisableSorting {
 		sort.Strings(keys)
 	}
-
-	b := &bytes.Buffer{}
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
 
 	prefixFieldClashes(entry.Data)
 
@@ -111,14 +111,17 @@
 
 	levelText := strings.ToUpper(entry.Level.String())[0:4]
 
-	if !f.FullTimestamp {
-		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+	if f.DisableTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+	} else if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
 	} else {
 		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
 	}
 	for _, k := range keys {
 		v := entry.Data[k]
-		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+		f.appendValue(b, v)
 	}
 }
 
@@ -138,7 +141,11 @@
 
 	b.WriteString(key)
 	b.WriteByte('=')
+	f.appendValue(b, value)
+	b.WriteByte(' ')
+}
 
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
 	switch value := value.(type) {
 	case string:
 		if !needsQuoting(value) {
@@ -151,11 +158,9 @@
 		if !needsQuoting(errmsg) {
 			b.WriteString(errmsg)
 		} else {
-			fmt.Fprintf(b, "%q", value)
+			fmt.Fprintf(b, "%q", errmsg)
 		}
 	default:
 		fmt.Fprint(b, value)
 	}
-
-	b.WriteByte(' ')
 }
diff --git a/config-generator/vendor/github.com/gorilla/context/.travis.yml b/config-generator/vendor/github.com/gorilla/context/.travis.yml
deleted file mode 100644
index 24882fc..0000000
--- a/config-generator/vendor/github.com/gorilla/context/.travis.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-language: go
-sudo: false
-
-matrix:
-  include:
-    - go: 1.3
-    - go: 1.4
-    - go: 1.5
-    - go: 1.6
-    - go: tip
-  allow_failures:
-    - go: tip
-
-script:
-  - go get -t -v ./...
-  - diff -u <(echo -n) <(gofmt -d .)
-  - go vet $(go list ./... | grep -v /vendor/)
-  - go test -v -race ./...
diff --git a/config-generator/vendor/github.com/gorilla/context/README.md b/config-generator/vendor/github.com/gorilla/context/README.md
deleted file mode 100644
index c60a31b..0000000
--- a/config-generator/vendor/github.com/gorilla/context/README.md
+++ /dev/null
@@ -1,7 +0,0 @@
-context
-=======
-[![Build Status](https://travis-ci.org/gorilla/context.png?branch=master)](https://travis-ci.org/gorilla/context)
-
-gorilla/context is a general purpose registry for global request variables.
-
-Read the full documentation here: http://www.gorillatoolkit.org/pkg/context
diff --git a/config-generator/vendor/github.com/gorilla/context/context.go b/config-generator/vendor/github.com/gorilla/context/context.go
deleted file mode 100644
index 81cb128..0000000
--- a/config-generator/vendor/github.com/gorilla/context/context.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// Copyright 2012 The Gorilla Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package context
-
-import (
-	"net/http"
-	"sync"
-	"time"
-)
-
-var (
-	mutex sync.RWMutex
-	data  = make(map[*http.Request]map[interface{}]interface{})
-	datat = make(map[*http.Request]int64)
-)
-
-// Set stores a value for a given key in a given request.
-func Set(r *http.Request, key, val interface{}) {
-	mutex.Lock()
-	if data[r] == nil {
-		data[r] = make(map[interface{}]interface{})
-		datat[r] = time.Now().Unix()
-	}
-	data[r][key] = val
-	mutex.Unlock()
-}
-
-// Get returns a value stored for a given key in a given request.
-func Get(r *http.Request, key interface{}) interface{} {
-	mutex.RLock()
-	if ctx := data[r]; ctx != nil {
-		value := ctx[key]
-		mutex.RUnlock()
-		return value
-	}
-	mutex.RUnlock()
-	return nil
-}
-
-// GetOk returns stored value and presence state like multi-value return of map access.
-func GetOk(r *http.Request, key interface{}) (interface{}, bool) {
-	mutex.RLock()
-	if _, ok := data[r]; ok {
-		value, ok := data[r][key]
-		mutex.RUnlock()
-		return value, ok
-	}
-	mutex.RUnlock()
-	return nil, false
-}
-
-// GetAll returns all stored values for the request as a map. Nil is returned for invalid requests.
-func GetAll(r *http.Request) map[interface{}]interface{} {
-	mutex.RLock()
-	if context, ok := data[r]; ok {
-		result := make(map[interface{}]interface{}, len(context))
-		for k, v := range context {
-			result[k] = v
-		}
-		mutex.RUnlock()
-		return result
-	}
-	mutex.RUnlock()
-	return nil
-}
-
-// GetAllOk returns all stored values for the request as a map and a boolean value that indicates if
-// the request was registered.
-func GetAllOk(r *http.Request) (map[interface{}]interface{}, bool) {
-	mutex.RLock()
-	context, ok := data[r]
-	result := make(map[interface{}]interface{}, len(context))
-	for k, v := range context {
-		result[k] = v
-	}
-	mutex.RUnlock()
-	return result, ok
-}
-
-// Delete removes a value stored for a given key in a given request.
-func Delete(r *http.Request, key interface{}) {
-	mutex.Lock()
-	if data[r] != nil {
-		delete(data[r], key)
-	}
-	mutex.Unlock()
-}
-
-// Clear removes all values stored for a given request.
-//
-// This is usually called by a handler wrapper to clean up request
-// variables at the end of a request lifetime. See ClearHandler().
-func Clear(r *http.Request) {
-	mutex.Lock()
-	clear(r)
-	mutex.Unlock()
-}
-
-// clear is Clear without the lock.
-func clear(r *http.Request) {
-	delete(data, r)
-	delete(datat, r)
-}
-
-// Purge removes request data stored for longer than maxAge, in seconds.
-// It returns the amount of requests removed.
-//
-// If maxAge <= 0, all request data is removed.
-//
-// This is only used for sanity check: in case context cleaning was not
-// properly set some request data can be kept forever, consuming an increasing
-// amount of memory. In case this is detected, Purge() must be called
-// periodically until the problem is fixed.
-func Purge(maxAge int) int {
-	mutex.Lock()
-	count := 0
-	if maxAge <= 0 {
-		count = len(data)
-		data = make(map[*http.Request]map[interface{}]interface{})
-		datat = make(map[*http.Request]int64)
-	} else {
-		min := time.Now().Unix() - int64(maxAge)
-		for r := range data {
-			if datat[r] < min {
-				clear(r)
-				count++
-			}
-		}
-	}
-	mutex.Unlock()
-	return count
-}
-
-// ClearHandler wraps an http.Handler and clears request values at the end
-// of a request lifetime.
-func ClearHandler(h http.Handler) http.Handler {
-	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		defer Clear(r)
-		h.ServeHTTP(w, r)
-	})
-}
diff --git a/config-generator/vendor/github.com/gorilla/context/doc.go b/config-generator/vendor/github.com/gorilla/context/doc.go
deleted file mode 100644
index 73c7400..0000000
--- a/config-generator/vendor/github.com/gorilla/context/doc.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2012 The Gorilla Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package context stores values shared during a request lifetime.
-
-For example, a router can set variables extracted from the URL and later
-application handlers can access those values, or it can be used to store
-sessions values to be saved at the end of a request. There are several
-others common uses.
-
-The idea was posted by Brad Fitzpatrick to the go-nuts mailing list:
-
-	http://groups.google.com/group/golang-nuts/msg/e2d679d303aa5d53
-
-Here's the basic usage: first define the keys that you will need. The key
-type is interface{} so a key can be of any type that supports equality.
-Here we define a key using a custom int type to avoid name collisions:
-
-	package foo
-
-	import (
-		"github.com/gorilla/context"
-	)
-
-	type key int
-
-	const MyKey key = 0
-
-Then set a variable. Variables are bound to an http.Request object, so you
-need a request instance to set a value:
-
-	context.Set(r, MyKey, "bar")
-
-The application can later access the variable using the same key you provided:
-
-	func MyHandler(w http.ResponseWriter, r *http.Request) {
-		// val is "bar".
-		val := context.Get(r, foo.MyKey)
-
-		// returns ("bar", true)
-		val, ok := context.GetOk(r, foo.MyKey)
-		// ...
-	}
-
-And that's all about the basic usage. We discuss some other ideas below.
-
-Any type can be stored in the context. To enforce a given type, make the key
-private and wrap Get() and Set() to accept and return values of a specific
-type:
-
-	type key int
-
-	const mykey key = 0
-
-	// GetMyKey returns a value for this package from the request values.
-	func GetMyKey(r *http.Request) SomeType {
-		if rv := context.Get(r, mykey); rv != nil {
-			return rv.(SomeType)
-		}
-		return nil
-	}
-
-	// SetMyKey sets a value for this package in the request values.
-	func SetMyKey(r *http.Request, val SomeType) {
-		context.Set(r, mykey, val)
-	}
-
-Variables must be cleared at the end of a request, to remove all values
-that were stored. This can be done in an http.Handler, after a request was
-served. Just call Clear() passing the request:
-
-	context.Clear(r)
-
-...or use ClearHandler(), which conveniently wraps an http.Handler to clear
-variables at the end of a request lifetime.
-
-The Routers from the packages gorilla/mux and gorilla/pat call Clear()
-so if you are using either of them you don't need to clear the context manually.
-*/
-package context
diff --git a/config-generator/vendor/github.com/gorilla/mux/.travis.yml b/config-generator/vendor/github.com/gorilla/mux/.travis.yml
deleted file mode 100644
index f4084bd..0000000
--- a/config-generator/vendor/github.com/gorilla/mux/.travis.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-language: go
-sudo: false
-
-matrix:
-  include:
-    - go: 1.2
-    - go: 1.3
-    - go: 1.4
-    - go: 1.5
-    - go: 1.6
-    - go: tip
-
-script:
-  - go get -t -v ./...
-  - diff -u <(echo -n) <(gofmt -d .)
-  - go tool vet .
-  - go test -v -race ./...
diff --git a/config-generator/vendor/github.com/gorilla/mux/README.md b/config-generator/vendor/github.com/gorilla/mux/README.md
index 960ef7c..94d396c 100644
--- a/config-generator/vendor/github.com/gorilla/mux/README.md
+++ b/config-generator/vendor/github.com/gorilla/mux/README.md
@@ -1,19 +1,44 @@
-mux
+gorilla/mux
 ===
 [![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
 [![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
 
+![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
+
 http://www.gorillatoolkit.org/pkg/mux
 
-Package `gorilla/mux` implements a request router and dispatcher.
+Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
+their respective handler.
 
 The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
 
+* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
 * Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
 * URL hosts and paths can have variables with an optional regular expression.
 * Registered URLs can be built, or "reversed", which helps maintaining references to resources.
 * Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
-* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
+
+---
+
+* [Install](#install)
+* [Examples](#examples)
+* [Matching Routes](#matching-routes)
+* [Listing Routes](#listing-routes)
+* [Static Files](#static-files)
+* [Registered URLs](#registered-urls)
+* [Full Example](#full-example)
+
+---
+
+## Install
+
+With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
+
+```sh
+go get -u github.com/gorilla/mux
+```
+
+## Examples
 
 Let's start registering a couple of URL paths and handlers:
 
@@ -41,12 +66,17 @@
 The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
 
 ```go
-vars := mux.Vars(request)
-category := vars["category"]
+func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
+	vars := mux.Vars(r)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintf(w, "Category: %v\n", vars["category"])
+}
 ```
 
 And this is all you need to know about the basic usage. More advanced options are explained below.
 
+### Matching Routes
+
 Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
 
 ```go
@@ -118,7 +148,7 @@
 ```go
 s.HandleFunc("/products/", ProductsHandler)
 s.HandleFunc("/products/{key}", ProductHandler)
-s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
 ```
 
 The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
@@ -138,6 +168,73 @@
 s.HandleFunc("/{key}/details", ProductDetailsHandler)
 ```
 
+### Listing Routes
+
+Routes on a mux can be listed using the Router.Walk method—useful for generating documentation:
+
+```go
+package main
+
+import (
+    "fmt"
+    "net/http"
+
+    "github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+    return
+}
+
+func main() {
+    r := mux.NewRouter()
+    r.HandleFunc("/", handler)
+    r.HandleFunc("/products", handler)
+    r.HandleFunc("/articles", handler)
+    r.HandleFunc("/articles/{id}", handler)
+    r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+        t, err := route.GetPathTemplate()
+        if err != nil {
+            return err
+        }
+        fmt.Println(t)
+        return nil
+    })
+    http.Handle("/", r)
+}
+```
+
+### Static Files
+
+Note that the path provided to `PathPrefix()` represents a "wildcard": calling
+`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+```go
+func main() {
+	var dir string
+
+	flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+	flag.Parse()
+	r := mux.NewRouter()
+
+	// This will serve files under http://localhost:8000/static/<filename>
+	r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+	srv := &http.Server{
+		Handler:      r,
+		Addr:         "127.0.0.1:8000",
+		// Good practice: enforce timeouts for servers you create!
+		WriteTimeout: 15 * time.Second,
+		ReadTimeout:  15 * time.Second,
+	}
+
+	log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Registered URLs
+
 Now let's see how to build registered URLs.
 
 Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
diff --git a/config-generator/vendor/github.com/gorilla/mux/context_gorilla.go b/config-generator/vendor/github.com/gorilla/mux/context_gorilla.go
new file mode 100644
index 0000000..d7adaa8
--- /dev/null
+++ b/config-generator/vendor/github.com/gorilla/mux/context_gorilla.go
@@ -0,0 +1,26 @@
+// +build !go1.7
+
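+// Pre-go1.7 variant: request-scoped values are stored in the external
+// gorilla/context package; see context_native.go for the go1.7+ version.
+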
+package mux
+
+import (
+	"net/http"
+
+	"github.com/gorilla/context"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+	return context.Get(r, key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+	if val == nil {
+		return r
+	}
+
+	context.Set(r, key, val)
+	return r
+}
+
+func contextClear(r *http.Request) {
+	context.Clear(r)
+}
diff --git a/config-generator/vendor/github.com/gorilla/mux/context_native.go b/config-generator/vendor/github.com/gorilla/mux/context_native.go
new file mode 100644
index 0000000..209cbea
--- /dev/null
+++ b/config-generator/vendor/github.com/gorilla/mux/context_native.go
@@ -0,0 +1,24 @@
+// +build go1.7
+
+package mux
+
+import (
+	"context"
+	"net/http"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+	return r.Context().Value(key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+	if val == nil {
+		return r
+	}
+
+	return r.WithContext(context.WithValue(r.Context(), key, val))
+}
+
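+// contextClear is a no-op on go1.7+: values live on the request's own
+// context and are released together with it.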
+func contextClear(r *http.Request) {
+	return
+}
diff --git a/config-generator/vendor/github.com/gorilla/mux/doc.go b/config-generator/vendor/github.com/gorilla/mux/doc.go
index 835f534..00daf4a 100644
--- a/config-generator/vendor/github.com/gorilla/mux/doc.go
+++ b/config-generator/vendor/github.com/gorilla/mux/doc.go
@@ -47,12 +47,21 @@
 	r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
 	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
 
+Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
+
+	r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
+
 The names are used to create a map of route variables which can be retrieved
 calling mux.Vars():
 
 	vars := mux.Vars(request)
 	category := vars["category"]
 
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
 And this is all you need to know about the basic usage. More advanced options
 are explained below.
 
@@ -136,6 +145,31 @@
 	// "/products/{key}/details"
 	s.HandleFunc("/{key}/details", ProductDetailsHandler)
 
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+	func main() {
+		var dir string
+
+		flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+		flag.Parse()
+		r := mux.NewRouter()
+
+		// This will serve files under http://localhost:8000/static/<filename>
+		r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+		srv := &http.Server{
+			Handler:      r,
+			Addr:         "127.0.0.1:8000",
+			// Good practice: enforce timeouts for servers you create!
+			WriteTimeout: 15 * time.Second,
+			ReadTimeout:  15 * time.Second,
+		}
+
+		log.Fatal(srv.ListenAndServe())
+	}
+
 Now let's see how to build registered URLs.
 
 Routes can be named. All routes that define a name can have their URLs built,
diff --git a/config-generator/vendor/github.com/gorilla/mux/mux.go b/config-generator/vendor/github.com/gorilla/mux/mux.go
index 94f5ddd..d66ec38 100644
--- a/config-generator/vendor/github.com/gorilla/mux/mux.go
+++ b/config-generator/vendor/github.com/gorilla/mux/mux.go
@@ -10,8 +10,7 @@
 	"net/http"
 	"path"
 	"regexp"
-
-	"github.com/gorilla/context"
+	"strings"
 )
 
 // NewRouter returns a new router instance.
@@ -50,8 +49,12 @@
 	strictSlash bool
 	// See Router.SkipClean(). This defines the flag for new routes.
 	skipClean bool
-	// If true, do not clear the request context after handling the request
+	// If true, do not clear the request context after handling the request.
+	// This has no effect when go1.7+ is used, since the context is stored
+	// on the request itself.
 	KeepContext bool
+	// See Router.UseEncodedPath(). This defines a flag for all routes.
+	useEncodedPath bool
 }
 
 // Match matches registered routes against the request.
@@ -76,8 +79,12 @@
 // mux.Vars(request).
 func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
 	if !r.skipClean {
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = getPath(req)
+		}
 		// Clean path to canonical form and redirect.
-		if p := cleanPath(req.URL.Path); p != req.URL.Path {
+		if p := cleanPath(path); p != path {
 
 			// Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
 			// This matches with fix in go 1.2 r.c. 4 for same problem.  Go Issue:
@@ -95,14 +102,14 @@
 	var handler http.Handler
 	if r.Match(req, &match) {
 		handler = match.Handler
-		setVars(req, match.Vars)
-		setCurrentRoute(req, match.Route)
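+		// on go1.7+ setVars/setCurrentRoute return a new *http.Request
+		// (values travel via Request.WithContext), so the returned value
+		// must replace req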
+		req = setVars(req, match.Vars)
+		req = setCurrentRoute(req, match.Route)
 	}
 	if handler == nil {
 		handler = http.NotFoundHandler()
 	}
 	if !r.KeepContext {
-		defer context.Clear(req)
+		defer contextClear(req)
 	}
 	handler.ServeHTTP(w, req)
 }
@@ -150,6 +157,21 @@
 	return r
 }
 
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+// This behavior has the drawback of needing to match routes against
+// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix)
+// to r.URL.Path will not affect routing when this flag is on and thus may
+// induce unintended behavior.
+//
+// If not called, the router will match the unencoded path to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to".
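+//
+// A minimal sketch of enabling it (route and handler are illustrative):
+//
+//	r := mux.NewRouter()
+//	r.UseEncodedPath()
+//	r.HandleFunc("/path/{var}/to", handler)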
+func (r *Router) UseEncodedPath() *Router {
+	r.useEncodedPath = true
+	return r
+}
+
 // ----------------------------------------------------------------------------
 // parentRoute
 // ----------------------------------------------------------------------------
@@ -187,7 +209,7 @@
 
 // NewRoute registers an empty route.
 func (r *Router) NewRoute() *Route {
-	route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean}
+	route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath}
 	r.routes = append(r.routes, route)
 	return route
 }
@@ -285,6 +307,9 @@
 		if err == SkipRouter {
 			continue
 		}
+		if err != nil {
+			return err
+		}
 		for _, sr := range t.matchers {
 			if h, ok := sr.(*Router); ok {
 				err := h.walk(walkFn, ancestors)
@@ -325,7 +350,7 @@
 
 // Vars returns the route variables for the current request, if any.
 func Vars(r *http.Request) map[string]string {
-	if rv := context.Get(r, varsKey); rv != nil {
+	if rv := contextGet(r, varsKey); rv != nil {
 		return rv.(map[string]string)
 	}
 	return nil
@@ -337,28 +362,46 @@
 // after the handler returns, unless the KeepContext option is set on the
 // Router.
 func CurrentRoute(r *http.Request) *Route {
-	if rv := context.Get(r, routeKey); rv != nil {
+	if rv := contextGet(r, routeKey); rv != nil {
 		return rv.(*Route)
 	}
 	return nil
 }
 
-func setVars(r *http.Request, val interface{}) {
-	if val != nil {
-		context.Set(r, varsKey, val)
-	}
+func setVars(r *http.Request, val interface{}) *http.Request {
+	return contextSet(r, varsKey, val)
 }
 
-func setCurrentRoute(r *http.Request, val interface{}) {
-	if val != nil {
-		context.Set(r, routeKey, val)
-	}
+func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
+	return contextSet(r, routeKey, val)
 }
 
 // ----------------------------------------------------------------------------
 // Helpers
 // ----------------------------------------------------------------------------
 
+// getPath returns the escaped path if possible, doing what
+// URL.EscapedPath() (added in go1.5) does.
+func getPath(req *http.Request) string {
+	if req.RequestURI != "" {
+		// Extract the path from RequestURI (which is escaped, unlike URL.Path)
+		// as detailed in https://golang.org/pkg/net/url/#URL;
+		// this is a server-side workaround for Go < 1.5.
+		// http://localhost/path/here?v=1 -> /path/here
+		path := req.RequestURI
+		path = strings.TrimPrefix(path, req.URL.Scheme+`://`)
+		path = strings.TrimPrefix(path, req.URL.Host)
+		if i := strings.LastIndex(path, "?"); i > -1 {
+			path = path[:i]
+		}
+		if i := strings.LastIndex(path, "#"); i > -1 {
+			path = path[:i]
+		}
+		return path
+	}
+	return req.URL.Path
+}
+
 // cleanPath returns the canonical path for p, eliminating . and .. elements.
 // Borrowed from the net/http package.
 func cleanPath(p string) string {
diff --git a/config-generator/vendor/github.com/gorilla/mux/regexp.go b/config-generator/vendor/github.com/gorilla/mux/regexp.go
index 08710bc..0189ad3 100644
--- a/config-generator/vendor/github.com/gorilla/mux/regexp.go
+++ b/config-generator/vendor/github.com/gorilla/mux/regexp.go
@@ -24,7 +24,7 @@
 // Previously we accepted only Python-like identifiers for variable
 // names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
 // name and pattern can't be empty, and names can't contain a colon.
-func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash bool) (*routeRegexp, error) {
+func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) {
 	// Check if it is well-formed.
 	idxs, errBraces := braceIndices(tpl)
 	if errBraces != nil {
@@ -109,16 +109,24 @@
 	if errCompile != nil {
 		return nil, errCompile
 	}
+
+	// Check for capturing groups which used to work in older versions
+	if reg.NumSubexp() != len(idxs)/2 {
+		panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
+			"Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
+	}
+
 	// Done!
 	return &routeRegexp{
-		template:    template,
-		matchHost:   matchHost,
-		matchQuery:  matchQuery,
-		strictSlash: strictSlash,
-		regexp:      reg,
-		reverse:     reverse.String(),
-		varsN:       varsN,
-		varsR:       varsR,
+		template:       template,
+		matchHost:      matchHost,
+		matchQuery:     matchQuery,
+		strictSlash:    strictSlash,
+		useEncodedPath: useEncodedPath,
+		regexp:         reg,
+		reverse:        reverse.String(),
+		varsN:          varsN,
+		varsR:          varsR,
 	}, nil
 }
 
@@ -133,6 +141,9 @@
 	matchQuery bool
 	// The strictSlash value defined on the route, but disabled if PathPrefix was used.
 	strictSlash bool
+	// Determines whether to use encoded path from getPath function or unencoded
+	// req.URL.Path for path matching
+	useEncodedPath bool
 	// Expanded regexp.
 	regexp *regexp.Regexp
 	// Reverse template.
@@ -149,8 +160,11 @@
 		if r.matchQuery {
 			return r.matchQueryString(req)
 		}
-
-		return r.regexp.MatchString(req.URL.Path)
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = getPath(req)
+		}
+		return r.regexp.MatchString(path)
 	}
 
 	return r.regexp.MatchString(getHost(req))
@@ -253,14 +267,18 @@
 			extractVars(host, matches, v.host.varsN, m.Vars)
 		}
 	}
+	path := req.URL.Path
+	if r.useEncodedPath {
+		path = getPath(req)
+	}
 	// Store path variables.
 	if v.path != nil {
-		matches := v.path.regexp.FindStringSubmatchIndex(req.URL.Path)
+		matches := v.path.regexp.FindStringSubmatchIndex(path)
 		if len(matches) > 0 {
-			extractVars(req.URL.Path, matches, v.path.varsN, m.Vars)
+			extractVars(path, matches, v.path.varsN, m.Vars)
 			// Check if we should redirect.
 			if v.path.strictSlash {
-				p1 := strings.HasSuffix(req.URL.Path, "/")
+				p1 := strings.HasSuffix(path, "/")
 				p2 := strings.HasSuffix(v.path.template, "/")
 				if p1 != p2 {
 					u, _ := url.Parse(req.URL.String())
@@ -299,14 +317,7 @@
 }
 
 func extractVars(input string, matches []int, names []string, output map[string]string) {
-	matchesCount := 0
-	prevEnd := -1
-	for i := 2; i < len(matches) && matchesCount < len(names); i += 2 {
-		if prevEnd < matches[i+1] {
-			value := input[matches[i]:matches[i+1]]
-			output[names[matchesCount]] = value
-			prevEnd = matches[i+1]
-			matchesCount++
-		}
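+	// FindStringSubmatchIndex yields start/end offset pairs with the full
+	// match at pair 0, so variable i corresponds to submatch i+1, i.e.
+	// input[matches[2*i+2]:matches[2*i+3]]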
+	for i, name := range names {
+		output[name] = input[matches[2*i+2]:matches[2*i+3]]
 	}
 }
diff --git a/config-generator/vendor/github.com/gorilla/mux/route.go b/config-generator/vendor/github.com/gorilla/mux/route.go
index 6c53f9f..9221915 100644
--- a/config-generator/vendor/github.com/gorilla/mux/route.go
+++ b/config-generator/vendor/github.com/gorilla/mux/route.go
@@ -29,6 +29,8 @@
 	// If true, when the path pattern is "/path//to", accessing "/path//to"
 	// will not redirect
 	skipClean bool
+	// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
+	useEncodedPath bool
 	// If true, this route never matches: it is only used to build URLs.
 	buildOnly bool
 	// The name used to build URLs.
@@ -151,14 +153,14 @@
 	}
 	r.regexp = r.getRegexpGroup()
 	if !matchHost && !matchQuery {
-		if len(tpl) == 0 || tpl[0] != '/' {
+		if len(tpl) > 0 && tpl[0] != '/' {
 			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
 		}
 		if r.regexp.path != nil {
 			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
 		}
 	}
-	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash)
+	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath)
 	if err != nil {
 		return err
 	}
diff --git a/config-generator/vendor/github.com/kelseyhightower/envconfig/.travis.yml b/config-generator/vendor/github.com/kelseyhightower/envconfig/.travis.yml
deleted file mode 100644
index e15301a..0000000
--- a/config-generator/vendor/github.com/kelseyhightower/envconfig/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-language: go
-
-go:
-  - 1.4
-  - 1.5
-  - 1.6
-  - tip
diff --git a/config-generator/vendor/github.com/kelseyhightower/envconfig/README.md b/config-generator/vendor/github.com/kelseyhightower/envconfig/README.md
index dd516a2..09de74b 100644
--- a/config-generator/vendor/github.com/kelseyhightower/envconfig/README.md
+++ b/config-generator/vendor/github.com/kelseyhightower/envconfig/README.md
@@ -87,20 +87,30 @@
 
 ```Go
 type Specification struct {
-    MultiWordVar string `envconfig:"multi_word_var"`
-    DefaultVar   string `default:"foobar"`
-    RequiredVar  string `required:"true"`
-    IgnoredVar   string `ignored:"true"`
+    ManualOverride1 string `envconfig:"manual_override_1"`
+    DefaultVar      string `default:"foobar"`
+    RequiredVar     string `required:"true"`
+    IgnoredVar      string `ignored:"true"`
+    AutoSplitVar    string `split_words:"true"`
 }
 ```
 
-Envconfig will process value for `MultiWordVar` by populating it with the
-value for `MYAPP_MULTI_WORD_VAR`.
+Envconfig has automatic support for CamelCased struct elements when the
+`split_words:"true"` tag is supplied. Without this tag, `AutoSplitVar` above
+would look for an environment variable called `MYAPP_AUTOSPLITVAR`. With the
+setting applied it will look for `MYAPP_AUTO_SPLIT_VAR`. Note that numbers
+will get globbed into the previous word. If the setting does not do the
+right thing, you may use a manual override.
+
+Envconfig will process the value for `ManualOverride1` by populating it with the
+value for `MYAPP_MANUAL_OVERRIDE_1`. Without this struct tag, it would have
+instead looked up `MYAPP_MANUALOVERRIDE1`. With the `split_words:"true"` tag
+it would have looked up `MYAPP_MANUAL_OVERRIDE1`.
 
 ```Bash
-export MYAPP_MULTI_WORD_VAR="this will be the value"
+export MYAPP_MANUAL_OVERRIDE_1="this will be the value"
 
-# export MYAPP_MULTIWORDVAR="and this will not"
+# export MYAPP_MANUALOVERRIDE1="and this will not"
 ```
 
 If envconfig can't find an environment variable value for `MYAPP_DEFAULTVAR`,
@@ -135,6 +145,7 @@
   * int8, int16, int32, int64
   * bool
   * float32, float64
+  * [encoding.TextUnmarshaler](https://golang.org/pkg/encoding/#TextUnmarshaler)
 
 Embedded structs using these fields are also supported.
 
@@ -159,3 +170,6 @@
     Address IPDecoder `envconfig:"DNS_SERVER"`
 }
 ```
+
+Also, envconfig will use a `Set(string) error` method like from the
+[flag.Value](https://godoc.org/flag#Value) interface if implemented.
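+
+As a hedged sketch (the `CommaList` type is illustrative, not part of the
+library, and assumes `strings` is imported), a comma-separated value could be
+decoded via `Set`:
+
+```Go
+type CommaList []string
+
+// Set splits the raw environment value on commas.
+func (c *CommaList) Set(value string) error {
+	*c = strings.Split(value, ",")
+	return nil
+}
+```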
diff --git a/config-generator/vendor/github.com/kelseyhightower/envconfig/env_os.go b/config-generator/vendor/github.com/kelseyhightower/envconfig/env_os.go
new file mode 100644
index 0000000..a6a014a
--- /dev/null
+++ b/config-generator/vendor/github.com/kelseyhightower/envconfig/env_os.go
@@ -0,0 +1,7 @@
+// +build appengine
+
+package envconfig
+
+import "os"
+
+var lookupEnv = os.LookupEnv
diff --git a/config-generator/vendor/github.com/kelseyhightower/envconfig/env_syscall.go b/config-generator/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
new file mode 100644
index 0000000..9d98085
--- /dev/null
+++ b/config-generator/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
@@ -0,0 +1,7 @@
+// +build !appengine
+
+package envconfig
+
+import "syscall"
+
+var lookupEnv = syscall.Getenv
diff --git a/config-generator/vendor/github.com/kelseyhightower/envconfig/envconfig.go b/config-generator/vendor/github.com/kelseyhightower/envconfig/envconfig.go
index 1daf389..3ad5e7d 100644
--- a/config-generator/vendor/github.com/kelseyhightower/envconfig/envconfig.go
+++ b/config-generator/vendor/github.com/kelseyhightower/envconfig/envconfig.go
@@ -5,12 +5,13 @@
 package envconfig
 
 import (
+	"encoding"
 	"errors"
 	"fmt"
 	"reflect"
+	"regexp"
 	"strconv"
 	"strings"
-	"syscall"
 	"time"
 )
 
@@ -24,83 +25,164 @@
 	FieldName string
 	TypeName  string
 	Value     string
+	Err       error
 }
 
-// A Decoder is a type that knows how to de-serialize environment variables
-// into itself.
+// Decoder has the same semantics as Setter, but takes higher precedence.
+// It is provided for historical compatibility.
 type Decoder interface {
 	Decode(value string) error
 }
 
+// Setter is implemented by types that can self-deserialize values.
+// Any type that implements flag.Value also implements Setter.
+type Setter interface {
+	Set(value string) error
+}
+
 func (e *ParseError) Error() string {
-	return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s", e.KeyName, e.FieldName, e.Value, e.TypeName)
+	return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s. details: %[5]s", e.KeyName, e.FieldName, e.Value, e.TypeName, e.Err)
+}
+
+// varInfo maintains information about the configuration variable
+type varInfo struct {
+	Name  string
+	Alt   string
+	Key   string
+	Field reflect.Value
+	Tags  reflect.StructTag
+}
+
+// gatherInfo gathers information about the specified struct.
+func gatherInfo(prefix string, spec interface{}) ([]varInfo, error) {
+	expr := regexp.MustCompile("([^A-Z]+|[A-Z][^A-Z]+|[A-Z]+)")
+	s := reflect.ValueOf(spec)
+
+	if s.Kind() != reflect.Ptr {
+		return nil, ErrInvalidSpecification
+	}
+	s = s.Elem()
+	if s.Kind() != reflect.Struct {
+		return nil, ErrInvalidSpecification
+	}
+	typeOfSpec := s.Type()
+
+	// over allocate an info array, we will extend if needed later
+	infos := make([]varInfo, 0, s.NumField())
+	for i := 0; i < s.NumField(); i++ {
+		f := s.Field(i)
+		ftype := typeOfSpec.Field(i)
+		if !f.CanSet() || ftype.Tag.Get("ignored") == "true" {
+			continue
+		}
+
+		for f.Kind() == reflect.Ptr {
+			if f.IsNil() {
+				if f.Type().Elem().Kind() != reflect.Struct {
+					// nil pointer to a non-struct: leave it alone
+					break
+				}
+				// nil pointer to struct: create a zero instance
+				f.Set(reflect.New(f.Type().Elem()))
+			}
+			f = f.Elem()
+		}
+
+		// Capture information about the config variable
+		info := varInfo{
+			Name:  ftype.Name,
+			Field: f,
+			Tags:  ftype.Tag,
+			Alt:   strings.ToUpper(ftype.Tag.Get("envconfig")),
+		}
+
+		// Default to the field name as the env var name (will be upcased)
+		info.Key = info.Name
+
+		// Best effort to un-pick camel casing as separate words
+		if ftype.Tag.Get("split_words") == "true" {
+			words := expr.FindAllStringSubmatch(ftype.Name, -1)
+			if len(words) > 0 {
+				var name []string
+				for _, words := range words {
+					name = append(name, words[0])
+				}
+
+				info.Key = strings.Join(name, "_")
+			}
+		}
+		if info.Alt != "" {
+			info.Key = info.Alt
+		}
+		if prefix != "" {
+			info.Key = fmt.Sprintf("%s_%s", prefix, info.Key)
+		}
+		info.Key = strings.ToUpper(info.Key)
+		infos = append(infos, info)
+
+		if f.Kind() == reflect.Struct {
+			// honor Decode if present
+			if decoderFrom(f) == nil && setterFrom(f) == nil && textUnmarshaler(f) == nil {
+				innerPrefix := prefix
+				if !ftype.Anonymous {
+					innerPrefix = info.Key
+				}
+
+				embeddedPtr := f.Addr().Interface()
+				embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr)
+				if err != nil {
+					return nil, err
+				}
+				infos = append(infos[:len(infos)-1], embeddedInfos...)
+
+				continue
+			}
+		}
+	}
+	return infos, nil
 }
 
 // Process populates the specified struct based on environment variables
 func Process(prefix string, spec interface{}) error {
-	s := reflect.ValueOf(spec)
+	infos, err := gatherInfo(prefix, spec)
 
-	if s.Kind() != reflect.Ptr {
-		return ErrInvalidSpecification
-	}
-	s = s.Elem()
-	if s.Kind() != reflect.Struct {
-		return ErrInvalidSpecification
-	}
-	typeOfSpec := s.Type()
-	for i := 0; i < s.NumField(); i++ {
-		f := s.Field(i)
-		if !f.CanSet() || typeOfSpec.Field(i).Tag.Get("ignored") == "true" {
-			continue
-		}
+	for _, info := range infos {
 
-		if typeOfSpec.Field(i).Anonymous && f.Kind() == reflect.Struct {
-			embeddedPtr := f.Addr().Interface()
-			if err := Process(prefix, embeddedPtr); err != nil {
-				return err
-			}
-			f.Set(reflect.ValueOf(embeddedPtr).Elem())
-		}
-
-		alt := typeOfSpec.Field(i).Tag.Get("envconfig")
-		fieldName := typeOfSpec.Field(i).Name
-		if alt != "" {
-			fieldName = alt
-		}
-		key := strings.ToUpper(fmt.Sprintf("%s_%s", prefix, fieldName))
 		// `os.Getenv` cannot differentiate between an explicitly set empty value
 		// and an unset value. `os.LookupEnv` is preferred to `syscall.Getenv`,
-		// but it is only available in go1.5 or newer.
-		value, ok := syscall.Getenv(key)
-		if !ok && alt != "" {
-			key := strings.ToUpper(fieldName)
-			value, ok = syscall.Getenv(key)
+		// but it is only available in go1.5 or newer. We're using Go build tags
+		// here to use os.LookupEnv for >=go1.5
+		value, ok := lookupEnv(info.Key)
+		if !ok && info.Alt != "" {
+			value, ok = lookupEnv(info.Alt)
 		}
 
-		def := typeOfSpec.Field(i).Tag.Get("default")
+		def := info.Tags.Get("default")
 		if def != "" && !ok {
 			value = def
 		}
 
-		req := typeOfSpec.Field(i).Tag.Get("required")
+		req := info.Tags.Get("required")
 		if !ok && def == "" {
 			if req == "true" {
-				return fmt.Errorf("required key %s missing value", key)
+				return fmt.Errorf("required key %s missing value", info.Key)
 			}
 			continue
 		}
 
-		err := processField(value, f)
+		err := processField(value, info.Field)
 		if err != nil {
 			return &ParseError{
-				KeyName:   key,
-				FieldName: fieldName,
-				TypeName:  f.Type().String(),
+				KeyName:   info.Key,
+				FieldName: info.Name,
+				TypeName:  info.Field.Type().String(),
 				Value:     value,
+				Err:       err,
 			}
 		}
 	}
-	return nil
+
+	return err
 }
 
 // MustProcess is the same as Process but panics if an error occurs
@@ -117,6 +199,15 @@
 	if decoder != nil {
 		return decoder.Decode(value)
 	}
+	// look for Set method if Decode not defined
+	setter := setterFrom(field)
+	if setter != nil {
+		return setter.Set(value)
+	}
+
+	if t := textUnmarshaler(field); t != nil {
+		return t.UnmarshalText([]byte(value))
+	}
 
 	if typ.Kind() == reflect.Ptr {
 		typ = typ.Elem()
@@ -179,23 +270,29 @@
 	return nil
 }
 
-func decoderFrom(field reflect.Value) Decoder {
-	if field.CanInterface() {
-		dec, ok := field.Interface().(Decoder)
-		if ok {
-			return dec
-		}
+func interfaceFrom(field reflect.Value, fn func(interface{}, *bool)) {
+	// it may be impossible for a struct field to fail this check
+	if !field.CanInterface() {
+		return
 	}
-
-	// also check if pointer-to-type implements Decoder,
-	// and we can get a pointer to our field
-	if field.CanAddr() {
-		field = field.Addr()
-		dec, ok := field.Interface().(Decoder)
-		if ok {
-			return dec
-		}
+	var ok bool
+	fn(field.Interface(), &ok)
+	if !ok && field.CanAddr() {
+		fn(field.Addr().Interface(), &ok)
 	}
+}
 
-	return nil
+func decoderFrom(field reflect.Value) (d Decoder) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { d, *ok = v.(Decoder) })
+	return d
+}
+
+func setterFrom(field reflect.Value) (s Setter) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { s, *ok = v.(Setter) })
+	return s
+}
+
+func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) })
+	return t
 }
diff --git a/config-generator/vendor/github.com/kelseyhightower/envconfig/usage.go b/config-generator/vendor/github.com/kelseyhightower/envconfig/usage.go
new file mode 100644
index 0000000..4870237
--- /dev/null
+++ b/config-generator/vendor/github.com/kelseyhightower/envconfig/usage.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2016 Kelsey Hightower and others. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"text/template"
+)
+
+const (
+	// DefaultListFormat constant to use to display usage in a list format
+	DefaultListFormat = `This application is configured via the environment. The following environment
+variables can be used:
+{{range .}}
+{{usage_key .}}
+  [description] {{usage_description .}}
+  [type]        {{usage_type .}}
+  [default]     {{usage_default .}}
+  [required]    {{usage_required .}}{{end}}
+`
+	// DefaultTableFormat constant to use to display usage in a tabular format
+	DefaultTableFormat = `This application is configured via the environment. The following environment
+variables can be used:
+
+KEY	TYPE	DEFAULT	REQUIRED	DESCRIPTION
+{{range .}}{{usage_key .}}	{{usage_type .}}	{{usage_default .}}	{{usage_required .}}	{{usage_description .}}
+{{end}}`
+)
+
+var (
+	decoderType     = reflect.TypeOf((*Decoder)(nil)).Elem()
+	setterType      = reflect.TypeOf((*Setter)(nil)).Elem()
+	unmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+func implementsInterface(t reflect.Type) bool {
+	return t.Implements(decoderType) ||
+		reflect.PtrTo(t).Implements(decoderType) ||
+		t.Implements(setterType) ||
+		reflect.PtrTo(t).Implements(setterType) ||
+		t.Implements(unmarshalerType) ||
+		reflect.PtrTo(t).Implements(unmarshalerType)
+}
+
+// toTypeDescription converts Go types into a human readable description
+func toTypeDescription(t reflect.Type) string {
+	switch t.Kind() {
+	case reflect.Array, reflect.Slice:
+		return fmt.Sprintf("Comma-separated list of %s", toTypeDescription(t.Elem()))
+	case reflect.Ptr:
+		return toTypeDescription(t.Elem())
+	case reflect.Struct:
+		if implementsInterface(t) && t.Name() != "" {
+			return t.Name()
+		}
+		return ""
+	case reflect.String:
+		name := t.Name()
+		if name != "" && name != "string" {
+			return name
+		}
+		return "String"
+	case reflect.Bool:
+		name := t.Name()
+		if name != "" && name != "bool" {
+			return name
+		}
+		return "True or False"
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "int") {
+			return name
+		}
+		return "Integer"
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "uint") {
+			return name
+		}
+		return "Unsigned Integer"
+	case reflect.Float32, reflect.Float64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "float") {
+			return name
+		}
+		return "Float"
+	}
+	return fmt.Sprintf("%+v", t)
+}
+
+// Usage writes usage information to stdout using the default header and table format
+func Usage(prefix string, spec interface{}) error {
+	// The default is to output the usage information as a table
+	// Create tabwriter instance to support table output
+	tabs := tabwriter.NewWriter(os.Stdout, 1, 0, 4, ' ', 0)
+
+	err := Usagef(prefix, spec, tabs, DefaultTableFormat)
+	tabs.Flush()
+	return err
+}
+
+// Usagef writes usage information to the specified io.Writer using the specified template format
+func Usagef(prefix string, spec interface{}, out io.Writer, format string) error {
+
+	// Specify the default usage template functions
+	functions := template.FuncMap{
+		"usage_key":         func(v varInfo) string { return v.Key },
+		"usage_description": func(v varInfo) string { return v.Tags.Get("desc") },
+		"usage_type":        func(v varInfo) string { return toTypeDescription(v.Field.Type()) },
+		"usage_default":     func(v varInfo) string { return v.Tags.Get("default") },
+		"usage_required": func(v varInfo) (string, error) {
+			req := v.Tags.Get("required")
+			if req != "" {
+				reqB, err := strconv.ParseBool(req)
+				if err != nil {
+					return "", err
+				}
+				if reqB {
+					req = "true"
+				}
+			}
+			return req, nil
+		},
+	}
+
+	tmpl, err := template.New("envconfig").Funcs(functions).Parse(format)
+	if err != nil {
+		return err
+	}
+
+	return Usaget(prefix, spec, out, tmpl)
+}
+
+// Usaget writes usage information to the specified io.Writer using the specified template
+func Usaget(prefix string, spec interface{}, out io.Writer, tmpl *template.Template) error {
+	// gather first
+	infos, err := gatherInfo(prefix, spec)
+	if err != nil {
+		return err
+	}
+
+	return tmpl.Execute(out, infos)
+}
diff --git a/config-generator/vendor/vendor.json b/config-generator/vendor/vendor.json
new file mode 100644
index 0000000..c09c103
--- /dev/null
+++ b/config-generator/vendor/vendor.json
@@ -0,0 +1,25 @@
+{
+	"comment": "",
+	"ignore": "test",
+	"package": [
+		{
+			"checksumSHA1": "dGXnnR7ZhsrZNnEqFimk6q7YCqs=",
+			"path": "github.com/Sirupsen/logrus",
+			"revision": "61e43dc76f7ee59a82bdf3d71033dc12bea4c77d",
+			"revisionTime": "2017-01-13T01:19:11Z"
+		},
+		{
+			"checksumSHA1": "F5dR3/i70EhSIMZfeIV+H8/PtvM=",
+			"path": "github.com/gorilla/mux",
+			"revision": "392c28fe23e1c45ddba891b0320b3b5df220beea",
+			"revisionTime": "2017-01-18T13:43:44Z"
+		},
+		{
+			"checksumSHA1": "pNria08/hqW7nnWb0erRBMdJU3M=",
+			"path": "github.com/kelseyhightower/envconfig",
+			"revision": "4069f29f08928c54bcb1fdf632b31b1bb54b4fdb",
+			"revisionTime": "2017-01-13T19:16:37Z"
+		}
+	],
+	"rootPath": "gerrit.opencord.org/maas/config-generator"
+}
diff --git a/harvester/Dockerfile b/harvester/Dockerfile
index aa304a8..f0233ec 100644
--- a/harvester/Dockerfile
+++ b/harvester/Dockerfile
@@ -11,19 +11,15 @@
 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ## See the License for the specific language governing permissions and
 ## limitations under the License.
-FROM golang:1.6-alpine
+FROM golang:1.7-alpine
 MAINTAINER Open Networking Laboratory <info@onlab.us>
 
-RUN apk --update add openssh-client git bind
+RUN apk --update add bind
 
-RUN go get github.com/tools/godep
-ADD . /go/src/gerrit.opencord.com/maas/harvester
-
-WORKDIR /go/src/gerrit.opencord.com/maas/harvester
-RUN /go/bin/godep restore || true
-
+RUN mkdir /service
 WORKDIR /go
-RUN go install gerrit.opencord.com/maas/harvester
+ADD . /go/src/gerrit.opencord.com/maas/harvester
+RUN go build -o /service/entry-point gerrit.opencord.com/maas/harvester
 
 LABEL org.label-schema.name="harvester" \
       org.label-schema.description="Provides DHCP havesting and insertion into DNS" \
@@ -31,4 +27,5 @@
       org.label-schema.vendor="Open Networking Laboratory" \
       org.label-schema.schema-version="1.0"
 
-ENTRYPOINT ["/go/bin/harvester"]
+WORKDIR /service
+ENTRYPOINT ["/service/entry-point"]
diff --git a/harvester/Dockerfile.release b/harvester/Dockerfile.release
new file mode 100644
index 0000000..b6d636d
--- /dev/null
+++ b/harvester/Dockerfile.release
@@ -0,0 +1,28 @@
+## Copyright 2017 Open Networking Laboratory
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+FROM alpine:3.5
+MAINTAINER Open Networking Laboratory <info@onlab.us>
+
+RUN apk --update add bind
+
+ADD entry-point /service/entry-point
+
+LABEL org.label-schema.name="harvester" \
+      org.label-schema.description="Provides DHCP harvesting and insertion into DNS" \
+      org.label-schema.vcs-url="https://gerrit.opencord.org/maas" \
+      org.label-schema.vendor="Open Networking Laboratory" \
+      org.label-schema.schema-version="1.0"
+
+WORKDIR /service
+ENTRYPOINT ["/service/entry-point"]
diff --git a/harvester/Godeps/Godeps.json b/harvester/Godeps/Godeps.json
deleted file mode 100644
index bda40ad..0000000
--- a/harvester/Godeps/Godeps.json
+++ /dev/null
@@ -1,54 +0,0 @@
-{
-	"ImportPath": "gerrit.opencord.org/maas/cord-provisioner",
-	"GoVersion": "go1.6",
-	"GodepVersion": "v72",
-	"Deps": [
-		{
-			"ImportPath": "github.com/tatsushid/go-fastping",
-			"Rev": "d7bb493dee3e090e2ffb6914adddf17c1e7c026c"
-		},
-		{
-			"ImportPath": "github.com/gorilla/context",
-			"Comment": "v1.1-4-gaed02d1",
-			"Rev": "aed02d124ae4a0e94fea4541c8effd05bf0c8296"
-		},
-		{
-			"ImportPath": "github.com/gorilla/mux",
-			"Comment": "v1.1-13-g9fa818a",
-			"Rev": "9fa818a44c2bf1396a17f9d5a3c0f6dd39d2ff8e"
-		},
-		{
-			"ImportPath": "github.com/kelseyhightower/envconfig",
-			"Comment": "1.1.0-17-g91921eb",
-			"Rev": "91921eb4cf999321cdbeebdba5a03555800d493b"
-		},
-		{
-			"ImportPath": "github.com/Sirupsen/logrus",
-			"Rev": "f3cfb454f4c209e6668c95216c4744b8fddb2356"
-		},
-		{
-			"ImportPath": "golang.org/x/net/icmp",
-			"Rev": "88c1a61b3dd4d98651f24775ca288fac5d2544ce"
-		},
-		{
-			"ImportPath": "golang.org/x/net/internal/iana",
-			"Rev": "88c1a61b3dd4d98651f24775ca288fac5d2544ce"
-		},
-		{
-			"ImportPath": "golang.org/x/net/internal/netreflect",
-			"Rev": "88c1a61b3dd4d98651f24775ca288fac5d2544ce"
-		},
-		{
-			"ImportPath": "golang.org/x/net/internal/nettest",
-			"Rev": "88c1a61b3dd4d98651f24775ca288fac5d2544ce"
-		},
-		{
-			"ImportPath": "golang.org/x/net/ipv4",
-			"Rev": "88c1a61b3dd4d98651f24775ca288fac5d2544ce"
-		},
-		{
-			"ImportPath": "golang.org/x/net/ipv6",
-			"Rev": "88c1a61b3dd4d98651f24775ca288fac5d2544ce"
-		}
-	]
-}
diff --git a/harvester/Makefile b/harvester/Makefile
new file mode 100644
index 0000000..39e2868
--- /dev/null
+++ b/harvester/Makefile
@@ -0,0 +1,38 @@
+IMAGE_NAME=cord-dhcp-harvester
+BINARY=entry-point
+BUILD_TAG=build
+PACKAGE_TAG=candidate
+
+.PHONY: help
+help:
+	@echo "build     - create the binary"
+	@echo "package   - package the binary into a docker container"
+	@echo "clean     - remove tempory files and build artifacts"
+	@echo "help      - this message"
+
+BUILD_DATE=$(shell date -u +%Y-%m-%dT%TZ)
+VCS_REF=$(shell git log --pretty=format:%H -n 1)
+VCS_REF_DATE=$(shell git log --pretty=format:%cd --date=format:%FT%T%z -n 1)
+BRANCHES=$(shell repo --color=never --no-pager branches 2>/dev/null | wc -l)
+STATUS=$(shell repo --color=never --no-pager status . | tail -n +2 | wc -l)
+MODIFIED=$(shell test $(BRANCHES) -eq 0 && test $(STATUS) -eq 0 || echo "[modified]")
+BRANCH=$(shell repo --color=never --no-pager info -l -o | grep 'Manifest branch:' | awk '{print $$NF}')
+VERSION=$(BRANCH)$(MODIFIED)
+
+.PHONY: build
+build:
+	docker build -t $(IMAGE_NAME):$(BUILD_TAG) .
+
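+# package is a two-stage build: the binary is compiled inside the build
+# image, copied out of a temporary container with `docker cp`, and baked
+# into a slim release image built from Dockerfile.release.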
+.PHONY: package
+package:
+	$(eval BUILD_ID := $(shell docker create $(IMAGE_NAME):$(BUILD_TAG)))
+	$(eval BINDIR := $(shell mktemp -d))
+	docker cp $(BUILD_ID):/service/$(BINARY) $(BINDIR)/$(BINARY)
+	cp Dockerfile.release $(BINDIR)
+	docker build -f $(BINDIR)/Dockerfile.release -t $(IMAGE_NAME):$(PACKAGE_TAG) --label org.label-schema.build-date=$(BUILD_DATE) --label org.label-schema.vcs-ref=$(VCS_REF) --label org.label-schema.vcs-ref-date=$(VCS_REF_DATE) --label org.label-schema.version=$(VERSION) $(BINDIR)
+	docker rm -f $(BUILD_ID)
+	rm -r $(BINDIR)
+
+.PHONY: clean
+clean:
+	@echo ""
diff --git a/harvester/harvester.go b/harvester/harvester.go
index 7e10599..e0314e6 100644
--- a/harvester/harvester.go
+++ b/harvester/harvester.go
@@ -14,11 +14,13 @@
 package main
 
 import (
+	"flag"
 	"fmt"
 	"github.com/Sirupsen/logrus"
 	"github.com/gorilla/mux"
 	"github.com/kelseyhightower/envconfig"
 	"net/http"
+	"os"
 	"regexp"
 	"strconv"
 	"strings"
@@ -27,6 +29,8 @@
 	"time"
 )
 
+const appName = "HARVESTER"
+
 // application application configuration and internal state
 type application struct {
 	Port               int           `default:"4246" desc:"port on which the service will listen for requests"`
@@ -50,6 +54,7 @@
 	BadClientNames     []string      `default:"localhost" envconfig:"BAD_CLIENT_NAMES" desc:"list of invalid hostnames for clients"`
 	ClientNameTemplate string        `default:"UKN-{{with $x:=.HardwareAddress|print}}{{regex $x \":\" \"\"}}{{end}}" envconfig:"CLIENT_NAME_TEMPLATE" desc:"template for generated host name"`
 
+	appFlags           *flag.FlagSet      `ignored:"true"`
 	log                *logrus.Logger     `ignored:"true"`
 	interchange        sync.RWMutex       `ignored:"true"`
 	leases             map[string]*Lease  `ignored:"true"`
@@ -62,12 +67,25 @@
 }
 
 func main() {
+
 	// initialize application state
 	app := &application{
 		log:      logrus.New(),
+		appFlags: flag.NewFlagSet("", flag.ContinueOnError),
 		requests: make(chan *chan uint, 100),
 	}
 
+	app.appFlags.Usage = func() {
+		envconfig.Usage(appName, app)
+	}
+	if err := app.appFlags.Parse(os.Args[1:]); err != nil {
+		if err != flag.ErrHelp {
+			os.Exit(1)
+		} else {
+			return
+		}
+	}
+
 	// process and validate the application configuration
 	err := envconfig.Process("HARVESTER", app)
 	if err != nil {
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/harvester/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..f2c2bc2
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,66 @@
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/LICENSE b/harvester/vendor/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/README.md b/harvester/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000..206c746
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,433 @@
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
+
+**Seeing weird case-sensitive problems?** See [this
+issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
+This change has been reverted. I apologize for causing this. I greatly
+underestimated the impact this would have. Logrus strives for stability and
+backwards compatibility and failed to provide that.
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+  }).Info("A walrus appears")
+}
+```
+
+Note that it's completely api-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+  "os"
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // Log as JSON instead of the default ASCII formatter.
+  log.SetFormatter(&log.JSONFormatter{})
+
+  // Output to stdout instead of the default stderr, could also be a file.
+  log.SetOutput(os.Stdout)
+
+  // Only log the warning severity or above.
+  log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 122,
+  }).Warn("The group's number increased tremendously!")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 100,
+  }).Fatal("The ice breaks!")
+
+  // A common pattern is to re-use fields between logging statements by re-using
+  // the logrus.Entry returned from WithFields()
+  contextLogger := log.WithFields(log.Fields{
+    "common": "this is a common field",
+    "other": "I also should be logged always",
+  })
+
+  contextLogger.Info("I'll be logged with common and other field")
+  contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+  // The API for setting attributes is a little different than the package level
+  // exported logger. See Godoc.
+  log.Out = os.Stderr
+
+  log.WithFields(logrus.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint you should add a field, however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
+Note: the Syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+| Hook  | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
+
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force non-colored output even when there is a TTY, set
+    the `DisableColors` field to `true`.
+* `logrus.JSONFormatter`. Logs fields as JSON.
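+
+As a sketch, the vendored `JSONFormatter` also accepts a `FieldMap` for
+renaming the default keys (see `json_formatter.go` later in this change;
+the `@timestamp` name is illustrative):
+
+```go
+log.SetFormatter(&log.JSONFormatter{
+  FieldMap: log.FieldMap{
+    log.FieldKeyTime: "@timestamp",
+  },
+})
+```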
+
+Third party logging formatters:
+
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your own formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` for information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("failed to marshal fields to JSON: %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+    // create a stdlib log.Logger that writes to
+    // logrus.Logger.
+    ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
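+
+If `info` is not the level you want, the vendored source also provides
+`WriterLevel` (see `writer.go` later in this change); a minimal sketch,
+assuming logrus is imported unaliased:
+
+```go
+w := logger.WriterLevel(logrus.WarnLevel)
+defer w.Close()
+fmt.Fprintln(w, "this line is logged at warn level")
+```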
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can set each logger's level, hook, and formatter from a config file, so loggers are generated with different configurations in different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps it with spf13/Viper to load configuration with fangs! It simplifies Logrus configuration using some of the behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+logger, hook := NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```go
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
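+
+A fuller, runnable sketch (the handler body is illustrative):
+
+```go
+package main
+
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+  log.RegisterExitHandler(func() {
+    // flush buffers, close connections, etc.
+  })
+  log.Fatal("Bye.") // logs, runs the handler, then calls os.Exit(1)
+}
+```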
+
+#### Thread safety
+
+By default, Logger is protected by a mutex for concurrent writes; this mutex is
+held while hooks are called and the log is written.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to
+disable it.
+
+Situations when locking is not needed include:
+
+* You have no hooks registered, or calling hooks is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+     (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
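+
+A minimal sketch of case 2, assuming logrus is imported unaliased (the file
+path is illustrative):
+
+```go
+f, err := os.OpenFile("/var/log/app.log", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644)
+if err != nil {
+  logrus.Fatal(err)
+}
+logger := logrus.New()
+logger.Out = f
+logger.SetNoLock() // O_APPEND writes under 4k are atomic on Linux
+```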
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/alt_exit.go b/harvester/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..b4c9e84
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://bitbucket.org/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to gracefully shut down. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/doc.go b/harvester/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 0000000..dddd5f8
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+  package main
+
+  import (
+    log "github.com/Sirupsen/logrus"
+  )
+
+  func main() {
+    log.WithFields(log.Fields{
+      "animal": "walrus",
+      "number": 1,
+      "size":   10,
+    }).Info("A walrus appears")
+  }
+
+Output:
+  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/entry.go b/harvester/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000..4edbe7a
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,275 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"sync"
+	"time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+	Logger *Logger
+
+	// Contains all the fields set by the user.
+	Data Fields
+
+	// Time at which the log entry was created
+	Time time.Time
+
+	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+	Level Level
+
+	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
+	Message string
+
+	// When formatter is called in entry.log(), a Buffer may be set to entry
+	Buffer *bytes.Buffer
+}
+
+func NewEntry(logger *Logger) *Entry {
+	return &Entry{
+		Logger: logger,
+		// Default is three fields, give a little extra room
+		Data: make(Fields, 5),
+	}
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		return "", err
+	}
+	str := string(serialized)
+	return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+	return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+	data := make(Fields, len(entry.Data)+len(fields))
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	for k, v := range fields {
+		data[k] = v
+	}
+	return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+	var buffer *bytes.Buffer
+	entry.Time = time.Now()
+	entry.Level = level
+	entry.Message = msg
+
+	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+		entry.Logger.mu.Unlock()
+	}
+	buffer = bufferPool.Get().(*bytes.Buffer)
+	buffer.Reset()
+	defer bufferPool.Put(buffer)
+	entry.Buffer = buffer
+	serialized, err := entry.Logger.Formatter.Format(&entry)
+	entry.Buffer = nil
+	if err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+		entry.Logger.mu.Unlock()
+	} else {
+		entry.Logger.mu.Lock()
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+		entry.Logger.mu.Unlock()
+	}
+
+	// To avoid Entry#log() returning a value that only would make sense for
+	// panic() to use in Entry#Panic(), we avoid the allocation by checking
+	// directly here.
+	if level <= PanicLevel {
+		panic(&entry)
+	}
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.log(DebugLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+	entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.log(InfoLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.log(WarnLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+	entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.log(ErrorLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.log(FatalLevel, fmt.Sprint(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.log(PanicLevel, fmt.Sprint(args...))
+	}
+	panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.Debug(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.Info(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+	entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.Warn(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+	entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.Error(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.Fatal(fmt.Sprintf(format, args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.Panic(fmt.Sprintf(format, args...))
+	}
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.Debug(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.Info(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+	entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.Warn(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+	entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.Error(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.Fatal(entry.sprintlnn(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.Panic(entry.sprintlnn(args...))
+	}
+}
+
+// sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+	msg := fmt.Sprintln(args...)
+	return msg[:len(msg)-1]
+}
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/exported.go b/harvester/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000..9a0120a
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+	"io"
+)
+
+var (
+	// std is the name of the standard logger in stdlib `log`
+	std = New()
+)
+
+func StandardLogger() *Logger {
+	return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+	return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+	return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+	return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+	std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+	std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+	std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+	std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+	std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+	std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+	std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+	std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+	std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+	std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+	std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+	std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+	std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+	std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+	std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+	std.Fatalln(args...)
+}
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/formatter.go b/harvester/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..b5fbe93
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code wasn't there doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// Would just silently drop the user provided level. Instead with this code
+// it'll be logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+	if t, ok := data["time"]; ok {
+		data["fields.time"] = t
+	}
+
+	if m, ok := data["msg"]; ok {
+		data["fields.msg"] = m
+	}
+
+	if l, ok := data["level"]; ok {
+		data["fields.level"] = l
+	}
+}
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/hooks.go b/harvester/vendor/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers; you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+	Levels() []Level
+	Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+	for _, level := range hook.Levels() {
+		hooks[level] = append(hooks[level], hook)
+	}
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+	for _, hook := range hooks[level] {
+		if err := hook.Fire(entry); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/json_formatter.go b/harvester/vendor/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..266554e
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,74 @@
+package logrus
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+type fieldKey string
+type FieldMap map[fieldKey]string
+
+const (
+	FieldKeyMsg   = "msg"
+	FieldKeyLevel = "level"
+	FieldKeyTime  = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// FieldMap allows users to customize the names of keys for various fields.
+	// As an example:
+	// formatter := &JSONFormatter{
+	//   	FieldMap: FieldMap{
+	// 		 FieldKeyTime: "@timestamp",
+	// 		 FieldKeyLevel: "@level",
+	// 		 FieldKeyMsg: "@message",
+	//    },
+	// }
+	FieldMap FieldMap
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields, len(entry.Data)+3)
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/Sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+	prefixFieldClashes(data)
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+
+	serialized, err := json.Marshal(data)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+	}
+	return append(serialized, '\n'), nil
+}
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/logger.go b/harvester/vendor/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000..b769f3d
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,308 @@
+package logrus
+
+import (
+	"io"
+	"os"
+	"sync"
+)
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it default which is `os.Stderr`. You can also set this to
+	// something more adventurous, such as logging to Kafka.
+	Out io.Writer
+	// Hooks for the logger instance. These allow firing events based on logging
+	// levels and log entries. For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before logged to Out. The
+	// included formatters are `TextFormatter` and `JSONFormatter` for which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but to a file it wouldn't. You can easily implement your
+	// own that implements the `Formatter` interface, see the `README` or included
+	// formatters for examples.
+	Formatter Formatter
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged. `logrus.Debug` is useful in a debug or verbose environment.
+	Level Level
+	// Used to sync writing to the log. Locking is enabled by Default
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+}
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//    var log = &Logger{
+//      Out: os.Stderr,
+//      Formatter: new(JSONFormatter),
+//      Hooks: make(LevelHooks),
+//      Level: logrus.DebugLevel,
+//    }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:       os.Stderr,
+		Formatter: new(TextFormatter),
+		Hooks:     make(LevelHooks),
+		Level:     InfoLevel,
+	}
+}
+
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry, note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
+}
+
+// Add an error as single field to the log entry.  All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infof(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalf(format, args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debug(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Info(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Info(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Error(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatal(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panic(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infoln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalln(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+// When a file is opened in append mode, it's safe to write to it
+// concurrently (for messages smaller than 4k on Linux). In these cases the
+// user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/logrus.go b/harvester/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..e596691
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+	switch level {
+	case DebugLevel:
+		return "debug"
+	case InfoLevel:
+		return "info"
+	case WarnLevel:
+		return "warning"
+	case ErrorLevel:
+		return "error"
+	case FatalLevel:
+		return "fatal"
+	case PanicLevel:
+		return "panic"
+	}
+
+	return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+	switch strings.ToLower(lvl) {
+	case "panic":
+		return PanicLevel, nil
+	case "fatal":
+		return FatalLevel, nil
+	case "error":
+		return ErrorLevel, nil
+	case "warn", "warning":
+		return WarnLevel, nil
+	case "info":
+		return InfoLevel, nil
+	case "debug":
+		return DebugLevel, nil
+	}
+
+	var l Level
+	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+	PanicLevel,
+	FatalLevel,
+	ErrorLevel,
+	WarnLevel,
+	InfoLevel,
+	DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+	// PanicLevel level, highest level of severity. Logs and then calls panic with the
+	// message passed to Debug, Info, ...
+	PanicLevel Level = iota
+	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+	// logging level is set to Panic.
+	FatalLevel
+	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface; this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+}
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/harvester/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
new file mode 100644
index 0000000..1960169
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package logrus
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	return true
+}
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/harvester/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000..5f6be4d
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/harvester/vendor/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000..308160c
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/harvester/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..329038f
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,22 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var termios Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/harvester/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 0000000..a3c6f6e
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris,!appengine
+
+package logrus
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal() bool {
+	_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+	return err == nil
+}
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/harvester/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..3727e8a
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,!appengine
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/text_formatter.go b/harvester/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..076de5d
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,166 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"sort"
+	"strings"
+	"time"
+)
+
+const (
+	nocolor = 0
+	red     = 31
+	green   = 32
+	yellow  = 33
+	blue    = 34
+	gray    = 37
+)
+
+var (
+	baseTimestamp time.Time
+	isTerminal    bool
+)
+
+func init() {
+	baseTimestamp = time.Now()
+	isTerminal = IsTerminal()
+}
+
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time passed since beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+	DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	var b *bytes.Buffer
+	keys := make([]string, 0, len(entry.Data))
+	for k := range entry.Data {
+		keys = append(keys, k)
+	}
+
+	if !f.DisableSorting {
+		sort.Strings(keys)
+	}
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	prefixFieldClashes(entry.Data)
+
+	isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+	isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+	if isColored {
+		f.printColored(b, entry, keys, timestampFormat)
+	} else {
+		if !f.DisableTimestamp {
+			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+		}
+		f.appendKeyValue(b, "level", entry.Level.String())
+		if entry.Message != "" {
+			f.appendKeyValue(b, "msg", entry.Message)
+		}
+		for _, key := range keys {
+			f.appendKeyValue(b, key, entry.Data[key])
+		}
+	}
+
+	b.WriteByte('\n')
+	return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+	var levelColor int
+	switch entry.Level {
+	case DebugLevel:
+		levelColor = gray
+	case WarnLevel:
+		levelColor = yellow
+	case ErrorLevel, FatalLevel, PanicLevel:
+		levelColor = red
+	default:
+		levelColor = blue
+	}
+
+	levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+	if f.DisableTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+	} else if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+	} else {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+	}
+	for _, k := range keys {
+		v := entry.Data[k]
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+		f.appendValue(b, v)
+	}
+}
+
+func needsQuoting(text string) bool {
+	for _, ch := range text {
+		if !((ch >= 'a' && ch <= 'z') ||
+			(ch >= 'A' && ch <= 'Z') ||
+			(ch >= '0' && ch <= '9') ||
+			ch == '-' || ch == '.') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+
+	b.WriteString(key)
+	b.WriteByte('=')
+	f.appendValue(b, value)
+	b.WriteByte(' ')
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+	switch value := value.(type) {
+	case string:
+		if !needsQuoting(value) {
+			b.WriteString(value)
+		} else {
+			fmt.Fprintf(b, "%q", value)
+		}
+	case error:
+		errmsg := value.Error()
+		if !needsQuoting(errmsg) {
+			b.WriteString(errmsg)
+		} else {
+			fmt.Fprintf(b, "%q", errmsg)
+		}
+	default:
+		fmt.Fprint(b, value)
+	}
+}
diff --git a/harvester/vendor/github.com/Sirupsen/logrus/writer.go b/harvester/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000..f74d2aa
--- /dev/null
+++ b/harvester/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,53 @@
+package logrus
+
+import (
+	"bufio"
+	"io"
+	"runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+	return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+	reader, writer := io.Pipe()
+
+	var printFunc func(args ...interface{})
+	switch level {
+	case DebugLevel:
+		printFunc = logger.Debug
+	case InfoLevel:
+		printFunc = logger.Info
+	case WarnLevel:
+		printFunc = logger.Warn
+	case ErrorLevel:
+		printFunc = logger.Error
+	case FatalLevel:
+		printFunc = logger.Fatal
+	case PanicLevel:
+		printFunc = logger.Panic
+	default:
+		printFunc = logger.Print
+	}
+
+	go logger.writerScanner(reader, printFunc)
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		printFunc(scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		logger.Errorf("Error while reading from Writer: %s", err)
+	}
+	reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
diff --git a/config-generator/vendor/github.com/gorilla/context/LICENSE b/harvester/vendor/github.com/gorilla/mux/LICENSE
similarity index 100%
rename from config-generator/vendor/github.com/gorilla/context/LICENSE
rename to harvester/vendor/github.com/gorilla/mux/LICENSE
diff --git a/harvester/vendor/github.com/gorilla/mux/README.md b/harvester/vendor/github.com/gorilla/mux/README.md
new file mode 100644
index 0000000..94d396c
--- /dev/null
+++ b/harvester/vendor/github.com/gorilla/mux/README.md
@@ -0,0 +1,339 @@
+gorilla/mux
+===
+[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
+
+![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
+
+http://www.gorillatoolkit.org/pkg/mux
+
+Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
+their respective handler.
+
+The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
+
+* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
+* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
+* URL hosts and paths can have variables with an optional regular expression.
+* Registered URLs can be built, or "reversed", which helps maintain references to resources.
+* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
+
+---
+
+* [Install](#install)
+* [Examples](#examples)
+* [Matching Routes](#matching-routes)
+* [Listing Routes](#listing-routes)
+* [Static Files](#static-files)
+* [Registered URLs](#registered-urls)
+* [Full Example](#full-example)
+
+---
+
+## Install
+
+With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
+
+```sh
+go get -u github.com/gorilla/mux
+```
+
+## Examples
+
+Let's start registering a couple of URL paths and handlers:
+
+```go
+func main() {
+	r := mux.NewRouter()
+	r.HandleFunc("/", HomeHandler)
+	r.HandleFunc("/products", ProductsHandler)
+	r.HandleFunc("/articles", ArticlesHandler)
+	http.Handle("/", r)
+}
+```
+
+Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
+
+Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/products/{key}", ProductHandler)
+r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
+
+```go
+func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
+	vars := mux.Vars(r)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintf(w, "Category: %v\n", vars["category"])
+}
+```
+
+And this is all you need to know about the basic usage. More advanced options are explained below.
+
+### Matching Routes
+
+Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
+
+```go
+r := mux.NewRouter()
+// Only matches if domain is "www.example.com".
+r.Host("www.example.com")
+// Matches a dynamic subdomain.
+r.Host("{subdomain:[a-z]+}.domain.com")
+```
+
+There are several other matchers that can be added. To match path prefixes:
+
+```go
+r.PathPrefix("/products/")
+```
+
+...or HTTP methods:
+
+```go
+r.Methods("GET", "POST")
+```
+
+...or URL schemes:
+
+```go
+r.Schemes("https")
+```
+
+...or header values:
+
+```go
+r.Headers("X-Requested-With", "XMLHttpRequest")
+```
+
+...or query values:
+
+```go
+r.Queries("key", "value")
+```
+
+...or to use a custom matcher function:
+
+```go
+r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+	return r.ProtoMajor == 0
+})
+```
+
+...and finally, it is possible to combine several matchers in a single route:
+
+```go
+r.HandleFunc("/products", ProductsHandler).
+  Host("www.example.com").
+  Methods("GET").
+  Schemes("http")
+```
+
+Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
+
+```go
+r := mux.NewRouter()
+s := r.Host("www.example.com").Subrouter()
+```
+
+Then register routes in the subrouter:
+
+```go
+s.HandleFunc("/products/", ProductsHandler)
+s.HandleFunc("/products/{key}", ProductHandler)
+s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place, and then parts of the app can register their paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
+
+```go
+r := mux.NewRouter()
+s := r.PathPrefix("/products").Subrouter()
+// "/products/"
+s.HandleFunc("/", ProductsHandler)
+// "/products/{key}/"
+s.HandleFunc("/{key}/", ProductHandler)
+// "/products/{key}/details"
+s.HandleFunc("/{key}/details", ProductDetailsHandler)
+```
+
+### Listing Routes
+
+Routes on a mux can be listed using the Router.Walk method, which is useful for generating documentation:
+
+```go
+package main
+
+import (
+    "fmt"
+    "net/http"
+
+    "github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+    return
+}
+
+func main() {
+    r := mux.NewRouter()
+    r.HandleFunc("/", handler)
+    r.HandleFunc("/products", handler)
+    r.HandleFunc("/articles", handler)
+    r.HandleFunc("/articles/{id}", handler)
+    r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+        t, err := route.GetPathTemplate()
+        if err != nil {
+            return err
+        }
+        fmt.Println(t)
+        return nil
+    })
+    http.Handle("/", r)
+}
+```
+
+### Static Files
+
+Note that the path provided to `PathPrefix()` represents a "wildcard": calling
+`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+```go
+func main() {
+	var dir string
+
+	flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+	flag.Parse()
+	r := mux.NewRouter()
+
+	// This will serve files under http://localhost:8000/static/<filename>
+	r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+	srv := &http.Server{
+		Handler:      r,
+		Addr:         "127.0.0.1:8000",
+		// Good practice: enforce timeouts for servers you create!
+		WriteTimeout: 15 * time.Second,
+		ReadTimeout:  15 * time.Second,
+	}
+
+	log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Registered URLs
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+  Name("article")
+```
+
+To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
+
+```go
+url, err := r.Get("article").URL("category", "technology", "id", "42")
+```
+
+...and the result will be a `url.URL` with the following path:
+
+```
+"/articles/technology/42"
+```
+
+This also works for host variables:
+
+```go
+r := mux.NewRouter()
+r.Host("{subdomain}.domain.com").
+  Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// url.String() will be "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42")
+```
+
+All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
+
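+A build-only route is declared with `BuildOnly()`. A minimal sketch:
+
+```go
+r := mux.NewRouter()
+r.Path("/reports/{id:[0-9]+}").BuildOnly().Name("report")
+
+// u.Path will be "/reports/7"; the route itself never matches requests.
+u, err := r.Get("report").URL("id", "7")
+```
+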
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+```go
+r.HeadersRegexp("Content-Type", "application/(text|json)")
+```
+
+...and the route will match both requests with a Content-Type of `application/json` as well as `application/text`.
+
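+Header matchers combine with the other matchers on a route. For example (a sketch; `DataHandler` is a hypothetical handler):
+
+```go
+r.HandleFunc("/data", DataHandler).
+  HeadersRegexp("Content-Type", "application/(text|json)").
+  Methods("POST")
+```
+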
+There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
+
+```go
+// "http://news.domain.com/"
+host, err := r.Get("article").URLHost("subdomain", "news")
+
+// "/articles/technology/42"
+path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+```
+
+And if you use subrouters, host and path defined separately can be built as well:
+
+```go
+r := mux.NewRouter()
+s := r.Host("{subdomain}.domain.com").Subrouter()
+s.Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42")
+```
+
+## Full Example
+
+Here's a complete, runnable example of a small `mux`-based server:
+
+```go
+package main
+
+import (
+	"log"
+	"net/http"
+
+	"github.com/gorilla/mux"
+)
+
+func YourHandler(w http.ResponseWriter, r *http.Request) {
+	w.Write([]byte("Gorilla!\n"))
+}
+
+func main() {
+	r := mux.NewRouter()
+	// Routes consist of a path and a handler function.
+	r.HandleFunc("/", YourHandler)
+
+	// Bind to a port and pass our router in
+	log.Fatal(http.ListenAndServe(":8000", r))
+}
+```
+
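+Run the program and request http://localhost:8000/ to receive the "Gorilla!" response.
+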
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/harvester/vendor/github.com/gorilla/mux/context_gorilla.go b/harvester/vendor/github.com/gorilla/mux/context_gorilla.go
new file mode 100644
index 0000000..d7adaa8
--- /dev/null
+++ b/harvester/vendor/github.com/gorilla/mux/context_gorilla.go
@@ -0,0 +1,26 @@
+// +build !go1.7
+
+package mux
+
+import (
+	"net/http"
+
+	"github.com/gorilla/context"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+	return context.Get(r, key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+	if val == nil {
+		return r
+	}
+
+	context.Set(r, key, val)
+	return r
+}
+
+func contextClear(r *http.Request) {
+	context.Clear(r)
+}
diff --git a/harvester/vendor/github.com/gorilla/mux/context_native.go b/harvester/vendor/github.com/gorilla/mux/context_native.go
new file mode 100644
index 0000000..209cbea
--- /dev/null
+++ b/harvester/vendor/github.com/gorilla/mux/context_native.go
@@ -0,0 +1,24 @@
+// +build go1.7
+
+package mux
+
+import (
+	"context"
+	"net/http"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+	return r.Context().Value(key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+	if val == nil {
+		return r
+	}
+
+	return r.WithContext(context.WithValue(r.Context(), key, val))
+}
+
+func contextClear(r *http.Request) {
+	// No-op: with go1.7+ the context is stored on the request itself,
+	// so there is nothing to clear.
+}
diff --git a/harvester/vendor/github.com/gorilla/mux/doc.go b/harvester/vendor/github.com/gorilla/mux/doc.go
new file mode 100644
index 0000000..00daf4a
--- /dev/null
+++ b/harvester/vendor/github.com/gorilla/mux/doc.go
@@ -0,0 +1,240 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+	* Requests can be matched based on URL host, path, path prefix, schemes,
+	  header and query values, HTTP methods or using custom matchers.
+	* URL hosts and paths can have variables with an optional regular
+	  expression.
+	* Registered URLs can be built, or "reversed", which helps maintaining
+	  references to resources.
+	* Routes can be used as subrouters: nested routes are only tested if the
+	  parent route matches. This is useful to define groups of routes that
+	  share common conditions like a host, a path prefix or other repeated
+	  attributes. As a bonus, this optimizes request matching.
+	* It implements the http.Handler interface so it is compatible with the
+	  standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+	func main() {
+		r := mux.NewRouter()
+		r.HandleFunc("/", HomeHandler)
+		r.HandleFunc("/products", ProductsHandler)
+		r.HandleFunc("/articles", ArticlesHandler)
+		http.Handle("/", r)
+	}
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/products/{key}", ProductHandler)
+	r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
+
+	r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
+
+The names are used to create a map of route variables which can be retrieved
+calling mux.Vars():
+
+	vars := mux.Vars(request)
+	category := vars["category"]
+
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+	r := mux.NewRouter()
+	// Only matches if domain is "www.example.com".
+	r.Host("www.example.com")
+	// Matches a dynamic subdomain.
+	r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+	r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+	r.Methods("GET", "POST")
+
+...or URL schemes:
+
+	r.Schemes("https")
+
+...or header values:
+
+	r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+	r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+	r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+		return r.ProtoMajor == 0
+	})
+
+...and finally, it is possible to combine several matchers in a single route:
+
+	r.HandleFunc("/products", ProductsHandler).
+	  Host("www.example.com").
+	  Methods("GET").
+	  Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place, and then different parts of the app can
+register their paths relative to a given subrouter.
+
+There's one more thing about subrouters. When a subrouter has a path prefix,
+the inner routes use it as a base for their paths:
+
+	r := mux.NewRouter()
+	s := r.PathPrefix("/products").Subrouter()
+	// "/products/"
+	s.HandleFunc("/", ProductsHandler)
+	// "/products/{key}/"
+	s.HandleFunc("/{key}/", ProductHandler)
+	// "/products/{key}/details"
+	s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+	func main() {
+		var dir string
+
+		flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+		flag.Parse()
+		r := mux.NewRouter()
+
+		// This will serve files under http://localhost:8000/static/<filename>
+		r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+		srv := &http.Server{
+			Handler:      r,
+			Addr:         "127.0.0.1:8000",
+			// Good practice: enforce timeouts for servers you create!
+			WriteTimeout: 15 * time.Second,
+			ReadTimeout:  15 * time.Second,
+		}
+
+		log.Fatal(srv.ListenAndServe())
+	}
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+	  Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+	url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+	"/articles/technology/42"
+
+This also works for host variables:
+
+	r := mux.NewRouter()
+	r.Host("{subdomain}.domain.com").
+	  Path("/articles/{category}/{id:[0-9]+}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// url.String() will be "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
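+A build-only route is declared with BuildOnly(). A minimal sketch:
+
+	r := mux.NewRouter()
+	r.Path("/reports/{id:[0-9]+}").BuildOnly().Name("report")
+
+	// u.Path will be "/reports/7"; the route itself never matches requests.
+	u, err := r.Get("report").URL("id", "7")
+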
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+	r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match both requests with a Content-Type of `application/json` as well as
+`application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+	// "http://news.domain.com/"
+	host, err := r.Get("article").URLHost("subdomain", "news")
+
+	// "/articles/technology/42"
+	path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+	r := mux.NewRouter()
+	s := r.Host("{subdomain}.domain.com").Subrouter()
+	s.Path("/articles/{category}/{id:[0-9]+}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42")
+*/
+package mux
diff --git a/harvester/vendor/github.com/gorilla/mux/mux.go b/harvester/vendor/github.com/gorilla/mux/mux.go
new file mode 100644
index 0000000..d66ec38
--- /dev/null
+++ b/harvester/vendor/github.com/gorilla/mux/mux.go
@@ -0,0 +1,542 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"path"
+	"regexp"
+	"strings"
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+	return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+//     var router = mux.NewRouter()
+//
+//     func main() {
+//         http.Handle("/", router)
+//     }
+//
+// Or, for Google App Engine, register it in an init() function:
+//
+//     func init() {
+//         http.Handle("/", router)
+//     }
+//
+// This will send all incoming requests to the router.
+type Router struct {
+	// Configurable Handler to be used when no route matches.
+	NotFoundHandler http.Handler
+	// Parent route, if this is a subrouter.
+	parent parentRoute
+	// Routes to be matched, in order.
+	routes []*Route
+	// Routes by name for URL building.
+	namedRoutes map[string]*Route
+	// See Router.StrictSlash(). This defines the flag for new routes.
+	strictSlash bool
+	// See Router.SkipClean(). This defines the flag for new routes.
+	skipClean bool
+	// If true, do not clear the request context after handling the request.
+	// This has no effect when go1.7+ is used, since the context is stored
+	// on the request itself.
+	KeepContext bool
+	// see Router.UseEncodedPath(). This defines a flag for all routes.
+	useEncodedPath bool
+}
+
+// Match matches registered routes against the request.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+	for _, route := range r.routes {
+		if route.Match(req, match) {
+			return true
+		}
+	}
+
+	// Closest match for a router (includes sub-routers)
+	if r.NotFoundHandler != nil {
+		match.Handler = r.NotFoundHandler
+		return true
+	}
+	return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	if !r.skipClean {
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = getPath(req)
+		}
+		// Clean path to canonical form and redirect.
+		if p := cleanPath(path); p != path {
+
+			// Added 3 lines (Philip Schlump) - the redirect was dropping the
+			// query string and fragment. This matches the fix in go 1.2rc4 for
+			// the same problem. Go Issue:
+			// http://code.google.com/p/go/issues/detail?id=5252
+			url := *req.URL
+			url.Path = p
+			p = url.String()
+
+			w.Header().Set("Location", p)
+			w.WriteHeader(http.StatusMovedPermanently)
+			return
+		}
+	}
+	var match RouteMatch
+	var handler http.Handler
+	if r.Match(req, &match) {
+		handler = match.Handler
+		req = setVars(req, match.Vars)
+		req = setCurrentRoute(req, match.Route)
+	}
+	if handler == nil {
+		handler = http.NotFoundHandler()
+	}
+	if !r.KeepContext {
+		defer contextClear(req)
+	}
+	handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
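+//
+// For example (a sketch; handler is any func(http.ResponseWriter, *http.Request)):
+//
+//     r := mux.NewRouter().StrictSlash(true)
+//     // With strict slash, a request for "/path" redirects to "/path/".
+//     r.HandleFunc("/path/", handler)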
+func (r *Router) StrictSlash(value bool) *Router {
+	r.strictSlash = value
+	return r
+}
+
+// SkipClean defines the path cleaning behaviour for new routes. The initial
+// value is false. Users should be careful about which routes are not cleaned.
+//
+// When true, if the route path is "/path//to", it will remain with the double
+// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
+//
+// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
+// become /fetch/http/xkcd.com/534
+func (r *Router) SkipClean(value bool) *Router {
+	r.skipClean = value
+	return r
+}
+
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+// This behavior has the drawback of needing to match routes against
+// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix)
+// to r.URL.Path will not affect routing when this flag is on and thus may
+// induce unintended behavior.
+//
+// If not called, the router will match the unencoded path to the routes.
+// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
+func (r *Router) UseEncodedPath() *Router {
+	r.useEncodedPath = true
+	return r
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Router) getNamedRoutes() map[string]*Route {
+	if r.namedRoutes == nil {
+		if r.parent != nil {
+			r.namedRoutes = r.parent.getNamedRoutes()
+		} else {
+			r.namedRoutes = make(map[string]*Route)
+		}
+	}
+	return r.namedRoutes
+}
+
+// getRegexpGroup returns regexp definitions from the parent route, if any.
+func (r *Router) getRegexpGroup() *routeRegexpGroup {
+	if r.parent != nil {
+		return r.parent.getRegexpGroup()
+	}
+	return nil
+}
+
+func (r *Router) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	return m
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+	route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath}
+	r.routes = append(r.routes, route)
+	return route
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+	return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+	*http.Request)) *Route {
+	return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+	return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+	return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+	return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+	return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+	return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+	return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+	return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+	return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+	return r.NewRoute().BuildVarsFunc(f)
+}
+
+// Walk walks the router and all its sub-routers, calling walkFn for each route
+// in the tree. The routes are walked in the order they were added. Sub-routers
+// are explored depth-first.
+func (r *Router) Walk(walkFn WalkFunc) error {
+	return r.walk(walkFn, []*Route{})
+}
+
+// SkipRouter is used as a return value from WalkFuncs to indicate that the
+// router that Walk is about to descend into should be skipped.
+var SkipRouter = errors.New("skip this router")
+
+// WalkFunc is the type of the function called for each route visited by Walk.
+// At every invocation, it is given the current route and router, and a list
+// of ancestor routes that lead to the current route.
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
+
+func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
+	for _, t := range r.routes {
+		if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" {
+			continue
+		}
+
+		err := walkFn(t, r, ancestors)
+		if err == SkipRouter {
+			continue
+		}
+		if err != nil {
+			return err
+		}
+		for _, sr := range t.matchers {
+			if h, ok := sr.(*Router); ok {
+				err := h.walk(walkFn, ancestors)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		if h, ok := t.handler.(*Router); ok {
+			ancestors = append(ancestors, t)
+			err := h.walk(walkFn, ancestors)
+			if err != nil {
+				return err
+			}
+			ancestors = ancestors[:len(ancestors)-1]
+		}
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
+// RouteMatch stores information about a matched route.
+type RouteMatch struct {
+	Route   *Route
+	Handler http.Handler
+	Vars    map[string]string
+}
+
+type contextKey int
+
+const (
+	varsKey contextKey = iota
+	routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
+func Vars(r *http.Request) map[string]string {
+	if rv := contextGet(r, varsKey); rv != nil {
+		return rv.(map[string]string)
+	}
+	return nil
+}
+
+// CurrentRoute returns the matched route for the current request, if any.
+// This only works when called inside the handler of the matched route
+// because the matched route is stored in the request context which is cleared
+// after the handler returns, unless the KeepContext option is set on the
+// Router.
+func CurrentRoute(r *http.Request) *Route {
+	if rv := contextGet(r, routeKey); rv != nil {
+		return rv.(*Route)
+	}
+	return nil
+}
+
+func setVars(r *http.Request, val interface{}) *http.Request {
+	return contextSet(r, varsKey, val)
+}
+
+func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
+	return contextSet(r, routeKey, val)
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// getPath returns the escaped path if possible, doing what URL.EscapedPath()
+// (added in go1.5) does.
+func getPath(req *http.Request) string {
+	if req.RequestURI != "" {
+		// Extract the path from RequestURI (which is escaped unlike URL.Path)
+		// as detailed in https://golang.org/pkg/net/url/#URL,
+		// as a server-side workaround for Go < 1.5.
+		// http://localhost/path/here?v=1 -> /path/here
+		path := req.RequestURI
+		path = strings.TrimPrefix(path, req.URL.Scheme+`://`)
+		path = strings.TrimPrefix(path, req.URL.Host)
+		if i := strings.LastIndex(path, "?"); i > -1 {
+			path = path[:i]
+		}
+		if i := strings.LastIndex(path, "#"); i > -1 {
+			path = path[:i]
+		}
+		return path
+	}
+	return req.URL.Path
+}
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+// Borrowed from the net/http package.
+func cleanPath(p string) string {
+	if p == "" {
+		return "/"
+	}
+	if p[0] != '/' {
+		p = "/" + p
+	}
+	np := path.Clean(p)
+	// path.Clean removes trailing slash except for root;
+	// put the trailing slash back if necessary.
+	if p[len(p)-1] == '/' && np != "/" {
+		np += "/"
+	}
+
+	return np
+}
+
+// uniqueVars returns an error if two slices contain duplicated strings.
+func uniqueVars(s1, s2 []string) error {
+	for _, v1 := range s1 {
+		for _, v2 := range s2 {
+			if v1 == v2 {
+				return fmt.Errorf("mux: duplicated route variable %q", v2)
+			}
+		}
+	}
+	return nil
+}
+
+// checkPairs returns the count of strings passed in, and an error if
+// the count is not an even number.
+func checkPairs(pairs ...string) (int, error) {
+	length := len(pairs)
+	if length%2 != 0 {
+		return length, fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+	}
+	return length, nil
+}
+
+// mapFromPairsToString converts variadic string parameters to a
+// string to string map.
+func mapFromPairsToString(pairs ...string) (map[string]string, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]string, length/2)
+	for i := 0; i < length; i += 2 {
+		m[pairs[i]] = pairs[i+1]
+	}
+	return m, nil
+}
+
+// mapFromPairsToRegex converts variadic string parameters to a
+// string to regex map.
+func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]*regexp.Regexp, length/2)
+	for i := 0; i < length; i += 2 {
+		regex, err := regexp.Compile(pairs[i+1])
+		if err != nil {
+			return nil, err
+		}
+		m[pairs[i]] = regex
+	}
+	return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+	for _, v := range arr {
+		if v == value {
+			return true
+		}
+	}
+	return false
+}
+
+// matchMapWithString returns true if the given key/value pairs exist in a given map.
+func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != "" {
+			// If value was defined as an empty string we only check that the
+			// key exists. Otherwise we also check for equality.
+			valueExists := false
+			for _, value := range values {
+				if v == value {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// matchMapWithRegex returns true if the given keys exist in the map and at
+// least one value for each key matches the corresponding compiled regexp.
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != nil {
+			// If the value was defined as nil we only check that the key
+			// exists. Otherwise we also check that some value matches the regexp.
+			valueExists := false
+			for _, value := range values {
+				if v.MatchString(value) {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
diff --git a/harvester/vendor/github.com/gorilla/mux/regexp.go b/harvester/vendor/github.com/gorilla/mux/regexp.go
new file mode 100644
index 0000000..0189ad3
--- /dev/null
+++ b/harvester/vendor/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,323 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
+func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) {
+	// Check if it is well-formed.
+	idxs, errBraces := braceIndices(tpl)
+	if errBraces != nil {
+		return nil, errBraces
+	}
+	// Backup the original.
+	template := tpl
+	// Now let's parse it.
+	defaultPattern := "[^/]+"
+	if matchQuery {
+		defaultPattern = "[^?&]*"
+	} else if matchHost {
+		defaultPattern = "[^.]+"
+		matchPrefix = false
+	}
+	// Strict slash only applies to plain path matching; disable it for
+	// prefix, host and query matching.
+	if matchPrefix || matchHost || matchQuery {
+		strictSlash = false
+	}
+	// Set a flag for strictSlash.
+	endSlash := false
+	if strictSlash && strings.HasSuffix(tpl, "/") {
+		tpl = tpl[:len(tpl)-1]
+		endSlash = true
+	}
+	varsN := make([]string, len(idxs)/2)
+	varsR := make([]*regexp.Regexp, len(idxs)/2)
+	pattern := bytes.NewBufferString("")
+	pattern.WriteByte('^')
+	reverse := bytes.NewBufferString("")
+	var end int
+	var err error
+	for i := 0; i < len(idxs); i += 2 {
+		// Set all values we are interested in.
+		raw := tpl[end:idxs[i]]
+		end = idxs[i+1]
+		parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+		name := parts[0]
+		patt := defaultPattern
+		if len(parts) == 2 {
+			patt = parts[1]
+		}
+		// Name or pattern can't be empty.
+		if name == "" || patt == "" {
+			return nil, fmt.Errorf("mux: missing name or pattern in %q",
+				tpl[idxs[i]:end])
+		}
+		// Build the regexp pattern.
+		fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
+
+		// Build the reverse template.
+		fmt.Fprintf(reverse, "%s%%s", raw)
+
+		// Append variable name and compiled pattern.
+		varsN[i/2] = name
+		varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Add the remaining.
+	raw := tpl[end:]
+	pattern.WriteString(regexp.QuoteMeta(raw))
+	if strictSlash {
+		pattern.WriteString("[/]?")
+	}
+	if matchQuery {
+		// Add the default pattern if the query value is empty
+		if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
+			pattern.WriteString(defaultPattern)
+		}
+	}
+	if !matchPrefix {
+		pattern.WriteByte('$')
+	}
+	reverse.WriteString(raw)
+	if endSlash {
+		reverse.WriteByte('/')
+	}
+	// Compile full regexp.
+	reg, errCompile := regexp.Compile(pattern.String())
+	if errCompile != nil {
+		return nil, errCompile
+	}
+
+	// Check for capturing groups which used to work in older versions
+	if reg.NumSubexp() != len(idxs)/2 {
+		panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
+			"Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
+	}
+
+	// Done!
+	return &routeRegexp{
+		template:       template,
+		matchHost:      matchHost,
+		matchQuery:     matchQuery,
+		strictSlash:    strictSlash,
+		useEncodedPath: useEncodedPath,
+		regexp:         reg,
+		reverse:        reverse.String(),
+		varsN:          varsN,
+		varsR:          varsR,
+	}, nil
+}
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+	// The unmodified template.
+	template string
+	// True for host match, false for path or query string match.
+	matchHost bool
+	// True for query string match, false for path and host match.
+	matchQuery bool
+	// The strictSlash value defined on the route, but disabled if PathPrefix was used.
+	strictSlash bool
+	// Determines whether to use encoded path from getPath function or unencoded
+	// req.URL.Path for path matching
+	useEncodedPath bool
+	// Expanded regexp.
+	regexp *regexp.Regexp
+	// Reverse template.
+	reverse string
+	// Variable names.
+	varsN []string
+	// Variable regexps (validators).
+	varsR []*regexp.Regexp
+}
+
+// Match matches the regexp against the URL host or path.
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
+	if !r.matchHost {
+		if r.matchQuery {
+			return r.matchQueryString(req)
+		}
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = getPath(req)
+		}
+		return r.regexp.MatchString(path)
+	}
+
+	return r.regexp.MatchString(getHost(req))
+}
+
+// url builds a URL part using the given values.
+func (r *routeRegexp) url(values map[string]string) (string, error) {
+	urlValues := make([]interface{}, len(r.varsN))
+	for k, v := range r.varsN {
+		value, ok := values[v]
+		if !ok {
+			return "", fmt.Errorf("mux: missing route variable %q", v)
+		}
+		urlValues[k] = value
+	}
+	rv := fmt.Sprintf(r.reverse, urlValues...)
+	if !r.regexp.MatchString(rv) {
+		// The URL is checked against the full regexp, instead of checking
+		// individual variables. This is faster but to provide a good error
+		// message, we check individual regexps if the URL doesn't match.
+		for k, v := range r.varsN {
+			if !r.varsR[k].MatchString(values[v]) {
+				return "", fmt.Errorf(
+					"mux: variable %q doesn't match, expected %q", values[v],
+					r.varsR[k].String())
+			}
+		}
+	}
+	return rv, nil
+}
+
+// getURLQuery returns a single query parameter from a request URL.
+// For a URL with foo=bar&baz=ding, we return only the relevant key
+// value pair for the routeRegexp.
+func (r *routeRegexp) getURLQuery(req *http.Request) string {
+	if !r.matchQuery {
+		return ""
+	}
+	templateKey := strings.SplitN(r.template, "=", 2)[0]
+	for key, vals := range req.URL.Query() {
+		if key == templateKey && len(vals) > 0 {
+			return key + "=" + vals[0]
+		}
+	}
+	return ""
+}
+
+func (r *routeRegexp) matchQueryString(req *http.Request) bool {
+	return r.regexp.MatchString(r.getURLQuery(req))
+}
+
+// braceIndices returns the first level curly brace indices from a string.
+// It returns an error in case of unbalanced braces.
+func braceIndices(s string) ([]int, error) {
+	var level, idx int
+	var idxs []int
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '{':
+			if level++; level == 1 {
+				idx = i
+			}
+		case '}':
+			if level--; level == 0 {
+				idxs = append(idxs, idx, i+1)
+			} else if level < 0 {
+				return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+			}
+		}
+	}
+	if level != 0 {
+		return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+	}
+	return idxs, nil
+}
+
+// varGroupName builds a capturing group name for the indexed variable.
+func varGroupName(idx int) string {
+	return "v" + strconv.Itoa(idx)
+}
+
+// ----------------------------------------------------------------------------
+// routeRegexpGroup
+// ----------------------------------------------------------------------------
+
+// routeRegexpGroup groups the route matchers that carry variables.
+type routeRegexpGroup struct {
+	host    *routeRegexp
+	path    *routeRegexp
+	queries []*routeRegexp
+}
+
+// setMatch extracts the variables from the URL once a route matches.
+func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+	// Store host variables.
+	if v.host != nil {
+		host := getHost(req)
+		matches := v.host.regexp.FindStringSubmatchIndex(host)
+		if len(matches) > 0 {
+			extractVars(host, matches, v.host.varsN, m.Vars)
+		}
+	}
+	path := req.URL.Path
+	if r.useEncodedPath {
+		path = getPath(req)
+	}
+	// Store path variables.
+	if v.path != nil {
+		matches := v.path.regexp.FindStringSubmatchIndex(path)
+		if len(matches) > 0 {
+			extractVars(path, matches, v.path.varsN, m.Vars)
+			// Check if we should redirect.
+			if v.path.strictSlash {
+				p1 := strings.HasSuffix(path, "/")
+				p2 := strings.HasSuffix(v.path.template, "/")
+				if p1 != p2 {
+					u, _ := url.Parse(req.URL.String())
+					if p1 {
+						u.Path = u.Path[:len(u.Path)-1]
+					} else {
+						u.Path += "/"
+					}
+					m.Handler = http.RedirectHandler(u.String(), 301)
+				}
+			}
+		}
+	}
+	// Store query string variables.
+	for _, q := range v.queries {
+		queryURL := q.getURLQuery(req)
+		matches := q.regexp.FindStringSubmatchIndex(queryURL)
+		if len(matches) > 0 {
+			extractVars(queryURL, matches, q.varsN, m.Vars)
+		}
+	}
+}
+
+// getHost tries its best to return the request host.
+func getHost(r *http.Request) string {
+	if r.URL.IsAbs() {
+		return r.URL.Host
+	}
+	host := r.Host
+	// Slice off any port information.
+	if i := strings.Index(host, ":"); i != -1 {
+		host = host[:i]
+	}
+	return host
+}
+
+func extractVars(input string, matches []int, names []string, output map[string]string) {
+	for i, name := range names {
+		output[name] = input[matches[2*i+2]:matches[2*i+3]]
+	}
+}
diff --git a/harvester/vendor/github.com/gorilla/mux/route.go b/harvester/vendor/github.com/gorilla/mux/route.go
new file mode 100644
index 0000000..9221915
--- /dev/null
+++ b/harvester/vendor/github.com/gorilla/mux/route.go
@@ -0,0 +1,636 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+)
+
+// Route stores information to match a request and build URLs.
+type Route struct {
+	// Parent where the route was registered (a Router).
+	parent parentRoute
+	// Request handler for the route.
+	handler http.Handler
+	// List of matchers.
+	matchers []matcher
+	// Manager for the variables from host and path.
+	regexp *routeRegexpGroup
+	// If true, when the path pattern is "/path/", accessing "/path" will
+	// redirect to the former and vice versa.
+	strictSlash bool
+	// If true, when the path pattern is "/path//to", accessing "/path//to"
+	// will not be cleaned or redirected.
+	skipClean bool
+	// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
+	useEncodedPath bool
+	// If true, this route never matches: it is only used to build URLs.
+	buildOnly bool
+	// The name used to build URLs.
+	name string
+	// Error that resulted from building the route.
+	err error
+
+	buildVarsFunc BuildVarsFunc
+}
+
+// SkipClean reports whether path cleaning is disabled for this route.
+func (r *Route) SkipClean() bool {
+	return r.skipClean
+}
+
+// Match matches the route against the request.
+func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
+	if r.buildOnly || r.err != nil {
+		return false
+	}
+	// Match everything.
+	for _, m := range r.matchers {
+		if matched := m.Match(req, match); !matched {
+			return false
+		}
+	}
+	// Yay, we have a match. Let's collect some info about it.
+	if match.Route == nil {
+		match.Route = r
+	}
+	if match.Handler == nil {
+		match.Handler = r.handler
+	}
+	if match.Vars == nil {
+		match.Vars = make(map[string]string)
+	}
+	// Set variables.
+	if r.regexp != nil {
+		r.regexp.setMatch(req, match, r)
+	}
+	return true
+}
+
+// ----------------------------------------------------------------------------
+// Route attributes
+// ----------------------------------------------------------------------------
+
+// GetError returns any error that resulted from building the route.
+func (r *Route) GetError() error {
+	return r.err
+}
+
+// BuildOnly sets the route to never match: it is only used to build URLs.
+func (r *Route) BuildOnly() *Route {
+	r.buildOnly = true
+	return r
+}
+
+// Handler --------------------------------------------------------------------
+
+// Handler sets a handler for the route.
+func (r *Route) Handler(handler http.Handler) *Route {
+	if r.err == nil {
+		r.handler = handler
+	}
+	return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+	return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+	return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// If the name was registered already it will be overwritten.
+func (r *Route) Name(name string) *Route {
+	if r.name != "" {
+		r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+			r.name, name)
+	}
+	if r.err == nil {
+		r.name = name
+		r.getNamedRoutes()[name] = r
+	}
+	return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+	return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+	Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+func (r *Route) addMatcher(m matcher) *Route {
+	if r.err == nil {
+		r.matchers = append(r.matchers, m)
+	}
+	return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
+	if r.err != nil {
+		return r.err
+	}
+	r.regexp = r.getRegexpGroup()
+	if !matchHost && !matchQuery {
+		if tpl == "/" && (len(tpl) == 0 || tpl[0] != '/') {
+			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+		}
+		if r.regexp.path != nil {
+			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+		}
+	}
+	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath)
+	if err != nil {
+		return err
+	}
+	for _, q := range r.regexp.queries {
+		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+			return err
+		}
+	}
+	if matchHost {
+		if r.regexp.path != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+				return err
+			}
+		}
+		r.regexp.host = rr
+	} else {
+		if r.regexp.host != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+				return err
+			}
+		}
+		if matchQuery {
+			r.regexp.queries = append(r.regexp.queries, rr)
+		} else {
+			r.regexp.path = rr
+		}
+	}
+	r.addMatcher(rr)
+	return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+//     r := mux.NewRouter()
+//     r.Headers("Content-Type", "application/json",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]string
+		headers, r.err = mapFromPairsToString(pairs...)
+		return r.addMatcher(headerMatcher(headers))
+	}
+	return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for the header
+type headerRegexMatcher map[string]*regexp.Regexp
+
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
+// support. For example:
+//
+//     r := mux.NewRouter()
+//     r.HeadersRegexp("Content-Type", "application/(text|json)",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request headers match their
+// regular expressions. If the value is an empty string, it will match any
+// value if the key is set.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]*regexp.Regexp
+		headers, r.err = mapFromPairsToRegex(pairs...)
+		return r.addMatcher(headerRegexMatcher(headers))
+	}
+	return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Host("www.example.com")
+//     r.Host("{subdomain}.domain.com")
+//     r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, true, false, false)
+	return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+// Match returns the match for a given request.
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+	return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+	return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+	for k, v := range methods {
+		methods[k] = strings.ToUpper(v)
+	}
+	return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Path("/products/").Handler(ProductsHandler)
+//     r.Path("/products/{key}").Handler(ProductsHandler)
+//     r.Path("/articles/{category}/{id:[0-9]+}").
+//       Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, false, false)
+	return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, true, false)
+	return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined queries
+// values, e.g.: ?foo=bar&id=42.
+//
+// It the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next "&" or "?".
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+	length := len(pairs)
+	if length%2 != 0 {
+		r.err = fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+		return nil
+	}
+	for i := 0; i < length; i += 2 {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
+			return r
+		}
+	}
+
+	return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.URL.Scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+func (r *Route) Schemes(schemes ...string) *Route {
+	for k, v := range schemes {
+		schemes[k] = strings.ToLower(v)
+	}
+	return r.addMatcher(schemeMatcher(schemes))
+}
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
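+//
+// For example, a BuildVarsFunc can supply a default value for a variable that
+// was omitted from the URL() call (a sketch; AboutHandler is hypothetical):
+//
+//     r.HandleFunc("/{lang}/about", AboutHandler).
+//       Name("about").
+//       BuildVarsFunc(func(vars map[string]string) map[string]string {
+//           if vars["lang"] == "" {
+//               vars["lang"] = "en"
+//           }
+//           return vars
+//       })
+//
+//     // u.Path will be "/en/about".
+//     u, err := r.Get("about").URL()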
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+	r.buildVarsFunc = f
+	return r
+}
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+//     r := mux.NewRouter()
+//     s := r.Host("www.example.com").Subrouter()
+//     s.HandleFunc("/products/", ProductsHandler)
+//     s.HandleFunc("/products/{key}", ProductHandler)
+//     s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+	router := &Router{parent: r, strictSlash: r.strictSlash}
+	r.addMatcher(router)
+	return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Name("article")
+//
+// ...a URL for it can be built using:
+//
+//     url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return an url.URL with the following path:
+//
+//     "/articles/technology/42"
+//
+// This also works for host variables:
+//
+//     r := mux.NewRouter()
+//     r.Host("{subdomain}.domain.com").
+//       HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Name("article")
+//
+//     // url.String() will be "http://news.domain.com/articles/technology/42"
+//     url, err := r.Get("article").URL("subdomain", "news",
+//                                      "category", "technology",
+//                                      "id", "42")
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil {
+		return nil, errors.New("mux: route doesn't have a host or path")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	var scheme, host, path string
+	if r.regexp.host != nil {
+		// Set a default scheme.
+		scheme = "http"
+		if host, err = r.regexp.host.url(values); err != nil {
+			return nil, err
+		}
+	}
+	if r.regexp.path != nil {
+		if path, err = r.regexp.path.url(values); err != nil {
+			return nil, err
+		}
+	}
+	return &url.URL{
+		Scheme: scheme,
+		Host:   host,
+		Path:   path,
+	}, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil || r.regexp.host == nil {
+		return nil, errors.New("mux: route doesn't have a host")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	host, err := r.regexp.host.url(values)
+	if err != nil {
+		return nil, err
+	}
+	return &url.URL{
+		Scheme: "http",
+		Host:   host,
+	}, nil
+}
+
+// URLPath builds the path part of the URL for a route. See Route.URL().
+//
+// The route must have a path defined.
+func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil || r.regexp.path == nil {
+		return nil, errors.New("mux: route doesn't have a path")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	path, err := r.regexp.path.url(values)
+	if err != nil {
+		return nil, err
+	}
+	return &url.URL{
+		Path: path,
+	}, nil
+}
+
+// GetPathTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp == nil || r.regexp.path == nil {
+		return "", errors.New("mux: route doesn't have a path")
+	}
+	return r.regexp.path.template, nil
+}
+
+// GetHostTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a host.
+func (r *Route) GetHostTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp == nil || r.regexp.host == nil {
+		return "", errors.New("mux: route doesn't have a host")
+	}
+	return r.regexp.host.template, nil
+}
+
+// prepareVars converts the route variable pairs into a map. If the route has a
+// BuildVarsFunc, it is invoked.
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
+	m, err := mapFromPairsToString(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	return r.buildVars(m), nil
+}
+
+func (r *Route) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	if r.buildVarsFunc != nil {
+		m = r.buildVarsFunc(m)
+	}
+	return m
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// parentRoute allows routes to know about parent host and path definitions.
+type parentRoute interface {
+	getNamedRoutes() map[string]*Route
+	getRegexpGroup() *routeRegexpGroup
+	buildVars(map[string]string) map[string]string
+}
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Route) getNamedRoutes() map[string]*Route {
+	if r.parent == nil {
+		// During tests router is not always set.
+		r.parent = NewRouter()
+	}
+	return r.parent.getNamedRoutes()
+}
+
+// getRegexpGroup returns regexp definitions from this route.
+func (r *Route) getRegexpGroup() *routeRegexpGroup {
+	if r.regexp == nil {
+		if r.parent == nil {
+			// During tests router is not always set.
+			r.parent = NewRouter()
+		}
+		regexp := r.parent.getRegexpGroup()
+		if regexp == nil {
+			r.regexp = new(routeRegexpGroup)
+		} else {
+			// Copy.
+			r.regexp = &routeRegexpGroup{
+				host:    regexp.host,
+				path:    regexp.path,
+				queries: regexp.queries,
+			}
+		}
+	}
+	return r.regexp
+}
diff --git a/harvester/vendor/github.com/kelseyhightower/envconfig/LICENSE b/harvester/vendor/github.com/kelseyhightower/envconfig/LICENSE
new file mode 100644
index 0000000..4bfa7a8
--- /dev/null
+++ b/harvester/vendor/github.com/kelseyhightower/envconfig/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Kelsey Hightower
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/harvester/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS b/harvester/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
new file mode 100644
index 0000000..6527a9f
--- /dev/null
+++ b/harvester/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
@@ -0,0 +1,2 @@
+Kelsey Hightower kelsey.hightower@gmail.com github.com/kelseyhightower
+Travis Parker    travis.parker@gmail.com    github.com/teepark
diff --git a/harvester/vendor/github.com/kelseyhightower/envconfig/README.md b/harvester/vendor/github.com/kelseyhightower/envconfig/README.md
new file mode 100644
index 0000000..09de74b
--- /dev/null
+++ b/harvester/vendor/github.com/kelseyhightower/envconfig/README.md
@@ -0,0 +1,175 @@
+# envconfig
+
+[![Build Status](https://travis-ci.org/kelseyhightower/envconfig.png)](https://travis-ci.org/kelseyhightower/envconfig)
+
+```Go
+import "github.com/kelseyhightower/envconfig"
+```
+
+## Documentation
+
+See [godoc](http://godoc.org/github.com/kelseyhightower/envconfig)
+
+## Usage
+
+Set some environment variables:
+
+```Bash
+export MYAPP_DEBUG=false
+export MYAPP_PORT=8080
+export MYAPP_USER=Kelsey
+export MYAPP_RATE="0.5"
+export MYAPP_TIMEOUT="3m"
+export MYAPP_USERS="rob,ken,robert"
+```
+
+Write some code:
+
+```Go
+package main
+
+import (
+    "fmt"
+    "log"
+    "time"
+
+    "github.com/kelseyhightower/envconfig"
+)
+
+type Specification struct {
+    Debug   bool
+    Port    int
+    User    string
+    Users   []string
+    Rate    float32
+    Timeout time.Duration
+}
+
+func main() {
+    var s Specification
+    err := envconfig.Process("myapp", &s)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+    format := "Debug: %v\nPort: %d\nUser: %s\nRate: %f\nTimeout: %s\n"
+    _, err = fmt.Printf(format, s.Debug, s.Port, s.User, s.Rate, s.Timeout)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+
+    fmt.Println("Users:")
+    for _, u := range s.Users {
+        fmt.Printf("  %s\n", u)
+    }
+}
+```
+
+Results:
+
+```Bash
+Debug: false
+Port: 8080
+User: Kelsey
+Rate: 0.500000
+Timeout: 3m0s
+Users:
+  rob
+  ken
+  robert
+```
+
+## Struct Tag Support
+
+Envconfig supports the use of struct tags to specify alternate, default, and required
+environment variables.
+
+For example, consider the following struct:
+
+```Go
+type Specification struct {
+    ManualOverride1 string `envconfig:"manual_override_1"`
+    DefaultVar      string `default:"foobar"`
+    RequiredVar     string `required:"true"`
+    IgnoredVar      string `ignored:"true"`
+    AutoSplitVar    string `split_words:"true"`
+}
+```
+
+Envconfig has automatic support for CamelCased struct elements when the
+`split_words:"true"` tag is supplied. Without this tag, `AutoSplitVar` above
+would look for an environment variable called `MYAPP_AUTOSPLITVAR`. With the
+setting applied it will look for `MYAPP_AUTO_SPLIT_VAR`. Note that numbers
+will get globbed into the previous word. If the setting does not do the
+right thing, you may use a manual override.
+
+Envconfig will process the value for `ManualOverride1` by populating it with
+the value of `MYAPP_MANUAL_OVERRIDE_1`. Without this struct tag, it would have
+instead looked up `MYAPP_MANUALOVERRIDE1`. With the `split_words:"true"` tag
+it would have looked up `MYAPP_MANUAL_OVERRIDE1`.
+
+```Bash
+export MYAPP_MANUAL_OVERRIDE_1="this will be the value"
+
+# export MYAPP_MANUALOVERRIDE1="and this will not"
+```
+
+If envconfig can't find an environment variable value for `MYAPP_DEFAULTVAR`,
+it will populate it with "foobar" as a default value.
+
+If envconfig can't find an environment variable value for `MYAPP_REQUIREDVAR`,
+it will return an error when asked to process the struct.
+
+If envconfig can't find an environment variable in the form `PREFIX_MYVAR`, and there
+is a struct tag defined, it will try to populate your variable with an environment
+variable that directly matches the envconfig tag in your struct definition:
+
+```shell
+export SERVICE_HOST=127.0.0.1
+export MYAPP_DEBUG=true
+```
+```Go
+type Specification struct {
+    ServiceHost string `envconfig:"SERVICE_HOST"`
+    Debug       bool
+}
+```
+
+Envconfig won't process a field with the "ignored" tag set to "true", even if a corresponding
+environment variable is set.
+
+## Supported Struct Field Types
+
+envconfig supports these struct field types:
+
+  * string
+  * int, int8, int16, int32, int64
+  * uint, uint8, uint16, uint32, uint64
+  * bool
+  * float32, float64
+  * time.Duration
+  * slices of any supported type, parsed from comma-separated values
+  * [encoding.TextUnmarshaler](https://golang.org/pkg/encoding/#TextUnmarshaler)
+
+Embedded structs using these fields are also supported.
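+
+For example (a sketch, assuming the "myapp" prefix from above), the fields of
+an anonymously embedded struct are looked up without an extra prefix:
+
+```Go
+type ServerConfig struct {
+    Host string
+    Port int
+}
+
+type Specification struct {
+    ServerConfig // fields resolve as MYAPP_HOST and MYAPP_PORT
+}
+```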
+
+## Custom Decoders
+
+Any field whose type (or pointer-to-type) implements `envconfig.Decoder` can
+control its own deserialization:
+
+```Bash
+export DNS_SERVER=8.8.8.8
+```
+
+```Go
+type IPDecoder net.IP
+
+func (ipd *IPDecoder) Decode(value string) error {
+    *ipd = IPDecoder(net.ParseIP(value))
+    return nil
+}
+
+type DNSConfig struct {
+    Address IPDecoder `envconfig:"DNS_SERVER"`
+}
+```
+
+Also, envconfig will use a `Set(string) error` method like from the
+[flag.Value](https://godoc.org/flag#Value) interface if implemented.
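+
+For example (a sketch of a hypothetical type implementing `Set`):
+
+```Go
+type CommaList []string
+
+// Set splits the raw environment value on commas.
+func (l *CommaList) Set(value string) error {
+    *l = CommaList(strings.Split(value, ","))
+    return nil
+}
+```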
diff --git a/harvester/vendor/github.com/kelseyhightower/envconfig/doc.go b/harvester/vendor/github.com/kelseyhightower/envconfig/doc.go
new file mode 100644
index 0000000..f28561c
--- /dev/null
+++ b/harvester/vendor/github.com/kelseyhightower/envconfig/doc.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+// Package envconfig implements decoding of environment variables based on a
+// user-defined specification. A typical use is reading configuration settings
+// from environment variables.
+package envconfig
diff --git a/harvester/vendor/github.com/kelseyhightower/envconfig/env_os.go b/harvester/vendor/github.com/kelseyhightower/envconfig/env_os.go
new file mode 100644
index 0000000..a6a014a
--- /dev/null
+++ b/harvester/vendor/github.com/kelseyhightower/envconfig/env_os.go
@@ -0,0 +1,7 @@
+// +build appengine
+
+package envconfig
+
+import "os"
+
+var lookupEnv = os.LookupEnv
diff --git a/harvester/vendor/github.com/kelseyhightower/envconfig/env_syscall.go b/harvester/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
new file mode 100644
index 0000000..9d98085
--- /dev/null
+++ b/harvester/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
@@ -0,0 +1,7 @@
+// +build !appengine
+
+package envconfig
+
+import "syscall"
+
+var lookupEnv = syscall.Getenv
diff --git a/harvester/vendor/github.com/kelseyhightower/envconfig/envconfig.go b/harvester/vendor/github.com/kelseyhightower/envconfig/envconfig.go
new file mode 100644
index 0000000..3ad5e7d
--- /dev/null
+++ b/harvester/vendor/github.com/kelseyhightower/envconfig/envconfig.go
@@ -0,0 +1,298 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// ErrInvalidSpecification indicates that a specification is of the wrong type.
+var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
+
+// A ParseError occurs when an environment variable cannot be converted to
+// the type required by a struct field during assignment.
+type ParseError struct {
+	KeyName   string
+	FieldName string
+	TypeName  string
+	Value     string
+	Err       error
+}
+
+// Decoder has the same semantics as Setter, but takes higher precedence.
+// It is provided for historical compatibility.
+type Decoder interface {
+	Decode(value string) error
+}
+
+// Setter is implemented by types that can self-deserialize values.
+// Any type that implements flag.Value also implements Setter.
+type Setter interface {
+	Set(value string) error
+}
+
+func (e *ParseError) Error() string {
+	return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s. details: %[5]s", e.KeyName, e.FieldName, e.Value, e.TypeName, e.Err)
+}
+
+// varInfo maintains information about the configuration variable
+type varInfo struct {
+	Name  string
+	Alt   string
+	Key   string
+	Field reflect.Value
+	Tags  reflect.StructTag
+}
+
+// gatherInfo gathers information about the specified struct
+func gatherInfo(prefix string, spec interface{}) ([]varInfo, error) {
+	expr := regexp.MustCompile("([^A-Z]+|[A-Z][^A-Z]+|[A-Z]+)")
+	s := reflect.ValueOf(spec)
+
+	if s.Kind() != reflect.Ptr {
+		return nil, ErrInvalidSpecification
+	}
+	s = s.Elem()
+	if s.Kind() != reflect.Struct {
+		return nil, ErrInvalidSpecification
+	}
+	typeOfSpec := s.Type()
+
+	// Over-allocate the info slice; we will extend it later if needed
+	infos := make([]varInfo, 0, s.NumField())
+	for i := 0; i < s.NumField(); i++ {
+		f := s.Field(i)
+		ftype := typeOfSpec.Field(i)
+		if !f.CanSet() || ftype.Tag.Get("ignored") == "true" {
+			continue
+		}
+
+		for f.Kind() == reflect.Ptr {
+			if f.IsNil() {
+				if f.Type().Elem().Kind() != reflect.Struct {
+					// nil pointer to a non-struct: leave it alone
+					break
+				}
+				// nil pointer to struct: create a zero instance
+				f.Set(reflect.New(f.Type().Elem()))
+			}
+			f = f.Elem()
+		}
+
+		// Capture information about the config variable
+		info := varInfo{
+			Name:  ftype.Name,
+			Field: f,
+			Tags:  ftype.Tag,
+			Alt:   strings.ToUpper(ftype.Tag.Get("envconfig")),
+		}
+
+		// Default to the field name as the env var name (will be upcased)
+		info.Key = info.Name
+
+		// Best effort to un-pick camel casing as separate words
+		if ftype.Tag.Get("split_words") == "true" {
+			words := expr.FindAllStringSubmatch(ftype.Name, -1)
+			if len(words) > 0 {
+				var name []string
+				for _, words := range words {
+					name = append(name, words[0])
+				}
+
+				info.Key = strings.Join(name, "_")
+			}
+		}
+		if info.Alt != "" {
+			info.Key = info.Alt
+		}
+		if prefix != "" {
+			info.Key = fmt.Sprintf("%s_%s", prefix, info.Key)
+		}
+		info.Key = strings.ToUpper(info.Key)
+		infos = append(infos, info)
+
+		if f.Kind() == reflect.Struct {
+			// honor Decode if present
+			if decoderFrom(f) == nil && setterFrom(f) == nil && textUnmarshaler(f) == nil {
+				innerPrefix := prefix
+				if !ftype.Anonymous {
+					innerPrefix = info.Key
+				}
+
+				embeddedPtr := f.Addr().Interface()
+				embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr)
+				if err != nil {
+					return nil, err
+				}
+				infos = append(infos[:len(infos)-1], embeddedInfos...)
+
+				continue
+			}
+		}
+	}
+	return infos, nil
+}
+
+// Process populates the specified struct based on environment variables
+func Process(prefix string, spec interface{}) error {
+	infos, err := gatherInfo(prefix, spec)
+
+	for _, info := range infos {
+
+		// `os.Getenv` cannot differentiate between an explicitly set empty value
+		// and an unset value. `os.LookupEnv` can, but it is only available in
+		// go1.5 or newer, so `syscall.Getenv` is used instead except on App
+		// Engine, where the syscall package is restricted; the build tags in
+		// env_os.go and env_syscall.go select the right implementation.
+		value, ok := lookupEnv(info.Key)
+		if !ok && info.Alt != "" {
+			value, ok = lookupEnv(info.Alt)
+		}
+
+		def := info.Tags.Get("default")
+		if def != "" && !ok {
+			value = def
+		}
+
+		req := info.Tags.Get("required")
+		if !ok && def == "" {
+			if req == "true" {
+				return fmt.Errorf("required key %s missing value", info.Key)
+			}
+			continue
+		}
+
+		err := processField(value, info.Field)
+		if err != nil {
+			return &ParseError{
+				KeyName:   info.Key,
+				FieldName: info.Name,
+				TypeName:  info.Field.Type().String(),
+				Value:     value,
+				Err:       err,
+			}
+		}
+	}
+
+	return err
+}
+
+// MustProcess is the same as Process but panics if an error occurs
+func MustProcess(prefix string, spec interface{}) {
+	if err := Process(prefix, spec); err != nil {
+		panic(err)
+	}
+}
+
+func processField(value string, field reflect.Value) error {
+	typ := field.Type()
+
+	decoder := decoderFrom(field)
+	if decoder != nil {
+		return decoder.Decode(value)
+	}
+	// look for Set method if Decode not defined
+	setter := setterFrom(field)
+	if setter != nil {
+		return setter.Set(value)
+	}
+
+	if t := textUnmarshaler(field); t != nil {
+		return t.UnmarshalText([]byte(value))
+	}
+
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		if field.IsNil() {
+			field.Set(reflect.New(typ))
+		}
+		field = field.Elem()
+	}
+
+	switch typ.Kind() {
+	case reflect.String:
+		field.SetString(value)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		var (
+			val int64
+			err error
+		)
+		if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" {
+			var d time.Duration
+			d, err = time.ParseDuration(value)
+			val = int64(d)
+		} else {
+			val, err = strconv.ParseInt(value, 0, typ.Bits())
+		}
+		if err != nil {
+			return err
+		}
+
+		field.SetInt(val)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		val, err := strconv.ParseUint(value, 0, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetUint(val)
+	case reflect.Bool:
+		val, err := strconv.ParseBool(value)
+		if err != nil {
+			return err
+		}
+		field.SetBool(val)
+	case reflect.Float32, reflect.Float64:
+		val, err := strconv.ParseFloat(value, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetFloat(val)
+	case reflect.Slice:
+		vals := strings.Split(value, ",")
+		sl := reflect.MakeSlice(typ, len(vals), len(vals))
+		for i, val := range vals {
+			err := processField(val, sl.Index(i))
+			if err != nil {
+				return err
+			}
+		}
+		field.Set(sl)
+	}
+
+	return nil
+}
+
+func interfaceFrom(field reflect.Value, fn func(interface{}, *bool)) {
+	// it may be impossible for a struct field to fail this check
+	if !field.CanInterface() {
+		return
+	}
+	var ok bool
+	fn(field.Interface(), &ok)
+	if !ok && field.CanAddr() {
+		fn(field.Addr().Interface(), &ok)
+	}
+}
+
+func decoderFrom(field reflect.Value) (d Decoder) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { d, *ok = v.(Decoder) })
+	return d
+}
+
+func setterFrom(field reflect.Value) (s Setter) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { s, *ok = v.(Setter) })
+	return s
+}
+
+func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) })
+	return t
+}
diff --git a/harvester/vendor/github.com/kelseyhightower/envconfig/usage.go b/harvester/vendor/github.com/kelseyhightower/envconfig/usage.go
new file mode 100644
index 0000000..4870237
--- /dev/null
+++ b/harvester/vendor/github.com/kelseyhightower/envconfig/usage.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2016 Kelsey Hightower and others. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"text/template"
+)
+
+const (
+	// DefaultListFormat is the default template used to display usage as a list
+	DefaultListFormat = `This application is configured via the environment. The following environment
+variables can be used:
+{{range .}}
+{{usage_key .}}
+  [description] {{usage_description .}}
+  [type]        {{usage_type .}}
+  [default]     {{usage_default .}}
+  [required]    {{usage_required .}}{{end}}
+`
+	// DefaultTableFormat is the default template used to display usage in a tabular format
+	DefaultTableFormat = `This application is configured via the environment. The following environment
+variables can be used:
+
+KEY	TYPE	DEFAULT	REQUIRED	DESCRIPTION
+{{range .}}{{usage_key .}}	{{usage_type .}}	{{usage_default .}}	{{usage_required .}}	{{usage_description .}}
+{{end}}`
+)
+
+var (
+	decoderType     = reflect.TypeOf((*Decoder)(nil)).Elem()
+	setterType      = reflect.TypeOf((*Setter)(nil)).Elem()
+	unmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+func implementsInterface(t reflect.Type) bool {
+	return t.Implements(decoderType) ||
+		reflect.PtrTo(t).Implements(decoderType) ||
+		t.Implements(setterType) ||
+		reflect.PtrTo(t).Implements(setterType) ||
+		t.Implements(unmarshalerType) ||
+		reflect.PtrTo(t).Implements(unmarshalerType)
+}
+
+// toTypeDescription converts Go types into a human-readable description
+func toTypeDescription(t reflect.Type) string {
+	switch t.Kind() {
+	case reflect.Array, reflect.Slice:
+		return fmt.Sprintf("Comma-separated list of %s", toTypeDescription(t.Elem()))
+	case reflect.Ptr:
+		return toTypeDescription(t.Elem())
+	case reflect.Struct:
+		if implementsInterface(t) && t.Name() != "" {
+			return t.Name()
+		}
+		return ""
+	case reflect.String:
+		name := t.Name()
+		if name != "" && name != "string" {
+			return name
+		}
+		return "String"
+	case reflect.Bool:
+		name := t.Name()
+		if name != "" && name != "bool" {
+			return name
+		}
+		return "True or False"
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "int") {
+			return name
+		}
+		return "Integer"
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "uint") {
+			return name
+		}
+		return "Unsigned Integer"
+	case reflect.Float32, reflect.Float64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "float") {
+			return name
+		}
+		return "Float"
+	}
+	return fmt.Sprintf("%+v", t)
+}
+
+// Usage writes usage information to standard output using the default header and table format
+func Usage(prefix string, spec interface{}) error {
+	// The default is to output the usage information as a table
+	// Create tabwriter instance to support table output
+	tabs := tabwriter.NewWriter(os.Stdout, 1, 0, 4, ' ', 0)
+
+	err := Usagef(prefix, spec, tabs, DefaultTableFormat)
+	tabs.Flush()
+	return err
+}
+
+// Usagef writes usage information to the specified io.Writer using the
+// specified template format.
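+//
+// A usage sketch (assuming a Specification struct like the one in the
+// README):
+//
+//	var s Specification
+//	_ = Usagef("myapp", &s, os.Stderr, DefaultListFormat)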
+func Usagef(prefix string, spec interface{}, out io.Writer, format string) error {
+
+	// Specify the default usage template functions
+	functions := template.FuncMap{
+		"usage_key":         func(v varInfo) string { return v.Key },
+		"usage_description": func(v varInfo) string { return v.Tags.Get("desc") },
+		"usage_type":        func(v varInfo) string { return toTypeDescription(v.Field.Type()) },
+		"usage_default":     func(v varInfo) string { return v.Tags.Get("default") },
+		"usage_required": func(v varInfo) (string, error) {
+			req := v.Tags.Get("required")
+			if req != "" {
+				reqB, err := strconv.ParseBool(req)
+				if err != nil {
+					return "", err
+				}
+				if reqB {
+					req = "true"
+				}
+			}
+			return req, nil
+		},
+	}
+
+	tmpl, err := template.New("envconfig").Funcs(functions).Parse(format)
+	if err != nil {
+		return err
+	}
+
+	return Usaget(prefix, spec, out, tmpl)
+}
+
+// Usaget writes usage information to the specified io.Writer using the specified template
+func Usaget(prefix string, spec interface{}, out io.Writer, tmpl *template.Template) error {
+	// gather first
+	infos, err := gatherInfo(prefix, spec)
+	if err != nil {
+		return err
+	}
+
+	return tmpl.Execute(out, infos)
+}
diff --git a/harvester/vendor/github.com/tatsushid/go-fastping/LICENSE b/harvester/vendor/github.com/tatsushid/go-fastping/LICENSE
new file mode 100644
index 0000000..abb8dee
--- /dev/null
+++ b/harvester/vendor/github.com/tatsushid/go-fastping/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Tatsushi Demachi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/harvester/vendor/github.com/tatsushid/go-fastping/README.md b/harvester/vendor/github.com/tatsushid/go-fastping/README.md
new file mode 100644
index 0000000..d77a261
--- /dev/null
+++ b/harvester/vendor/github.com/tatsushid/go-fastping/README.md
@@ -0,0 +1,54 @@
+go-fastping
+===========
+
+go-fastping is a Go language ICMP ping library, inspired by the `AnyEvent::FastPing`
+Perl module, for quickly sending ICMP ECHO REQUEST packets. The original Perl
+module is available at http://search.cpan.org/~mlehmann/AnyEvent-FastPing-2.01/
+
+Not all of the original module's functions have been implemented yet.
+
+[![GoDoc](https://godoc.org/github.com/tatsushid/go-fastping?status.svg)](https://godoc.org/github.com/tatsushid/go-fastping)
+
+## Installation
+
+Install and update with `go get -u github.com/tatsushid/go-fastping`
+
+## Examples
+
+Import this package and write
+
+```go
+p := fastping.NewPinger()
+ra, err := net.ResolveIPAddr("ip4:icmp", os.Args[1])
+if err != nil {
+	fmt.Println(err)
+	os.Exit(1)
+}
+p.AddIPAddr(ra)
+p.OnRecv = func(addr *net.IPAddr, rtt time.Duration) {
+	fmt.Printf("IP Addr: %s receive, RTT: %v\n", addr.String(), rtt)
+}
+p.OnIdle = func() {
+	fmt.Println("finish")
+}
+err = p.Run()
+if err != nil {
+	fmt.Println(err)
+}
+```
+
+The example sends an ICMP packet and waits for a response. If it receives a
+response, it calls the "receive" callback. After that, once MaxRTT time has
+passed, it calls the "idle" callback. For more details,
+refer [to the godoc][godoc], and if you need more examples,
+please see "cmd/ping/ping.go".
+
+## Caution
+This package implements ICMP ping using both raw sockets and UDP. If your program
+uses this package in raw socket mode, it needs to run as the root user.
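+
+For example, to select the unprivileged UDP mode instead (a sketch using this
+package's `Network` method):
+
+```go
+p := fastping.NewPinger()
+// Switch from the default "ip" (raw socket) endpoint to "udp".
+if _, err := p.Network("udp"); err != nil {
+	fmt.Println(err)
+}
+```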
+
+## License
+go-fastping is under MIT License. See the [LICENSE][license] file for details.
+
+[godoc]: http://godoc.org/github.com/tatsushid/go-fastping
+[license]: https://github.com/tatsushid/go-fastping/blob/master/LICENSE
diff --git a/harvester/vendor/github.com/tatsushid/go-fastping/fastping.go b/harvester/vendor/github.com/tatsushid/go-fastping/fastping.go
new file mode 100644
index 0000000..96950ca
--- /dev/null
+++ b/harvester/vendor/github.com/tatsushid/go-fastping/fastping.go
@@ -0,0 +1,685 @@
+// Package fastping is an ICMP ping library, inspired by the AnyEvent::FastPing
+// Perl module, for sending ICMP ECHO REQUEST packets quickly. The original
+// Perl module is available at
+// http://search.cpan.org/~mlehmann/AnyEvent-FastPing-2.01/
+//
+// Not all of the original module's functions have been implemented yet.
+//
+// Here is an example:
+//
+//	p := fastping.NewPinger()
+//	ra, err := net.ResolveIPAddr("ip4:icmp", os.Args[1])
+//	if err != nil {
+//		fmt.Println(err)
+//		os.Exit(1)
+//	}
+//	p.AddIPAddr(ra)
+//	p.OnRecv = func(addr *net.IPAddr, rtt time.Duration) {
+//		fmt.Printf("IP Addr: %s receive, RTT: %v\n", addr.String(), rtt)
+//	}
+//	p.OnIdle = func() {
+//		fmt.Println("finish")
+//	}
+//	err = p.Run()
+//	if err != nil {
+//		fmt.Println(err)
+//	}
+//
+// It sends an ICMP packet and waits for a response. If it receives a
+// response, it calls the "receive" callback. After that, once MaxRTT has
+// passed, it calls the "idle" callback. If you need more examples, please
+// see "cmd/ping/ping.go".
+//
+// This library needs to run as a superuser to send ICMP packets when the
+// privileged raw ICMP endpoint is used, so in that case run the package
+// tests as follows:
+//
+//	sudo go test
+//
+package fastping
+
+import (
+	"errors"
+	"fmt"
+	"log"
+	"math/rand"
+	"net"
+	"sync"
+	"syscall"
+	"time"
+
+	"golang.org/x/net/icmp"
+	"golang.org/x/net/ipv4"
+	"golang.org/x/net/ipv6"
+)
+
+const (
+	TimeSliceLength  = 8
+	ProtocolICMP     = 1
+	ProtocolIPv6ICMP = 58
+)
+
+var (
+	ipv4Proto = map[string]string{"ip": "ip4:icmp", "udp": "udp4"}
+	ipv6Proto = map[string]string{"ip": "ip6:ipv6-icmp", "udp": "udp6"}
+)
+
+func byteSliceOfSize(n int) []byte {
+	b := make([]byte, n)
+	for i := 0; i < len(b); i++ {
+		b[i] = 1
+	}
+
+	return b
+}
+
+func timeToBytes(t time.Time) []byte {
+	nsec := t.UnixNano()
+	b := make([]byte, 8)
+	for i := uint8(0); i < 8; i++ {
+		b[i] = byte((nsec >> ((7 - i) * 8)) & 0xff)
+	}
+	return b
+}
+
+func bytesToTime(b []byte) time.Time {
+	var nsec int64
+	for i := uint8(0); i < 8; i++ {
+		nsec += int64(b[i]) << ((7 - i) * 8)
+	}
+	return time.Unix(nsec/1000000000, nsec%1000000000)
+}
+
+func isIPv4(ip net.IP) bool {
+	return len(ip.To4()) == net.IPv4len
+}
+
+func isIPv6(ip net.IP) bool {
+	return len(ip) == net.IPv6len
+}
+
+func ipv4Payload(b []byte) []byte {
+	if len(b) < ipv4.HeaderLen {
+		return b
+	}
+	hdrlen := int(b[0]&0x0f) << 2
+	return b[hdrlen:]
+}
+
+type packet struct {
+	bytes []byte
+	addr  net.Addr
+}
+
+type context struct {
+	stop chan bool
+	done chan bool
+	err  error
+}
+
+func newContext() *context {
+	return &context{
+		stop: make(chan bool),
+		done: make(chan bool),
+	}
+}
+
+// Pinger represents ICMP packet sender/receiver
+type Pinger struct {
+	id  int
+	seq int
+	// key string is IPAddr.String()
+	addrs   map[string]*net.IPAddr
+	network string
+	source  string
+	source6 string
+	hasIPv4 bool
+	hasIPv6 bool
+	ctx     *context
+	mu      sync.Mutex
+
+	// Size in bytes of the payload to send
+	Size int
+	// MaxRTT is the duration of the idle timeout. Once it has passed, the
+	// library calls the idle callback function. It is also used as the
+	// interval for the RunLoop() method.
+	MaxRTT time.Duration
+	// OnRecv is called with a response packet's source address and its
+	// elapsed time when Pinger receives a response packet.
+	OnRecv func(*net.IPAddr, time.Duration)
+	// OnIdle is called when MaxRTT time passed
+	OnIdle func()
+	// If Debug is true, it prints debug messages to stdout.
+	Debug bool
+}
+
+// NewPinger returns a new Pinger struct pointer
+func NewPinger() *Pinger {
+	rand.Seed(time.Now().UnixNano())
+	return &Pinger{
+		id:      rand.Intn(0xffff),
+		seq:     rand.Intn(0xffff),
+		addrs:   make(map[string]*net.IPAddr),
+		network: "ip",
+		source:  "",
+		source6: "",
+		hasIPv4: false,
+		hasIPv6: false,
+		Size:    TimeSliceLength,
+		MaxRTT:  time.Second,
+		OnRecv:  nil,
+		OnIdle:  nil,
+		Debug:   false,
+	}
+}
+
+// Network sets the network endpoint type for ICMP ping and returns the
+// previous setting. The network arg must be the string "ip" or "udp"; any
+// other value returns an error. If this function isn't called, Pinger uses
+// "ip" by default.
+func (p *Pinger) Network(network string) (string, error) {
+	origNet := p.network
+	switch network {
+	case "ip":
+		fallthrough
+	case "udp":
+		p.network = network
+	default:
+		return origNet, errors.New(network + " can't be used as ICMP endpoint")
+	}
+	return origNet, nil
+}
+
+// Source sets the IPv4/IPv6 source IP for sending ICMP packets and returns the
+// previous setting. An empty value means the system default is used (for both
+// IPv4 and IPv6).
+func (p *Pinger) Source(source string) (string, error) {
+	// reuse the previous IPv4 value when clearing with an empty string
+	origSource := p.source
+	if "" == source {
+		p.mu.Lock()
+		p.source = ""
+		p.source6 = ""
+		p.mu.Unlock()
+		return origSource, nil
+	}
+
+	addr := net.ParseIP(source)
+	if addr == nil {
+		return origSource, errors.New(source + " is not a valid textual representation of an IPv4/IPv6 address")
+	}
+
+	if isIPv4(addr) {
+		p.mu.Lock()
+		p.source = source
+		p.mu.Unlock()
+	} else if isIPv6(addr) {
+		origSource = p.source6
+		p.mu.Lock()
+		p.source6 = source
+		p.mu.Unlock()
+	} else {
+		return origSource, errors.New(source + " is not a valid textual representation of an IPv4/IPv6 address")
+	}
+
+	return origSource, nil
+}
+
+// AddIP adds an IP address to Pinger. ipaddr arg should be a string like
+// "192.0.2.1".
+func (p *Pinger) AddIP(ipaddr string) error {
+	addr := net.ParseIP(ipaddr)
+	if addr == nil {
+		return fmt.Errorf("%s is not a valid textual representation of an IP address", ipaddr)
+	}
+	p.mu.Lock()
+	p.addrs[addr.String()] = &net.IPAddr{IP: addr}
+	if isIPv4(addr) {
+		p.hasIPv4 = true
+	} else if isIPv6(addr) {
+		p.hasIPv6 = true
+	}
+	p.mu.Unlock()
+	return nil
+}
+
+// AddIPAddr adds an IP address to Pinger. ip arg should be a net.IPAddr
+// pointer.
+func (p *Pinger) AddIPAddr(ip *net.IPAddr) {
+	p.mu.Lock()
+	p.addrs[ip.String()] = ip
+	if isIPv4(ip.IP) {
+		p.hasIPv4 = true
+	} else if isIPv6(ip.IP) {
+		p.hasIPv6 = true
+	}
+	p.mu.Unlock()
+}
+
+// RemoveIP removes an IP address from Pinger. ipaddr arg should be a string
+// like "192.0.2.1".
+func (p *Pinger) RemoveIP(ipaddr string) error {
+	addr := net.ParseIP(ipaddr)
+	if addr == nil {
+		return fmt.Errorf("%s is not a valid textual representation of an IP address", ipaddr)
+	}
+	p.mu.Lock()
+	delete(p.addrs, addr.String())
+	p.mu.Unlock()
+	return nil
+}
+
+// RemoveIPAddr removes an IP address from Pinger. ip arg should be a net.IPAddr
+// pointer.
+func (p *Pinger) RemoveIPAddr(ip *net.IPAddr) {
+	p.mu.Lock()
+	delete(p.addrs, ip.String())
+	p.mu.Unlock()
+}
+
+// AddHandler adds an event handler to Pinger. The event arg should be the
+// string "receive" or "idle".
+//
+// **CAUTION** This function is deprecated. Please use the OnRecv and OnIdle
+// fields of the Pinger struct to set the following handlers.
+//
+// "receive" handler should be
+//
+//	func(addr *net.IPAddr, rtt time.Duration)
+//
+// type function. The handler is called with a response packet's source address
+// and its elapsed time when Pinger receives a response packet.
+//
+// "idle" handler should be
+//
+//	func()
+//
+// type function. The handler is called when MaxRTT has passed. For more
+// detail, please see Run() and RunLoop().
+func (p *Pinger) AddHandler(event string, handler interface{}) error {
+	switch event {
+	case "receive":
+		if hdl, ok := handler.(func(*net.IPAddr, time.Duration)); ok {
+			p.mu.Lock()
+			p.OnRecv = hdl
+			p.mu.Unlock()
+			return nil
+		}
+		return errors.New("receive event handler should be `func(*net.IPAddr, time.Duration)`")
+	case "idle":
+		if hdl, ok := handler.(func()); ok {
+			p.mu.Lock()
+			p.OnIdle = hdl
+			p.mu.Unlock()
+			return nil
+		}
+		return errors.New("idle event handler should be `func()`")
+	}
+	return errors.New("No such event: " + event)
+}
+
+// Run invokes a single send/receive procedure. It sends packets to all hosts
+// which have already been added by AddIP() etc. and waits for their responses.
+// When it receives a response, it calls the "receive" handler registered by
+// AddHandler(). After MaxRTT has passed, it calls the "idle" handler and
+// returns to the caller with an error value; that is, it blocks until MaxRTT
+// has passed. To send and receive packets over and over, use RunLoop().
+func (p *Pinger) Run() error {
+	p.mu.Lock()
+	p.ctx = newContext()
+	p.mu.Unlock()
+	p.run(true)
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	return p.ctx.err
+}
+
+// RunLoop invokes the send/receive procedure repeatedly. It sends packets to
+// all hosts which have already been added by AddIP() etc. and waits for their
+// responses. When it receives a response, it calls the "receive" handler
+// registered by AddHandler(). After MaxRTT has passed, it calls the "idle"
+// handler, resends packets, and waits for their responses. MaxRTT works as
+// the interval time.
+//
+// This is a non-blocking method, so it returns immediately. If you want to
+// monitor and stop sending packets, use the Done() and Stop() methods. For
+// example,
+//
+//	p.RunLoop()
+//	ticker := time.NewTicker(time.Millisecond * 250)
+//	select {
+//	case <-p.Done():
+//		if err := p.Err(); err != nil {
+//			log.Fatalf("Ping failed: %v", err)
+//		}
+//	case <-ticker.C:
+//		break
+//	}
+//	ticker.Stop()
+//	p.Stop()
+//
+// For more details, please see "cmd/ping/ping.go".
+func (p *Pinger) RunLoop() {
+	p.mu.Lock()
+	p.ctx = newContext()
+	p.mu.Unlock()
+	go p.run(false)
+}
+
+// Done returns a channel that is closed when RunLoop() is stopped by an error
+// or by Stop(). It must be called after RunLoop() has been called; otherwise
+// it panics.
+func (p *Pinger) Done() <-chan bool {
+	return p.ctx.done
+}
+
+// Stop stops RunLoop(). It must be called after RunLoop(); otherwise it
+// panics.
+func (p *Pinger) Stop() {
+	p.debugln("Stop(): close(p.ctx.stop)")
+	close(p.ctx.stop)
+	p.debugln("Stop(): <-p.ctx.done")
+	<-p.ctx.done
+}
+
+// Err returns the error that was set by RunLoop(). It must be called after
+// RunLoop(); otherwise it panics.
+func (p *Pinger) Err() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	return p.ctx.err
+}
+
+func (p *Pinger) listen(netProto string, source string) *icmp.PacketConn {
+	conn, err := icmp.ListenPacket(netProto, source)
+	if err != nil {
+		p.mu.Lock()
+		p.ctx.err = err
+		p.mu.Unlock()
+		p.debugln("Run(): close(p.ctx.done)")
+		close(p.ctx.done)
+		return nil
+	}
+	return conn
+}
+
+func (p *Pinger) run(once bool) {
+	p.debugln("Run(): Start")
+	var conn, conn6 *icmp.PacketConn
+	if p.hasIPv4 {
+		if conn = p.listen(ipv4Proto[p.network], p.source); conn == nil {
+			return
+		}
+		defer conn.Close()
+	}
+
+	if p.hasIPv6 {
+		if conn6 = p.listen(ipv6Proto[p.network], p.source6); conn6 == nil {
+			return
+		}
+		defer conn6.Close()
+	}
+
+	recv := make(chan *packet, 1)
+	recvCtx := newContext()
+	wg := new(sync.WaitGroup)
+
+	p.debugln("Run(): call recvICMP()")
+	if conn != nil {
+		wg.Add(1)
+		go p.recvICMP(conn, recv, recvCtx, wg)
+	}
+	if conn6 != nil {
+		wg.Add(1)
+		go p.recvICMP(conn6, recv, recvCtx, wg)
+	}
+
+	p.debugln("Run(): call sendICMP()")
+	queue, err := p.sendICMP(conn, conn6)
+
+	ticker := time.NewTicker(p.MaxRTT)
+
+mainloop:
+	for {
+		select {
+		case <-p.ctx.stop:
+			p.debugln("Run(): <-p.ctx.stop")
+			break mainloop
+		case <-recvCtx.done:
+			p.debugln("Run(): <-recvCtx.done")
+			p.mu.Lock()
+			err = recvCtx.err
+			p.mu.Unlock()
+			break mainloop
+		case <-ticker.C:
+			p.mu.Lock()
+			handler := p.OnIdle
+			p.mu.Unlock()
+			if handler != nil {
+				handler()
+			}
+			if once || err != nil {
+				break mainloop
+			}
+			p.debugln("Run(): call sendICMP()")
+			queue, err = p.sendICMP(conn, conn6)
+		case r := <-recv:
+			p.debugln("Run(): <-recv")
+			p.procRecv(r, queue)
+		}
+	}
+
+	ticker.Stop()
+
+	p.debugln("Run(): close(recvCtx.stop)")
+	close(recvCtx.stop)
+	p.debugln("Run(): wait recvICMP()")
+	wg.Wait()
+
+	p.mu.Lock()
+	p.ctx.err = err
+	p.mu.Unlock()
+
+	p.debugln("Run(): close(p.ctx.done)")
+	close(p.ctx.done)
+	p.debugln("Run(): End")
+}
+
+func (p *Pinger) sendICMP(conn, conn6 *icmp.PacketConn) (map[string]*net.IPAddr, error) {
+	p.debugln("sendICMP(): Start")
+	p.mu.Lock()
+	p.id = rand.Intn(0xffff)
+	p.seq = rand.Intn(0xffff)
+	p.mu.Unlock()
+	queue := make(map[string]*net.IPAddr)
+	wg := new(sync.WaitGroup)
+	for key, addr := range p.addrs {
+		var typ icmp.Type
+		var cn *icmp.PacketConn
+		if isIPv4(addr.IP) {
+			typ = ipv4.ICMPTypeEcho
+			cn = conn
+		} else if isIPv6(addr.IP) {
+			typ = ipv6.ICMPTypeEchoRequest
+			cn = conn6
+		} else {
+			continue
+		}
+		if cn == nil {
+			continue
+		}
+
+		t := timeToBytes(time.Now())
+
+		if p.Size-TimeSliceLength != 0 {
+			t = append(t, byteSliceOfSize(p.Size-TimeSliceLength)...)
+		}
+
+		p.mu.Lock()
+		bytes, err := (&icmp.Message{
+			Type: typ, Code: 0,
+			Body: &icmp.Echo{
+				ID: p.id, Seq: p.seq,
+				Data: t,
+			},
+		}).Marshal(nil)
+		p.mu.Unlock()
+		if err != nil {
+			wg.Wait()
+			return queue, err
+		}
+
+		queue[key] = addr
+		var dst net.Addr = addr
+		if p.network == "udp" {
+			dst = &net.UDPAddr{IP: addr.IP, Zone: addr.Zone}
+		}
+
+		p.debugln("sendICMP(): Invoke goroutine")
+		wg.Add(1)
+		go func(conn *icmp.PacketConn, ra net.Addr, b []byte) {
+			for {
+				if _, err := conn.WriteTo(bytes, ra); err != nil {
+					if neterr, ok := err.(*net.OpError); ok {
+						if neterr.Err == syscall.ENOBUFS {
+							continue
+						}
+					}
+				}
+				break
+			}
+			p.debugln("sendICMP(): WriteTo End")
+			wg.Done()
+		}(cn, dst, bytes)
+	}
+	wg.Wait()
+	p.debugln("sendICMP(): End")
+	return queue, nil
+}
+
+func (p *Pinger) recvICMP(conn *icmp.PacketConn, recv chan<- *packet, ctx *context, wg *sync.WaitGroup) {
+	p.debugln("recvICMP(): Start")
+	for {
+		select {
+		case <-ctx.stop:
+			p.debugln("recvICMP(): <-ctx.stop")
+			wg.Done()
+			p.debugln("recvICMP(): wg.Done()")
+			return
+		default:
+		}
+
+		bytes := make([]byte, 512)
+		conn.SetReadDeadline(time.Now().Add(time.Millisecond * 100))
+		p.debugln("recvICMP(): ReadFrom Start")
+		_, ra, err := conn.ReadFrom(bytes)
+		p.debugln("recvICMP(): ReadFrom End")
+		if err != nil {
+			if neterr, ok := err.(*net.OpError); ok {
+				if neterr.Timeout() {
+					p.debugln("recvICMP(): Read Timeout")
+					continue
+				} else {
+					p.debugln("recvICMP(): OpError happen", err)
+					p.mu.Lock()
+					ctx.err = err
+					p.mu.Unlock()
+					p.debugln("recvICMP(): close(ctx.done)")
+					close(ctx.done)
+					p.debugln("recvICMP(): wg.Done()")
+					wg.Done()
+					return
+				}
+			}
+		}
+		p.debugln("recvICMP(): p.recv <- packet")
+
+		select {
+		case recv <- &packet{bytes: bytes, addr: ra}:
+		case <-ctx.stop:
+			p.debugln("recvICMP(): <-ctx.stop")
+			wg.Done()
+			p.debugln("recvICMP(): wg.Done()")
+			return
+		}
+	}
+}
+
+func (p *Pinger) procRecv(recv *packet, queue map[string]*net.IPAddr) {
+	var ipaddr *net.IPAddr
+	switch adr := recv.addr.(type) {
+	case *net.IPAddr:
+		ipaddr = adr
+	case *net.UDPAddr:
+		ipaddr = &net.IPAddr{IP: adr.IP, Zone: adr.Zone}
+	default:
+		return
+	}
+
+	addr := ipaddr.String()
+	p.mu.Lock()
+	if _, ok := p.addrs[addr]; !ok {
+		p.mu.Unlock()
+		return
+	}
+	p.mu.Unlock()
+
+	var bytes []byte
+	var proto int
+	if isIPv4(ipaddr.IP) {
+		if p.network == "ip" {
+			bytes = ipv4Payload(recv.bytes)
+		} else {
+			bytes = recv.bytes
+		}
+		proto = ProtocolICMP
+	} else if isIPv6(ipaddr.IP) {
+		bytes = recv.bytes
+		proto = ProtocolIPv6ICMP
+	} else {
+		return
+	}
+
+	var m *icmp.Message
+	var err error
+	if m, err = icmp.ParseMessage(proto, bytes); err != nil {
+		return
+	}
+
+	if m.Type != ipv4.ICMPTypeEchoReply && m.Type != ipv6.ICMPTypeEchoReply {
+		return
+	}
+
+	var rtt time.Duration
+	switch pkt := m.Body.(type) {
+	case *icmp.Echo:
+		p.mu.Lock()
+		if pkt.ID == p.id && pkt.Seq == p.seq {
+			rtt = time.Since(bytesToTime(pkt.Data[:TimeSliceLength]))
+		}
+		p.mu.Unlock()
+	default:
+		return
+	}
+
+	if _, ok := queue[addr]; ok {
+		delete(queue, addr)
+		p.mu.Lock()
+		handler := p.OnRecv
+		p.mu.Unlock()
+		if handler != nil {
+			handler(ipaddr, rtt)
+		}
+	}
+}
+
+func (p *Pinger) debugln(args ...interface{}) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.Debug {
+		log.Println(args...)
+	}
+}
+
+func (p *Pinger) debugf(format string, args ...interface{}) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.Debug {
+		log.Printf(format, args...)
+	}
+}
diff --git a/harvester/vendor/golang.org/x/net/LICENSE b/harvester/vendor/golang.org/x/net/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/harvester/vendor/golang.org/x/net/PATENTS b/harvester/vendor/golang.org/x/net/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go.  This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation.  If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/harvester/vendor/golang.org/x/net/bpf/asm.go b/harvester/vendor/golang.org/x/net/bpf/asm.go
new file mode 100644
index 0000000..15e21b1
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/bpf/asm.go
@@ -0,0 +1,41 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import "fmt"
+
+// Assemble converts insts into raw instructions suitable for loading
+// into a BPF virtual machine.
+//
+// Currently, no optimization is attempted; the assembled program flow
+// is exactly as provided.
+func Assemble(insts []Instruction) ([]RawInstruction, error) {
+	ret := make([]RawInstruction, len(insts))
+	var err error
+	for i, inst := range insts {
+		ret[i], err = inst.Assemble()
+		if err != nil {
+			return nil, fmt.Errorf("assembling instruction %d: %s", i+1, err)
+		}
+	}
+	return ret, nil
+}
+
+// Disassemble attempts to parse raw back into
+// Instructions. Unrecognized RawInstructions are assumed to be an
+// extension not implemented by this package, and are passed through
+// unchanged to the output. The allDecoded value reports whether insts
+// contains no RawInstructions.
+func Disassemble(raw []RawInstruction) (insts []Instruction, allDecoded bool) {
+	insts = make([]Instruction, len(raw))
+	allDecoded = true
+	for i, r := range raw {
+		insts[i] = r.Disassemble()
+		if _, ok := insts[i].(RawInstruction); ok {
+			allDecoded = false
+		}
+	}
+	return insts, allDecoded
+}
diff --git a/harvester/vendor/golang.org/x/net/bpf/constants.go b/harvester/vendor/golang.org/x/net/bpf/constants.go
new file mode 100644
index 0000000..ccf6ada
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/bpf/constants.go
@@ -0,0 +1,218 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+// A Register is a register of the BPF virtual machine.
+type Register uint16
+
+const (
+	// RegA is the accumulator register. RegA is always the
+	// destination register of ALU operations.
+	RegA Register = iota
+	// RegX is the indirection register, used by LoadIndirect
+	// operations.
+	RegX
+)
+
+// An ALUOp is an arithmetic or logic operation.
+type ALUOp uint16
+
+// ALU binary operation types.
+const (
+	ALUOpAdd ALUOp = iota << 4
+	ALUOpSub
+	ALUOpMul
+	ALUOpDiv
+	ALUOpOr
+	ALUOpAnd
+	ALUOpShiftLeft
+	ALUOpShiftRight
+	aluOpNeg // Not exported because it's the only unary ALU operation, and gets its own instruction type.
+	ALUOpMod
+	ALUOpXor
+)
+
+// A JumpTest is a comparison operator used in conditional jumps.
+type JumpTest uint16
+
+// Supported operators for conditional jumps.
+const (
+	// K == A
+	JumpEqual JumpTest = iota
+	// K != A
+	JumpNotEqual
+	// K > A
+	JumpGreaterThan
+	// K < A
+	JumpLessThan
+	// K >= A
+	JumpGreaterOrEqual
+	// K <= A
+	JumpLessOrEqual
+	// K & A != 0
+	JumpBitsSet
+	// K & A == 0
+	JumpBitsNotSet
+)
+
+// An Extension is a function call provided by the kernel that
+// performs advanced operations that are expensive or impossible
+// within the BPF virtual machine.
+//
+// Extensions are only implemented by the Linux kernel.
+//
+// TODO: should we prune this list? Some of these extensions seem
+// either broken or near-impossible to use correctly, whereas others
+// (len, random, ifindex) are quite useful.
+type Extension int
+
+// Extension functions available in the Linux kernel.
+const (
+	// extOffset is the negative maximum number of instructions used
+	// to load instructions by overloading the K argument.
+	extOffset = -0x1000
+	// ExtLen returns the length of the packet.
+	ExtLen Extension = 1
+	// ExtProto returns the packet's L3 protocol type.
+	ExtProto = 0
+	// ExtType returns the packet's type (skb->pkt_type in the kernel)
+	//
+	// TODO: better documentation. How nice an API do we want to
+	// provide for these esoteric extensions?
+	ExtType = 4
+	// ExtPayloadOffset returns the offset of the packet payload, or
+	// the first protocol header that the kernel does not know how to
+	// parse.
+	ExtPayloadOffset = 52
+	// ExtInterfaceIndex returns the index of the interface on which
+	// the packet was received.
+	ExtInterfaceIndex = 8
+	// ExtNetlinkAttr returns the netlink attribute of type X at
+	// offset A.
+	ExtNetlinkAttr = 12
+	// ExtNetlinkAttrNested returns the nested netlink attribute of
+	// type X at offset A.
+	ExtNetlinkAttrNested = 16
+	// ExtMark returns the packet's mark value.
+	ExtMark = 20
+	// ExtQueue returns the packet's assigned hardware queue.
+	ExtQueue = 24
+	// ExtLinkLayerType returns the packet's hardware address type
+	// (e.g. Ethernet, Infiniband).
+	ExtLinkLayerType = 28
+	// ExtRXHash returns the packet's receive hash.
+	//
+	// TODO: figure out what this rxhash actually is.
+	ExtRXHash = 32
+	// ExtCPUID returns the ID of the CPU processing the current
+	// packet.
+	ExtCPUID = 36
+	// ExtVLANTag returns the packet's VLAN tag.
+	ExtVLANTag = 44
+	// ExtVLANTagPresent returns non-zero if the packet has a VLAN
+	// tag.
+	//
+	// TODO: I think this might be a lie: it reads bit 0x1000 of the
+	// VLAN header, which changed meaning in recent revisions of the
+	// spec - this extension may now return meaningless information.
+	ExtVLANTagPresent = 48
+	// ExtVLANProto returns 0x8100 if the frame has a VLAN header,
+	// 0x88a8 if the frame has a "Q-in-Q" double VLAN header, or some
+	// other value if no VLAN information is present.
+	ExtVLANProto = 60
+	// ExtRand returns a uniformly random uint32.
+	ExtRand = 56
+)
+
+// The following gives names to various bit patterns used in opcode construction.
+
+const (
+	opMaskCls uint16 = 0x7
+	// opClsLoad masks
+	opMaskLoadDest  = 0x01
+	opMaskLoadWidth = 0x18
+	opMaskLoadMode  = 0xe0
+	// opClsALU
+	opMaskOperandSrc = 0x08
+	opMaskOperator   = 0xf0
+	// opClsJump
+	opMaskJumpConst = 0x0f
+	opMaskJumpCond  = 0xf0
+)
+
+const (
+	// +---------------+-----------------+---+---+---+
+	// | AddrMode (3b) | LoadWidth (2b)  | 0 | 0 | 0 |
+	// +---------------+-----------------+---+---+---+
+	opClsLoadA uint16 = iota
+	// +---------------+-----------------+---+---+---+
+	// | AddrMode (3b) | LoadWidth (2b)  | 0 | 0 | 1 |
+	// +---------------+-----------------+---+---+---+
+	opClsLoadX
+	// +---+---+---+---+---+---+---+---+
+	// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
+	// +---+---+---+---+---+---+---+---+
+	opClsStoreA
+	// +---+---+---+---+---+---+---+---+
+	// | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 |
+	// +---+---+---+---+---+---+---+---+
+	opClsStoreX
+	// +---------------+-----------------+---+---+---+
+	// | Operator (4b) | OperandSrc (1b) | 1 | 0 | 0 |
+	// +---------------+-----------------+---+---+---+
+	opClsALU
+	// +-----------------------------+---+---+---+---+
+	// |      TestOperator (4b)      | 0 | 1 | 0 | 1 |
+	// +-----------------------------+---+---+---+---+
+	opClsJump
+	// +---+-------------------------+---+---+---+---+
+	// | 0 | 0 | 0 |   RetSrc (1b)   | 0 | 1 | 1 | 0 |
+	// +---+-------------------------+---+---+---+---+
+	opClsReturn
+	// +---+-------------------------+---+---+---+---+
+	// | 0 | 0 | 0 |  TXAorTAX (1b)  | 0 | 1 | 1 | 1 |
+	// +---+-------------------------+---+---+---+---+
+	opClsMisc
+)
+
+const (
+	opAddrModeImmediate uint16 = iota << 5
+	opAddrModeAbsolute
+	opAddrModeIndirect
+	opAddrModeScratch
+	opAddrModePacketLen // actually an extension, not an addressing mode.
+	opAddrModeMemShift
+)
+
+const (
+	opLoadWidth4 uint16 = iota << 3
+	opLoadWidth2
+	opLoadWidth1
+)
+
+// Operator defined by ALUOp*
+
+const (
+	opALUSrcConstant uint16 = iota << 3
+	opALUSrcX
+)
+
+const (
+	opJumpAlways = iota << 4
+	opJumpEqual
+	opJumpGT
+	opJumpGE
+	opJumpSet
+)
+
+const (
+	opRetSrcConstant uint16 = iota << 4
+	opRetSrcA
+)
+
+const (
+	opMiscTAX = 0x00
+	opMiscTXA = 0x80
+)
diff --git a/harvester/vendor/golang.org/x/net/bpf/doc.go b/harvester/vendor/golang.org/x/net/bpf/doc.go
new file mode 100644
index 0000000..ae62feb
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/bpf/doc.go
@@ -0,0 +1,82 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+
+Package bpf implements marshaling and unmarshaling of programs for the
+Berkeley Packet Filter virtual machine, and provides a Go implementation
+of the virtual machine.
+
+BPF's main use is to specify a packet filter for network taps, so that
+the kernel doesn't have to expensively copy every packet it sees to
+userspace. However, it's been repurposed to other areas where running
+user code in-kernel is needed. For example, Linux's seccomp uses BPF
+to apply security policies to system calls. For simplicity, this
+documentation refers only to packets, but other uses of BPF have their
+own data payloads.
+
+BPF programs run in a restricted virtual machine. The machine has almost no
+access to kernel functions, and while conditional branches are
+allowed, they can only jump forwards, to guarantee that there are no
+infinite loops.
+
+The virtual machine
+
+The BPF VM is an accumulator machine. Its main register, called
+register A, is an implicit source and destination in all arithmetic
+and logic operations. The machine also has 16 scratch registers for
+temporary storage, and an indirection register (register X) for
+indirect memory access. All registers are 32 bits wide.
+
+Each run of a BPF program is given one packet, which is placed in the
+VM's read-only "main memory". LoadAbsolute and LoadIndirect
+instructions can fetch up to 32 bits at a time into register A for
+examination.
+
+The goal of a BPF program is to produce and return a verdict (uint32),
+which tells the kernel what to do with the packet. In the context of
+packet filtering, the returned value is the number of bytes of the
+packet to forward to userspace, or 0 to ignore the packet. Other
+contexts like seccomp define their own return values.
+
+In order to simplify programs, attempts to read past the end of the
+packet terminate the program execution with a verdict of 0 (ignore
+packet). This means that the vast majority of BPF programs don't need
+to do any explicit bounds checking.
+
+In addition to the bytes of the packet, some BPF programs have access
+to extensions, which are essentially calls to kernel utility
+functions. Currently, the only extensions supported by this package
+are the Linux packet filter extensions.
+
+Examples
+
+This packet filter selects all ARP packets.
+
+	bpf.Assemble([]bpf.Instruction{
+		// Load "EtherType" field from the ethernet header.
+		bpf.LoadAbsolute{Off: 12, Size: 2},
+		// Skip over the next instruction if EtherType is not ARP.
+		bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
+		// Verdict is "send up to 4k of the packet to userspace."
+		bpf.RetConstant{Val: 4096},
+		// Verdict is "ignore packet."
+		bpf.RetConstant{Val: 0},
+	})
+
+This packet filter captures a random 1% sample of traffic.
+
+	bpf.Assemble([]bpf.Instruction{
+		// Get a 32-bit random number from the Linux kernel.
+		bpf.LoadExtension{Num: bpf.ExtRand},
+		// 1% dice roll?
+		bpf.JumpIf{Cond: bpf.JumpLessThan, Val: 1 << 32 / 100, SkipFalse: 1},
+		// Capture.
+		bpf.RetConstant{Val: 4096},
+		// Ignore.
+		bpf.RetConstant{Val: 0},
+	})
+
+*/
+package bpf // import "golang.org/x/net/bpf"
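+
+// Editor's sketch (not upstream documentation): the ARP filter above can
+// also be executed in this package's userspace VM, defined in vm.go.
+// frame below is a hypothetical captured ethernet frame.
+//
+//	vm, err := bpf.NewVM([]bpf.Instruction{
+//		bpf.LoadAbsolute{Off: 12, Size: 2},
+//		bpf.JumpIf{Cond: bpf.JumpNotEqual, Val: 0x0806, SkipTrue: 1},
+//		bpf.RetConstant{Val: 4096},
+//		bpf.RetConstant{Val: 0},
+//	})
+//	if err != nil {
+//		// the program failed validation
+//	}
+//	verdict, err := vm.Run(frame) // verdict > 0 means "capture"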
diff --git a/harvester/vendor/golang.org/x/net/bpf/instructions.go b/harvester/vendor/golang.org/x/net/bpf/instructions.go
new file mode 100644
index 0000000..3b4fd08
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/bpf/instructions.go
@@ -0,0 +1,704 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import "fmt"
+
+// An Instruction is one instruction executed by the BPF virtual
+// machine.
+type Instruction interface {
+	// Assemble assembles the Instruction into a RawInstruction.
+	Assemble() (RawInstruction, error)
+}
+
+// A RawInstruction is a raw BPF virtual machine instruction.
+type RawInstruction struct {
+	// Operation to execute.
+	Op uint16
+	// For conditional jump instructions, the number of instructions
+	// to skip if the condition is true/false.
+	Jt uint8
+	Jf uint8
+	// Constant parameter. The meaning depends on the Op.
+	K uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (ri RawInstruction) Assemble() (RawInstruction, error) { return ri, nil }
+
+// Disassemble parses ri into an Instruction and returns it. If ri is
+// not recognized by this package, ri itself is returned.
+func (ri RawInstruction) Disassemble() Instruction {
+	switch ri.Op & opMaskCls {
+	case opClsLoadA, opClsLoadX:
+		reg := Register(ri.Op & opMaskLoadDest)
+		sz := 0
+		switch ri.Op & opMaskLoadWidth {
+		case opLoadWidth4:
+			sz = 4
+		case opLoadWidth2:
+			sz = 2
+		case opLoadWidth1:
+			sz = 1
+		default:
+			return ri
+		}
+		switch ri.Op & opMaskLoadMode {
+		case opAddrModeImmediate:
+			if sz != 4 {
+				return ri
+			}
+			return LoadConstant{Dst: reg, Val: ri.K}
+		case opAddrModeScratch:
+			if sz != 4 || ri.K > 15 {
+				return ri
+			}
+			return LoadScratch{Dst: reg, N: int(ri.K)}
+		case opAddrModeAbsolute:
+			if ri.K > extOffset+0xffffffff {
+				return LoadExtension{Num: Extension(-extOffset + ri.K)}
+			}
+			return LoadAbsolute{Size: sz, Off: ri.K}
+		case opAddrModeIndirect:
+			return LoadIndirect{Size: sz, Off: ri.K}
+		case opAddrModePacketLen:
+			if sz != 4 {
+				return ri
+			}
+			return LoadExtension{Num: ExtLen}
+		case opAddrModeMemShift:
+			return LoadMemShift{Off: ri.K}
+		default:
+			return ri
+		}
+
+	case opClsStoreA:
+		if ri.Op != opClsStoreA || ri.K > 15 {
+			return ri
+		}
+		return StoreScratch{Src: RegA, N: int(ri.K)}
+
+	case opClsStoreX:
+		if ri.Op != opClsStoreX || ri.K > 15 {
+			return ri
+		}
+		return StoreScratch{Src: RegX, N: int(ri.K)}
+
+	case opClsALU:
+		switch op := ALUOp(ri.Op & opMaskOperator); op {
+		case ALUOpAdd, ALUOpSub, ALUOpMul, ALUOpDiv, ALUOpOr, ALUOpAnd, ALUOpShiftLeft, ALUOpShiftRight, ALUOpMod, ALUOpXor:
+			if ri.Op&opMaskOperandSrc != 0 {
+				return ALUOpX{Op: op}
+			}
+			return ALUOpConstant{Op: op, Val: ri.K}
+		case aluOpNeg:
+			return NegateA{}
+		default:
+			return ri
+		}
+
+	case opClsJump:
+		if ri.Op&opMaskJumpConst != opClsJump {
+			return ri
+		}
+		switch ri.Op & opMaskJumpCond {
+		case opJumpAlways:
+			return Jump{Skip: ri.K}
+		case opJumpEqual:
+			if ri.Jt == 0 {
+				return JumpIf{
+					Cond:      JumpNotEqual,
+					Val:       ri.K,
+					SkipTrue:  ri.Jf,
+					SkipFalse: 0,
+				}
+			}
+			return JumpIf{
+				Cond:      JumpEqual,
+				Val:       ri.K,
+				SkipTrue:  ri.Jt,
+				SkipFalse: ri.Jf,
+			}
+		case opJumpGT:
+			if ri.Jt == 0 {
+				return JumpIf{
+					Cond:      JumpLessOrEqual,
+					Val:       ri.K,
+					SkipTrue:  ri.Jf,
+					SkipFalse: 0,
+				}
+			}
+			return JumpIf{
+				Cond:      JumpGreaterThan,
+				Val:       ri.K,
+				SkipTrue:  ri.Jt,
+				SkipFalse: ri.Jf,
+			}
+		case opJumpGE:
+			if ri.Jt == 0 {
+				return JumpIf{
+					Cond:      JumpLessThan,
+					Val:       ri.K,
+					SkipTrue:  ri.Jf,
+					SkipFalse: 0,
+				}
+			}
+			return JumpIf{
+				Cond:      JumpGreaterOrEqual,
+				Val:       ri.K,
+				SkipTrue:  ri.Jt,
+				SkipFalse: ri.Jf,
+			}
+		case opJumpSet:
+			return JumpIf{
+				Cond:      JumpBitsSet,
+				Val:       ri.K,
+				SkipTrue:  ri.Jt,
+				SkipFalse: ri.Jf,
+			}
+		default:
+			return ri
+		}
+
+	case opClsReturn:
+		switch ri.Op {
+		case opClsReturn | opRetSrcA:
+			return RetA{}
+		case opClsReturn | opRetSrcConstant:
+			return RetConstant{Val: ri.K}
+		default:
+			return ri
+		}
+
+	case opClsMisc:
+		switch ri.Op {
+		case opClsMisc | opMiscTAX:
+			return TAX{}
+		case opClsMisc | opMiscTXA:
+			return TXA{}
+		default:
+			return ri
+		}
+
+	default:
+		panic("unreachable") // switch is exhaustive on the bit pattern
+	}
+}
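+
+// Editor's note: Disassemble inverts Assemble up to semantic equivalence.
+// For example, RetConstant{Val: 0}.Assemble() yields a RawInstruction with
+// Op == opClsReturn|opRetSrcConstant, and Disassemble on that yields
+// RetConstant{Val: 0} again; a JumpIf with Cond JumpNotEqual and two
+// non-zero skip counts comes back as the equivalent JumpEqual form with
+// the skip counts swapped.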
+
+// LoadConstant loads Val into register Dst.
+type LoadConstant struct {
+	Dst Register
+	Val uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadConstant) Assemble() (RawInstruction, error) {
+	return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val)
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadConstant) String() string {
+	switch a.Dst {
+	case RegA:
+		return fmt.Sprintf("ld #%d", a.Val)
+	case RegX:
+		return fmt.Sprintf("ldx #%d", a.Val)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// LoadScratch loads scratch[N] into register Dst.
+type LoadScratch struct {
+	Dst Register
+	N   int // 0-15
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadScratch) Assemble() (RawInstruction, error) {
+	if a.N < 0 || a.N > 15 {
+		return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
+	}
+	return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N))
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadScratch) String() string {
+	switch a.Dst {
+	case RegA:
+		return fmt.Sprintf("ld M[%d]", a.N)
+	case RegX:
+		return fmt.Sprintf("ldx M[%d]", a.N)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// LoadAbsolute loads packet[Off:Off+Size] as an integer value into
+// register A.
+type LoadAbsolute struct {
+	Off  uint32
+	Size int // 1, 2 or 4
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadAbsolute) Assemble() (RawInstruction, error) {
+	return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off)
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadAbsolute) String() string {
+	switch a.Size {
+	case 1: // byte
+		return fmt.Sprintf("ldb [%d]", a.Off)
+	case 2: // half word
+		return fmt.Sprintf("ldh [%d]", a.Off)
+	case 4: // word
+		if a.Off > extOffset+0xffffffff {
+			return LoadExtension{Num: Extension(a.Off + 0x1000)}.String()
+		}
+		return fmt.Sprintf("ld [%d]", a.Off)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// LoadIndirect loads packet[X+Off:X+Off+Size] as an integer value
+// into register A.
+type LoadIndirect struct {
+	Off  uint32
+	Size int // 1, 2 or 4
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadIndirect) Assemble() (RawInstruction, error) {
+	return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off)
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadIndirect) String() string {
+	switch a.Size {
+	case 1: // byte
+		return fmt.Sprintf("ldb [x + %d]", a.Off)
+	case 2: // half word
+		return fmt.Sprintf("ldh [x + %d]", a.Off)
+	case 4: // word
+		return fmt.Sprintf("ld [x + %d]", a.Off)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// LoadMemShift multiplies the low 4 bits of the byte at packet[Off]
+// by 4 and stores the result in register X.
+//
+// This instruction is mainly useful to load into X the length of an
+// IPv4 packet header in a single instruction, rather than have to do
+// the arithmetic on the header's first byte by hand.
+type LoadMemShift struct {
+	Off uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadMemShift) Assemble() (RawInstruction, error) {
+	return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off)
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadMemShift) String() string {
+	return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off)
+}
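+
+// Editor's worked example: with a 14-byte ethernet header in front of an
+// IPv4 packet whose first header byte is 0x45 (version 4, IHL 5),
+// LoadMemShift{Off: 14} computes 4*(0x45&0xf) = 4*5 = 20 and leaves the
+// 20-byte IPv4 header length in register X.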
+
+// LoadExtension invokes a Linux-specific extension and stores the
+// result in register A.
+type LoadExtension struct {
+	Num Extension
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a LoadExtension) Assemble() (RawInstruction, error) {
+	if a.Num == ExtLen {
+		return assembleLoad(RegA, 4, opAddrModePacketLen, 0)
+	}
+	return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num))
+}
+
+// String returns the instruction in assembler notation.
+func (a LoadExtension) String() string {
+	switch a.Num {
+	case ExtLen:
+		return "ld #len"
+	case ExtProto:
+		return "ld #proto"
+	case ExtType:
+		return "ld #type"
+	case ExtPayloadOffset:
+		return "ld #poff"
+	case ExtInterfaceIndex:
+		return "ld #ifidx"
+	case ExtNetlinkAttr:
+		return "ld #nla"
+	case ExtNetlinkAttrNested:
+		return "ld #nlan"
+	case ExtMark:
+		return "ld #mark"
+	case ExtQueue:
+		return "ld #queue"
+	case ExtLinkLayerType:
+		return "ld #hatype"
+	case ExtRXHash:
+		return "ld #rxhash"
+	case ExtCPUID:
+		return "ld #cpu"
+	case ExtVLANTag:
+		return "ld #vlan_tci"
+	case ExtVLANTagPresent:
+		return "ld #vlan_avail"
+	case ExtVLANProto:
+		return "ld #vlan_tpid"
+	case ExtRand:
+		return "ld #rand"
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// StoreScratch stores register Src into scratch[N].
+type StoreScratch struct {
+	Src Register
+	N   int // 0-15
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a StoreScratch) Assemble() (RawInstruction, error) {
+	if a.N < 0 || a.N > 15 {
+		return RawInstruction{}, fmt.Errorf("invalid scratch slot %d", a.N)
+	}
+	var op uint16
+	switch a.Src {
+	case RegA:
+		op = opClsStoreA
+	case RegX:
+		op = opClsStoreX
+	default:
+		return RawInstruction{}, fmt.Errorf("invalid source register %v", a.Src)
+	}
+
+	return RawInstruction{
+		Op: op,
+		K:  uint32(a.N),
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a StoreScratch) String() string {
+	switch a.Src {
+	case RegA:
+		return fmt.Sprintf("st M[%d]", a.N)
+	case RegX:
+		return fmt.Sprintf("stx M[%d]", a.N)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// ALUOpConstant executes A = A <Op> Val.
+type ALUOpConstant struct {
+	Op  ALUOp
+	Val uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a ALUOpConstant) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsALU | opALUSrcConstant | uint16(a.Op),
+		K:  a.Val,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a ALUOpConstant) String() string {
+	switch a.Op {
+	case ALUOpAdd:
+		return fmt.Sprintf("add #%d", a.Val)
+	case ALUOpSub:
+		return fmt.Sprintf("sub #%d", a.Val)
+	case ALUOpMul:
+		return fmt.Sprintf("mul #%d", a.Val)
+	case ALUOpDiv:
+		return fmt.Sprintf("div #%d", a.Val)
+	case ALUOpMod:
+		return fmt.Sprintf("mod #%d", a.Val)
+	case ALUOpAnd:
+		return fmt.Sprintf("and #%d", a.Val)
+	case ALUOpOr:
+		return fmt.Sprintf("or #%d", a.Val)
+	case ALUOpXor:
+		return fmt.Sprintf("xor #%d", a.Val)
+	case ALUOpShiftLeft:
+		return fmt.Sprintf("lsh #%d", a.Val)
+	case ALUOpShiftRight:
+		return fmt.Sprintf("rsh #%d", a.Val)
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// ALUOpX executes A = A <Op> X.
+type ALUOpX struct {
+	Op ALUOp
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a ALUOpX) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsALU | opALUSrcX | uint16(a.Op),
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a ALUOpX) String() string {
+	switch a.Op {
+	case ALUOpAdd:
+		return "add x"
+	case ALUOpSub:
+		return "sub x"
+	case ALUOpMul:
+		return "mul x"
+	case ALUOpDiv:
+		return "div x"
+	case ALUOpMod:
+		return "mod x"
+	case ALUOpAnd:
+		return "and x"
+	case ALUOpOr:
+		return "or x"
+	case ALUOpXor:
+		return "xor x"
+	case ALUOpShiftLeft:
+		return "lsh x"
+	case ALUOpShiftRight:
+		return "rsh x"
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+// NegateA executes A = -A.
+type NegateA struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a NegateA) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsALU | uint16(aluOpNeg),
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a NegateA) String() string {
+	return "neg"
+}
+
+// Jump skips the following Skip instructions in the program.
+type Jump struct {
+	Skip uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a Jump) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsJump | opJumpAlways,
+		K:  a.Skip,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a Jump) String() string {
+	return fmt.Sprintf("ja %d", a.Skip)
+}
+
+// JumpIf skips the following Skip instructions in the program if A
+// <Cond> Val is true.
+type JumpIf struct {
+	Cond      JumpTest
+	Val       uint32
+	SkipTrue  uint8
+	SkipFalse uint8
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a JumpIf) Assemble() (RawInstruction, error) {
+	var (
+		cond uint16
+		flip bool
+	)
+	switch a.Cond {
+	case JumpEqual:
+		cond = opJumpEqual
+	case JumpNotEqual:
+		cond, flip = opJumpEqual, true
+	case JumpGreaterThan:
+		cond = opJumpGT
+	case JumpLessThan:
+		cond, flip = opJumpGE, true
+	case JumpGreaterOrEqual:
+		cond = opJumpGE
+	case JumpLessOrEqual:
+		cond, flip = opJumpGT, true
+	case JumpBitsSet:
+		cond = opJumpSet
+	case JumpBitsNotSet:
+		cond, flip = opJumpSet, true
+	default:
+		return RawInstruction{}, fmt.Errorf("unknown JumpTest %v", a.Cond)
+	}
+	jt, jf := a.SkipTrue, a.SkipFalse
+	if flip {
+		jt, jf = jf, jt
+	}
+	return RawInstruction{
+		Op: opClsJump | cond,
+		Jt: jt,
+		Jf: jf,
+		K:  a.Val,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a JumpIf) String() string {
+	switch a.Cond {
+	// K == A
+	case JumpEqual:
+		return conditionalJump(a, "jeq", "jneq")
+	// K != A
+	case JumpNotEqual:
+		return fmt.Sprintf("jneq #%d,%d", a.Val, a.SkipTrue)
+	// K > A
+	case JumpGreaterThan:
+		return conditionalJump(a, "jgt", "jle")
+	// K < A
+	case JumpLessThan:
+		return fmt.Sprintf("jlt #%d,%d", a.Val, a.SkipTrue)
+	// K >= A
+	case JumpGreaterOrEqual:
+		return conditionalJump(a, "jge", "jlt")
+	// K <= A
+	case JumpLessOrEqual:
+		return fmt.Sprintf("jle #%d,%d", a.Val, a.SkipTrue)
+	// K & A != 0
+	case JumpBitsSet:
+		if a.SkipFalse > 0 {
+			return fmt.Sprintf("jset #%d,%d,%d", a.Val, a.SkipTrue, a.SkipFalse)
+		}
+		return fmt.Sprintf("jset #%d,%d", a.Val, a.SkipTrue)
+	// K & A == 0; there is no assembler instruction for JumpBitsNotSet, so use JumpBitsSet and invert the skips
+	case JumpBitsNotSet:
+		return JumpIf{Cond: JumpBitsSet, SkipTrue: a.SkipFalse, SkipFalse: a.SkipTrue, Val: a.Val}.String()
+	default:
+		return fmt.Sprintf("unknown instruction: %#v", a)
+	}
+}
+
+func conditionalJump(inst JumpIf, positiveJump, negativeJump string) string {
+	if inst.SkipTrue > 0 {
+		if inst.SkipFalse > 0 {
+			return fmt.Sprintf("%s #%d,%d,%d", positiveJump, inst.Val, inst.SkipTrue, inst.SkipFalse)
+		}
+		return fmt.Sprintf("%s #%d,%d", positiveJump, inst.Val, inst.SkipTrue)
+	}
+	return fmt.Sprintf("%s #%d,%d", negativeJump, inst.Val, inst.SkipFalse)
+}
+
+// RetA exits the BPF program, returning the value of register A.
+type RetA struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a RetA) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsReturn | opRetSrcA,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a RetA) String() string {
+	return "ret a"
+}
+
+// RetConstant exits the BPF program, returning a constant value.
+type RetConstant struct {
+	Val uint32
+}
+
+// Assemble implements the Instruction Assemble method.
+func (a RetConstant) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsReturn | opRetSrcConstant,
+		K:  a.Val,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a RetConstant) String() string {
+	return fmt.Sprintf("ret #%d", a.Val)
+}
+
+// TXA copies the value of register X to register A.
+type TXA struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a TXA) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsMisc | opMiscTXA,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a TXA) String() string {
+	return "txa"
+}
+
+// TAX copies the value of register A to register X.
+type TAX struct{}
+
+// Assemble implements the Instruction Assemble method.
+func (a TAX) Assemble() (RawInstruction, error) {
+	return RawInstruction{
+		Op: opClsMisc | opMiscTAX,
+	}, nil
+}
+
+// String returns the instruction in assembler notation.
+func (a TAX) String() string {
+	return "tax"
+}
+
+func assembleLoad(dst Register, loadSize int, mode uint16, k uint32) (RawInstruction, error) {
+	var (
+		cls uint16
+		sz  uint16
+	)
+	switch dst {
+	case RegA:
+		cls = opClsLoadA
+	case RegX:
+		cls = opClsLoadX
+	default:
+		return RawInstruction{}, fmt.Errorf("invalid target register %v", dst)
+	}
+	switch loadSize {
+	case 1:
+		sz = opLoadWidth1
+	case 2:
+		sz = opLoadWidth2
+	case 4:
+		sz = opLoadWidth4
+	default:
+		return RawInstruction{}, fmt.Errorf("invalid load byte length %d", loadSize)
+	}
+	return RawInstruction{
+		Op: cls | sz | mode,
+		K:  k,
+	}, nil
+}
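+
+// Editor's sketch of how the opcode bits compose: assembling
+// LoadAbsolute{Off: 12, Size: 2} reduces to
+// assembleLoad(RegA, 2, opAddrModeAbsolute, 12), which produces
+// RawInstruction{Op: opClsLoadA | opLoadWidth2 | opAddrModeAbsolute, K: 12}.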
diff --git a/harvester/vendor/golang.org/x/net/bpf/vm.go b/harvester/vendor/golang.org/x/net/bpf/vm.go
new file mode 100644
index 0000000..4c656f1
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/bpf/vm.go
@@ -0,0 +1,140 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import (
+	"errors"
+	"fmt"
+)
+
+// A VM is an emulated BPF virtual machine.
+type VM struct {
+	filter []Instruction
+}
+
+// NewVM returns a new VM using the input BPF program.
+func NewVM(filter []Instruction) (*VM, error) {
+	if len(filter) == 0 {
+		return nil, errors.New("one or more Instructions must be specified")
+	}
+
+	for i, ins := range filter {
+		check := len(filter) - (i + 1)
+		switch ins := ins.(type) {
+		// Check for out-of-bounds jumps in instructions
+		case Jump:
+			if check <= int(ins.Skip) {
+				return nil, fmt.Errorf("cannot jump %d instructions; jumping past program bounds", ins.Skip)
+			}
+		case JumpIf:
+			if check <= int(ins.SkipTrue) {
+				return nil, fmt.Errorf("cannot jump %d instructions in true case; jumping past program bounds", ins.SkipTrue)
+			}
+			if check <= int(ins.SkipFalse) {
+				return nil, fmt.Errorf("cannot jump %d instructions in false case; jumping past program bounds", ins.SkipFalse)
+			}
+		// Check for division or modulus by zero
+		case ALUOpConstant:
+			if ins.Val != 0 {
+				break
+			}
+
+			switch ins.Op {
+			case ALUOpDiv, ALUOpMod:
+				return nil, errors.New("cannot divide by zero using ALUOpConstant")
+			}
+		// Check for unknown extensions
+		case LoadExtension:
+			switch ins.Num {
+			case ExtLen:
+			default:
+				return nil, fmt.Errorf("extension %d not implemented", ins.Num)
+			}
+		}
+	}
+
+	// Make sure the last instruction is a return instruction
+	switch filter[len(filter)-1].(type) {
+	case RetA, RetConstant:
+	default:
+		return nil, errors.New("BPF program must end with RetA or RetConstant")
+	}
+
+	// Though our VM works using disassembled instructions, we
+	// attempt to assemble the input filter anyway to ensure it is compatible
+	// with an operating system VM.
+	_, err := Assemble(filter)
+
+	return &VM{
+		filter: filter,
+	}, err
+}
+
+// Run runs the VM's BPF program against the input bytes.
+// Run returns the number of bytes accepted by the BPF program, and any errors
+// which occurred while processing the program.
+func (v *VM) Run(in []byte) (int, error) {
+	var (
+		// Registers of the virtual machine
+		regA       uint32
+		regX       uint32
+		regScratch [16]uint32
+
+		// OK is true if the program should continue processing the next
+		// instruction, or false if not, causing the loop to break.
+		ok = true
+	)
+
+	// TODO(mdlayher): implement:
+	// - NegateA:
+	//   - would require a change from uint32 registers to int32
+	//     registers
+
+	// TODO(mdlayher): add interop tests that check signedness of ALU
+	// operations against kernel implementation, and make sure Go
+	// implementation matches behavior
+
+	for i := 0; i < len(v.filter) && ok; i++ {
+		ins := v.filter[i]
+
+		switch ins := ins.(type) {
+		case ALUOpConstant:
+			regA = aluOpConstant(ins, regA)
+		case ALUOpX:
+			regA, ok = aluOpX(ins, regA, regX)
+		case Jump:
+			i += int(ins.Skip)
+		case JumpIf:
+			jump := jumpIf(ins, regA)
+			i += jump
+		case LoadAbsolute:
+			regA, ok = loadAbsolute(ins, in)
+		case LoadConstant:
+			regA, regX = loadConstant(ins, regA, regX)
+		case LoadExtension:
+			regA = loadExtension(ins, in)
+		case LoadIndirect:
+			regA, ok = loadIndirect(ins, in, regX)
+		case LoadMemShift:
+			regX, ok = loadMemShift(ins, in)
+		case LoadScratch:
+			regA, regX = loadScratch(ins, regScratch, regA, regX)
+		case RetA:
+			return int(regA), nil
+		case RetConstant:
+			return int(ins.Val), nil
+		case StoreScratch:
+			regScratch = storeScratch(ins, regScratch, regA, regX)
+		case TAX:
+			regX = regA
+		case TXA:
+			regA = regX
+		default:
+			return 0, fmt.Errorf("unknown Instruction at index %d: %T", i, ins)
+		}
+	}
+
+	return 0, nil
+}
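+
+// Editor's usage sketch: reads past the end of the input make the load
+// helpers return ok == false, so the loop above falls through and Run
+// returns the "ignore packet" verdict of 0, e.g.:
+//
+//	vm, _ := NewVM([]Instruction{
+//		LoadAbsolute{Off: 100, Size: 1},
+//		RetA{},
+//	})
+//	verdict, _ := vm.Run(make([]byte, 10)) // verdict == 0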
diff --git a/harvester/vendor/golang.org/x/net/bpf/vm_instructions.go b/harvester/vendor/golang.org/x/net/bpf/vm_instructions.go
new file mode 100644
index 0000000..516f946
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/bpf/vm_instructions.go
@@ -0,0 +1,174 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bpf
+
+import (
+	"encoding/binary"
+	"fmt"
+)
+
+func aluOpConstant(ins ALUOpConstant, regA uint32) uint32 {
+	return aluOpCommon(ins.Op, regA, ins.Val)
+}
+
+func aluOpX(ins ALUOpX, regA uint32, regX uint32) (uint32, bool) {
+	// Guard against division or modulus by zero by terminating
+	// the program, as the OS BPF VM does
+	if regX == 0 {
+		switch ins.Op {
+		case ALUOpDiv, ALUOpMod:
+			return 0, false
+		}
+	}
+
+	return aluOpCommon(ins.Op, regA, regX), true
+}
+
+func aluOpCommon(op ALUOp, regA uint32, value uint32) uint32 {
+	switch op {
+	case ALUOpAdd:
+		return regA + value
+	case ALUOpSub:
+		return regA - value
+	case ALUOpMul:
+		return regA * value
+	case ALUOpDiv:
+		// Division by zero not permitted by NewVM and aluOpX checks
+		return regA / value
+	case ALUOpOr:
+		return regA | value
+	case ALUOpAnd:
+		return regA & value
+	case ALUOpShiftLeft:
+		return regA << value
+	case ALUOpShiftRight:
+		return regA >> value
+	case ALUOpMod:
+		// Modulus by zero not permitted by NewVM and aluOpX checks
+		return regA % value
+	case ALUOpXor:
+		return regA ^ value
+	default:
+		return regA
+	}
+}
+
+func jumpIf(ins JumpIf, value uint32) int {
+	var ok bool
+	inV := ins.Val
+
+	switch ins.Cond {
+	case JumpEqual:
+		ok = value == inV
+	case JumpNotEqual:
+		ok = value != inV
+	case JumpGreaterThan:
+		ok = value > inV
+	case JumpLessThan:
+		ok = value < inV
+	case JumpGreaterOrEqual:
+		ok = value >= inV
+	case JumpLessOrEqual:
+		ok = value <= inV
+	case JumpBitsSet:
+		ok = (value & inV) != 0
+	case JumpBitsNotSet:
+		ok = (value & inV) == 0
+	}
+
+	if ok {
+		return int(ins.SkipTrue)
+	}
+
+	return int(ins.SkipFalse)
+}
+
+func loadAbsolute(ins LoadAbsolute, in []byte) (uint32, bool) {
+	offset := int(ins.Off)
+	size := ins.Size
+
+	return loadCommon(in, offset, size)
+}
+
+func loadConstant(ins LoadConstant, regA uint32, regX uint32) (uint32, uint32) {
+	switch ins.Dst {
+	case RegA:
+		regA = ins.Val
+	case RegX:
+		regX = ins.Val
+	}
+
+	return regA, regX
+}
+
+func loadExtension(ins LoadExtension, in []byte) uint32 {
+	switch ins.Num {
+	case ExtLen:
+		return uint32(len(in))
+	default:
+		panic(fmt.Sprintf("unimplemented extension: %d", ins.Num))
+	}
+}
+
+func loadIndirect(ins LoadIndirect, in []byte, regX uint32) (uint32, bool) {
+	offset := int(ins.Off) + int(regX)
+	size := ins.Size
+
+	return loadCommon(in, offset, size)
+}
+
+func loadMemShift(ins LoadMemShift, in []byte) (uint32, bool) {
+	offset := int(ins.Off)
+
+	if !inBounds(len(in), offset, 0) {
+		return 0, false
+	}
+
+	// Mask off high 4 bits and multiply low 4 bits by 4
+	return uint32(in[offset]&0x0f) * 4, true
+}
+
+func inBounds(inLen int, offset int, size int) bool {
+	return offset+size <= inLen
+}
+
+func loadCommon(in []byte, offset int, size int) (uint32, bool) {
+	if !inBounds(len(in), offset, size) {
+		return 0, false
+	}
+
+	switch size {
+	case 1:
+		return uint32(in[offset]), true
+	case 2:
+		return uint32(binary.BigEndian.Uint16(in[offset : offset+size])), true
+	case 4:
+		return uint32(binary.BigEndian.Uint32(in[offset : offset+size])), true
+	default:
+		panic(fmt.Sprintf("invalid load size: %d", size))
+	}
+}
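+
+// Editor's note: loadCommon reads multi-byte values in network (big-endian)
+// byte order, so loadCommon([]byte{0x08, 0x06}, 0, 2) returns (0x0806, true);
+// the EtherType match in the doc.go example depends on this.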
+
+func loadScratch(ins LoadScratch, regScratch [16]uint32, regA uint32, regX uint32) (uint32, uint32) {
+	switch ins.Dst {
+	case RegA:
+		regA = regScratch[ins.N]
+	case RegX:
+		regX = regScratch[ins.N]
+	}
+
+	return regA, regX
+}
+
+func storeScratch(ins StoreScratch, regScratch [16]uint32, regA uint32, regX uint32) [16]uint32 {
+	switch ins.Src {
+	case RegA:
+		regScratch[ins.N] = regA
+	case RegX:
+		regScratch[ins.N] = regX
+	}
+
+	return regScratch
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/dstunreach.go b/harvester/vendor/golang.org/x/net/icmp/dstunreach.go
new file mode 100644
index 0000000..75db991
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/dstunreach.go
@@ -0,0 +1,41 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+// A DstUnreach represents an ICMP destination unreachable message
+// body.
+type DstUnreach struct {
+	Data       []byte      // data, known as original datagram field
+	Extensions []Extension // extensions
+}
+
+// Len implements the Len method of MessageBody interface.
+func (p *DstUnreach) Len(proto int) int {
+	if p == nil {
+		return 0
+	}
+	l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions)
+	return 4 + l
+}
+
+// Marshal implements the Marshal method of MessageBody interface.
+func (p *DstUnreach) Marshal(proto int) ([]byte, error) {
+	return marshalMultipartMessageBody(proto, p.Data, p.Extensions)
+}
+
+// parseDstUnreach parses b as an ICMP destination unreachable message
+// body.
+func parseDstUnreach(proto int, b []byte) (MessageBody, error) {
+	if len(b) < 4 {
+		return nil, errMessageTooShort
+	}
+	p := &DstUnreach{}
+	var err error
+	p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b)
+	if err != nil {
+		return nil, err
+	}
+	return p, nil
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/echo.go b/harvester/vendor/golang.org/x/net/icmp/echo.go
new file mode 100644
index 0000000..e6f15ef
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/echo.go
@@ -0,0 +1,45 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import "encoding/binary"
+
+// An Echo represents an ICMP echo request or reply message body.
+type Echo struct {
+	ID   int    // identifier
+	Seq  int    // sequence number
+	Data []byte // data
+}
+
+// Len implements the Len method of MessageBody interface.
+func (p *Echo) Len(proto int) int {
+	if p == nil {
+		return 0
+	}
+	return 4 + len(p.Data)
+}
+
+// Marshal implements the Marshal method of MessageBody interface.
+func (p *Echo) Marshal(proto int) ([]byte, error) {
+	b := make([]byte, 4+len(p.Data))
+	binary.BigEndian.PutUint16(b[:2], uint16(p.ID))
+	binary.BigEndian.PutUint16(b[2:4], uint16(p.Seq))
+	copy(b[4:], p.Data)
+	return b, nil
+}
+
+// parseEcho parses b as an ICMP echo request or reply message body.
+func parseEcho(proto int, b []byte) (MessageBody, error) {
+	bodyLen := len(b)
+	if bodyLen < 4 {
+		return nil, errMessageTooShort
+	}
+	p := &Echo{ID: int(binary.BigEndian.Uint16(b[:2])), Seq: int(binary.BigEndian.Uint16(b[2:4]))}
+	if bodyLen > 4 {
+		p.Data = make([]byte, bodyLen-4)
+		copy(p.Data, b[4:])
+	}
+	return p, nil
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/endpoint.go b/harvester/vendor/golang.org/x/net/icmp/endpoint.go
new file mode 100644
index 0000000..a68bfb0
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/endpoint.go
@@ -0,0 +1,113 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+	"net"
+	"runtime"
+	"syscall"
+	"time"
+
+	"golang.org/x/net/ipv4"
+	"golang.org/x/net/ipv6"
+)
+
+var _ net.PacketConn = &PacketConn{}
+
+// A PacketConn represents a packet network endpoint that uses either
+// ICMPv4 or ICMPv6.
+type PacketConn struct {
+	c  net.PacketConn
+	p4 *ipv4.PacketConn
+	p6 *ipv6.PacketConn
+}
+
+func (c *PacketConn) ok() bool { return c != nil && c.c != nil }
+
+// IPv4PacketConn returns the ipv4.PacketConn of c.
+// It returns nil when c is not created as the endpoint for ICMPv4.
+func (c *PacketConn) IPv4PacketConn() *ipv4.PacketConn {
+	if !c.ok() {
+		return nil
+	}
+	return c.p4
+}
+
+// IPv6PacketConn returns the ipv6.PacketConn of c.
+// It returns nil when c is not created as the endpoint for ICMPv6.
+func (c *PacketConn) IPv6PacketConn() *ipv6.PacketConn {
+	if !c.ok() {
+		return nil
+	}
+	return c.p6
+}
+
+// ReadFrom reads an ICMP message from the connection.
+func (c *PacketConn) ReadFrom(b []byte) (int, net.Addr, error) {
+	if !c.ok() {
+		return 0, nil, syscall.EINVAL
+	}
+	// Note that ipv4.NewPacketConn enables the
+	// IP_STRIPHDR option by default on Darwin.
+	// See golang.org/issue/9395 for further information.
+	if runtime.GOOS == "darwin" && c.p4 != nil {
+		n, _, peer, err := c.p4.ReadFrom(b)
+		return n, peer, err
+	}
+	return c.c.ReadFrom(b)
+}
+
+// WriteTo writes the ICMP message b to dst.
+// Dst must be net.UDPAddr when c is a non-privileged
+// datagram-oriented ICMP endpoint. Otherwise it must be net.IPAddr.
+func (c *PacketConn) WriteTo(b []byte, dst net.Addr) (int, error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	return c.c.WriteTo(b, dst)
+}
+
+// Close closes the endpoint.
+func (c *PacketConn) Close() error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	return c.c.Close()
+}
+
+// LocalAddr returns the local network address.
+func (c *PacketConn) LocalAddr() net.Addr {
+	if !c.ok() {
+		return nil
+	}
+	return c.c.LocalAddr()
+}
+
+// SetDeadline sets the read and write deadlines associated with the
+// endpoint.
+func (c *PacketConn) SetDeadline(t time.Time) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	return c.c.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline associated with the
+// endpoint.
+func (c *PacketConn) SetReadDeadline(t time.Time) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	return c.c.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline associated with the
+// endpoint.
+func (c *PacketConn) SetWriteDeadline(t time.Time) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	return c.c.SetWriteDeadline(t)
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/extension.go b/harvester/vendor/golang.org/x/net/icmp/extension.go
new file mode 100644
index 0000000..402a751
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/extension.go
@@ -0,0 +1,89 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import "encoding/binary"
+
+// An Extension represents an ICMP extension.
+type Extension interface {
+	// Len returns the length of ICMP extension.
+	// Proto must be either the ICMPv4 or ICMPv6 protocol number.
+	Len(proto int) int
+
+	// Marshal returns the binary encoding of ICMP extension.
+	// Proto must be either the ICMPv4 or ICMPv6 protocol number.
+	Marshal(proto int) ([]byte, error)
+}
+
+const extensionVersion = 2
+
+func validExtensionHeader(b []byte) bool {
+	v := int(b[0]&0xf0) >> 4
+	s := binary.BigEndian.Uint16(b[2:4])
+	if s != 0 {
+		s = checksum(b)
+	}
+	if v != extensionVersion || s != 0 {
+		return false
+	}
+	return true
+}
+
+// parseExtensions parses b as a list of ICMP extensions.
+// The length attribute l must be the length attribute field from the
+// received ICMP message.
+//
+// On success it returns a list of ICMP extensions and an adjusted length
+// attribute that represents the length of the padded original
+// datagram field; otherwise it returns an error.
+func parseExtensions(b []byte, l int) ([]Extension, int, error) {
+	// A lot of non-RFC 4884 compliant implementations are still
+	// out there. For backwards compatibility, set the length
+	// attribute l to 128 when it looks inappropriate.
+	//
+	// A minimal extension requires at least 8 octets: 4 octets
+	// for an extension header, and 4 octets for a single object
+	// header.
+	//
+	// See RFC 4884 for further information.
+	if 128 > l || l+8 > len(b) {
+		l = 128
+	}
+	if l+8 > len(b) {
+		return nil, -1, errNoExtension
+	}
+	if !validExtensionHeader(b[l:]) {
+		if l == 128 {
+			return nil, -1, errNoExtension
+		}
+		l = 128
+		if !validExtensionHeader(b[l:]) {
+			return nil, -1, errNoExtension
+		}
+	}
+	var exts []Extension
+	for b = b[l+4:]; len(b) >= 4; {
+		ol := int(binary.BigEndian.Uint16(b[:2]))
+		if 4 > ol || ol > len(b) {
+			break
+		}
+		switch b[2] {
+		case classMPLSLabelStack:
+			ext, err := parseMPLSLabelStack(b[:ol])
+			if err != nil {
+				return nil, -1, err
+			}
+			exts = append(exts, ext)
+		case classInterfaceInfo:
+			ext, err := parseInterfaceInfo(b[:ol])
+			if err != nil {
+				return nil, -1, err
+			}
+			exts = append(exts, ext)
+		}
+		b = b[ol:]
+	}
+	return exts, l, nil
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/helper.go b/harvester/vendor/golang.org/x/net/icmp/helper.go
new file mode 100644
index 0000000..6c4e633
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/helper.go
@@ -0,0 +1,27 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+	"encoding/binary"
+	"unsafe"
+)
+
+var (
+	// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html.
+	freebsdVersion uint32
+
+	nativeEndian binary.ByteOrder
+)
+
+func init() {
+	i := uint32(1)
+	b := (*[4]byte)(unsafe.Pointer(&i))
+	if b[0] == 1 {
+		nativeEndian = binary.LittleEndian
+	} else {
+		nativeEndian = binary.BigEndian
+	}
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/helper_posix.go b/harvester/vendor/golang.org/x/net/icmp/helper_posix.go
new file mode 100644
index 0000000..398fd38
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/helper_posix.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package icmp
+
+import (
+	"net"
+	"strconv"
+	"syscall"
+)
+
+func sockaddr(family int, address string) (syscall.Sockaddr, error) {
+	switch family {
+	case syscall.AF_INET:
+		a, err := net.ResolveIPAddr("ip4", address)
+		if err != nil {
+			return nil, err
+		}
+		if len(a.IP) == 0 {
+			a.IP = net.IPv4zero
+		}
+		if a.IP = a.IP.To4(); a.IP == nil {
+			return nil, net.InvalidAddrError("non-ipv4 address")
+		}
+		sa := &syscall.SockaddrInet4{}
+		copy(sa.Addr[:], a.IP)
+		return sa, nil
+	case syscall.AF_INET6:
+		a, err := net.ResolveIPAddr("ip6", address)
+		if err != nil {
+			return nil, err
+		}
+		if len(a.IP) == 0 {
+			a.IP = net.IPv6unspecified
+		}
+		if a.IP.Equal(net.IPv4zero) {
+			a.IP = net.IPv6unspecified
+		}
+		if a.IP = a.IP.To16(); a.IP == nil || a.IP.To4() != nil {
+			return nil, net.InvalidAddrError("non-ipv6 address")
+		}
+		sa := &syscall.SockaddrInet6{ZoneId: zoneToUint32(a.Zone)}
+		copy(sa.Addr[:], a.IP)
+		return sa, nil
+	default:
+		return nil, net.InvalidAddrError("unexpected family")
+	}
+}
+
+func zoneToUint32(zone string) uint32 {
+	if zone == "" {
+		return 0
+	}
+	if ifi, err := net.InterfaceByName(zone); err == nil {
+		return uint32(ifi.Index)
+	}
+	n, err := strconv.Atoi(zone)
+	if err != nil {
+		return 0
+	}
+	return uint32(n)
+}
+
+func last(s string, b byte) int {
+	i := len(s)
+	for i--; i >= 0; i-- {
+		if s[i] == b {
+			break
+		}
+	}
+	return i
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/interface.go b/harvester/vendor/golang.org/x/net/icmp/interface.go
new file mode 100644
index 0000000..78b5b98
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/interface.go
@@ -0,0 +1,236 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+	"encoding/binary"
+	"net"
+	"strings"
+
+	"golang.org/x/net/internal/iana"
+)
+
+const (
+	classInterfaceInfo = 2
+
+	afiIPv4 = 1
+	afiIPv6 = 2
+)
+
+const (
+	attrMTU = 1 << iota
+	attrName
+	attrIPAddr
+	attrIfIndex
+)
+
+// An InterfaceInfo represents interface and next-hop identification.
+type InterfaceInfo struct {
+	Class     int // extension object class number
+	Type      int // extension object sub-type
+	Interface *net.Interface
+	Addr      *net.IPAddr
+}
+
+func (ifi *InterfaceInfo) nameLen() int {
+	if len(ifi.Interface.Name) > 63 {
+		return 64
+	}
+	l := 1 + len(ifi.Interface.Name)
+	return (l + 3) &^ 3
+}
+
+func (ifi *InterfaceInfo) attrsAndLen(proto int) (attrs, l int) {
+	l = 4
+	if ifi.Interface != nil && ifi.Interface.Index > 0 {
+		attrs |= attrIfIndex
+		l += 4
+		if len(ifi.Interface.Name) > 0 {
+			attrs |= attrName
+			l += ifi.nameLen()
+		}
+		if ifi.Interface.MTU > 0 {
+			attrs |= attrMTU
+			l += 4
+		}
+	}
+	if ifi.Addr != nil {
+		switch proto {
+		case iana.ProtocolICMP:
+			if ifi.Addr.IP.To4() != nil {
+				attrs |= attrIPAddr
+				l += 4 + net.IPv4len
+			}
+		case iana.ProtocolIPv6ICMP:
+			if ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil {
+				attrs |= attrIPAddr
+				l += 4 + net.IPv6len
+			}
+		}
+	}
+	return
+}
+
+// Len implements the Len method of Extension interface.
+func (ifi *InterfaceInfo) Len(proto int) int {
+	_, l := ifi.attrsAndLen(proto)
+	return l
+}
+
+// Marshal implements the Marshal method of Extension interface.
+func (ifi *InterfaceInfo) Marshal(proto int) ([]byte, error) {
+	attrs, l := ifi.attrsAndLen(proto)
+	b := make([]byte, l)
+	if err := ifi.marshal(proto, b, attrs, l); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func (ifi *InterfaceInfo) marshal(proto int, b []byte, attrs, l int) error {
+	binary.BigEndian.PutUint16(b[:2], uint16(l))
+	b[2], b[3] = classInterfaceInfo, byte(ifi.Type)
+	for b = b[4:]; len(b) > 0 && attrs != 0; {
+		switch {
+		case attrs&attrIfIndex != 0:
+			b = ifi.marshalIfIndex(proto, b)
+			attrs &^= attrIfIndex
+		case attrs&attrIPAddr != 0:
+			b = ifi.marshalIPAddr(proto, b)
+			attrs &^= attrIPAddr
+		case attrs&attrName != 0:
+			b = ifi.marshalName(proto, b)
+			attrs &^= attrName
+		case attrs&attrMTU != 0:
+			b = ifi.marshalMTU(proto, b)
+			attrs &^= attrMTU
+		}
+	}
+	return nil
+}
+
+func (ifi *InterfaceInfo) marshalIfIndex(proto int, b []byte) []byte {
+	binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.Index))
+	return b[4:]
+}
+
+func (ifi *InterfaceInfo) parseIfIndex(b []byte) ([]byte, error) {
+	if len(b) < 4 {
+		return nil, errMessageTooShort
+	}
+	ifi.Interface.Index = int(binary.BigEndian.Uint32(b[:4]))
+	return b[4:], nil
+}
+
+func (ifi *InterfaceInfo) marshalIPAddr(proto int, b []byte) []byte {
+	switch proto {
+	case iana.ProtocolICMP:
+		binary.BigEndian.PutUint16(b[:2], uint16(afiIPv4))
+		copy(b[4:4+net.IPv4len], ifi.Addr.IP.To4())
+		b = b[4+net.IPv4len:]
+	case iana.ProtocolIPv6ICMP:
+		binary.BigEndian.PutUint16(b[:2], uint16(afiIPv6))
+		copy(b[4:4+net.IPv6len], ifi.Addr.IP.To16())
+		b = b[4+net.IPv6len:]
+	}
+	return b
+}
+
+func (ifi *InterfaceInfo) parseIPAddr(b []byte) ([]byte, error) {
+	if len(b) < 4 {
+		return nil, errMessageTooShort
+	}
+	afi := int(binary.BigEndian.Uint16(b[:2]))
+	b = b[4:]
+	switch afi {
+	case afiIPv4:
+		if len(b) < net.IPv4len {
+			return nil, errMessageTooShort
+		}
+		ifi.Addr.IP = make(net.IP, net.IPv4len)
+		copy(ifi.Addr.IP, b[:net.IPv4len])
+		b = b[net.IPv4len:]
+	case afiIPv6:
+		if len(b) < net.IPv6len {
+			return nil, errMessageTooShort
+		}
+		ifi.Addr.IP = make(net.IP, net.IPv6len)
+		copy(ifi.Addr.IP, b[:net.IPv6len])
+		b = b[net.IPv6len:]
+	}
+	return b, nil
+}
+
+func (ifi *InterfaceInfo) marshalName(proto int, b []byte) []byte {
+	l := byte(ifi.nameLen())
+	b[0] = l
+	copy(b[1:], []byte(ifi.Interface.Name))
+	return b[l:]
+}
+
+func (ifi *InterfaceInfo) parseName(b []byte) ([]byte, error) {
+	if 4 > len(b) || len(b) < int(b[0]) {
+		return nil, errMessageTooShort
+	}
+	l := int(b[0])
+	if l%4 != 0 || 4 > l || l > 64 {
+		return nil, errInvalidExtension
+	}
+	var name [63]byte
+	copy(name[:], b[1:l])
+	ifi.Interface.Name = strings.Trim(string(name[:]), "\000")
+	return b[l:], nil
+}
+
+func (ifi *InterfaceInfo) marshalMTU(proto int, b []byte) []byte {
+	binary.BigEndian.PutUint32(b[:4], uint32(ifi.Interface.MTU))
+	return b[4:]
+}
+
+func (ifi *InterfaceInfo) parseMTU(b []byte) ([]byte, error) {
+	if len(b) < 4 {
+		return nil, errMessageTooShort
+	}
+	ifi.Interface.MTU = int(binary.BigEndian.Uint32(b[:4]))
+	return b[4:], nil
+}
+
+func parseInterfaceInfo(b []byte) (Extension, error) {
+	ifi := &InterfaceInfo{
+		Class: int(b[2]),
+		Type:  int(b[3]),
+	}
+	if ifi.Type&(attrIfIndex|attrName|attrMTU) != 0 {
+		ifi.Interface = &net.Interface{}
+	}
+	if ifi.Type&attrIPAddr != 0 {
+		ifi.Addr = &net.IPAddr{}
+	}
+	attrs := ifi.Type & (attrIfIndex | attrIPAddr | attrName | attrMTU)
+	for b = b[4:]; len(b) > 0 && attrs != 0; {
+		var err error
+		switch {
+		case attrs&attrIfIndex != 0:
+			b, err = ifi.parseIfIndex(b)
+			attrs &^= attrIfIndex
+		case attrs&attrIPAddr != 0:
+			b, err = ifi.parseIPAddr(b)
+			attrs &^= attrIPAddr
+		case attrs&attrName != 0:
+			b, err = ifi.parseName(b)
+			attrs &^= attrName
+		case attrs&attrMTU != 0:
+			b, err = ifi.parseMTU(b)
+			attrs &^= attrMTU
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	if ifi.Interface != nil && ifi.Interface.Name != "" && ifi.Addr != nil && ifi.Addr.IP.To16() != nil && ifi.Addr.IP.To4() == nil {
+		ifi.Addr.Zone = ifi.Interface.Name
+	}
+	return ifi, nil
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/ipv4.go b/harvester/vendor/golang.org/x/net/icmp/ipv4.go
new file mode 100644
index 0000000..729ddc9
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/ipv4.go
@@ -0,0 +1,56 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+	"encoding/binary"
+	"net"
+	"runtime"
+
+	"golang.org/x/net/ipv4"
+)
+
+// ParseIPv4Header parses b as the IPv4 header of the packet that
+// invoked an ICMP error message, as carried within the ICMP error message.
+func ParseIPv4Header(b []byte) (*ipv4.Header, error) {
+	if len(b) < ipv4.HeaderLen {
+		return nil, errHeaderTooShort
+	}
+	hdrlen := int(b[0]&0x0f) << 2
+	if hdrlen > len(b) {
+		return nil, errBufferTooShort
+	}
+	h := &ipv4.Header{
+		Version:  int(b[0] >> 4),
+		Len:      hdrlen,
+		TOS:      int(b[1]),
+		ID:       int(binary.BigEndian.Uint16(b[4:6])),
+		FragOff:  int(binary.BigEndian.Uint16(b[6:8])),
+		TTL:      int(b[8]),
+		Protocol: int(b[9]),
+		Checksum: int(binary.BigEndian.Uint16(b[10:12])),
+		Src:      net.IPv4(b[12], b[13], b[14], b[15]),
+		Dst:      net.IPv4(b[16], b[17], b[18], b[19]),
+	}
+	switch runtime.GOOS {
+	case "darwin":
+		h.TotalLen = int(nativeEndian.Uint16(b[2:4]))
+	case "freebsd":
+		if freebsdVersion >= 1000000 {
+			h.TotalLen = int(binary.BigEndian.Uint16(b[2:4]))
+		} else {
+			h.TotalLen = int(nativeEndian.Uint16(b[2:4]))
+		}
+	default:
+		h.TotalLen = int(binary.BigEndian.Uint16(b[2:4]))
+	}
+	h.Flags = ipv4.HeaderFlags(h.FragOff&0xe000) >> 13
+	h.FragOff = h.FragOff & 0x1fff
+	if hdrlen-ipv4.HeaderLen > 0 {
+		h.Options = make([]byte, hdrlen-ipv4.HeaderLen)
+		copy(h.Options, b[ipv4.HeaderLen:])
+	}
+	return h, nil
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/ipv6.go b/harvester/vendor/golang.org/x/net/icmp/ipv6.go
new file mode 100644
index 0000000..2e8cfeb
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/ipv6.go
@@ -0,0 +1,23 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+	"net"
+
+	"golang.org/x/net/internal/iana"
+)
+
+const ipv6PseudoHeaderLen = 2*net.IPv6len + 8
+
+// IPv6PseudoHeader returns an IPv6 pseudo header for checksum
+// calculation.
+func IPv6PseudoHeader(src, dst net.IP) []byte {
+	b := make([]byte, ipv6PseudoHeaderLen)
+	copy(b, src.To16())
+	copy(b[net.IPv6len:], dst.To16())
+	b[len(b)-1] = byte(iana.ProtocolIPv6ICMP)
+	return b
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/listen_posix.go b/harvester/vendor/golang.org/x/net/icmp/listen_posix.go
new file mode 100644
index 0000000..7fac4f9
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/listen_posix.go
@@ -0,0 +1,100 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package icmp
+
+import (
+	"net"
+	"os"
+	"runtime"
+	"syscall"
+
+	"golang.org/x/net/internal/iana"
+	"golang.org/x/net/ipv4"
+	"golang.org/x/net/ipv6"
+)
+
+const sysIP_STRIPHDR = 0x17 // for now only darwin supports this option
+
+// ListenPacket listens for incoming ICMP packets addressed to
+// address. See net.Dial for the syntax of address.
+//
+// For non-privileged datagram-oriented ICMP endpoints, network must
+// be "udp4" or "udp6". The endpoint allows reading and writing a few
+// limited ICMP messages such as echo request and echo reply.
+// Currently only Darwin and Linux support this.
+//
+// Examples:
+//	ListenPacket("udp4", "192.168.0.1")
+//	ListenPacket("udp4", "0.0.0.0")
+//	ListenPacket("udp6", "fe80::1%en0")
+//	ListenPacket("udp6", "::")
+//
+// For privileged raw ICMP endpoints, network must be "ip4" or "ip6"
+// followed by a colon and an ICMP protocol number or name.
+//
+// Examples:
+//	ListenPacket("ip4:icmp", "192.168.0.1")
+//	ListenPacket("ip4:1", "0.0.0.0")
+//	ListenPacket("ip6:ipv6-icmp", "fe80::1%en0")
+//	ListenPacket("ip6:58", "::")
+func ListenPacket(network, address string) (*PacketConn, error) {
+	var family, proto int
+	switch network {
+	case "udp4":
+		family, proto = syscall.AF_INET, iana.ProtocolICMP
+	case "udp6":
+		family, proto = syscall.AF_INET6, iana.ProtocolIPv6ICMP
+	default:
+		i := last(network, ':')
+		switch network[:i] {
+		case "ip4":
+			proto = iana.ProtocolICMP
+		case "ip6":
+			proto = iana.ProtocolIPv6ICMP
+		}
+	}
+	var cerr error
+	var c net.PacketConn
+	switch family {
+	case syscall.AF_INET, syscall.AF_INET6:
+		s, err := syscall.Socket(family, syscall.SOCK_DGRAM, proto)
+		if err != nil {
+			return nil, os.NewSyscallError("socket", err)
+		}
+		if runtime.GOOS == "darwin" && family == syscall.AF_INET {
+			if err := syscall.SetsockoptInt(s, iana.ProtocolIP, sysIP_STRIPHDR, 1); err != nil {
+				syscall.Close(s)
+				return nil, os.NewSyscallError("setsockopt", err)
+			}
+		}
+		sa, err := sockaddr(family, address)
+		if err != nil {
+			syscall.Close(s)
+			return nil, err
+		}
+		if err := syscall.Bind(s, sa); err != nil {
+			syscall.Close(s)
+			return nil, os.NewSyscallError("bind", err)
+		}
+		f := os.NewFile(uintptr(s), "datagram-oriented icmp")
+		c, cerr = net.FilePacketConn(f)
+		f.Close()
+	default:
+		c, cerr = net.ListenPacket(network, address)
+	}
+	if cerr != nil {
+		return nil, cerr
+	}
+	switch proto {
+	case iana.ProtocolICMP:
+		return &PacketConn{c: c, p4: ipv4.NewPacketConn(c)}, nil
+	case iana.ProtocolIPv6ICMP:
+		return &PacketConn{c: c, p6: ipv6.NewPacketConn(c)}, nil
+	default:
+		return &PacketConn{c: c}, nil
+	}
+}
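+
+// Editor's usage sketch (hypothetical values; assumes a Linux or Darwin
+// host for the non-privileged "udp4" network, as noted above):
+//
+//	c, err := icmp.ListenPacket("udp4", "0.0.0.0")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer c.Close()
+//	rb := make([]byte, 1500)
+//	n, peer, err := c.ReadFrom(rb)
+//	// rb[:n] can now be handed to icmp.ParseMessage(iana.ProtocolICMP, ...)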
diff --git a/harvester/vendor/golang.org/x/net/icmp/listen_stub.go b/harvester/vendor/golang.org/x/net/icmp/listen_stub.go
new file mode 100644
index 0000000..668728d
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/listen_stub.go
@@ -0,0 +1,33 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package icmp
+
+// ListenPacket listens for incoming ICMP packets addressed to
+// address. See net.Dial for the syntax of address.
+//
+// For non-privileged datagram-oriented ICMP endpoints, network must
+// be "udp4" or "udp6". The endpoint allows reading and writing a few
+// limited ICMP messages such as echo request and echo reply.
+// Currently only Darwin and Linux support this.
+//
+// Examples:
+//	ListenPacket("udp4", "192.168.0.1")
+//	ListenPacket("udp4", "0.0.0.0")
+//	ListenPacket("udp6", "fe80::1%en0")
+//	ListenPacket("udp6", "::")
+//
+// For privileged raw ICMP endpoints, network must be "ip4" or "ip6"
+// followed by a colon and an ICMP protocol number or name.
+//
+// Examples:
+//	ListenPacket("ip4:icmp", "192.168.0.1")
+//	ListenPacket("ip4:1", "0.0.0.0")
+//	ListenPacket("ip6:ipv6-icmp", "fe80::1%en0")
+//	ListenPacket("ip6:58", "::")
+func ListenPacket(network, address string) (*PacketConn, error) {
+	return nil, errOpNoSupport
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/message.go b/harvester/vendor/golang.org/x/net/icmp/message.go
new file mode 100644
index 0000000..81140b0
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/message.go
@@ -0,0 +1,152 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package icmp provides basic functions for the manipulation of
+// messages used in the Internet Control Message Protocols,
+// ICMPv4 and ICMPv6.
+//
+// ICMPv4 and ICMPv6 are defined in RFC 792 and RFC 4443.
+// Multi-part message support for ICMP is defined in RFC 4884.
+// ICMP extensions for MPLS are defined in RFC 4950.
+// ICMP extensions for interface and next-hop identification are
+// defined in RFC 5837.
+package icmp // import "golang.org/x/net/icmp"
+
+import (
+	"encoding/binary"
+	"errors"
+	"net"
+	"syscall"
+
+	"golang.org/x/net/internal/iana"
+	"golang.org/x/net/ipv4"
+	"golang.org/x/net/ipv6"
+)
+
+// BUG(mikio): This package is not implemented on NaCl and Plan 9.
+
+var (
+	errMessageTooShort  = errors.New("message too short")
+	errHeaderTooShort   = errors.New("header too short")
+	errBufferTooShort   = errors.New("buffer too short")
+	errOpNoSupport      = errors.New("operation not supported")
+	errNoExtension      = errors.New("no extension")
+	errInvalidExtension = errors.New("invalid extension")
+)
+
+func checksum(b []byte) uint16 {
+	csumcv := len(b) - 1 // checksum coverage
+	s := uint32(0)
+	for i := 0; i < csumcv; i += 2 {
+		s += uint32(b[i+1])<<8 | uint32(b[i])
+	}
+	if csumcv&1 == 0 {
+		s += uint32(b[csumcv])
+	}
+	s = s>>16 + s&0xffff
+	s = s + s>>16
+	return ^uint16(s)
+}
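+
+// Editor's worked example: for b = []byte{0x45, 0x00, 0x00, 0x54} the loop
+// sums the 16-bit words low-byte-first (0x0045 + 0x5400 = 0x5445), the two
+// folding steps add any carry back into the low 16 bits, and the function
+// returns ^0x5445 = 0xabba; this is the standard internet checksum
+// (RFC 1071) computed so the result can be stored without byte swapping.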
+
+// A Type represents an ICMP message type.
+type Type interface {
+	Protocol() int
+}
+
+// A Message represents an ICMP message.
+type Message struct {
+	Type     Type        // type, either ipv4.ICMPType or ipv6.ICMPType
+	Code     int         // code
+	Checksum int         // checksum
+	Body     MessageBody // body
+}
+
+// Marshal returns the binary encoding of the ICMP message m.
+//
+// For an ICMPv4 message, the returned message always contains the
+// calculated checksum field.
+//
+// For an ICMPv6 message, the returned message contains the calculated
+// checksum field when psh is not nil, otherwise the kernel will
+// compute the checksum field during the message transmission.
+// When psh is not nil, it must be the pseudo header for IPv6.
+func (m *Message) Marshal(psh []byte) ([]byte, error) {
+	var mtype int
+	switch typ := m.Type.(type) {
+	case ipv4.ICMPType:
+		mtype = int(typ)
+	case ipv6.ICMPType:
+		mtype = int(typ)
+	default:
+		return nil, syscall.EINVAL
+	}
+	b := []byte{byte(mtype), byte(m.Code), 0, 0}
+	if m.Type.Protocol() == iana.ProtocolIPv6ICMP && psh != nil {
+		b = append(psh, b...)
+	}
+	if m.Body != nil && m.Body.Len(m.Type.Protocol()) != 0 {
+		mb, err := m.Body.Marshal(m.Type.Protocol())
+		if err != nil {
+			return nil, err
+		}
+		b = append(b, mb...)
+	}
+	if m.Type.Protocol() == iana.ProtocolIPv6ICMP {
+		if psh == nil { // cannot calculate checksum here
+			return b, nil
+		}
+		off, l := 2*net.IPv6len, len(b)-len(psh)
+		binary.BigEndian.PutUint32(b[off:off+4], uint32(l))
+	}
+	s := checksum(b)
+	// Place checksum back in header; using ^= avoids the
+	// assumption the checksum bytes are zero.
+	b[len(psh)+2] ^= byte(s)
+	b[len(psh)+3] ^= byte(s >> 8)
+	return b[len(psh):], nil
+}
+
+var parseFns = map[Type]func(int, []byte) (MessageBody, error){
+	ipv4.ICMPTypeDestinationUnreachable: parseDstUnreach,
+	ipv4.ICMPTypeTimeExceeded:           parseTimeExceeded,
+	ipv4.ICMPTypeParameterProblem:       parseParamProb,
+
+	ipv4.ICMPTypeEcho:      parseEcho,
+	ipv4.ICMPTypeEchoReply: parseEcho,
+
+	ipv6.ICMPTypeDestinationUnreachable: parseDstUnreach,
+	ipv6.ICMPTypePacketTooBig:           parsePacketTooBig,
+	ipv6.ICMPTypeTimeExceeded:           parseTimeExceeded,
+	ipv6.ICMPTypeParameterProblem:       parseParamProb,
+
+	ipv6.ICMPTypeEchoRequest: parseEcho,
+	ipv6.ICMPTypeEchoReply:   parseEcho,
+}
+
+// ParseMessage parses b as an ICMP message.
+// Proto must be either the ICMPv4 or ICMPv6 protocol number.
+func ParseMessage(proto int, b []byte) (*Message, error) {
+	if len(b) < 4 {
+		return nil, errMessageTooShort
+	}
+	var err error
+	m := &Message{Code: int(b[1]), Checksum: int(binary.BigEndian.Uint16(b[2:4]))}
+	switch proto {
+	case iana.ProtocolICMP:
+		m.Type = ipv4.ICMPType(b[0])
+	case iana.ProtocolIPv6ICMP:
+		m.Type = ipv6.ICMPType(b[0])
+	default:
+		return nil, syscall.EINVAL
+	}
+	if fn, ok := parseFns[m.Type]; !ok {
+		m.Body, err = parseDefaultMessageBody(proto, b[4:])
+	} else {
+		m.Body, err = fn(proto, b[4:])
+	}
+	if err != nil {
+		return nil, err
+	}
+	return m, nil
+}
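+
+// Editor's usage sketch: Marshal and ParseMessage round-trip an echo
+// request (payload contents are made up):
+//
+//	m := icmp.Message{
+//		Type: ipv4.ICMPTypeEcho, Code: 0,
+//		Body: &icmp.Echo{ID: 1, Seq: 1, Data: []byte("HELLO")},
+//	}
+//	wb, err := m.Marshal(nil) // checksum is filled in for ICMPv4
+//	// ... send wb, receive a reply into rb[:n] ...
+//	rm, err := icmp.ParseMessage(iana.ProtocolICMP, rb[:n])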
diff --git a/harvester/vendor/golang.org/x/net/icmp/messagebody.go b/harvester/vendor/golang.org/x/net/icmp/messagebody.go
new file mode 100644
index 0000000..2463730
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/messagebody.go
@@ -0,0 +1,41 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+// A MessageBody represents an ICMP message body.
+type MessageBody interface {
+	// Len returns the length of ICMP message body.
+	// Proto must be either the ICMPv4 or ICMPv6 protocol number.
+	Len(proto int) int
+
+	// Marshal returns the binary encoding of ICMP message body.
+	// Proto must be either the ICMPv4 or ICMPv6 protocol number.
+	Marshal(proto int) ([]byte, error)
+}
+
+// A DefaultMessageBody represents the default message body.
+type DefaultMessageBody struct {
+	Data []byte // data
+}
+
+// Len implements the Len method of MessageBody interface.
+func (p *DefaultMessageBody) Len(proto int) int {
+	if p == nil {
+		return 0
+	}
+	return len(p.Data)
+}
+
+// Marshal implements the Marshal method of MessageBody interface.
+func (p *DefaultMessageBody) Marshal(proto int) ([]byte, error) {
+	return p.Data, nil
+}
+
+// parseDefaultMessageBody parses b as an ICMP message body.
+func parseDefaultMessageBody(proto int, b []byte) (MessageBody, error) {
+	p := &DefaultMessageBody{Data: make([]byte, len(b))}
+	copy(p.Data, b)
+	return p, nil
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/mpls.go b/harvester/vendor/golang.org/x/net/icmp/mpls.go
new file mode 100644
index 0000000..c314917
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/mpls.go
@@ -0,0 +1,77 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import "encoding/binary"
+
+// An MPLSLabel represents an MPLS label stack entry.
+type MPLSLabel struct {
+	Label int  // label value
+	TC    int  // traffic class; formerly experimental use
+	S     bool // bottom of stack
+	TTL   int  // time to live
+}
+
+const (
+	classMPLSLabelStack        = 1
+	typeIncomingMPLSLabelStack = 1
+)
+
+// An MPLSLabelStack represents an MPLS label stack.
+type MPLSLabelStack struct {
+	Class  int // extension object class number
+	Type   int // extension object sub-type
+	Labels []MPLSLabel
+}
+
+// Len implements the Len method of Extension interface.
+func (ls *MPLSLabelStack) Len(proto int) int {
+	return 4 + (4 * len(ls.Labels))
+}
+
+// Marshal implements the Marshal method of Extension interface.
+func (ls *MPLSLabelStack) Marshal(proto int) ([]byte, error) {
+	b := make([]byte, ls.Len(proto))
+	if err := ls.marshal(proto, b); err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+func (ls *MPLSLabelStack) marshal(proto int, b []byte) error {
+	l := ls.Len(proto)
+	binary.BigEndian.PutUint16(b[:2], uint16(l))
+	b[2], b[3] = classMPLSLabelStack, typeIncomingMPLSLabelStack
+	off := 4
+	for _, ll := range ls.Labels {
+		b[off], b[off+1], b[off+2] = byte(ll.Label>>12), byte(ll.Label>>4&0xff), byte(ll.Label<<4&0xf0)
+		b[off+2] |= byte(ll.TC << 1 & 0x0e)
+		if ll.S {
+			b[off+2] |= 0x1
+		}
+		b[off+3] = byte(ll.TTL)
+		off += 4
+	}
+	return nil
+}
+
+func parseMPLSLabelStack(b []byte) (Extension, error) {
+	ls := &MPLSLabelStack{
+		Class: int(b[2]),
+		Type:  int(b[3]),
+	}
+	for b = b[4:]; len(b) >= 4; b = b[4:] {
+		ll := MPLSLabel{
+			Label: int(b[0])<<12 | int(b[1])<<4 | int(b[2])>>4,
+			TC:    int(b[2]&0x0e) >> 1,
+			TTL:   int(b[3]),
+		}
+		if b[2]&0x1 != 0 {
+			ll.S = true
+		}
+		ls.Labels = append(ls.Labels, ll)
+	}
+	return ls, nil
+}
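
A short sketch of constructing and encoding the extension above; the literal 1s mirror the unexported classMPLSLabelStack and typeIncomingMPLSLabelStack constants and the ICMPv4 protocol number:

package main

import (
	"fmt"
	"log"

	"golang.org/x/net/icmp"
)

func main() {
	ls := icmp.MPLSLabelStack{
		Class: 1, // classMPLSLabelStack
		Type:  1, // typeIncomingMPLSLabelStack
		Labels: []icmp.MPLSLabel{
			{Label: 16, TC: 0, S: true, TTL: 64},
		},
	}
	b, err := ls.Marshal(1) // 1 = ICMPv4 protocol number
	if err != nil {
		log.Fatal(err)
	}
	// 8 bytes: a 4-byte object header plus one 4-byte label stack entry.
	fmt.Printf("% x\n", b)
}
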
diff --git a/harvester/vendor/golang.org/x/net/icmp/multipart.go b/harvester/vendor/golang.org/x/net/icmp/multipart.go
new file mode 100644
index 0000000..f271356
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/multipart.go
@@ -0,0 +1,109 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import "golang.org/x/net/internal/iana"
+
+// multipartMessageBodyDataLen takes b as an original datagram and
+// exts as extensions, and returns a required length for the message body
+// and a required length for a padded original datagram in wire
+// format.
+func multipartMessageBodyDataLen(proto int, b []byte, exts []Extension) (bodyLen, dataLen int) {
+	for _, ext := range exts {
+		bodyLen += ext.Len(proto)
+	}
+	if bodyLen > 0 {
+		dataLen = multipartMessageOrigDatagramLen(proto, b)
+		bodyLen += 4 // length of extension header
+	} else {
+		dataLen = len(b)
+	}
+	bodyLen += dataLen
+	return bodyLen, dataLen
+}
+
+// multipartMessageOrigDatagramLen takes b as an original datagram,
+// and returns a required length for a padded original datagram in wire
+// format.
+func multipartMessageOrigDatagramLen(proto int, b []byte) int {
+	roundup := func(b []byte, align int) int {
+		// According to RFC 4884, the padded original datagram
+		// field must contain at least 128 octets.
+		if len(b) < 128 {
+			return 128
+		}
+		r := len(b)
+		return (r + align - 1) & ^(align - 1)
+	}
+	switch proto {
+	case iana.ProtocolICMP:
+		return roundup(b, 4)
+	case iana.ProtocolIPv6ICMP:
+		return roundup(b, 8)
+	default:
+		return len(b)
+	}
+}
+
+// marshalMultipartMessageBody takes data as an original datagram and
+// exts as extensions, and returns a binary encoding of the message body.
+// It can be used for non-multipart message bodies when exts is nil.
+func marshalMultipartMessageBody(proto int, data []byte, exts []Extension) ([]byte, error) {
+	bodyLen, dataLen := multipartMessageBodyDataLen(proto, data, exts)
+	b := make([]byte, 4+bodyLen)
+	copy(b[4:], data)
+	off := dataLen + 4
+	if len(exts) > 0 {
+		b[dataLen+4] = byte(extensionVersion << 4)
+		off += 4 // length of object header
+		for _, ext := range exts {
+			switch ext := ext.(type) {
+			case *MPLSLabelStack:
+				if err := ext.marshal(proto, b[off:]); err != nil {
+					return nil, err
+				}
+				off += ext.Len(proto)
+			case *InterfaceInfo:
+				attrs, l := ext.attrsAndLen(proto)
+				if err := ext.marshal(proto, b[off:], attrs, l); err != nil {
+					return nil, err
+				}
+				off += ext.Len(proto)
+			}
+		}
+		s := checksum(b[dataLen+4:])
+		b[dataLen+4+2] ^= byte(s)
+		b[dataLen+4+3] ^= byte(s >> 8)
+		switch proto {
+		case iana.ProtocolICMP:
+			b[1] = byte(dataLen / 4)
+		case iana.ProtocolIPv6ICMP:
+			b[0] = byte(dataLen / 8)
+		}
+	}
+	return b, nil
+}
+
+// parseMultipartMessageBody parses b as either a non-multipart
+// message body or a multipart message body.
+func parseMultipartMessageBody(proto int, b []byte) ([]byte, []Extension, error) {
+	var l int
+	switch proto {
+	case iana.ProtocolICMP:
+		l = 4 * int(b[1])
+	case iana.ProtocolIPv6ICMP:
+		l = 8 * int(b[0])
+	}
+	if len(b) == 4 {
+		return nil, nil, nil
+	}
+	exts, l, err := parseExtensions(b[4:], l)
+	if err != nil {
+		l = len(b) - 4
+	}
+	data := make([]byte, l)
+	copy(data, b[4:])
+	return data, exts, nil
+}
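
The RFC 4884 padding rule above is easy to misread, so here is a standalone mirror of the unexported roundup logic with worked values (128-octet floor, then 32-bit alignment for ICMPv4 and 64-bit alignment for ICMPv6):

package main

import "fmt"

// roundup mirrors the unexported padding helper in multipart.go.
func roundup(n, align int) int {
	if n < 128 {
		return 128 // RFC 4884: at least 128 octets
	}
	return (n + align - 1) &^ (align - 1)
}

func main() {
	fmt.Println(roundup(40, 4))  // 128: short datagrams are padded up
	fmt.Println(roundup(129, 4)) // 132: ICMPv4 aligns to 4 octets
	fmt.Println(roundup(129, 8)) // 136: ICMPv6 aligns to 8 octets
}
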
diff --git a/harvester/vendor/golang.org/x/net/icmp/packettoobig.go b/harvester/vendor/golang.org/x/net/icmp/packettoobig.go
new file mode 100644
index 0000000..a1c9df7
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/packettoobig.go
@@ -0,0 +1,43 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import "encoding/binary"
+
+// A PacketTooBig represents an ICMP packet too big message body.
+type PacketTooBig struct {
+	MTU  int    // maximum transmission unit of the nexthop link
+	Data []byte // data, known as original datagram field
+}
+
+// Len implements the Len method of MessageBody interface.
+func (p *PacketTooBig) Len(proto int) int {
+	if p == nil {
+		return 0
+	}
+	return 4 + len(p.Data)
+}
+
+// Marshal implements the Marshal method of MessageBody interface.
+func (p *PacketTooBig) Marshal(proto int) ([]byte, error) {
+	b := make([]byte, 4+len(p.Data))
+	binary.BigEndian.PutUint32(b[:4], uint32(p.MTU))
+	copy(b[4:], p.Data)
+	return b, nil
+}
+
+// parsePacketTooBig parses b as an ICMP packet too big message body.
+func parsePacketTooBig(proto int, b []byte) (MessageBody, error) {
+	bodyLen := len(b)
+	if bodyLen < 4 {
+		return nil, errMessageTooShort
+	}
+	p := &PacketTooBig{MTU: int(binary.BigEndian.Uint32(b[:4]))}
+	if bodyLen > 4 {
+		p.Data = make([]byte, bodyLen-4)
+		copy(p.Data, b[4:])
+	}
+	return p, nil
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/paramprob.go b/harvester/vendor/golang.org/x/net/icmp/paramprob.go
new file mode 100644
index 0000000..0a2548d
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/paramprob.go
@@ -0,0 +1,63 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import (
+	"encoding/binary"
+	"golang.org/x/net/internal/iana"
+)
+
+// A ParamProb represents an ICMP parameter problem message body.
+type ParamProb struct {
+	Pointer    uintptr     // offset within the data where the error was detected
+	Data       []byte      // data, known as original datagram field
+	Extensions []Extension // extensions
+}
+
+// Len implements the Len method of MessageBody interface.
+func (p *ParamProb) Len(proto int) int {
+	if p == nil {
+		return 0
+	}
+	l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions)
+	return 4 + l
+}
+
+// Marshal implements the Marshal method of MessageBody interface.
+func (p *ParamProb) Marshal(proto int) ([]byte, error) {
+	if proto == iana.ProtocolIPv6ICMP {
+		b := make([]byte, p.Len(proto))
+		binary.BigEndian.PutUint32(b[:4], uint32(p.Pointer))
+		copy(b[4:], p.Data)
+		return b, nil
+	}
+	b, err := marshalMultipartMessageBody(proto, p.Data, p.Extensions)
+	if err != nil {
+		return nil, err
+	}
+	b[0] = byte(p.Pointer)
+	return b, nil
+}
+
+// parseParamProb parses b as an ICMP parameter problem message body.
+func parseParamProb(proto int, b []byte) (MessageBody, error) {
+	if len(b) < 4 {
+		return nil, errMessageTooShort
+	}
+	p := &ParamProb{}
+	if proto == iana.ProtocolIPv6ICMP {
+		p.Pointer = uintptr(binary.BigEndian.Uint32(b[:4]))
+		p.Data = make([]byte, len(b)-4)
+		copy(p.Data, b[4:])
+		return p, nil
+	}
+	p.Pointer = uintptr(b[0])
+	var err error
+	p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b)
+	if err != nil {
+		return nil, err
+	}
+	return p, nil
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/sys_freebsd.go b/harvester/vendor/golang.org/x/net/icmp/sys_freebsd.go
new file mode 100644
index 0000000..c75f3dd
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/sys_freebsd.go
@@ -0,0 +1,11 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+import "syscall"
+
+func init() {
+	freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate")
+}
diff --git a/harvester/vendor/golang.org/x/net/icmp/timeexceeded.go b/harvester/vendor/golang.org/x/net/icmp/timeexceeded.go
new file mode 100644
index 0000000..344e158
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/icmp/timeexceeded.go
@@ -0,0 +1,39 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package icmp
+
+// A TimeExceeded represents an ICMP time exceeded message body.
+type TimeExceeded struct {
+	Data       []byte      // data, known as original datagram field
+	Extensions []Extension // extensions
+}
+
+// Len implements the Len method of MessageBody interface.
+func (p *TimeExceeded) Len(proto int) int {
+	if p == nil {
+		return 0
+	}
+	l, _ := multipartMessageBodyDataLen(proto, p.Data, p.Extensions)
+	return 4 + l
+}
+
+// Marshal implements the Marshal method of MessageBody interface.
+func (p *TimeExceeded) Marshal(proto int) ([]byte, error) {
+	return marshalMultipartMessageBody(proto, p.Data, p.Extensions)
+}
+
+// parseTimeExceeded parses b as an ICMP time exceeded message body.
+func parseTimeExceeded(proto int, b []byte) (MessageBody, error) {
+	if len(b) < 4 {
+		return nil, errMessageTooShort
+	}
+	p := &TimeExceeded{}
+	var err error
+	p.Data, p.Extensions, err = parseMultipartMessageBody(proto, b)
+	if err != nil {
+		return nil, err
+	}
+	return p, nil
+}
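
Taken together, the body types in this change are consumed through a type switch on the parsed message. A sketch; handleBody is an illustrative name, not part of the patch:

package main

import (
	"fmt"

	"golang.org/x/net/icmp"
	"golang.org/x/net/ipv4"
)

// handleBody dispatches over the concrete body types vendored above.
func handleBody(rm *icmp.Message) {
	switch body := rm.Body.(type) {
	case *icmp.PacketTooBig:
		fmt.Println("next-hop MTU:", body.MTU)
	case *icmp.TimeExceeded:
		fmt.Printf("original datagram: % x\n", body.Data)
	case *icmp.ParamProb:
		fmt.Println("problem at offset:", body.Pointer)
	default:
		fmt.Printf("unhandled body type %T\n", body)
	}
}

func main() {
	handleBody(&icmp.Message{
		Type: ipv4.ICMPTypeTimeExceeded,
		Body: &icmp.TimeExceeded{Data: []byte{0x45}},
	})
}
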
diff --git a/harvester/vendor/golang.org/x/net/internal/iana/const.go b/harvester/vendor/golang.org/x/net/internal/iana/const.go
new file mode 100644
index 0000000..3438a27
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/internal/iana/const.go
@@ -0,0 +1,180 @@
+// go generate gen.go
+// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).
+package iana // import "golang.org/x/net/internal/iana"
+
+// Differentiated Services Field Codepoints (DSCP), Updated: 2013-06-25
+const (
+	DiffServCS0        = 0x0  // CS0
+	DiffServCS1        = 0x20 // CS1
+	DiffServCS2        = 0x40 // CS2
+	DiffServCS3        = 0x60 // CS3
+	DiffServCS4        = 0x80 // CS4
+	DiffServCS5        = 0xa0 // CS5
+	DiffServCS6        = 0xc0 // CS6
+	DiffServCS7        = 0xe0 // CS7
+	DiffServAF11       = 0x28 // AF11
+	DiffServAF12       = 0x30 // AF12
+	DiffServAF13       = 0x38 // AF13
+	DiffServAF21       = 0x48 // AF21
+	DiffServAF22       = 0x50 // AF22
+	DiffServAF23       = 0x58 // AF23
+	DiffServAF31       = 0x68 // AF31
+	DiffServAF32       = 0x70 // AF32
+	DiffServAF33       = 0x78 // AF33
+	DiffServAF41       = 0x88 // AF41
+	DiffServAF42       = 0x90 // AF42
+	DiffServAF43       = 0x98 // AF43
+	DiffServEFPHB      = 0xb8 // EF PHB
+	DiffServVOICEADMIT = 0xb0 // VOICE-ADMIT
+)
+
+// IPv4 TOS Byte and IPv6 Traffic Class Octet, Updated: 2001-09-06
+const (
+	NotECNTransport       = 0x0 // Not-ECT (Not ECN-Capable Transport)
+	ECNTransport1         = 0x1 // ECT(1) (ECN-Capable Transport(1))
+	ECNTransport0         = 0x2 // ECT(0) (ECN-Capable Transport(0))
+	CongestionExperienced = 0x3 // CE (Congestion Experienced)
+)
+
+// Protocol Numbers, Updated: 2015-10-06
+const (
+	ProtocolIP             = 0   // IPv4 encapsulation, pseudo protocol number
+	ProtocolHOPOPT         = 0   // IPv6 Hop-by-Hop Option
+	ProtocolICMP           = 1   // Internet Control Message
+	ProtocolIGMP           = 2   // Internet Group Management
+	ProtocolGGP            = 3   // Gateway-to-Gateway
+	ProtocolIPv4           = 4   // IPv4 encapsulation
+	ProtocolST             = 5   // Stream
+	ProtocolTCP            = 6   // Transmission Control
+	ProtocolCBT            = 7   // CBT
+	ProtocolEGP            = 8   // Exterior Gateway Protocol
+	ProtocolIGP            = 9   // any private interior gateway (used by Cisco for their IGRP)
+	ProtocolBBNRCCMON      = 10  // BBN RCC Monitoring
+	ProtocolNVPII          = 11  // Network Voice Protocol
+	ProtocolPUP            = 12  // PUP
+	ProtocolEMCON          = 14  // EMCON
+	ProtocolXNET           = 15  // Cross Net Debugger
+	ProtocolCHAOS          = 16  // Chaos
+	ProtocolUDP            = 17  // User Datagram
+	ProtocolMUX            = 18  // Multiplexing
+	ProtocolDCNMEAS        = 19  // DCN Measurement Subsystems
+	ProtocolHMP            = 20  // Host Monitoring
+	ProtocolPRM            = 21  // Packet Radio Measurement
+	ProtocolXNSIDP         = 22  // XEROX NS IDP
+	ProtocolTRUNK1         = 23  // Trunk-1
+	ProtocolTRUNK2         = 24  // Trunk-2
+	ProtocolLEAF1          = 25  // Leaf-1
+	ProtocolLEAF2          = 26  // Leaf-2
+	ProtocolRDP            = 27  // Reliable Data Protocol
+	ProtocolIRTP           = 28  // Internet Reliable Transaction
+	ProtocolISOTP4         = 29  // ISO Transport Protocol Class 4
+	ProtocolNETBLT         = 30  // Bulk Data Transfer Protocol
+	ProtocolMFENSP         = 31  // MFE Network Services Protocol
+	ProtocolMERITINP       = 32  // MERIT Internodal Protocol
+	ProtocolDCCP           = 33  // Datagram Congestion Control Protocol
+	Protocol3PC            = 34  // Third Party Connect Protocol
+	ProtocolIDPR           = 35  // Inter-Domain Policy Routing Protocol
+	ProtocolXTP            = 36  // XTP
+	ProtocolDDP            = 37  // Datagram Delivery Protocol
+	ProtocolIDPRCMTP       = 38  // IDPR Control Message Transport Proto
+	ProtocolTPPP           = 39  // TP++ Transport Protocol
+	ProtocolIL             = 40  // IL Transport Protocol
+	ProtocolIPv6           = 41  // IPv6 encapsulation
+	ProtocolSDRP           = 42  // Source Demand Routing Protocol
+	ProtocolIPv6Route      = 43  // Routing Header for IPv6
+	ProtocolIPv6Frag       = 44  // Fragment Header for IPv6
+	ProtocolIDRP           = 45  // Inter-Domain Routing Protocol
+	ProtocolRSVP           = 46  // Reservation Protocol
+	ProtocolGRE            = 47  // Generic Routing Encapsulation
+	ProtocolDSR            = 48  // Dynamic Source Routing Protocol
+	ProtocolBNA            = 49  // BNA
+	ProtocolESP            = 50  // Encap Security Payload
+	ProtocolAH             = 51  // Authentication Header
+	ProtocolINLSP          = 52  // Integrated Net Layer Security  TUBA
+	ProtocolNARP           = 54  // NBMA Address Resolution Protocol
+	ProtocolMOBILE         = 55  // IP Mobility
+	ProtocolTLSP           = 56  // Transport Layer Security Protocol using Kryptonet key management
+	ProtocolSKIP           = 57  // SKIP
+	ProtocolIPv6ICMP       = 58  // ICMP for IPv6
+	ProtocolIPv6NoNxt      = 59  // No Next Header for IPv6
+	ProtocolIPv6Opts       = 60  // Destination Options for IPv6
+	ProtocolCFTP           = 62  // CFTP
+	ProtocolSATEXPAK       = 64  // SATNET and Backroom EXPAK
+	ProtocolKRYPTOLAN      = 65  // Kryptolan
+	ProtocolRVD            = 66  // MIT Remote Virtual Disk Protocol
+	ProtocolIPPC           = 67  // Internet Pluribus Packet Core
+	ProtocolSATMON         = 69  // SATNET Monitoring
+	ProtocolVISA           = 70  // VISA Protocol
+	ProtocolIPCV           = 71  // Internet Packet Core Utility
+	ProtocolCPNX           = 72  // Computer Protocol Network Executive
+	ProtocolCPHB           = 73  // Computer Protocol Heart Beat
+	ProtocolWSN            = 74  // Wang Span Network
+	ProtocolPVP            = 75  // Packet Video Protocol
+	ProtocolBRSATMON       = 76  // Backroom SATNET Monitoring
+	ProtocolSUNND          = 77  // SUN ND PROTOCOL-Temporary
+	ProtocolWBMON          = 78  // WIDEBAND Monitoring
+	ProtocolWBEXPAK        = 79  // WIDEBAND EXPAK
+	ProtocolISOIP          = 80  // ISO Internet Protocol
+	ProtocolVMTP           = 81  // VMTP
+	ProtocolSECUREVMTP     = 82  // SECURE-VMTP
+	ProtocolVINES          = 83  // VINES
+	ProtocolTTP            = 84  // Transaction Transport Protocol
+	ProtocolIPTM           = 84  // Internet Protocol Traffic Manager
+	ProtocolNSFNETIGP      = 85  // NSFNET-IGP
+	ProtocolDGP            = 86  // Dissimilar Gateway Protocol
+	ProtocolTCF            = 87  // TCF
+	ProtocolEIGRP          = 88  // EIGRP
+	ProtocolOSPFIGP        = 89  // OSPFIGP
+	ProtocolSpriteRPC      = 90  // Sprite RPC Protocol
+	ProtocolLARP           = 91  // Locus Address Resolution Protocol
+	ProtocolMTP            = 92  // Multicast Transport Protocol
+	ProtocolAX25           = 93  // AX.25 Frames
+	ProtocolIPIP           = 94  // IP-within-IP Encapsulation Protocol
+	ProtocolSCCSP          = 96  // Semaphore Communications Sec. Pro.
+	ProtocolETHERIP        = 97  // Ethernet-within-IP Encapsulation
+	ProtocolENCAP          = 98  // Encapsulation Header
+	ProtocolGMTP           = 100 // GMTP
+	ProtocolIFMP           = 101 // Ipsilon Flow Management Protocol
+	ProtocolPNNI           = 102 // PNNI over IP
+	ProtocolPIM            = 103 // Protocol Independent Multicast
+	ProtocolARIS           = 104 // ARIS
+	ProtocolSCPS           = 105 // SCPS
+	ProtocolQNX            = 106 // QNX
+	ProtocolAN             = 107 // Active Networks
+	ProtocolIPComp         = 108 // IP Payload Compression Protocol
+	ProtocolSNP            = 109 // Sitara Networks Protocol
+	ProtocolCompaqPeer     = 110 // Compaq Peer Protocol
+	ProtocolIPXinIP        = 111 // IPX in IP
+	ProtocolVRRP           = 112 // Virtual Router Redundancy Protocol
+	ProtocolPGM            = 113 // PGM Reliable Transport Protocol
+	ProtocolL2TP           = 115 // Layer Two Tunneling Protocol
+	ProtocolDDX            = 116 // D-II Data Exchange (DDX)
+	ProtocolIATP           = 117 // Interactive Agent Transfer Protocol
+	ProtocolSTP            = 118 // Schedule Transfer Protocol
+	ProtocolSRP            = 119 // SpectraLink Radio Protocol
+	ProtocolUTI            = 120 // UTI
+	ProtocolSMP            = 121 // Simple Message Protocol
+	ProtocolPTP            = 123 // Performance Transparency Protocol
+	ProtocolISIS           = 124 // ISIS over IPv4
+	ProtocolFIRE           = 125 // FIRE
+	ProtocolCRTP           = 126 // Combat Radio Transport Protocol
+	ProtocolCRUDP          = 127 // Combat Radio User Datagram
+	ProtocolSSCOPMCE       = 128 // SSCOPMCE
+	ProtocolIPLT           = 129 // IPLT
+	ProtocolSPS            = 130 // Secure Packet Shield
+	ProtocolPIPE           = 131 // Private IP Encapsulation within IP
+	ProtocolSCTP           = 132 // Stream Control Transmission Protocol
+	ProtocolFC             = 133 // Fibre Channel
+	ProtocolRSVPE2EIGNORE  = 134 // RSVP-E2E-IGNORE
+	ProtocolMobilityHeader = 135 // Mobility Header
+	ProtocolUDPLite        = 136 // UDPLite
+	ProtocolMPLSinIP       = 137 // MPLS-in-IP
+	ProtocolMANET          = 138 // MANET Protocols
+	ProtocolHIP            = 139 // Host Identity Protocol
+	ProtocolShim6          = 140 // Shim6 Protocol
+	ProtocolWESP           = 141 // Wrapped Encapsulating Security Payload
+	ProtocolROHC           = 142 // Robust Header Compression
+	ProtocolReserved       = 255 // Reserved
+)
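
Note that iana is an internal package: code outside golang.org/x/net, including the harvester service itself, cannot import it and passes the raw protocol numbers instead. A sketch with illustrative mirror constants:

package main

import (
	"log"

	"golang.org/x/net/icmp"
)

// Mirrors of the internal iana constants, which external callers
// cannot import directly.
const (
	protocolICMP     = 1  // iana.ProtocolICMP
	protocolIPv6ICMP = 58 // iana.ProtocolIPv6ICMP
)

func parse(buf []byte, ipv6 bool) (*icmp.Message, error) {
	proto := protocolICMP
	if ipv6 {
		proto = protocolIPv6ICMP
	}
	return icmp.ParseMessage(proto, buf)
}

func main() {
	// A minimal 8-byte ICMPv4 echo reply; ParseMessage reads the
	// checksum field but does not verify it.
	buf := []byte{0, 0, 0xff, 0xff, 0, 0, 0, 0}
	if _, err := parse(buf, false); err != nil {
		log.Fatal(err)
	}
}
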
diff --git a/harvester/vendor/golang.org/x/net/internal/iana/gen.go b/harvester/vendor/golang.org/x/net/internal/iana/gen.go
new file mode 100644
index 0000000..86c78b3
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/internal/iana/gen.go
@@ -0,0 +1,293 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+//go:generate go run gen.go
+
+// This program generates internet protocol constants and tables by
+// reading IANA protocol registries.
+package main
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"go/format"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"strconv"
+	"strings"
+)
+
+var registries = []struct {
+	url   string
+	parse func(io.Writer, io.Reader) error
+}{
+	{
+		"http://www.iana.org/assignments/dscp-registry/dscp-registry.xml",
+		parseDSCPRegistry,
+	},
+	{
+		"http://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml",
+		parseTOSTCByte,
+	},
+	{
+		"http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml",
+		parseProtocolNumbers,
+	},
+}
+
+func main() {
+	var bb bytes.Buffer
+	fmt.Fprintf(&bb, "// go generate gen.go\n")
+	fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n")
+	fmt.Fprintf(&bb, "// Package iana provides protocol number resources managed by the Internet Assigned Numbers Authority (IANA).\n")
+	fmt.Fprintf(&bb, `package iana // import "golang.org/x/net/internal/iana"`+"\n\n")
+	for _, r := range registries {
+		resp, err := http.Get(r.url)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, err)
+			os.Exit(1)
+		}
+		defer resp.Body.Close()
+		if resp.StatusCode != http.StatusOK {
+			fmt.Fprintf(os.Stderr, "got HTTP status code %v for %v\n", resp.StatusCode, r.url)
+			os.Exit(1)
+		}
+		if err := r.parse(&bb, resp.Body); err != nil {
+			fmt.Fprintln(os.Stderr, err)
+			os.Exit(1)
+		}
+		fmt.Fprintf(&bb, "\n")
+	}
+	b, err := format.Source(bb.Bytes())
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	if err := ioutil.WriteFile("const.go", b, 0644); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}
+
+func parseDSCPRegistry(w io.Writer, r io.Reader) error {
+	dec := xml.NewDecoder(r)
+	var dr dscpRegistry
+	if err := dec.Decode(&dr); err != nil {
+		return err
+	}
+	drs := dr.escape()
+	fmt.Fprintf(w, "// %s, Updated: %s\n", dr.Title, dr.Updated)
+	fmt.Fprintf(w, "const (\n")
+	for _, dr := range drs {
+		fmt.Fprintf(w, "DiffServ%s = %#x", dr.Name, dr.Value)
+		fmt.Fprintf(w, "// %s\n", dr.OrigName)
+	}
+	fmt.Fprintf(w, ")\n")
+	return nil
+}
+
+type dscpRegistry struct {
+	XMLName     xml.Name `xml:"registry"`
+	Title       string   `xml:"title"`
+	Updated     string   `xml:"updated"`
+	Note        string   `xml:"note"`
+	RegTitle    string   `xml:"registry>title"`
+	PoolRecords []struct {
+		Name  string `xml:"name"`
+		Space string `xml:"space"`
+	} `xml:"registry>record"`
+	Records []struct {
+		Name  string `xml:"name"`
+		Space string `xml:"space"`
+	} `xml:"registry>registry>record"`
+}
+
+type canonDSCPRecord struct {
+	OrigName string
+	Name     string
+	Value    int
+}
+
+func (drr *dscpRegistry) escape() []canonDSCPRecord {
+	drs := make([]canonDSCPRecord, len(drr.Records))
+	sr := strings.NewReplacer(
+		"+", "",
+		"-", "",
+		"/", "",
+		".", "",
+		" ", "",
+	)
+	for i, dr := range drr.Records {
+		s := strings.TrimSpace(dr.Name)
+		drs[i].OrigName = s
+		drs[i].Name = sr.Replace(s)
+		n, err := strconv.ParseUint(dr.Space, 2, 8)
+		if err != nil {
+			continue
+		}
+		drs[i].Value = int(n) << 2
+	}
+	return drs
+}
+
+func parseTOSTCByte(w io.Writer, r io.Reader) error {
+	dec := xml.NewDecoder(r)
+	var ttb tosTCByte
+	if err := dec.Decode(&ttb); err != nil {
+		return err
+	}
+	trs := ttb.escape()
+	fmt.Fprintf(w, "// %s, Updated: %s\n", ttb.Title, ttb.Updated)
+	fmt.Fprintf(w, "const (\n")
+	for _, tr := range trs {
+		fmt.Fprintf(w, "%s = %#x", tr.Keyword, tr.Value)
+		fmt.Fprintf(w, "// %s\n", tr.OrigKeyword)
+	}
+	fmt.Fprintf(w, ")\n")
+	return nil
+}
+
+type tosTCByte struct {
+	XMLName  xml.Name `xml:"registry"`
+	Title    string   `xml:"title"`
+	Updated  string   `xml:"updated"`
+	Note     string   `xml:"note"`
+	RegTitle string   `xml:"registry>title"`
+	Records  []struct {
+		Binary  string `xml:"binary"`
+		Keyword string `xml:"keyword"`
+	} `xml:"registry>record"`
+}
+
+type canonTOSTCByteRecord struct {
+	OrigKeyword string
+	Keyword     string
+	Value       int
+}
+
+func (ttb *tosTCByte) escape() []canonTOSTCByteRecord {
+	trs := make([]canonTOSTCByteRecord, len(ttb.Records))
+	sr := strings.NewReplacer(
+		"Capable", "",
+		"(", "",
+		")", "",
+		"+", "",
+		"-", "",
+		"/", "",
+		".", "",
+		" ", "",
+	)
+	for i, tr := range ttb.Records {
+		s := strings.TrimSpace(tr.Keyword)
+		trs[i].OrigKeyword = s
+		ss := strings.Split(s, " ")
+		if len(ss) > 1 {
+			trs[i].Keyword = strings.Join(ss[1:], " ")
+		} else {
+			trs[i].Keyword = ss[0]
+		}
+		trs[i].Keyword = sr.Replace(trs[i].Keyword)
+		n, err := strconv.ParseUint(tr.Binary, 2, 8)
+		if err != nil {
+			continue
+		}
+		trs[i].Value = int(n)
+	}
+	return trs
+}
+
+func parseProtocolNumbers(w io.Writer, r io.Reader) error {
+	dec := xml.NewDecoder(r)
+	var pn protocolNumbers
+	if err := dec.Decode(&pn); err != nil {
+		return err
+	}
+	prs := pn.escape()
+	prs = append([]canonProtocolRecord{{
+		Name:  "IP",
+		Descr: "IPv4 encapsulation, pseudo protocol number",
+		Value: 0,
+	}}, prs...)
+	fmt.Fprintf(w, "// %s, Updated: %s\n", pn.Title, pn.Updated)
+	fmt.Fprintf(w, "const (\n")
+	for _, pr := range prs {
+		if pr.Name == "" {
+			continue
+		}
+		fmt.Fprintf(w, "Protocol%s = %d", pr.Name, pr.Value)
+		s := pr.Descr
+		if s == "" {
+			s = pr.OrigName
+		}
+		fmt.Fprintf(w, "// %s\n", s)
+	}
+	fmt.Fprintf(w, ")\n")
+	return nil
+}
+
+type protocolNumbers struct {
+	XMLName  xml.Name `xml:"registry"`
+	Title    string   `xml:"title"`
+	Updated  string   `xml:"updated"`
+	RegTitle string   `xml:"registry>title"`
+	Note     string   `xml:"registry>note"`
+	Records  []struct {
+		Value string `xml:"value"`
+		Name  string `xml:"name"`
+		Descr string `xml:"description"`
+	} `xml:"registry>record"`
+}
+
+type canonProtocolRecord struct {
+	OrigName string
+	Name     string
+	Descr    string
+	Value    int
+}
+
+func (pn *protocolNumbers) escape() []canonProtocolRecord {
+	prs := make([]canonProtocolRecord, len(pn.Records))
+	sr := strings.NewReplacer(
+		"-in-", "in",
+		"-within-", "within",
+		"-over-", "over",
+		"+", "P",
+		"-", "",
+		"/", "",
+		".", "",
+		" ", "",
+	)
+	for i, pr := range pn.Records {
+		if strings.Contains(pr.Name, "Deprecated") ||
+			strings.Contains(pr.Name, "deprecated") {
+			continue
+		}
+		prs[i].OrigName = pr.Name
+		s := strings.TrimSpace(pr.Name)
+		switch pr.Name {
+		case "ISIS over IPv4":
+			prs[i].Name = "ISIS"
+		case "manet":
+			prs[i].Name = "MANET"
+		default:
+			prs[i].Name = sr.Replace(s)
+		}
+		ss := strings.Split(pr.Descr, "\n")
+		for i := range ss {
+			ss[i] = strings.TrimSpace(ss[i])
+		}
+		if len(ss) > 1 {
+			prs[i].Descr = strings.Join(ss, " ")
+		} else {
+			prs[i].Descr = ss[0]
+		}
+		prs[i].Value, _ = strconv.Atoi(pr.Value)
+	}
+	return prs
+}
diff --git a/harvester/vendor/golang.org/x/net/internal/netreflect/socket.go b/harvester/vendor/golang.org/x/net/internal/netreflect/socket.go
new file mode 100644
index 0000000..e82e51c
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/internal/netreflect/socket.go
@@ -0,0 +1,37 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package netreflect implements run-time reflection for the
+// facilities of the net package.
+package netreflect
+
+import (
+	"errors"
+	"net"
+)
+
+var (
+	errInvalidType = errors.New("invalid type")
+	errOpNoSupport = errors.New("operation not supported")
+)
+
+// SocketOf returns the socket descriptor of c.
+func SocketOf(c net.Conn) (uintptr, error) {
+	switch c.(type) {
+	case *net.TCPConn, *net.UDPConn, *net.IPConn, *net.UnixConn:
+		return socketOf(c)
+	default:
+		return 0, errInvalidType
+	}
+}
+
+// PacketSocketOf returns the socket descriptor of c.
+func PacketSocketOf(c net.PacketConn) (uintptr, error) {
+	switch c.(type) {
+	case *net.UDPConn, *net.IPConn, *net.UnixConn:
+		return socketOf(c.(net.Conn))
+	default:
+		return 0, errInvalidType
+	}
+}
diff --git a/harvester/vendor/golang.org/x/net/internal/netreflect/socket_posix.go b/harvester/vendor/golang.org/x/net/internal/netreflect/socket_posix.go
new file mode 100644
index 0000000..df475a2
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/internal/netreflect/socket_posix.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package netreflect
+
+import (
+	"net"
+	"reflect"
+	"runtime"
+)
+
+func socketOf(c net.Conn) (uintptr, error) {
+	v := reflect.ValueOf(c)
+	switch e := v.Elem(); e.Kind() {
+	case reflect.Struct:
+		fd := e.FieldByName("conn").FieldByName("fd")
+		switch e := fd.Elem(); e.Kind() {
+		case reflect.Struct:
+			sysfd := e.FieldByName("sysfd")
+			if runtime.GOOS == "windows" {
+				return uintptr(sysfd.Uint()), nil
+			}
+			return uintptr(sysfd.Int()), nil
+		}
+	}
+	return 0, errInvalidType
+}
diff --git a/harvester/vendor/golang.org/x/net/internal/netreflect/socket_stub.go b/harvester/vendor/golang.org/x/net/internal/netreflect/socket_stub.go
new file mode 100644
index 0000000..85adb4b
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/internal/netreflect/socket_stub.go
@@ -0,0 +1,11 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
+
+package netreflect
+
+import "net"
+
+func socketOf(c net.Conn) (uintptr, error) { return 0, errOpNoSupport }
diff --git a/harvester/vendor/golang.org/x/net/ipv4/bpfopt_linux.go b/harvester/vendor/golang.org/x/net/ipv4/bpfopt_linux.go
new file mode 100644
index 0000000..2d626d9
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/bpfopt_linux.go
@@ -0,0 +1,28 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"os"
+	"unsafe"
+
+	"golang.org/x/net/bpf"
+	"golang.org/x/net/internal/netreflect"
+)
+
+// SetBPF attaches a BPF program to the connection.
+//
+// Only supported on Linux.
+func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error {
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	prog := sockFProg{
+		Len:    uint16(len(filter)),
+		Filter: (*sockFilter)(unsafe.Pointer(&filter[0])),
+	}
+	return os.NewSyscallError("setsockopt", setsockopt(s, sysSOL_SOCKET, sysSO_ATTACH_FILTER, unsafe.Pointer(&prog), uint32(unsafe.Sizeof(prog))))
+}
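
A sketch of attaching a filter through this method, assuming the vendored golang.org/x/net/bpf assembler; Linux-only, and the program here trivially accepts every packet:

package main

import (
	"log"
	"net"

	"golang.org/x/net/bpf"
	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Accept every packet, up to 64 KiB.
	prog, err := bpf.Assemble([]bpf.Instruction{
		bpf.RetConstant{Val: 0xffff},
	})
	if err != nil {
		log.Fatal(err)
	}
	p := ipv4.NewPacketConn(c)
	if err := p.SetBPF(prog); err != nil { // errOpNoSupport off Linux
		log.Fatal(err)
	}
}
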
diff --git a/harvester/vendor/golang.org/x/net/ipv4/bpfopt_stub.go b/harvester/vendor/golang.org/x/net/ipv4/bpfopt_stub.go
new file mode 100644
index 0000000..c4a8481
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/bpfopt_stub.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !linux
+
+package ipv4
+
+import "golang.org/x/net/bpf"
+
+// SetBPF attaches a BPF program to the connection.
+//
+// Only supported on Linux.
+func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error {
+	return errOpNoSupport
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/control.go b/harvester/vendor/golang.org/x/net/ipv4/control.go
new file mode 100644
index 0000000..da4da2d
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/control.go
@@ -0,0 +1,70 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"fmt"
+	"net"
+	"sync"
+)
+
+type rawOpt struct {
+	sync.RWMutex
+	cflags ControlFlags
+}
+
+func (c *rawOpt) set(f ControlFlags)        { c.cflags |= f }
+func (c *rawOpt) clear(f ControlFlags)      { c.cflags &^= f }
+func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 }
+
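+// A ControlFlags represents a set of per-packet control flags.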
+type ControlFlags uint
+
+const (
+	FlagTTL       ControlFlags = 1 << iota // pass the TTL on the received packet
+	FlagSrc                                // pass the source address on the received packet
+	FlagDst                                // pass the destination address on the received packet
+	FlagInterface                          // pass the interface index on the received packet
+)
+
+// A ControlMessage represents per-packet IP-level socket options.
+type ControlMessage struct {
+	// Receiving socket options: SetControlMessage enables
+	// receiving the options from the protocol stack via the
+	// ReadFrom method of PacketConn or RawConn.
+	//
+	// Specifying socket options: passing a ControlMessage to the
+	// WriteTo method of PacketConn or RawConn sends the options
+	// to the protocol stack.
+	//
+	TTL     int    // time-to-live, receiving only
+	Src     net.IP // source address, specifying only
+	Dst     net.IP // destination address, receiving only
+	IfIndex int    // interface index; must be >= 1 when specifying
+}
+
+func (cm *ControlMessage) String() string {
+	if cm == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("ttl=%d src=%v dst=%v ifindex=%d", cm.TTL, cm.Src, cm.Dst, cm.IfIndex)
+}
+
+// Ancillary data socket options
+const (
+	ctlTTL        = iota // header field
+	ctlSrc               // header field
+	ctlDst               // header field
+	ctlInterface         // inbound or outbound interface
+	ctlPacketInfo        // inbound or outbound packet path
+	ctlMax
+)
+
+// A ctlOpt represents a binding for an ancillary data socket option.
+type ctlOpt struct {
+	name    int // option name, must be greater than or equal to 1
+	length  int // option length
+	marshal func([]byte, *ControlMessage) []byte
+	parse   func(*ControlMessage, []byte)
+}
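
A sketch of driving these flags end to end: enable the ancillary data, then read it back per packet. The raw ICMP socket here needs root or CAP_NET_RAW:

package main

import (
	"fmt"
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("ip4:icmp", "0.0.0.0") // needs CAP_NET_RAW
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv4.NewPacketConn(c)
	cf := ipv4.FlagTTL | ipv4.FlagSrc | ipv4.FlagDst | ipv4.FlagInterface
	if err := p.SetControlMessage(cf, true); err != nil {
		log.Fatal(err)
	}

	b := make([]byte, 1500)
	n, cm, src, err := p.ReadFrom(b)
	if err != nil {
		log.Fatal(err)
	}
	// cm.String() prints "ttl=... src=... dst=... ifindex=...".
	fmt.Println(n, "bytes from", src, "cm:", cm)
}
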
diff --git a/harvester/vendor/golang.org/x/net/ipv4/control_bsd.go b/harvester/vendor/golang.org/x/net/ipv4/control_bsd.go
new file mode 100644
index 0000000..3f27f99
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/control_bsd.go
@@ -0,0 +1,40 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package ipv4
+
+import (
+	"net"
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+)
+
+func marshalDst(b []byte, cm *ControlMessage) []byte {
+	m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	m.Level = iana.ProtocolIP
+	m.Type = sysIP_RECVDSTADDR
+	m.SetLen(syscall.CmsgLen(net.IPv4len))
+	return b[syscall.CmsgSpace(net.IPv4len):]
+}
+
+func parseDst(cm *ControlMessage, b []byte) {
+	cm.Dst = b[:net.IPv4len]
+}
+
+func marshalInterface(b []byte, cm *ControlMessage) []byte {
+	m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	m.Level = iana.ProtocolIP
+	m.Type = sysIP_RECVIF
+	m.SetLen(syscall.CmsgLen(syscall.SizeofSockaddrDatalink))
+	return b[syscall.CmsgSpace(syscall.SizeofSockaddrDatalink):]
+}
+
+func parseInterface(cm *ControlMessage, b []byte) {
+	sadl := (*syscall.SockaddrDatalink)(unsafe.Pointer(&b[0]))
+	cm.IfIndex = int(sadl.Index)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/control_pktinfo.go b/harvester/vendor/golang.org/x/net/ipv4/control_pktinfo.go
new file mode 100644
index 0000000..9ed9773
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/control_pktinfo.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin linux solaris
+
+package ipv4
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+)
+
+func marshalPacketInfo(b []byte, cm *ControlMessage) []byte {
+	m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	m.Level = iana.ProtocolIP
+	m.Type = sysIP_PKTINFO
+	m.SetLen(syscall.CmsgLen(sizeofInetPktinfo))
+	if cm != nil {
+		pi := (*inetPktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)]))
+		if ip := cm.Src.To4(); ip != nil {
+			copy(pi.Spec_dst[:], ip)
+		}
+		if cm.IfIndex > 0 {
+			pi.setIfindex(cm.IfIndex)
+		}
+	}
+	return b[syscall.CmsgSpace(sizeofInetPktinfo):]
+}
+
+func parsePacketInfo(cm *ControlMessage, b []byte) {
+	pi := (*inetPktinfo)(unsafe.Pointer(&b[0]))
+	cm.IfIndex = int(pi.Ifindex)
+	cm.Dst = pi.Addr[:]
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/control_stub.go b/harvester/vendor/golang.org/x/net/ipv4/control_stub.go
new file mode 100644
index 0000000..27e618b
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/control_stub.go
@@ -0,0 +1,23 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package ipv4
+
+func setControlMessage(s uintptr, opt *rawOpt, cf ControlFlags, on bool) error {
+	return errOpNoSupport
+}
+
+func newControlMessage(opt *rawOpt) []byte {
+	return nil
+}
+
+func parseControlMessage(b []byte) (*ControlMessage, error) {
+	return nil, errOpNoSupport
+}
+
+func marshalControlMessage(cm *ControlMessage) []byte {
+	return nil
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/control_unix.go b/harvester/vendor/golang.org/x/net/ipv4/control_unix.go
new file mode 100644
index 0000000..25ef661
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/control_unix.go
@@ -0,0 +1,148 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package ipv4
+
+import (
+	"os"
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+)
+
+func setControlMessage(s uintptr, opt *rawOpt, cf ControlFlags, on bool) error {
+	opt.Lock()
+	defer opt.Unlock()
+	if cf&FlagTTL != 0 && sockOpts[ssoReceiveTTL].name > 0 {
+		if err := setInt(s, &sockOpts[ssoReceiveTTL], boolint(on)); err != nil {
+			return err
+		}
+		if on {
+			opt.set(FlagTTL)
+		} else {
+			opt.clear(FlagTTL)
+		}
+	}
+	if sockOpts[ssoPacketInfo].name > 0 {
+		if cf&(FlagSrc|FlagDst|FlagInterface) != 0 {
+			if err := setInt(s, &sockOpts[ssoPacketInfo], boolint(on)); err != nil {
+				return err
+			}
+			if on {
+				opt.set(cf & (FlagSrc | FlagDst | FlagInterface))
+			} else {
+				opt.clear(cf & (FlagSrc | FlagDst | FlagInterface))
+			}
+		}
+	} else {
+		if cf&FlagDst != 0 && sockOpts[ssoReceiveDst].name > 0 {
+			if err := setInt(s, &sockOpts[ssoReceiveDst], boolint(on)); err != nil {
+				return err
+			}
+			if on {
+				opt.set(FlagDst)
+			} else {
+				opt.clear(FlagDst)
+			}
+		}
+		if cf&FlagInterface != 0 && sockOpts[ssoReceiveInterface].name > 0 {
+			if err := setInt(s, &sockOpts[ssoReceiveInterface], boolint(on)); err != nil {
+				return err
+			}
+			if on {
+				opt.set(FlagInterface)
+			} else {
+				opt.clear(FlagInterface)
+			}
+		}
+	}
+	return nil
+}
+
+func newControlMessage(opt *rawOpt) (oob []byte) {
+	opt.RLock()
+	var l int
+	if opt.isset(FlagTTL) && ctlOpts[ctlTTL].name > 0 {
+		l += syscall.CmsgSpace(ctlOpts[ctlTTL].length)
+	}
+	if ctlOpts[ctlPacketInfo].name > 0 {
+		if opt.isset(FlagSrc | FlagDst | FlagInterface) {
+			l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length)
+		}
+	} else {
+		if opt.isset(FlagDst) && ctlOpts[ctlDst].name > 0 {
+			l += syscall.CmsgSpace(ctlOpts[ctlDst].length)
+		}
+		if opt.isset(FlagInterface) && ctlOpts[ctlInterface].name > 0 {
+			l += syscall.CmsgSpace(ctlOpts[ctlInterface].length)
+		}
+	}
+	if l > 0 {
+		oob = make([]byte, l)
+	}
+	opt.RUnlock()
+	return
+}
+
+func parseControlMessage(b []byte) (*ControlMessage, error) {
+	if len(b) == 0 {
+		return nil, nil
+	}
+	cmsgs, err := syscall.ParseSocketControlMessage(b)
+	if err != nil {
+		return nil, os.NewSyscallError("parse socket control message", err)
+	}
+	cm := &ControlMessage{}
+	for _, m := range cmsgs {
+		if m.Header.Level != iana.ProtocolIP {
+			continue
+		}
+		switch int(m.Header.Type) {
+		case ctlOpts[ctlTTL].name:
+			ctlOpts[ctlTTL].parse(cm, m.Data[:])
+		case ctlOpts[ctlDst].name:
+			ctlOpts[ctlDst].parse(cm, m.Data[:])
+		case ctlOpts[ctlInterface].name:
+			ctlOpts[ctlInterface].parse(cm, m.Data[:])
+		case ctlOpts[ctlPacketInfo].name:
+			ctlOpts[ctlPacketInfo].parse(cm, m.Data[:])
+		}
+	}
+	return cm, nil
+}
+
+func marshalControlMessage(cm *ControlMessage) (oob []byte) {
+	if cm == nil {
+		return nil
+	}
+	var l int
+	pktinfo := false
+	if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To4() != nil || cm.IfIndex > 0) {
+		pktinfo = true
+		l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length)
+	}
+	if l > 0 {
+		oob = make([]byte, l)
+		b := oob
+		if pktinfo {
+			b = ctlOpts[ctlPacketInfo].marshal(b, cm)
+		}
+	}
+	return
+}
+
+func marshalTTL(b []byte, cm *ControlMessage) []byte {
+	m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	m.Level = iana.ProtocolIP
+	m.Type = sysIP_RECVTTL
+	m.SetLen(syscall.CmsgLen(1))
+	return b[syscall.CmsgSpace(1):]
+}
+
+func parseTTL(cm *ControlMessage, b []byte) {
+	cm.TTL = int(*(*byte)(unsafe.Pointer(&b[:1][0])))
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/control_windows.go b/harvester/vendor/golang.org/x/net/ipv4/control_windows.go
new file mode 100644
index 0000000..b27407d
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/control_windows.go
@@ -0,0 +1,27 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import "syscall"
+
+func setControlMessage(s uintptr, opt *rawOpt, cf ControlFlags, on bool) error {
+	// TODO(mikio): implement this
+	return syscall.EWINDOWS
+}
+
+func newControlMessage(opt *rawOpt) []byte {
+	// TODO(mikio): implement this
+	return nil
+}
+
+func parseControlMessage(b []byte) (*ControlMessage, error) {
+	// TODO(mikio): implement this
+	return nil, syscall.EWINDOWS
+}
+
+func marshalControlMessage(cm *ControlMessage) []byte {
+	// TODO(mikio): implement this
+	return nil
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/defs_darwin.go b/harvester/vendor/golang.org/x/net/ipv4/defs_darwin.go
new file mode 100644
index 0000000..c8f2e05
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/defs_darwin.go
@@ -0,0 +1,77 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+	sysIP_OPTIONS     = C.IP_OPTIONS
+	sysIP_HDRINCL     = C.IP_HDRINCL
+	sysIP_TOS         = C.IP_TOS
+	sysIP_TTL         = C.IP_TTL
+	sysIP_RECVOPTS    = C.IP_RECVOPTS
+	sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+	sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+	sysIP_RETOPTS     = C.IP_RETOPTS
+	sysIP_RECVIF      = C.IP_RECVIF
+	sysIP_STRIPHDR    = C.IP_STRIPHDR
+	sysIP_RECVTTL     = C.IP_RECVTTL
+	sysIP_BOUND_IF    = C.IP_BOUND_IF
+	sysIP_PKTINFO     = C.IP_PKTINFO
+	sysIP_RECVPKTINFO = C.IP_RECVPKTINFO
+
+	sysIP_MULTICAST_IF           = C.IP_MULTICAST_IF
+	sysIP_MULTICAST_TTL          = C.IP_MULTICAST_TTL
+	sysIP_MULTICAST_LOOP         = C.IP_MULTICAST_LOOP
+	sysIP_ADD_MEMBERSHIP         = C.IP_ADD_MEMBERSHIP
+	sysIP_DROP_MEMBERSHIP        = C.IP_DROP_MEMBERSHIP
+	sysIP_MULTICAST_VIF          = C.IP_MULTICAST_VIF
+	sysIP_MULTICAST_IFINDEX      = C.IP_MULTICAST_IFINDEX
+	sysIP_ADD_SOURCE_MEMBERSHIP  = C.IP_ADD_SOURCE_MEMBERSHIP
+	sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
+	sysIP_BLOCK_SOURCE           = C.IP_BLOCK_SOURCE
+	sysIP_UNBLOCK_SOURCE         = C.IP_UNBLOCK_SOURCE
+	sysMCAST_JOIN_GROUP          = C.MCAST_JOIN_GROUP
+	sysMCAST_LEAVE_GROUP         = C.MCAST_LEAVE_GROUP
+	sysMCAST_JOIN_SOURCE_GROUP   = C.MCAST_JOIN_SOURCE_GROUP
+	sysMCAST_LEAVE_SOURCE_GROUP  = C.MCAST_LEAVE_SOURCE_GROUP
+	sysMCAST_BLOCK_SOURCE        = C.MCAST_BLOCK_SOURCE
+	sysMCAST_UNBLOCK_SOURCE      = C.MCAST_UNBLOCK_SOURCE
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet    = C.sizeof_struct_sockaddr_in
+	sizeofInetPktinfo     = C.sizeof_struct_in_pktinfo
+
+	sizeofIPMreq         = C.sizeof_struct_ip_mreq
+	sizeofIPMreqn        = C.sizeof_struct_ip_mreqn
+	sizeofIPMreqSource   = C.sizeof_struct_ip_mreq_source
+	sizeofGroupReq       = C.sizeof_struct_group_req
+	sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+)
+
+type sockaddrStorage C.struct_sockaddr_storage
+
+type sockaddrInet C.struct_sockaddr_in
+
+type inetPktinfo C.struct_in_pktinfo
+
+type ipMreq C.struct_ip_mreq
+
+type ipMreqn C.struct_ip_mreqn
+
+type ipMreqSource C.struct_ip_mreq_source
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
diff --git a/harvester/vendor/golang.org/x/net/ipv4/defs_dragonfly.go b/harvester/vendor/golang.org/x/net/ipv4/defs_dragonfly.go
new file mode 100644
index 0000000..f30544e
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/defs_dragonfly.go
@@ -0,0 +1,38 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+	sysIP_OPTIONS     = C.IP_OPTIONS
+	sysIP_HDRINCL     = C.IP_HDRINCL
+	sysIP_TOS         = C.IP_TOS
+	sysIP_TTL         = C.IP_TTL
+	sysIP_RECVOPTS    = C.IP_RECVOPTS
+	sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+	sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+	sysIP_RETOPTS     = C.IP_RETOPTS
+	sysIP_RECVIF      = C.IP_RECVIF
+	sysIP_RECVTTL     = C.IP_RECVTTL
+
+	sysIP_MULTICAST_IF    = C.IP_MULTICAST_IF
+	sysIP_MULTICAST_TTL   = C.IP_MULTICAST_TTL
+	sysIP_MULTICAST_LOOP  = C.IP_MULTICAST_LOOP
+	sysIP_MULTICAST_VIF   = C.IP_MULTICAST_VIF
+	sysIP_ADD_MEMBERSHIP  = C.IP_ADD_MEMBERSHIP
+	sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+
+	sizeofIPMreq = C.sizeof_struct_ip_mreq
+)
+
+type ipMreq C.struct_ip_mreq
diff --git a/harvester/vendor/golang.org/x/net/ipv4/defs_freebsd.go b/harvester/vendor/golang.org/x/net/ipv4/defs_freebsd.go
new file mode 100644
index 0000000..4dd57d8
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/defs_freebsd.go
@@ -0,0 +1,75 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+	sysIP_OPTIONS     = C.IP_OPTIONS
+	sysIP_HDRINCL     = C.IP_HDRINCL
+	sysIP_TOS         = C.IP_TOS
+	sysIP_TTL         = C.IP_TTL
+	sysIP_RECVOPTS    = C.IP_RECVOPTS
+	sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+	sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+	sysIP_SENDSRCADDR = C.IP_SENDSRCADDR
+	sysIP_RETOPTS     = C.IP_RETOPTS
+	sysIP_RECVIF      = C.IP_RECVIF
+	sysIP_ONESBCAST   = C.IP_ONESBCAST
+	sysIP_BINDANY     = C.IP_BINDANY
+	sysIP_RECVTTL     = C.IP_RECVTTL
+	sysIP_MINTTL      = C.IP_MINTTL
+	sysIP_DONTFRAG    = C.IP_DONTFRAG
+	sysIP_RECVTOS     = C.IP_RECVTOS
+
+	sysIP_MULTICAST_IF           = C.IP_MULTICAST_IF
+	sysIP_MULTICAST_TTL          = C.IP_MULTICAST_TTL
+	sysIP_MULTICAST_LOOP         = C.IP_MULTICAST_LOOP
+	sysIP_ADD_MEMBERSHIP         = C.IP_ADD_MEMBERSHIP
+	sysIP_DROP_MEMBERSHIP        = C.IP_DROP_MEMBERSHIP
+	sysIP_MULTICAST_VIF          = C.IP_MULTICAST_VIF
+	sysIP_ADD_SOURCE_MEMBERSHIP  = C.IP_ADD_SOURCE_MEMBERSHIP
+	sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
+	sysIP_BLOCK_SOURCE           = C.IP_BLOCK_SOURCE
+	sysIP_UNBLOCK_SOURCE         = C.IP_UNBLOCK_SOURCE
+	sysMCAST_JOIN_GROUP          = C.MCAST_JOIN_GROUP
+	sysMCAST_LEAVE_GROUP         = C.MCAST_LEAVE_GROUP
+	sysMCAST_JOIN_SOURCE_GROUP   = C.MCAST_JOIN_SOURCE_GROUP
+	sysMCAST_LEAVE_SOURCE_GROUP  = C.MCAST_LEAVE_SOURCE_GROUP
+	sysMCAST_BLOCK_SOURCE        = C.MCAST_BLOCK_SOURCE
+	sysMCAST_UNBLOCK_SOURCE      = C.MCAST_UNBLOCK_SOURCE
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet    = C.sizeof_struct_sockaddr_in
+
+	sizeofIPMreq         = C.sizeof_struct_ip_mreq
+	sizeofIPMreqn        = C.sizeof_struct_ip_mreqn
+	sizeofIPMreqSource   = C.sizeof_struct_ip_mreq_source
+	sizeofGroupReq       = C.sizeof_struct_group_req
+	sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+)
+
+type sockaddrStorage C.struct_sockaddr_storage
+
+type sockaddrInet C.struct_sockaddr_in
+
+type ipMreq C.struct_ip_mreq
+
+type ipMreqn C.struct_ip_mreqn
+
+type ipMreqSource C.struct_ip_mreq_source
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
diff --git a/harvester/vendor/golang.org/x/net/ipv4/defs_linux.go b/harvester/vendor/golang.org/x/net/ipv4/defs_linux.go
new file mode 100644
index 0000000..31dfa09
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/defs_linux.go
@@ -0,0 +1,120 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <time.h>
+
+#include <linux/errqueue.h>
+#include <linux/icmp.h>
+#include <linux/in.h>
+#include <linux/filter.h>
+#include <sys/socket.h>
+*/
+import "C"
+
+const (
+	sysIP_TOS             = C.IP_TOS
+	sysIP_TTL             = C.IP_TTL
+	sysIP_HDRINCL         = C.IP_HDRINCL
+	sysIP_OPTIONS         = C.IP_OPTIONS
+	sysIP_ROUTER_ALERT    = C.IP_ROUTER_ALERT
+	sysIP_RECVOPTS        = C.IP_RECVOPTS
+	sysIP_RETOPTS         = C.IP_RETOPTS
+	sysIP_PKTINFO         = C.IP_PKTINFO
+	sysIP_PKTOPTIONS      = C.IP_PKTOPTIONS
+	sysIP_MTU_DISCOVER    = C.IP_MTU_DISCOVER
+	sysIP_RECVERR         = C.IP_RECVERR
+	sysIP_RECVTTL         = C.IP_RECVTTL
+	sysIP_RECVTOS         = C.IP_RECVTOS
+	sysIP_MTU             = C.IP_MTU
+	sysIP_FREEBIND        = C.IP_FREEBIND
+	sysIP_TRANSPARENT     = C.IP_TRANSPARENT
+	sysIP_RECVRETOPTS     = C.IP_RECVRETOPTS
+	sysIP_ORIGDSTADDR     = C.IP_ORIGDSTADDR
+	sysIP_RECVORIGDSTADDR = C.IP_RECVORIGDSTADDR
+	sysIP_MINTTL          = C.IP_MINTTL
+	sysIP_NODEFRAG        = C.IP_NODEFRAG
+	sysIP_UNICAST_IF      = C.IP_UNICAST_IF
+
+	sysIP_MULTICAST_IF           = C.IP_MULTICAST_IF
+	sysIP_MULTICAST_TTL          = C.IP_MULTICAST_TTL
+	sysIP_MULTICAST_LOOP         = C.IP_MULTICAST_LOOP
+	sysIP_ADD_MEMBERSHIP         = C.IP_ADD_MEMBERSHIP
+	sysIP_DROP_MEMBERSHIP        = C.IP_DROP_MEMBERSHIP
+	sysIP_UNBLOCK_SOURCE         = C.IP_UNBLOCK_SOURCE
+	sysIP_BLOCK_SOURCE           = C.IP_BLOCK_SOURCE
+	sysIP_ADD_SOURCE_MEMBERSHIP  = C.IP_ADD_SOURCE_MEMBERSHIP
+	sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
+	sysIP_MSFILTER               = C.IP_MSFILTER
+	sysMCAST_JOIN_GROUP          = C.MCAST_JOIN_GROUP
+	sysMCAST_LEAVE_GROUP         = C.MCAST_LEAVE_GROUP
+	sysMCAST_JOIN_SOURCE_GROUP   = C.MCAST_JOIN_SOURCE_GROUP
+	sysMCAST_LEAVE_SOURCE_GROUP  = C.MCAST_LEAVE_SOURCE_GROUP
+	sysMCAST_BLOCK_SOURCE        = C.MCAST_BLOCK_SOURCE
+	sysMCAST_UNBLOCK_SOURCE      = C.MCAST_UNBLOCK_SOURCE
+	sysMCAST_MSFILTER            = C.MCAST_MSFILTER
+	sysIP_MULTICAST_ALL          = C.IP_MULTICAST_ALL
+
+	//sysIP_PMTUDISC_DONT      = C.IP_PMTUDISC_DONT
+	//sysIP_PMTUDISC_WANT      = C.IP_PMTUDISC_WANT
+	//sysIP_PMTUDISC_DO        = C.IP_PMTUDISC_DO
+	//sysIP_PMTUDISC_PROBE     = C.IP_PMTUDISC_PROBE
+	//sysIP_PMTUDISC_INTERFACE = C.IP_PMTUDISC_INTERFACE
+	//sysIP_PMTUDISC_OMIT      = C.IP_PMTUDISC_OMIT
+
+	sysICMP_FILTER = C.ICMP_FILTER
+
+	sysSO_EE_ORIGIN_NONE         = C.SO_EE_ORIGIN_NONE
+	sysSO_EE_ORIGIN_LOCAL        = C.SO_EE_ORIGIN_LOCAL
+	sysSO_EE_ORIGIN_ICMP         = C.SO_EE_ORIGIN_ICMP
+	sysSO_EE_ORIGIN_ICMP6        = C.SO_EE_ORIGIN_ICMP6
+	sysSO_EE_ORIGIN_TXSTATUS     = C.SO_EE_ORIGIN_TXSTATUS
+	sysSO_EE_ORIGIN_TIMESTAMPING = C.SO_EE_ORIGIN_TIMESTAMPING
+
+	sysSOL_SOCKET       = C.SOL_SOCKET
+	sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER
+
+	sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage
+	sizeofSockaddrInet          = C.sizeof_struct_sockaddr_in
+	sizeofInetPktinfo           = C.sizeof_struct_in_pktinfo
+	sizeofSockExtendedErr       = C.sizeof_struct_sock_extended_err
+
+	sizeofIPMreq         = C.sizeof_struct_ip_mreq
+	sizeofIPMreqn        = C.sizeof_struct_ip_mreqn
+	sizeofIPMreqSource   = C.sizeof_struct_ip_mreq_source
+	sizeofGroupReq       = C.sizeof_struct_group_req
+	sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+	sizeofICMPFilter = C.sizeof_struct_icmp_filter
+)
+
+type kernelSockaddrStorage C.struct___kernel_sockaddr_storage
+
+type sockaddrInet C.struct_sockaddr_in
+
+type inetPktinfo C.struct_in_pktinfo
+
+type sockExtendedErr C.struct_sock_extended_err
+
+type ipMreq C.struct_ip_mreq
+
+type ipMreqn C.struct_ip_mreqn
+
+type ipMreqSource C.struct_ip_mreq_source
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
+
+type icmpFilter C.struct_icmp_filter
+
+type sockFProg C.struct_sock_fprog
+
+type sockFilter C.struct_sock_filter
diff --git a/harvester/vendor/golang.org/x/net/ipv4/defs_netbsd.go b/harvester/vendor/golang.org/x/net/ipv4/defs_netbsd.go
new file mode 100644
index 0000000..8f8af1b
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/defs_netbsd.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+	sysIP_OPTIONS     = C.IP_OPTIONS
+	sysIP_HDRINCL     = C.IP_HDRINCL
+	sysIP_TOS         = C.IP_TOS
+	sysIP_TTL         = C.IP_TTL
+	sysIP_RECVOPTS    = C.IP_RECVOPTS
+	sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+	sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+	sysIP_RETOPTS     = C.IP_RETOPTS
+	sysIP_RECVIF      = C.IP_RECVIF
+	sysIP_RECVTTL     = C.IP_RECVTTL
+
+	sysIP_MULTICAST_IF    = C.IP_MULTICAST_IF
+	sysIP_MULTICAST_TTL   = C.IP_MULTICAST_TTL
+	sysIP_MULTICAST_LOOP  = C.IP_MULTICAST_LOOP
+	sysIP_ADD_MEMBERSHIP  = C.IP_ADD_MEMBERSHIP
+	sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+
+	sizeofIPMreq = C.sizeof_struct_ip_mreq
+)
+
+type ipMreq C.struct_ip_mreq
diff --git a/harvester/vendor/golang.org/x/net/ipv4/defs_openbsd.go b/harvester/vendor/golang.org/x/net/ipv4/defs_openbsd.go
new file mode 100644
index 0000000..8f8af1b
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/defs_openbsd.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+	sysIP_OPTIONS     = C.IP_OPTIONS
+	sysIP_HDRINCL     = C.IP_HDRINCL
+	sysIP_TOS         = C.IP_TOS
+	sysIP_TTL         = C.IP_TTL
+	sysIP_RECVOPTS    = C.IP_RECVOPTS
+	sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+	sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+	sysIP_RETOPTS     = C.IP_RETOPTS
+	sysIP_RECVIF      = C.IP_RECVIF
+	sysIP_RECVTTL     = C.IP_RECVTTL
+
+	sysIP_MULTICAST_IF    = C.IP_MULTICAST_IF
+	sysIP_MULTICAST_TTL   = C.IP_MULTICAST_TTL
+	sysIP_MULTICAST_LOOP  = C.IP_MULTICAST_LOOP
+	sysIP_ADD_MEMBERSHIP  = C.IP_ADD_MEMBERSHIP
+	sysIP_DROP_MEMBERSHIP = C.IP_DROP_MEMBERSHIP
+
+	sizeofIPMreq = C.sizeof_struct_ip_mreq
+)
+
+type ipMreq C.struct_ip_mreq
diff --git a/harvester/vendor/golang.org/x/net/ipv4/defs_solaris.go b/harvester/vendor/golang.org/x/net/ipv4/defs_solaris.go
new file mode 100644
index 0000000..aeb33e9
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/defs_solaris.go
@@ -0,0 +1,84 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in_addr [4]byte /* in_addr */
+
+package ipv4
+
+/*
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+*/
+import "C"
+
+const (
+	sysIP_OPTIONS     = C.IP_OPTIONS
+	sysIP_HDRINCL     = C.IP_HDRINCL
+	sysIP_TOS         = C.IP_TOS
+	sysIP_TTL         = C.IP_TTL
+	sysIP_RECVOPTS    = C.IP_RECVOPTS
+	sysIP_RECVRETOPTS = C.IP_RECVRETOPTS
+	sysIP_RECVDSTADDR = C.IP_RECVDSTADDR
+	sysIP_RETOPTS     = C.IP_RETOPTS
+	sysIP_RECVIF      = C.IP_RECVIF
+	sysIP_RECVSLLA    = C.IP_RECVSLLA
+	sysIP_RECVTTL     = C.IP_RECVTTL
+
+	sysIP_MULTICAST_IF           = C.IP_MULTICAST_IF
+	sysIP_MULTICAST_TTL          = C.IP_MULTICAST_TTL
+	sysIP_MULTICAST_LOOP         = C.IP_MULTICAST_LOOP
+	sysIP_ADD_MEMBERSHIP         = C.IP_ADD_MEMBERSHIP
+	sysIP_DROP_MEMBERSHIP        = C.IP_DROP_MEMBERSHIP
+	sysIP_BLOCK_SOURCE           = C.IP_BLOCK_SOURCE
+	sysIP_UNBLOCK_SOURCE         = C.IP_UNBLOCK_SOURCE
+	sysIP_ADD_SOURCE_MEMBERSHIP  = C.IP_ADD_SOURCE_MEMBERSHIP
+	sysIP_DROP_SOURCE_MEMBERSHIP = C.IP_DROP_SOURCE_MEMBERSHIP
+	sysIP_NEXTHOP                = C.IP_NEXTHOP
+
+	sysIP_PKTINFO     = C.IP_PKTINFO
+	sysIP_RECVPKTINFO = C.IP_RECVPKTINFO
+	sysIP_DONTFRAG    = C.IP_DONTFRAG
+
+	sysIP_BOUND_IF      = C.IP_BOUND_IF
+	sysIP_UNSPEC_SRC    = C.IP_UNSPEC_SRC
+	sysIP_BROADCAST_TTL = C.IP_BROADCAST_TTL
+	sysIP_DHCPINIT_IF   = C.IP_DHCPINIT_IF
+
+	sysIP_REUSEADDR = C.IP_REUSEADDR
+	sysIP_DONTROUTE = C.IP_DONTROUTE
+	sysIP_BROADCAST = C.IP_BROADCAST
+
+	sysMCAST_JOIN_GROUP         = C.MCAST_JOIN_GROUP
+	sysMCAST_LEAVE_GROUP        = C.MCAST_LEAVE_GROUP
+	sysMCAST_BLOCK_SOURCE       = C.MCAST_BLOCK_SOURCE
+	sysMCAST_UNBLOCK_SOURCE     = C.MCAST_UNBLOCK_SOURCE
+	sysMCAST_JOIN_SOURCE_GROUP  = C.MCAST_JOIN_SOURCE_GROUP
+	sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet    = C.sizeof_struct_sockaddr_in
+	sizeofInetPktinfo     = C.sizeof_struct_in_pktinfo
+
+	sizeofIPMreq         = C.sizeof_struct_ip_mreq
+	sizeofIPMreqSource   = C.sizeof_struct_ip_mreq_source
+	sizeofGroupReq       = C.sizeof_struct_group_req
+	sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+)
+
+type sockaddrStorage C.struct_sockaddr_storage
+
+type sockaddrInet C.struct_sockaddr_in
+
+type inetPktinfo C.struct_in_pktinfo
+
+type ipMreq C.struct_ip_mreq
+
+type ipMreqSource C.struct_ip_mreq_source
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
diff --git a/harvester/vendor/golang.org/x/net/ipv4/dgramopt_posix.go b/harvester/vendor/golang.org/x/net/ipv4/dgramopt_posix.go
new file mode 100644
index 0000000..fbc5df1
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/dgramopt_posix.go
@@ -0,0 +1,253 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package ipv4
+
+import (
+	"net"
+	"syscall"
+
+	"golang.org/x/net/internal/netreflect"
+)
+
+// MulticastTTL returns the time-to-live field value for outgoing
+// multicast packets.
+func (c *dgramOpt) MulticastTTL() (int, error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return 0, err
+	}
+	return getInt(s, &sockOpts[ssoMulticastTTL])
+}
+
+// SetMulticastTTL sets the time-to-live field value for future
+// outgoing multicast packets.
+func (c *dgramOpt) SetMulticastTTL(ttl int) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	return setInt(s, &sockOpts[ssoMulticastTTL], ttl)
+}
+
+// MulticastInterface returns the default interface for multicast
+// packet transmissions.
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) {
+	if !c.ok() {
+		return nil, syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return nil, err
+	}
+	return getInterface(s, &sockOpts[ssoMulticastInterface])
+}
+
+// SetMulticastInterface sets the default interface for future
+// multicast packet transmissions.
+func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	return setInterface(s, &sockOpts[ssoMulticastInterface], ifi)
+}
+
+// MulticastLoopback reports whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) MulticastLoopback() (bool, error) {
+	if !c.ok() {
+		return false, syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return false, err
+	}
+	on, err := getInt(s, &sockOpts[ssoMulticastLoopback])
+	if err != nil {
+		return false, err
+	}
+	return on == 1, nil
+}
+
+// SetMulticastLoopback sets whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) SetMulticastLoopback(on bool) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	return setInt(s, &sockOpts[ssoMulticastLoopback], boolint(on))
+}
+
+// JoinGroup joins the group address group on the interface ifi.
+// By default all sources that can cast data to group are accepted.
+// It's possible to mute and unmute data transmission from a specific
+// source by using ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup.
+// JoinGroup uses the system-assigned multicast interface when ifi is
+// nil, although this is not recommended because the assignment
+// depends on the platform and sometimes requires routing
+// configuration.
+func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	grp := netAddrToIP4(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	return setGroup(s, &sockOpts[ssoJoinGroup], ifi, grp)
+}
+
+// LeaveGroup leaves the group address group on the interface ifi
+// regardless of whether the group is an any-source group or a
+// source-specific group.
+func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	grp := netAddrToIP4(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	return setGroup(s, &sockOpts[ssoLeaveGroup], ifi, grp)
+}
+
+// JoinSourceSpecificGroup joins the source-specific group comprising
+// group and source on the interface ifi.
+// JoinSourceSpecificGroup uses the system-assigned multicast
+// interface when ifi is nil, although this is not recommended because
+// the assignment depends on the platform and sometimes requires
+// routing configuration.
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	grp := netAddrToIP4(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	src := netAddrToIP4(source)
+	if src == nil {
+		return errMissingAddress
+	}
+	return setSourceGroup(s, &sockOpts[ssoJoinSourceGroup], ifi, grp, src)
+}
+
+// LeaveSourceSpecificGroup leaves the source-specific group on the
+// interface ifi.
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	grp := netAddrToIP4(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	src := netAddrToIP4(source)
+	if src == nil {
+		return errMissingAddress
+	}
+	return setSourceGroup(s, &sockOpts[ssoLeaveSourceGroup], ifi, grp, src)
+}
+
+// ExcludeSourceSpecificGroup excludes the source-specific group from
+// the any-source groups previously joined by JoinGroup on the
+// interface ifi.
+func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	grp := netAddrToIP4(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	src := netAddrToIP4(source)
+	if src == nil {
+		return errMissingAddress
+	}
+	return setSourceGroup(s, &sockOpts[ssoBlockSourceGroup], ifi, grp, src)
+}
+
+// IncludeSourceSpecificGroup re-includes a source-specific group
+// previously excluded by ExcludeSourceSpecificGroup on the interface
+// ifi.
+func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	grp := netAddrToIP4(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	src := netAddrToIP4(source)
+	if src == nil {
+		return errMissingAddress
+	}
+	return setSourceGroup(s, &sockOpts[ssoUnblockSourceGroup], ifi, grp, src)
+}
+
+// ICMPFilter returns an ICMP filter.
+// Currently only Linux supports this.
+func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) {
+	if !c.ok() {
+		return nil, syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return nil, err
+	}
+	return getICMPFilter(s, &sockOpts[ssoICMPFilter])
+}
+
+// SetICMPFilter deploys the ICMP filter.
+// Currently only Linux supports this.
+func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	return setICMPFilter(s, &sockOpts[ssoICMPFilter], f)
+}
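+
+// A minimal usage sketch (assuming a Linux host and that c is a
+// net.PacketConn obtained from net.ListenPacket("ip4:icmp", "0.0.0.0")):
+//
+//	p := ipv4.NewPacketConn(c)
+//	var f ipv4.ICMPFilter
+//	f.SetAll(true)                   // block every ICMP type ...
+//	f.Accept(ipv4.ICMPTypeEchoReply) // ... except echo replies
+//	if err := p.SetICMPFilter(&f); err != nil {
+//		// error handling
+//	}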
diff --git a/harvester/vendor/golang.org/x/net/ipv4/dgramopt_stub.go b/harvester/vendor/golang.org/x/net/ipv4/dgramopt_stub.go
new file mode 100644
index 0000000..f6b867f
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/dgramopt_stub.go
@@ -0,0 +1,106 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package ipv4
+
+import "net"
+
+// MulticastTTL returns the time-to-live field value for outgoing
+// multicast packets.
+func (c *dgramOpt) MulticastTTL() (int, error) {
+	return 0, errOpNoSupport
+}
+
+// SetMulticastTTL sets the time-to-live field value for future
+// outgoing multicast packets.
+func (c *dgramOpt) SetMulticastTTL(ttl int) error {
+	return errOpNoSupport
+}
+
+// MulticastInterface returns the default interface for multicast
+// packet transmissions.
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) {
+	return nil, errOpNoSupport
+}
+
+// SetMulticastInterface sets the default interface for future
+// multicast packet transmissions.
+func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error {
+	return errOpNoSupport
+}
+
+// MulticastLoopback reports whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) MulticastLoopback() (bool, error) {
+	return false, errOpNoSupport
+}
+
+// SetMulticastLoopback sets whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) SetMulticastLoopback(on bool) error {
+	return errOpNoSupport
+}
+
+// JoinGroup joins the group address group on the interface ifi.
+// By default all sources that can cast data to group are accepted.
+// It's possible to mute and unmute data transmission from a specific
+// source by using ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup.
+// JoinGroup uses the system-assigned multicast interface when ifi is
+// nil, although this is not recommended because the assignment
+// depends on the platform and sometimes requires routing
+// configuration.
+func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error {
+	return errOpNoSupport
+}
+
+// LeaveGroup leaves the group address group on the interface ifi
+// regardless of whether the group is an any-source group or a
+// source-specific group.
+func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error {
+	return errOpNoSupport
+}
+
+// JoinSourceSpecificGroup joins the source-specific group comprising
+// group and source on the interface ifi.
+// JoinSourceSpecificGroup uses the system-assigned multicast
+// interface when ifi is nil, although this is not recommended because
+// the assignment depends on the platform and sometimes requires
+// routing configuration.
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	return errOpNoSupport
+}
+
+// LeaveSourceSpecificGroup leaves the source-specific group on the
+// interface ifi.
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	return errOpNoSupport
+}
+
+// ExcludeSourceSpecificGroup excludes the source-specific group from
+// the any-source groups previously joined by JoinGroup on the
+// interface ifi.
+func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	return errOpNoSupport
+}
+
+// IncludeSourceSpecificGroup re-includes a source-specific group
+// previously excluded by ExcludeSourceSpecificGroup on the interface
+// ifi.
+func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	return errOpNoSupport
+}
+
+// ICMPFilter returns an ICMP filter.
+// Currently only Linux supports this.
+func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) {
+	return nil, errOpNoSupport
+}
+
+// SetICMPFilter deploys the ICMP filter.
+// Currently only Linux supports this.
+func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error {
+	return errOpNoSupport
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/doc.go b/harvester/vendor/golang.org/x/net/ipv4/doc.go
new file mode 100644
index 0000000..b4ce40f
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/doc.go
@@ -0,0 +1,244 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ipv4 implements IP-level socket options for the Internet
+// Protocol version 4.
+//
+// The package provides IP-level socket options that allow
+// manipulation of IPv4 facilities.
+//
+// The IPv4 protocol and basic host requirements for IPv4 are defined
+// in RFC 791 and RFC 1122.
+// Host extensions for multicasting and socket interface extensions
+// for multicast source filters are defined in RFC 1112 and RFC 3678.
+// IGMPv1, IGMPv2 and IGMPv3 are defined in RFC 1112, RFC 2236 and RFC
+// 3376.
+// Source-specific multicast is defined in RFC 4607.
+//
+//
+// Unicasting
+//
+// The options for unicasting are available for net.TCPConn,
+// net.UDPConn and net.IPConn which are created as network connections
+// that use the IPv4 transport.  When a single TCP connection carrying
+// a data flow of multiple packets needs to indicate the flow is
+// important, Conn is used to set the type-of-service field on the
+// IPv4 header for each packet.
+//
+//	ln, err := net.Listen("tcp4", "0.0.0.0:1024")
+//	if err != nil {
+//		// error handling
+//	}
+//	defer ln.Close()
+//	for {
+//		c, err := ln.Accept()
+//		if err != nil {
+//			// error handling
+//		}
+//		go func(c net.Conn) {
+//			defer c.Close()
+//
+// The outgoing packets will be labeled DiffServ assured forwarding
+// class 1 low drop precedence, known as AF11 packets.
+//
+//			if err := ipv4.NewConn(c).SetTOS(0x28); err != nil {
+//				// error handling
+//			}
+//			if _, err := c.Write(data); err != nil {
+//				// error handling
+//			}
+//		}(c)
+//	}
+//
+//
+// Multicasting
+//
+// The options for multicasting are available for net.UDPConn and
+// net.IPConn which are created as network connections that use the
+// IPv4 transport.  A few network facilities must be prepared before
+// you begin multicasting: at a minimum, the network interfaces and
+// multicast groups to join.
+//
+//	en0, err := net.InterfaceByName("en0")
+//	if err != nil {
+//		// error handling
+//	}
+//	en1, err := net.InterfaceByIndex(911)
+//	if err != nil {
+//		// error handling
+//	}
+//	group := net.IPv4(224, 0, 0, 250)
+//
+// First, an application listens to an appropriate address with an
+// appropriate service port.
+//
+//	c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
+//	if err != nil {
+//		// error handling
+//	}
+//	defer c.Close()
+//
+// Second, the application joins multicast groups and starts listening
+// to the groups on the specified network interfaces.  Note that the
+// service port for the transport layer protocol does not matter for
+// this operation, as joining groups affects only network and link layer
+// protocols, such as IPv4 and Ethernet.
+//
+//	p := ipv4.NewPacketConn(c)
+//	if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil {
+//		// error handling
+//	}
+//	if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil {
+//		// error handling
+//	}
+//
+// The application might enable per-packet control message exchanges
+// with the protocol stack within the kernel.  When the application
+// needs a destination address on an incoming packet,
+// SetControlMessage of PacketConn is used to enable control message
+// transmissions.
+//
+//	if err := p.SetControlMessage(ipv4.FlagDst, true); err != nil {
+//		// error handling
+//	}
+//
+// The application could identify whether the received packets are
+// of interest by using the control message that contains the
+// destination address of the received packet.
+//
+//	b := make([]byte, 1500)
+//	for {
+//		n, cm, src, err := p.ReadFrom(b)
+//		if err != nil {
+//			// error handling
+//		}
+//		if cm.Dst.IsMulticast() {
+//			if cm.Dst.Equal(group) {
+//				// joined group, do something
+//			} else {
+//				// unknown group, discard
+//				continue
+//			}
+//		}
+//
+// The application can also send both unicast and multicast packets.
+//
+//		p.SetTOS(0x0)
+//		p.SetTTL(16)
+//		if _, err := p.WriteTo(data, nil, src); err != nil {
+//			// error handling
+//		}
+//		dst := &net.UDPAddr{IP: group, Port: 1024}
+//		for _, ifi := range []*net.Interface{en0, en1} {
+//			if err := p.SetMulticastInterface(ifi); err != nil {
+//				// error handling
+//			}
+//			p.SetMulticastTTL(2)
+//			if _, err := p.WriteTo(data, nil, dst); err != nil {
+//				// error handling
+//			}
+//		}
+//	}
+//
+//
+// More multicasting
+//
+// An application that uses PacketConn or RawConn may join multiple
+// multicast groups.  For example, a UDP listener with port 1024 might
+// join two different groups across two different network
+// interfaces by using:
+//
+//	c, err := net.ListenPacket("udp4", "0.0.0.0:1024")
+//	if err != nil {
+//		// error handling
+//	}
+//	defer c.Close()
+//	p := ipv4.NewPacketConn(c)
+//	if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {
+//		// error handling
+//	}
+//	if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil {
+//		// error handling
+//	}
+//	if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 249)}); err != nil {
+//		// error handling
+//	}
+//
+// It is possible for multiple UDP listeners that listen on the same
+// UDP port to join the same multicast group.  The net package will
+// provide a socket that listens to a wildcard address with a reusable
+// UDP port when an appropriate multicast address prefix is passed to
+// net.ListenPacket or net.ListenUDP.
+//
+//	c1, err := net.ListenPacket("udp4", "224.0.0.0:1024")
+//	if err != nil {
+//		// error handling
+//	}
+//	defer c1.Close()
+//	c2, err := net.ListenPacket("udp4", "224.0.0.0:1024")
+//	if err != nil {
+//		// error handling
+//	}
+//	defer c2.Close()
+//	p1 := ipv4.NewPacketConn(c1)
+//	if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {
+//		// error handling
+//	}
+//	p2 := ipv4.NewPacketConn(c2)
+//	if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {
+//		// error handling
+//	}
+//
+// It is also possible for the application to leave or rejoin a
+// multicast group on the network interface.
+//
+//	if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 248)}); err != nil {
+//		// error handling
+//	}
+//	if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil {
+//		// error handling
+//	}
+//
+//
+// Source-specific multicasting
+//
+// An application that uses PacketConn or RawConn on a platform that
+// supports IGMPv3 is able to join source-specific multicast groups.
+// The application may use JoinSourceSpecificGroup and
+// LeaveSourceSpecificGroup for the operation known as "include" mode,
+//
+//	ssmgroup := net.UDPAddr{IP: net.IPv4(232, 7, 8, 9)}
+//	ssmsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 1)}
+//	if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {
+//		// error handling
+//	}
+//	if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {
+//		// error handling
+//	}
+//
+// or JoinGroup, ExcludeSourceSpecificGroup,
+// IncludeSourceSpecificGroup and LeaveGroup for the operation known
+// as "exclude" mode.
+//
+//	exclsource := net.UDPAddr{IP: net.IPv4(192, 168, 0, 254)}
+//	if err := p.JoinGroup(en0, &ssmgroup); err != nil {
+//		// error handling
+//	}
+//	if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil {
+//		// error handling
+//	}
+//	if err := p.LeaveGroup(en0, &ssmgroup); err != nil {
+//		// error handling
+//	}
+//
+// Note that what happens when an application that runs on a platform
+// without IGMPv3 support uses JoinSourceSpecificGroup and
+// LeaveSourceSpecificGroup depends on the platform implementation.
+// In general the platform tries to fall back to conversations using
+// IGMPv1 or IGMPv2 and starts to listen to multicast traffic.
+// In the fallback case, ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup may return an error.
+package ipv4 // import "golang.org/x/net/ipv4"
+
+// BUG(mikio): This package is not implemented on NaCl and Plan 9.
diff --git a/harvester/vendor/golang.org/x/net/ipv4/endpoint.go b/harvester/vendor/golang.org/x/net/ipv4/endpoint.go
new file mode 100644
index 0000000..01c4e39
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/endpoint.go
@@ -0,0 +1,194 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/net/internal/netreflect"
+)
+
+// BUG(mikio): On Windows, the JoinSourceSpecificGroup,
+// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup methods of PacketConn and RawConn are
+// not implemented.
+
+// A Conn represents a network endpoint that uses the IPv4 transport.
+// It is used to control basic IP-level socket options such as TOS and
+// TTL.
+type Conn struct {
+	genericOpt
+}
+
+type genericOpt struct {
+	net.Conn
+}
+
+func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil }
+
+// NewConn returns a new Conn.
+func NewConn(c net.Conn) *Conn {
+	return &Conn{
+		genericOpt: genericOpt{Conn: c},
+	}
+}
+
+// A PacketConn represents a packet network endpoint that uses the
+// IPv4 transport.  It is used to control several IP-level socket
+// options including multicasting.  It also provides datagram based
+// network I/O methods specific to the IPv4 and higher layer protocols
+// such as UDP.
+type PacketConn struct {
+	genericOpt
+	dgramOpt
+	payloadHandler
+}
+
+type dgramOpt struct {
+	net.PacketConn
+}
+
+func (c *dgramOpt) ok() bool { return c != nil && c.PacketConn != nil }
+
+// SetControlMessage sets the per packet IP-level socket options.
+func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error {
+	if !c.payloadHandler.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.dgramOpt.PacketConn)
+	if err != nil {
+		return err
+	}
+	return setControlMessage(s, &c.payloadHandler.rawOpt, cf, on)
+}
+
+// SetDeadline sets the read and write deadlines associated with the
+// endpoint.
+func (c *PacketConn) SetDeadline(t time.Time) error {
+	if !c.payloadHandler.ok() {
+		return syscall.EINVAL
+	}
+	return c.payloadHandler.PacketConn.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline associated with the
+// endpoint.
+func (c *PacketConn) SetReadDeadline(t time.Time) error {
+	if !c.payloadHandler.ok() {
+		return syscall.EINVAL
+	}
+	return c.payloadHandler.PacketConn.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline associated with the
+// endpoint.
+func (c *PacketConn) SetWriteDeadline(t time.Time) error {
+	if !c.payloadHandler.ok() {
+		return syscall.EINVAL
+	}
+	return c.payloadHandler.PacketConn.SetWriteDeadline(t)
+}
+
+// Close closes the endpoint.
+func (c *PacketConn) Close() error {
+	if !c.payloadHandler.ok() {
+		return syscall.EINVAL
+	}
+	return c.payloadHandler.PacketConn.Close()
+}
+
+// NewPacketConn returns a new PacketConn using c as its underlying
+// transport.
+func NewPacketConn(c net.PacketConn) *PacketConn {
+	p := &PacketConn{
+		genericOpt:     genericOpt{Conn: c.(net.Conn)},
+		dgramOpt:       dgramOpt{PacketConn: c},
+		payloadHandler: payloadHandler{PacketConn: c},
+	}
+	if _, ok := c.(*net.IPConn); ok && sockOpts[ssoStripHeader].name > 0 {
+		if s, err := netreflect.PacketSocketOf(c); err == nil {
+			setInt(s, &sockOpts[ssoStripHeader], boolint(true))
+		}
+	}
+	return p
+}
+
+// A RawConn represents a packet network endpoint that uses the IPv4
+// transport.  It is used to control several IP-level socket options
+// including IPv4 header manipulation.  It also provides datagram
+// based network I/O methods specific to the IPv4 and higher layer
+// protocols that handle IPv4 datagrams directly, such as OSPF and GRE.
+type RawConn struct {
+	genericOpt
+	dgramOpt
+	packetHandler
+}
+
+// SetControlMessage sets the per packet IP-level socket options.
+func (c *RawConn) SetControlMessage(cf ControlFlags, on bool) error {
+	if !c.packetHandler.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.dgramOpt.PacketConn)
+	if err != nil {
+		return err
+	}
+	return setControlMessage(s, &c.packetHandler.rawOpt, cf, on)
+}
+
+// SetDeadline sets the read and write deadlines associated with the
+// endpoint.
+func (c *RawConn) SetDeadline(t time.Time) error {
+	if !c.packetHandler.ok() {
+		return syscall.EINVAL
+	}
+	return c.packetHandler.c.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline associated with the
+// endpoint.
+func (c *RawConn) SetReadDeadline(t time.Time) error {
+	if !c.packetHandler.ok() {
+		return syscall.EINVAL
+	}
+	return c.packetHandler.c.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline associated with the
+// endpoint.
+func (c *RawConn) SetWriteDeadline(t time.Time) error {
+	if !c.packetHandler.ok() {
+		return syscall.EINVAL
+	}
+	return c.packetHandler.c.SetWriteDeadline(t)
+}
+
+// Close closes the endpoint.
+func (c *RawConn) Close() error {
+	if !c.packetHandler.ok() {
+		return syscall.EINVAL
+	}
+	return c.packetHandler.c.Close()
+}
+
+// NewRawConn returns a new RawConn using c as its underlying
+// transport.
+func NewRawConn(c net.PacketConn) (*RawConn, error) {
+	r := &RawConn{
+		genericOpt:    genericOpt{Conn: c.(net.Conn)},
+		dgramOpt:      dgramOpt{PacketConn: c},
+		packetHandler: packetHandler{c: c.(*net.IPConn)},
+	}
+	s, err := netreflect.PacketSocketOf(c)
+	if err != nil {
+		return nil, err
+	}
+	if err := setInt(s, &sockOpts[ssoHeaderPrepend], boolint(true)); err != nil {
+		return nil, err
+	}
+	return r, nil
+}
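+
+// A minimal usage sketch (the listener address and protocol number are
+// illustrative): a RawConn is typically built on top of a raw IP
+// listener, e.g. for OSPF (protocol 89):
+//
+//	c, err := net.ListenPacket("ip4:89", "0.0.0.0")
+//	if err != nil {
+//		// error handling
+//	}
+//	defer c.Close()
+//	r, err := ipv4.NewRawConn(c)
+//	if err != nil {
+//		// error handling
+//	}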
diff --git a/harvester/vendor/golang.org/x/net/ipv4/gen.go b/harvester/vendor/golang.org/x/net/ipv4/gen.go
new file mode 100644
index 0000000..ffb44fe
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/gen.go
@@ -0,0 +1,199 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+//go:generate go run gen.go
+
+// This program generates system adaptation constants and types,
+// internet protocol constants and tables by reading template files
+// and IANA protocol registries.
+package main
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"go/format"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"os/exec"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+func main() {
+	if err := genzsys(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	if err := geniana(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}
+
+func genzsys() error {
+	defs := "defs_" + runtime.GOOS + ".go"
+	f, err := os.Open(defs)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+	f.Close()
+	cmd := exec.Command("go", "tool", "cgo", "-godefs", defs)
+	b, err := cmd.Output()
+	if err != nil {
+		return err
+	}
+	b, err = format.Source(b)
+	if err != nil {
+		return err
+	}
+	zsys := "zsys_" + runtime.GOOS + ".go"
+	switch runtime.GOOS {
+	case "freebsd", "linux":
+		zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go"
+	}
+	if err := ioutil.WriteFile(zsys, b, 0644); err != nil {
+		return err
+	}
+	return nil
+}
+
+var registries = []struct {
+	url   string
+	parse func(io.Writer, io.Reader) error
+}{
+	{
+		"http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml",
+		parseICMPv4Parameters,
+	},
+}
+
+func geniana() error {
+	var bb bytes.Buffer
+	fmt.Fprintf(&bb, "// go generate gen.go\n")
+	fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n")
+	fmt.Fprintf(&bb, "package ipv4\n\n")
+	for _, r := range registries {
+		resp, err := http.Get(r.url)
+		if err != nil {
+			return err
+		}
+		defer resp.Body.Close()
+		if resp.StatusCode != http.StatusOK {
+			return fmt.Errorf("got HTTP status code %v for %v", resp.StatusCode, r.url)
+		}
+		if err := r.parse(&bb, resp.Body); err != nil {
+			return err
+		}
+		fmt.Fprintf(&bb, "\n")
+	}
+	b, err := format.Source(bb.Bytes())
+	if err != nil {
+		return err
+	}
+	if err := ioutil.WriteFile("iana.go", b, 0644); err != nil {
+		return err
+	}
+	return nil
+}
+
+func parseICMPv4Parameters(w io.Writer, r io.Reader) error {
+	dec := xml.NewDecoder(r)
+	var icp icmpv4Parameters
+	if err := dec.Decode(&icp); err != nil {
+		return err
+	}
+	prs := icp.escape()
+	fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
+	fmt.Fprintf(w, "const (\n")
+	for _, pr := range prs {
+		if pr.Descr == "" {
+			continue
+		}
+		fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Descr, pr.Value)
+		fmt.Fprintf(w, "// %s\n", pr.OrigDescr)
+	}
+	fmt.Fprintf(w, ")\n\n")
+	fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
+	fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n")
+	for _, pr := range prs {
+		if pr.Descr == "" {
+			continue
+		}
+		fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigDescr))
+	}
+	fmt.Fprintf(w, "}\n")
+	return nil
+}
+
+type icmpv4Parameters struct {
+	XMLName    xml.Name `xml:"registry"`
+	Title      string   `xml:"title"`
+	Updated    string   `xml:"updated"`
+	Registries []struct {
+		Title   string `xml:"title"`
+		Records []struct {
+			Value string `xml:"value"`
+			Descr string `xml:"description"`
+		} `xml:"record"`
+	} `xml:"registry"`
+}
+
+type canonICMPv4ParamRecord struct {
+	OrigDescr string
+	Descr     string
+	Value     int
+}
+
+func (icp *icmpv4Parameters) escape() []canonICMPv4ParamRecord {
+	id := -1
+	for i, r := range icp.Registries {
+		if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") {
+			id = i
+			break
+		}
+	}
+	if id < 0 {
+		return nil
+	}
+	prs := make([]canonICMPv4ParamRecord, len(icp.Registries[id].Records))
+	sr := strings.NewReplacer(
+		"Messages", "",
+		"Message", "",
+		"ICMP", "",
+		"+", "P",
+		"-", "",
+		"/", "",
+		".", "",
+		" ", "",
+	)
+	for i, pr := range icp.Registries[id].Records {
+		if strings.Contains(pr.Descr, "Reserved") ||
+			strings.Contains(pr.Descr, "Unassigned") ||
+			strings.Contains(pr.Descr, "Deprecated") ||
+			strings.Contains(pr.Descr, "Experiment") ||
+			strings.Contains(pr.Descr, "experiment") {
+			continue
+		}
+		ss := strings.Split(pr.Descr, "\n")
+		if len(ss) > 1 {
+			prs[i].Descr = strings.Join(ss, " ")
+		} else {
+			prs[i].Descr = ss[0]
+		}
+		s := strings.TrimSpace(prs[i].Descr)
+		prs[i].OrigDescr = s
+		prs[i].Descr = sr.Replace(s)
+		prs[i].Value, _ = strconv.Atoi(pr.Value)
+	}
+	return prs
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/genericopt_posix.go b/harvester/vendor/golang.org/x/net/ipv4/genericopt_posix.go
new file mode 100644
index 0000000..58168b7
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/genericopt_posix.go
@@ -0,0 +1,63 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package ipv4
+
+import (
+	"syscall"
+
+	"golang.org/x/net/internal/netreflect"
+)
+
+// TOS returns the type-of-service field value for outgoing packets.
+func (c *genericOpt) TOS() (int, error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	s, err := netreflect.SocketOf(c.Conn)
+	if err != nil {
+		return 0, err
+	}
+	return getInt(s, &sockOpts[ssoTOS])
+}
+
+// SetTOS sets the type-of-service field value for future outgoing
+// packets.
+func (c *genericOpt) SetTOS(tos int) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.SocketOf(c.Conn)
+	if err != nil {
+		return err
+	}
+	return setInt(s, &sockOpts[ssoTOS], tos)
+}
+
+// TTL returns the time-to-live field value for outgoing packets.
+func (c *genericOpt) TTL() (int, error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	s, err := netreflect.SocketOf(c.Conn)
+	if err != nil {
+		return 0, err
+	}
+	return getInt(s, &sockOpts[ssoTTL])
+}
+
+// SetTTL sets the time-to-live field value for future outgoing
+// packets.
+func (c *genericOpt) SetTTL(ttl int) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.SocketOf(c.Conn)
+	if err != nil {
+		return err
+	}
+	return setInt(s, &sockOpts[ssoTTL], ttl)
+}
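+
+// A minimal usage sketch (assuming c is an established net.Conn over
+// the IPv4 transport):
+//
+//	cc := ipv4.NewConn(c)
+//	if err := cc.SetTOS(0x28); err != nil { // DiffServ AF11
+//		// error handling
+//	}
+//	if err := cc.SetTTL(64); err != nil {
+//		// error handling
+//	}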
diff --git a/harvester/vendor/golang.org/x/net/ipv4/genericopt_stub.go b/harvester/vendor/golang.org/x/net/ipv4/genericopt_stub.go
new file mode 100644
index 0000000..661a4d1
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/genericopt_stub.go
@@ -0,0 +1,29 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package ipv4
+
+// TOS returns the type-of-service field value for outgoing packets.
+func (c *genericOpt) TOS() (int, error) {
+	return 0, errOpNoSupport
+}
+
+// SetTOS sets the type-of-service field value for future outgoing
+// packets.
+func (c *genericOpt) SetTOS(tos int) error {
+	return errOpNoSupport
+}
+
+// TTL returns the time-to-live field value for outgoing packets.
+func (c *genericOpt) TTL() (int, error) {
+	return 0, errOpNoSupport
+}
+
+// SetTTL sets the time-to-live field value for future outgoing
+// packets.
+func (c *genericOpt) SetTTL(ttl int) error {
+	return errOpNoSupport
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/header.go b/harvester/vendor/golang.org/x/net/ipv4/header.go
new file mode 100644
index 0000000..6dc26d4
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/header.go
@@ -0,0 +1,145 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"encoding/binary"
+	"fmt"
+	"net"
+	"runtime"
+	"syscall"
+)
+
+const (
+	Version      = 4  // protocol version
+	HeaderLen    = 20 // header length without extension headers
+	maxHeaderLen = 60 // sensible default, revisit if later RFCs define new usage of version and header length fields
+)
+
+type HeaderFlags int
+
+const (
+	MoreFragments HeaderFlags = 1 << iota // more fragments flag
+	DontFragment                          // don't fragment flag
+)
+
+// A Header represents an IPv4 header.
+type Header struct {
+	Version  int         // protocol version
+	Len      int         // header length
+	TOS      int         // type-of-service
+	TotalLen int         // packet total length
+	ID       int         // identification
+	Flags    HeaderFlags // flags
+	FragOff  int         // fragment offset
+	TTL      int         // time-to-live
+	Protocol int         // next protocol
+	Checksum int         // checksum
+	Src      net.IP      // source address
+	Dst      net.IP      // destination address
+	Options  []byte      // options, extension headers
+}
+
+func (h *Header) String() string {
+	if h == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("ver=%d hdrlen=%d tos=%#x totallen=%d id=%#x flags=%#x fragoff=%#x ttl=%d proto=%d cksum=%#x src=%v dst=%v", h.Version, h.Len, h.TOS, h.TotalLen, h.ID, h.Flags, h.FragOff, h.TTL, h.Protocol, h.Checksum, h.Src, h.Dst)
+}
+
+// Marshal returns the binary encoding of the IPv4 header h.
+func (h *Header) Marshal() ([]byte, error) {
+	if h == nil {
+		return nil, syscall.EINVAL
+	}
+	if h.Len < HeaderLen {
+		return nil, errHeaderTooShort
+	}
+	hdrlen := HeaderLen + len(h.Options)
+	b := make([]byte, hdrlen)
+	b[0] = byte(Version<<4 | (hdrlen >> 2 & 0x0f))
+	b[1] = byte(h.TOS)
+	flagsAndFragOff := (h.FragOff & 0x1fff) | int(h.Flags<<13)
+	switch runtime.GOOS {
+	case "darwin", "dragonfly", "netbsd":
+		nativeEndian.PutUint16(b[2:4], uint16(h.TotalLen))
+		nativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))
+	case "freebsd":
+		if freebsdVersion < 1100000 {
+			nativeEndian.PutUint16(b[2:4], uint16(h.TotalLen))
+			nativeEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))
+		} else {
+			binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen))
+			binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))
+		}
+	default:
+		binary.BigEndian.PutUint16(b[2:4], uint16(h.TotalLen))
+		binary.BigEndian.PutUint16(b[6:8], uint16(flagsAndFragOff))
+	}
+	binary.BigEndian.PutUint16(b[4:6], uint16(h.ID))
+	b[8] = byte(h.TTL)
+	b[9] = byte(h.Protocol)
+	binary.BigEndian.PutUint16(b[10:12], uint16(h.Checksum))
+	if ip := h.Src.To4(); ip != nil {
+		copy(b[12:16], ip[:net.IPv4len])
+	}
+	if ip := h.Dst.To4(); ip != nil {
+		copy(b[16:20], ip[:net.IPv4len])
+	} else {
+		return nil, errMissingAddress
+	}
+	if len(h.Options) > 0 {
+		copy(b[HeaderLen:], h.Options)
+	}
+	return b, nil
+}
+
+// ParseHeader parses b as an IPv4 header.
+func ParseHeader(b []byte) (*Header, error) {
+	if len(b) < HeaderLen {
+		return nil, errHeaderTooShort
+	}
+	hdrlen := int(b[0]&0x0f) << 2
+	if hdrlen > len(b) {
+		return nil, errBufferTooShort
+	}
+	h := &Header{
+		Version:  int(b[0] >> 4),
+		Len:      hdrlen,
+		TOS:      int(b[1]),
+		ID:       int(binary.BigEndian.Uint16(b[4:6])),
+		TTL:      int(b[8]),
+		Protocol: int(b[9]),
+		Checksum: int(binary.BigEndian.Uint16(b[10:12])),
+		Src:      net.IPv4(b[12], b[13], b[14], b[15]),
+		Dst:      net.IPv4(b[16], b[17], b[18], b[19]),
+	}
+	switch runtime.GOOS {
+	case "darwin", "dragonfly", "netbsd":
+		h.TotalLen = int(nativeEndian.Uint16(b[2:4])) + hdrlen
+		h.FragOff = int(nativeEndian.Uint16(b[6:8]))
+	case "freebsd":
+		if freebsdVersion < 1100000 {
+			h.TotalLen = int(nativeEndian.Uint16(b[2:4]))
+			if freebsdVersion < 1000000 {
+				h.TotalLen += hdrlen
+			}
+			h.FragOff = int(nativeEndian.Uint16(b[6:8]))
+		} else {
+			h.TotalLen = int(binary.BigEndian.Uint16(b[2:4]))
+			h.FragOff = int(binary.BigEndian.Uint16(b[6:8]))
+		}
+	default:
+		h.TotalLen = int(binary.BigEndian.Uint16(b[2:4]))
+		h.FragOff = int(binary.BigEndian.Uint16(b[6:8]))
+	}
+	h.Flags = HeaderFlags(h.FragOff&0xe000) >> 13
+	h.FragOff = h.FragOff & 0x1fff
+	if hdrlen-HeaderLen > 0 {
+		h.Options = make([]byte, hdrlen-HeaderLen)
+		copy(h.Options, b[HeaderLen:])
+	}
+	return h, nil
+}
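+
+// A minimal round-trip sketch (the field values are illustrative);
+// Marshal requires at least Len >= HeaderLen and a Dst address:
+//
+//	h := &ipv4.Header{
+//		Version:  ipv4.Version,
+//		Len:      ipv4.HeaderLen,
+//		TotalLen: ipv4.HeaderLen, // no payload in this sketch
+//		TTL:      64,
+//		Protocol: 1, // ICMP
+//		Dst:      net.IPv4(192, 0, 2, 1),
+//	}
+//	b, err := h.Marshal()
+//	if err != nil {
+//		// error handling
+//	}
+//	if _, err := ipv4.ParseHeader(b); err != nil {
+//		// error handling
+//	}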
diff --git a/harvester/vendor/golang.org/x/net/ipv4/helper.go b/harvester/vendor/golang.org/x/net/ipv4/helper.go
new file mode 100644
index 0000000..0838979
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/helper.go
@@ -0,0 +1,59 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"encoding/binary"
+	"errors"
+	"net"
+	"unsafe"
+)
+
+var (
+	errMissingAddress           = errors.New("missing address")
+	errMissingHeader            = errors.New("missing header")
+	errHeaderTooShort           = errors.New("header too short")
+	errBufferTooShort           = errors.New("buffer too short")
+	errInvalidConnType          = errors.New("invalid conn type")
+	errOpNoSupport              = errors.New("operation not supported")
+	errNoSuchInterface          = errors.New("no such interface")
+	errNoSuchMulticastInterface = errors.New("no such multicast interface")
+
+	// See http://www.freebsd.org/doc/en/books/porters-handbook/freebsd-versions.html.
+	freebsdVersion uint32
+
+	nativeEndian binary.ByteOrder
+)
+
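+// init determines the host byte order by inspecting the in-memory
+// layout of the 32-bit value 1: if its lowest-addressed byte is 1, the
+// machine is little-endian; otherwise it is treated as big-endian.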
+func init() {
+	i := uint32(1)
+	b := (*[4]byte)(unsafe.Pointer(&i))
+	if b[0] == 1 {
+		nativeEndian = binary.LittleEndian
+	} else {
+		nativeEndian = binary.BigEndian
+	}
+}
+
+func boolint(b bool) int {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+func netAddrToIP4(a net.Addr) net.IP {
+	switch v := a.(type) {
+	case *net.UDPAddr:
+		if ip := v.IP.To4(); ip != nil {
+			return ip
+		}
+	case *net.IPAddr:
+		if ip := v.IP.To4(); ip != nil {
+			return ip
+		}
+	}
+	return nil
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/iana.go b/harvester/vendor/golang.org/x/net/ipv4/iana.go
new file mode 100644
index 0000000..be10c94
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/iana.go
@@ -0,0 +1,34 @@
+// go generate gen.go
+// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+package ipv4
+
+// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19
+const (
+	ICMPTypeEchoReply              ICMPType = 0  // Echo Reply
+	ICMPTypeDestinationUnreachable ICMPType = 3  // Destination Unreachable
+	ICMPTypeRedirect               ICMPType = 5  // Redirect
+	ICMPTypeEcho                   ICMPType = 8  // Echo
+	ICMPTypeRouterAdvertisement    ICMPType = 9  // Router Advertisement
+	ICMPTypeRouterSolicitation     ICMPType = 10 // Router Solicitation
+	ICMPTypeTimeExceeded           ICMPType = 11 // Time Exceeded
+	ICMPTypeParameterProblem       ICMPType = 12 // Parameter Problem
+	ICMPTypeTimestamp              ICMPType = 13 // Timestamp
+	ICMPTypeTimestampReply         ICMPType = 14 // Timestamp Reply
+	ICMPTypePhoturis               ICMPType = 40 // Photuris
+)
+
+// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19
+var icmpTypes = map[ICMPType]string{
+	0:  "echo reply",
+	3:  "destination unreachable",
+	5:  "redirect",
+	8:  "echo",
+	9:  "router advertisement",
+	10: "router solicitation",
+	11: "time exceeded",
+	12: "parameter problem",
+	13: "timestamp",
+	14: "timestamp reply",
+	40: "photuris",
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/icmp.go b/harvester/vendor/golang.org/x/net/ipv4/icmp.go
new file mode 100644
index 0000000..097bea8
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/icmp.go
@@ -0,0 +1,57 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import "golang.org/x/net/internal/iana"
+
+// An ICMPType represents a type of ICMP message.
+type ICMPType int
+
+func (typ ICMPType) String() string {
+	s, ok := icmpTypes[typ]
+	if !ok {
+		return "<nil>"
+	}
+	return s
+}
+
+// Protocol returns the ICMPv4 protocol number.
+func (typ ICMPType) Protocol() int {
+	return iana.ProtocolICMP
+}
+
+// An ICMPFilter represents an ICMP message filter for incoming
+// packets. The filter belongs to a packet delivery path on a host and
+// it cannot interact with forwarded packets or tunnel-outer packets.
+//
+// Note: RFC 2460 defines a reasonable role model and it works not
+// only for IPv6 but also for IPv4.  A node means a device that
+// implements IP.  A router means a node that forwards IP packets not
+// explicitly addressed to itself, and a host means a node that is not
+// a router.
+type ICMPFilter struct {
+	icmpFilter
+}
+
+// Accept accepts incoming ICMP packets whose type field value is typ.
+func (f *ICMPFilter) Accept(typ ICMPType) {
+	f.accept(typ)
+}
+
+// Block blocks incoming ICMP packets whose type field value is typ.
+func (f *ICMPFilter) Block(typ ICMPType) {
+	f.block(typ)
+}
+
+// SetAll sets the filter action for all ICMP types: block them when
+// block is true, accept them otherwise.
+func (f *ICMPFilter) SetAll(block bool) {
+	f.setAll(block)
+}
+
+// WillBlock reports whether the ICMP type will be blocked.
+func (f *ICMPFilter) WillBlock(typ ICMPType) bool {
+	return f.willBlock(typ)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/icmp_linux.go b/harvester/vendor/golang.org/x/net/ipv4/icmp_linux.go
new file mode 100644
index 0000000..6e1c5c8
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/icmp_linux.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
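+// The kernel's ICMP_FILTER socket option holds a 32-bit bitmask in
+// which a set bit n means ICMP type n is blocked; accept clears the
+// corresponding bit and block sets it.
+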
+func (f *icmpFilter) accept(typ ICMPType) {
+	f.Data &^= 1 << (uint32(typ) & 31)
+}
+
+func (f *icmpFilter) block(typ ICMPType) {
+	f.Data |= 1 << (uint32(typ) & 31)
+}
+
+func (f *icmpFilter) setAll(block bool) {
+	if block {
+		f.Data = 1<<32 - 1
+	} else {
+		f.Data = 0
+	}
+}
+
+func (f *icmpFilter) willBlock(typ ICMPType) bool {
+	return f.Data&(1<<(uint32(typ)&31)) != 0
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/icmp_stub.go b/harvester/vendor/golang.org/x/net/ipv4/icmp_stub.go
new file mode 100644
index 0000000..21bb29a
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/icmp_stub.go
@@ -0,0 +1,25 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !linux
+
+package ipv4
+
+const sizeofICMPFilter = 0x0
+
+type icmpFilter struct {
+}
+
+func (f *icmpFilter) accept(typ ICMPType) {
+}
+
+func (f *icmpFilter) block(typ ICMPType) {
+}
+
+func (f *icmpFilter) setAll(block bool) {
+}
+
+func (f *icmpFilter) willBlock(typ ICMPType) bool {
+	return false
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/packet.go b/harvester/vendor/golang.org/x/net/ipv4/packet.go
new file mode 100644
index 0000000..7f3bf48
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/packet.go
@@ -0,0 +1,100 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"net"
+	"syscall"
+)
+
+// BUG(mikio): On Windows, the ReadFrom and WriteTo methods of RawConn
+// are not implemented.
+
+// A packetHandler represents the IPv4 datagram handler.
+type packetHandler struct {
+	c *net.IPConn
+	rawOpt
+}
+
+func (c *packetHandler) ok() bool { return c != nil && c.c != nil }
+
+// ReadFrom reads an IPv4 datagram from the endpoint c, copying the
+// datagram into b.  It returns the received datagram as the IPv4
+// header h, the payload p and the control message cm.
+func (c *packetHandler) ReadFrom(b []byte) (h *Header, p []byte, cm *ControlMessage, err error) {
+	if !c.ok() {
+		return nil, nil, nil, syscall.EINVAL
+	}
+	oob := newControlMessage(&c.rawOpt)
+	n, oobn, _, src, err := c.c.ReadMsgIP(b, oob)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+	var hs []byte
+	if hs, p, err = slicePacket(b[:n]); err != nil {
+		return nil, nil, nil, err
+	}
+	if h, err = ParseHeader(hs); err != nil {
+		return nil, nil, nil, err
+	}
+	if cm, err = parseControlMessage(oob[:oobn]); err != nil {
+		return nil, nil, nil, err
+	}
+	if src != nil && cm != nil {
+		cm.Src = src.IP
+	}
+	return
+}
+
+func slicePacket(b []byte) (h, p []byte, err error) {
+	if len(b) < HeaderLen {
+		return nil, nil, errHeaderTooShort
+	}
+	hdrlen := int(b[0]&0x0f) << 2
+	return b[:hdrlen], b[hdrlen:], nil
+}
+
+// WriteTo writes an IPv4 datagram through the endpoint c, copying the
+// datagram from the IPv4 header h and the payload p.  The control
+// message cm allows the datagram path and the outgoing interface to be
+// specified.  Currently only Darwin and Linux support this.  The cm
+// may be nil if control of the outgoing datagram is not required.
+//
+// The IPv4 header h must contain appropriate fields that include:
+//
+//	Version       = <must be specified>
+//	Len           = <must be specified>
+//	TOS           = <must be specified>
+//	TotalLen      = <must be specified>
+//	ID            = platform sets an appropriate value if ID is zero
+//	FragOff       = <must be specified>
+//	TTL           = <must be specified>
+//	Protocol      = <must be specified>
+//	Checksum      = platform sets an appropriate value if Checksum is zero
+//	Src           = platform sets an appropriate value if Src is nil
+//	Dst           = <must be specified>
+//	Options       = optional
+func (c *packetHandler) WriteTo(h *Header, p []byte, cm *ControlMessage) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	oob := marshalControlMessage(cm)
+	wh, err := h.Marshal()
+	if err != nil {
+		return err
+	}
+	dst := &net.IPAddr{}
+	if cm != nil {
+		if ip := cm.Dst.To4(); ip != nil {
+			dst.IP = ip
+		}
+	}
+	if dst.IP == nil {
+		dst.IP = h.Dst
+	}
+	wh = append(wh, p...)
+	_, _, err = c.c.WriteMsgIP(wh, oob, dst)
+	return err
+}
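+
+// A minimal write sketch (the header field values are illustrative),
+// assuming r is a *ipv4.RawConn and p is the payload:
+//
+//	h := &ipv4.Header{
+//		Version:  ipv4.Version,
+//		Len:      ipv4.HeaderLen,
+//		TotalLen: ipv4.HeaderLen + len(p),
+//		TTL:      1,
+//		Protocol: 89, // OSPF
+//		Dst:      net.IPv4(224, 0, 0, 5), // AllSPFRouters
+//	}
+//	if err := r.WriteTo(h, p, nil); err != nil {
+//		// error handling
+//	}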
diff --git a/harvester/vendor/golang.org/x/net/ipv4/payload.go b/harvester/vendor/golang.org/x/net/ipv4/payload.go
new file mode 100644
index 0000000..be130e4
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/payload.go
@@ -0,0 +1,18 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import "net"
+
+// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo
+// methods of PacketConn is not implemented.
+
+// A payloadHandler represents the IPv4 datagram payload handler.
+type payloadHandler struct {
+	net.PacketConn
+	rawOpt
+}
+
+func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil }
diff --git a/harvester/vendor/golang.org/x/net/ipv4/payload_cmsg.go b/harvester/vendor/golang.org/x/net/ipv4/payload_cmsg.go
new file mode 100644
index 0000000..9bcde8f
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/payload_cmsg.go
@@ -0,0 +1,81 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !plan9,!windows
+
+package ipv4
+
+import (
+	"net"
+	"syscall"
+)
+
+// ReadFrom reads the payload of a received IPv4 datagram from the
+// endpoint c, copying the payload into b.  It returns the number of
+// bytes copied into b, the control message cm and the source address
+// src of the received datagram.
+func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {
+	if !c.ok() {
+		return 0, nil, nil, syscall.EINVAL
+	}
+	oob := newControlMessage(&c.rawOpt)
+	var oobn int
+	switch c := c.PacketConn.(type) {
+	case *net.UDPConn:
+		if n, oobn, _, src, err = c.ReadMsgUDP(b, oob); err != nil {
+			return 0, nil, nil, err
+		}
+	case *net.IPConn:
+		if sockOpts[ssoStripHeader].name > 0 {
+			if n, oobn, _, src, err = c.ReadMsgIP(b, oob); err != nil {
+				return 0, nil, nil, err
+			}
+		} else {
+			nb := make([]byte, maxHeaderLen+len(b))
+			if n, oobn, _, src, err = c.ReadMsgIP(nb, oob); err != nil {
+				return 0, nil, nil, err
+			}
+			hdrlen := int(nb[0]&0x0f) << 2
+			copy(b, nb[hdrlen:])
+			n -= hdrlen
+		}
+	default:
+		return 0, nil, nil, errInvalidConnType
+	}
+	if cm, err = parseControlMessage(oob[:oobn]); err != nil {
+		return 0, nil, nil, err
+	}
+	if cm != nil {
+		cm.Src = netAddrToIP4(src)
+	}
+	return
+}
+
+// WriteTo writes the payload of an IPv4 datagram to the destination
+// address dst through the endpoint c, copying the payload from b.  It
+// returns the number of bytes written.  The control message cm allows
+// the datagram path and the outgoing interface to be specified.
+// Currently only Darwin and Linux support this.  The cm may be nil if
+// control of the outgoing datagram is not required.
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	oob := marshalControlMessage(cm)
+	if dst == nil {
+		return 0, errMissingAddress
+	}
+	switch c := c.PacketConn.(type) {
+	case *net.UDPConn:
+		n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr))
+	case *net.IPConn:
+		n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr))
+	default:
+		return 0, errInvalidConnType
+	}
+	if err != nil {
+		return 0, err
+	}
+	return
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/payload_nocmsg.go b/harvester/vendor/golang.org/x/net/ipv4/payload_nocmsg.go
new file mode 100644
index 0000000..6f1b402
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/payload_nocmsg.go
@@ -0,0 +1,42 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build plan9 windows
+
+package ipv4
+
+import (
+	"net"
+	"syscall"
+)
+
+// ReadFrom reads the payload of a received IPv4 datagram from the
+// endpoint c, copying the payload into b.  It returns the number of
+// bytes copied into b, the control message cm and the source address
+// src of the received datagram.
+func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {
+	if !c.ok() {
+		return 0, nil, nil, syscall.EINVAL
+	}
+	if n, src, err = c.PacketConn.ReadFrom(b); err != nil {
+		return 0, nil, nil, err
+	}
+	return
+}
+
+// WriteTo writes the payload of an IPv4 datagram to the destination
+// address dst through the endpoint c, copying the payload from b.  It
+// returns the number of bytes written.  The control message cm allows
+// the datagram path and the outgoing interface to be specified.
+// Currently only Darwin and Linux support this.  The cm may be nil if
+// control of the outgoing datagram is not required.
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	if dst == nil {
+		return 0, errMissingAddress
+	}
+	return c.PacketConn.WriteTo(b, dst)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sockopt.go b/harvester/vendor/golang.org/x/net/ipv4/sockopt.go
new file mode 100644
index 0000000..ace37d3
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sockopt.go
@@ -0,0 +1,46 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+// Sticky socket options
+const (
+	ssoTOS                = iota // header field for unicast packet
+	ssoTTL                       // header field for unicast packet
+	ssoMulticastTTL              // header field for multicast packet
+	ssoMulticastInterface        // outbound interface for multicast packet
+	ssoMulticastLoopback         // loopback for multicast packet
+	ssoReceiveTTL                // header field on received packet
+	ssoReceiveDst                // header field on received packet
+	ssoReceiveInterface          // inbound interface on received packet
+	ssoPacketInfo                // inbound or outbound packet path
+	ssoHeaderPrepend             // ipv4 header prepend
+	ssoStripHeader               // strip ipv4 header
+	ssoICMPFilter                // icmp filter
+	ssoJoinGroup                 // any-source multicast
+	ssoLeaveGroup                // any-source multicast
+	ssoJoinSourceGroup           // source-specific multicast
+	ssoLeaveSourceGroup          // source-specific multicast
+	ssoBlockSourceGroup          // any-source or source-specific multicast
+	ssoUnblockSourceGroup        // any-source or source-specific multicast
+	ssoMax
+)
+
+// Sticky socket option value types
+const (
+	ssoTypeByte = iota + 1
+	ssoTypeInt
+	ssoTypeInterface
+	ssoTypeICMPFilter
+	ssoTypeIPMreq
+	ssoTypeIPMreqn
+	ssoTypeGroupReq
+	ssoTypeGroupSourceReq
+)
+
+// A sockOpt represents a binding for a sticky socket option.
+type sockOpt struct {
+	name int // option name, must be greater than or equal to 1
+	typ  int // option value type, must be greater than or equal to 1
+}
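The sso* names above index the per-platform sockOpts tables defined later in this change; callers reach them through exported setters. A minimal sketch under the assumption that p is an *ipv4.PacketConn (the numeric values are arbitrary examples):

package main

import (
	"log"

	"golang.org/x/net/ipv4"
)

// applyStickyOpts sets a few sticky options that persist across packets.
func applyStickyOpts(p *ipv4.PacketConn) {
	if err := p.SetTOS(0x28); err != nil { // ssoTOS
		log.Println(err)
	}
	if err := p.SetTTL(64); err != nil { // ssoTTL
		log.Println(err)
	}
	if err := p.SetMulticastTTL(2); err != nil { // ssoMulticastTTL
		log.Println(err)
	}
}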
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go b/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go
new file mode 100644
index 0000000..8092f1d
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreq.go
@@ -0,0 +1,83 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd solaris windows
+
+package ipv4
+
+import "net"
+
+func setIPMreqInterface(mreq *ipMreq, ifi *net.Interface) error {
+	if ifi == nil {
+		return nil
+	}
+	ifat, err := ifi.Addrs()
+	if err != nil {
+		return err
+	}
+	for _, ifa := range ifat {
+		switch ifa := ifa.(type) {
+		case *net.IPAddr:
+			if ip := ifa.IP.To4(); ip != nil {
+				copy(mreq.Interface[:], ip)
+				return nil
+			}
+		case *net.IPNet:
+			if ip := ifa.IP.To4(); ip != nil {
+				copy(mreq.Interface[:], ip)
+				return nil
+			}
+		}
+	}
+	return errNoSuchInterface
+}
+
+func netIP4ToInterface(ip net.IP) (*net.Interface, error) {
+	ift, err := net.Interfaces()
+	if err != nil {
+		return nil, err
+	}
+	for _, ifi := range ift {
+		ifat, err := ifi.Addrs()
+		if err != nil {
+			return nil, err
+		}
+		for _, ifa := range ifat {
+			switch ifa := ifa.(type) {
+			case *net.IPAddr:
+				if ip.Equal(ifa.IP) {
+					return &ifi, nil
+				}
+			case *net.IPNet:
+				if ip.Equal(ifa.IP) {
+					return &ifi, nil
+				}
+			}
+		}
+	}
+	return nil, errNoSuchInterface
+}
+
+func netInterfaceToIP4(ifi *net.Interface) (net.IP, error) {
+	if ifi == nil {
+		return net.IPv4zero.To4(), nil
+	}
+	ifat, err := ifi.Addrs()
+	if err != nil {
+		return nil, err
+	}
+	for _, ifa := range ifat {
+		switch ifa := ifa.(type) {
+		case *net.IPAddr:
+			if ip := ifa.IP.To4(); ip != nil {
+				return ip, nil
+			}
+		case *net.IPNet:
+			if ip := ifa.IP.To4(); ip != nil {
+				return ip, nil
+			}
+		}
+	}
+	return nil, errNoSuchInterface
+}
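These helpers back the legacy ip_mreq join path: setIPMreqInterface resolves an interface to one of its IPv4 addresses because ip_mreq identifies the interface by address rather than by index. From the caller's side this is reached via JoinGroup; the interface name and group below are arbitrary examples.

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("udp4", "0.0.0.0:1024") // example address
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv4.NewPacketConn(c)
	ifi, err := net.InterfaceByName("en0") // example interface name
	if err != nil {
		log.Fatal(err)
	}
	// On the platforms in this file, JoinGroup fills an ipMreq via
	// setIPMreqInterface before calling setsockopt.
	if err := p.JoinGroup(ifi, &net.UDPAddr{IP: net.IPv4(224, 0, 0, 250)}); err != nil {
		log.Fatal(err)
	}
}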
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreq_posix.go b/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreq_posix.go
new file mode 100644
index 0000000..2259a39
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreq_posix.go
@@ -0,0 +1,46 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd solaris windows
+
+package ipv4
+
+import (
+	"net"
+	"os"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+)
+
+func setsockoptIPMreq(s uintptr, name int, ifi *net.Interface, grp net.IP) error {
+	mreq := ipMreq{Multiaddr: [4]byte{grp[0], grp[1], grp[2], grp[3]}}
+	if err := setIPMreqInterface(&mreq, ifi); err != nil {
+		return err
+	}
+	return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolIP, name, unsafe.Pointer(&mreq), sizeofIPMreq))
+}
+
+func getsockoptInterface(s uintptr, name int) (*net.Interface, error) {
+	var b [4]byte
+	l := uint32(4)
+	if err := getsockopt(s, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), &l); err != nil {
+		return nil, os.NewSyscallError("getsockopt", err)
+	}
+	ifi, err := netIP4ToInterface(net.IPv4(b[0], b[1], b[2], b[3]))
+	if err != nil {
+		return nil, err
+	}
+	return ifi, nil
+}
+
+func setsockoptInterface(s uintptr, name int, ifi *net.Interface) error {
+	ip, err := netInterfaceToIP4(ifi)
+	if err != nil {
+		return err
+	}
+	var b [4]byte
+	copy(b[:], ip)
+	return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolIP, name, unsafe.Pointer(&b[0]), uint32(4)))
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go b/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go
new file mode 100644
index 0000000..e655635
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreq_stub.go
@@ -0,0 +1,21 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!windows
+
+package ipv4
+
+import "net"
+
+func setsockoptIPMreq(s uintptr, name int, ifi *net.Interface, grp net.IP) error {
+	return errOpNoSupport
+}
+
+func getsockoptInterface(s uintptr, name int) (*net.Interface, error) {
+	return nil, errOpNoSupport
+}
+
+func setsockoptInterface(s uintptr, name int, ifi *net.Interface) error {
+	return errOpNoSupport
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go b/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go
new file mode 100644
index 0000000..0c7f0f8
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_stub.go
@@ -0,0 +1,17 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !darwin,!freebsd,!linux
+
+package ipv4
+
+import "net"
+
+func getsockoptIPMreqn(s uintptr, name int) (*net.Interface, error) {
+	return nil, errOpNoSupport
+}
+
+func setsockoptIPMreqn(s uintptr, name int, ifi *net.Interface, grp net.IP) error {
+	return errOpNoSupport
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go b/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go
new file mode 100644
index 0000000..92daffb
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sockopt_asmreqn_unix.go
@@ -0,0 +1,42 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd linux
+
+package ipv4
+
+import (
+	"net"
+	"os"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+)
+
+func getsockoptIPMreqn(s uintptr, name int) (*net.Interface, error) {
+	var mreqn ipMreqn
+	l := uint32(sizeofIPMreqn)
+	if err := getsockopt(s, iana.ProtocolIP, name, unsafe.Pointer(&mreqn), &l); err != nil {
+		return nil, os.NewSyscallError("getsockopt", err)
+	}
+	if mreqn.Ifindex == 0 {
+		return nil, nil
+	}
+	ifi, err := net.InterfaceByIndex(int(mreqn.Ifindex))
+	if err != nil {
+		return nil, err
+	}
+	return ifi, nil
+}
+
+func setsockoptIPMreqn(s uintptr, name int, ifi *net.Interface, grp net.IP) error {
+	var mreqn ipMreqn
+	if ifi != nil {
+		mreqn.Ifindex = int32(ifi.Index)
+	}
+	if grp != nil {
+		mreqn.Multiaddr = [4]byte{grp[0], grp[1], grp[2], grp[3]}
+	}
+	return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolIP, name, unsafe.Pointer(&mreqn), sizeofIPMreqn))
+}
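Because ip_mreqn carries an interface index rather than an address, the platforms covered by this file can select a multicast interface even when it has no IPv4 address configured. A sketch of the exported path that lands in setsockoptIPMreqn (the interface name is an arbitrary example):

package main

import (
	"net"

	"golang.org/x/net/ipv4"
)

// selectOutboundInterface pins outgoing multicast traffic to the named
// interface; on darwin/freebsd/linux this uses the index-based ipMreqn.
func selectOutboundInterface(p *ipv4.PacketConn, name string) error {
	ifi, err := net.InterfaceByName(name) // e.g. "eth0"
	if err != nil {
		return err
	}
	return p.SetMulticastInterface(ifi)
}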
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sockopt_posix.go b/harvester/vendor/golang.org/x/net/ipv4/sockopt_posix.go
new file mode 100644
index 0000000..d806803
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sockopt_posix.go
@@ -0,0 +1,122 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package ipv4
+
+import (
+	"net"
+	"os"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+)
+
+func getInt(s uintptr, opt *sockOpt) (int, error) {
+	if opt.name < 1 || (opt.typ != ssoTypeByte && opt.typ != ssoTypeInt) {
+		return 0, errOpNoSupport
+	}
+	var i int32
+	var b byte
+	p := unsafe.Pointer(&i)
+	l := uint32(4)
+	if opt.typ == ssoTypeByte {
+		p = unsafe.Pointer(&b)
+		l = 1
+	}
+	if err := getsockopt(s, iana.ProtocolIP, opt.name, p, &l); err != nil {
+		return 0, os.NewSyscallError("getsockopt", err)
+	}
+	if opt.typ == ssoTypeByte {
+		return int(b), nil
+	}
+	return int(i), nil
+}
+
+func setInt(s uintptr, opt *sockOpt, v int) error {
+	if opt.name < 1 || (opt.typ != ssoTypeByte && opt.typ != ssoTypeInt) {
+		return errOpNoSupport
+	}
+	i := int32(v)
+	var b byte
+	p := unsafe.Pointer(&i)
+	l := uint32(4)
+	if opt.typ == ssoTypeByte {
+		b = byte(v)
+		p = unsafe.Pointer(&b)
+		l = 1
+	}
+	return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolIP, opt.name, p, l))
+}
+
+func getInterface(s uintptr, opt *sockOpt) (*net.Interface, error) {
+	if opt.name < 1 {
+		return nil, errOpNoSupport
+	}
+	switch opt.typ {
+	case ssoTypeInterface:
+		return getsockoptInterface(s, opt.name)
+	case ssoTypeIPMreqn:
+		return getsockoptIPMreqn(s, opt.name)
+	default:
+		return nil, errOpNoSupport
+	}
+}
+
+func setInterface(s uintptr, opt *sockOpt, ifi *net.Interface) error {
+	if opt.name < 1 {
+		return errOpNoSupport
+	}
+	switch opt.typ {
+	case ssoTypeInterface:
+		return setsockoptInterface(s, opt.name, ifi)
+	case ssoTypeIPMreqn:
+		return setsockoptIPMreqn(s, opt.name, ifi, nil)
+	default:
+		return errOpNoSupport
+	}
+}
+
+func getICMPFilter(s uintptr, opt *sockOpt) (*ICMPFilter, error) {
+	if opt.name < 1 || opt.typ != ssoTypeICMPFilter {
+		return nil, errOpNoSupport
+	}
+	var f ICMPFilter
+	l := uint32(sizeofICMPFilter)
+	if err := getsockopt(s, iana.ProtocolReserved, opt.name, unsafe.Pointer(&f.icmpFilter), &l); err != nil {
+		return nil, os.NewSyscallError("getsockopt", err)
+	}
+	return &f, nil
+}
+
+func setICMPFilter(s uintptr, opt *sockOpt, f *ICMPFilter) error {
+	if opt.name < 1 || opt.typ != ssoTypeICMPFilter {
+		return errOpNoSupport
+	}
+	return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolReserved, opt.name, unsafe.Pointer(&f.icmpFilter), sizeofICMPFilter))
+}
+
+func setGroup(s uintptr, opt *sockOpt, ifi *net.Interface, grp net.IP) error {
+	if opt.name < 1 {
+		return errOpNoSupport
+	}
+	switch opt.typ {
+	case ssoTypeIPMreq:
+		return setsockoptIPMreq(s, opt.name, ifi, grp)
+	case ssoTypeIPMreqn:
+		return setsockoptIPMreqn(s, opt.name, ifi, grp)
+	case ssoTypeGroupReq:
+		return setsockoptGroupReq(s, opt.name, ifi, grp)
+	default:
+		return errOpNoSupport
+	}
+}
+
+func setSourceGroup(s uintptr, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error {
+	if opt.name < 1 || opt.typ != ssoTypeGroupSourceReq {
+		return errOpNoSupport
+	}
+	return setsockoptGroupSourceReq(s, opt.name, ifi, grp, src)
+}
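getICMPFilter and setICMPFilter above implement a Linux-only sticky option (ssoICMPFilter). A hedged sketch of the exported usage; note that opening a raw ICMP socket typically requires elevated privileges:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv4"
)

func main() {
	c, err := net.ListenPacket("ip4:icmp", "0.0.0.0")
	if err != nil {
		log.Fatal(err) // typically requires root or CAP_NET_RAW
	}
	defer c.Close()

	p := ipv4.NewPacketConn(c)
	var f ipv4.ICMPFilter
	f.SetAll(true)                   // start by blocking everything
	f.Accept(ipv4.ICMPTypeEchoReply) // then allow echo replies
	if err := p.SetICMPFilter(&f); err != nil {
		log.Fatal(err) // fails on platforms without ICMP filter support
	}
}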
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go b/harvester/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go
new file mode 100644
index 0000000..0287396
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_stub.go
@@ -0,0 +1,17 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !darwin,!freebsd,!linux,!solaris
+
+package ipv4
+
+import "net"
+
+func setsockoptGroupReq(s uintptr, name int, ifi *net.Interface, grp net.IP) error {
+	return errOpNoSupport
+}
+
+func setsockoptGroupSourceReq(s uintptr, name int, ifi *net.Interface, grp, src net.IP) error {
+	return errOpNoSupport
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go b/harvester/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go
new file mode 100644
index 0000000..c9af55b
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sockopt_ssmreq_unix.go
@@ -0,0 +1,61 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd linux solaris
+
+package ipv4
+
+import (
+	"net"
+	"os"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+)
+
+var freebsd32o64 bool
+
+func setsockoptGroupReq(s uintptr, name int, ifi *net.Interface, grp net.IP) error {
+	var gr groupReq
+	if ifi != nil {
+		gr.Interface = uint32(ifi.Index)
+	}
+	gr.setGroup(grp)
+	var p unsafe.Pointer
+	var l uint32
+	if freebsd32o64 {
+		var d [sizeofGroupReq + 4]byte
+		s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))
+		copy(d[:4], s[:4])
+		copy(d[8:], s[4:])
+		p = unsafe.Pointer(&d[0])
+		l = sizeofGroupReq + 4
+	} else {
+		p = unsafe.Pointer(&gr)
+		l = sizeofGroupReq
+	}
+	return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolIP, name, p, l))
+}
+
+func setsockoptGroupSourceReq(s uintptr, name int, ifi *net.Interface, grp, src net.IP) error {
+	var gsr groupSourceReq
+	if ifi != nil {
+		gsr.Interface = uint32(ifi.Index)
+	}
+	gsr.setSourceGroup(grp, src)
+	var p unsafe.Pointer
+	var l uint32
+	if freebsd32o64 {
+		var d [sizeofGroupSourceReq + 4]byte
+		s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))
+		copy(d[:4], s[:4])
+		copy(d[8:], s[4:])
+		p = unsafe.Pointer(&d[0])
+		l = sizeofGroupSourceReq + 4
+	} else {
+		p = unsafe.Pointer(&gsr)
+		l = sizeofGroupSourceReq
+	}
+	return os.NewSyscallError("setsockopt", setsockopt(s, iana.ProtocolIP, name, p, l))
+}
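These group_source_req helpers back the source-specific multicast calls; the freebsd32o64 shuffle compensates for an ABI padding difference when a 32-bit FreeBSD binary runs on a 64-bit kernel. A sketch of the exported entry point (the group and source addresses are arbitrary examples):

package main

import (
	"net"

	"golang.org/x/net/ipv4"
)

// joinSSM subscribes to traffic for group sent only by source; this
// dispatches into setsockoptGroupSourceReq on the platforms above.
func joinSSM(p *ipv4.PacketConn, ifi *net.Interface) error {
	group := &net.UDPAddr{IP: net.IPv4(232, 0, 1, 1)}  // example SSM group
	source := &net.UDPAddr{IP: net.IPv4(192, 0, 2, 1)} // example source
	return p.JoinSourceSpecificGroup(ifi, group, source)
}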
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sockopt_stub.go b/harvester/vendor/golang.org/x/net/ipv4/sockopt_stub.go
new file mode 100644
index 0000000..4ff6099
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sockopt_stub.go
@@ -0,0 +1,11 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package ipv4
+
+func setInt(s uintptr, opt *sockOpt, v int) error {
+	return errOpNoSupport
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sys_bsd.go b/harvester/vendor/golang.org/x/net/ipv4/sys_bsd.go
new file mode 100644
index 0000000..203033d
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sys_bsd.go
@@ -0,0 +1,34 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly netbsd
+
+package ipv4
+
+import (
+	"net"
+	"syscall"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlTTL:       {sysIP_RECVTTL, 1, marshalTTL, parseTTL},
+		ctlDst:       {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst},
+		ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface},
+	}
+
+	sockOpts = [ssoMax]sockOpt{
+		ssoTOS:                {sysIP_TOS, ssoTypeInt},
+		ssoTTL:                {sysIP_TTL, ssoTypeInt},
+		ssoMulticastTTL:       {sysIP_MULTICAST_TTL, ssoTypeByte},
+		ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface},
+		ssoMulticastLoopback:  {sysIP_MULTICAST_LOOP, ssoTypeInt},
+		ssoReceiveTTL:         {sysIP_RECVTTL, ssoTypeInt},
+		ssoReceiveDst:         {sysIP_RECVDSTADDR, ssoTypeInt},
+		ssoReceiveInterface:   {sysIP_RECVIF, ssoTypeInt},
+		ssoHeaderPrepend:      {sysIP_HDRINCL, ssoTypeInt},
+		ssoJoinGroup:          {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq},
+		ssoLeaveGroup:         {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq},
+	}
+)
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sys_darwin.go b/harvester/vendor/golang.org/x/net/ipv4/sys_darwin.go
new file mode 100644
index 0000000..abfffca
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sys_darwin.go
@@ -0,0 +1,97 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"net"
+	"strconv"
+	"strings"
+	"syscall"
+	"unsafe"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlTTL:       {sysIP_RECVTTL, 1, marshalTTL, parseTTL},
+		ctlDst:       {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst},
+		ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface},
+	}
+
+	sockOpts = [ssoMax]sockOpt{
+		ssoTOS:                {sysIP_TOS, ssoTypeInt},
+		ssoTTL:                {sysIP_TTL, ssoTypeInt},
+		ssoMulticastTTL:       {sysIP_MULTICAST_TTL, ssoTypeByte},
+		ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface},
+		ssoMulticastLoopback:  {sysIP_MULTICAST_LOOP, ssoTypeInt},
+		ssoReceiveTTL:         {sysIP_RECVTTL, ssoTypeInt},
+		ssoReceiveDst:         {sysIP_RECVDSTADDR, ssoTypeInt},
+		ssoReceiveInterface:   {sysIP_RECVIF, ssoTypeInt},
+		ssoHeaderPrepend:      {sysIP_HDRINCL, ssoTypeInt},
+		ssoStripHeader:        {sysIP_STRIPHDR, ssoTypeInt},
+		ssoJoinGroup:          {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq},
+		ssoLeaveGroup:         {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq},
+	}
+)
+
+func init() {
+	// The kern.osreldate sysctl appears to be hidden on recent
+	// OS X releases, so kern.osrelease is used instead.
+	s, err := syscall.Sysctl("kern.osrelease")
+	if err != nil {
+		return
+	}
+	ss := strings.Split(s, ".")
+	if len(ss) == 0 {
+		return
+	}
+	// The IP_PKTINFO and protocol-independent multicast APIs were
+	// introduced in OS X 10.7 (Darwin 11), but in practice those
+	// features require OS X 10.8 (Darwin 12) or later.
+	// See http://support.apple.com/kb/HT1633.
+	if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 {
+		return
+	}
+	ctlOpts[ctlPacketInfo].name = sysIP_PKTINFO
+	ctlOpts[ctlPacketInfo].length = sizeofInetPktinfo
+	ctlOpts[ctlPacketInfo].marshal = marshalPacketInfo
+	ctlOpts[ctlPacketInfo].parse = parsePacketInfo
+	sockOpts[ssoPacketInfo].name = sysIP_RECVPKTINFO
+	sockOpts[ssoPacketInfo].typ = ssoTypeInt
+	sockOpts[ssoMulticastInterface].typ = ssoTypeIPMreqn
+	sockOpts[ssoJoinGroup].name = sysMCAST_JOIN_GROUP
+	sockOpts[ssoJoinGroup].typ = ssoTypeGroupReq
+	sockOpts[ssoLeaveGroup].name = sysMCAST_LEAVE_GROUP
+	sockOpts[ssoLeaveGroup].typ = ssoTypeGroupReq
+	sockOpts[ssoJoinSourceGroup].name = sysMCAST_JOIN_SOURCE_GROUP
+	sockOpts[ssoJoinSourceGroup].typ = ssoTypeGroupSourceReq
+	sockOpts[ssoLeaveSourceGroup].name = sysMCAST_LEAVE_SOURCE_GROUP
+	sockOpts[ssoLeaveSourceGroup].typ = ssoTypeGroupSourceReq
+	sockOpts[ssoBlockSourceGroup].name = sysMCAST_BLOCK_SOURCE
+	sockOpts[ssoBlockSourceGroup].typ = ssoTypeGroupSourceReq
+	sockOpts[ssoUnblockSourceGroup].name = sysMCAST_UNBLOCK_SOURCE
+	sockOpts[ssoUnblockSourceGroup].typ = ssoTypeGroupSourceReq
+}
+
+func (pi *inetPktinfo) setIfindex(i int) {
+	pi.Ifindex = uint32(i)
+}
+
+func (gr *groupReq) setGroup(grp net.IP) {
+	sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4))
+	sa.Len = sizeofSockaddrInet
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], grp)
+}
+
+func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {
+	sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4))
+	sa.Len = sizeofSockaddrInet
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], grp)
+	sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132))
+	sa.Len = sizeofSockaddrInet
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], src)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sys_freebsd.go b/harvester/vendor/golang.org/x/net/ipv4/sys_freebsd.go
new file mode 100644
index 0000000..fceffe9
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sys_freebsd.go
@@ -0,0 +1,73 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"net"
+	"runtime"
+	"strings"
+	"syscall"
+	"unsafe"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlTTL:       {sysIP_RECVTTL, 1, marshalTTL, parseTTL},
+		ctlDst:       {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst},
+		ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface},
+	}
+
+	sockOpts = [ssoMax]sockOpt{
+		ssoTOS:                {sysIP_TOS, ssoTypeInt},
+		ssoTTL:                {sysIP_TTL, ssoTypeInt},
+		ssoMulticastTTL:       {sysIP_MULTICAST_TTL, ssoTypeByte},
+		ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface},
+		ssoMulticastLoopback:  {sysIP_MULTICAST_LOOP, ssoTypeInt},
+		ssoReceiveTTL:         {sysIP_RECVTTL, ssoTypeInt},
+		ssoReceiveDst:         {sysIP_RECVDSTADDR, ssoTypeInt},
+		ssoReceiveInterface:   {sysIP_RECVIF, ssoTypeInt},
+		ssoHeaderPrepend:      {sysIP_HDRINCL, ssoTypeInt},
+		ssoJoinGroup:          {sysMCAST_JOIN_GROUP, ssoTypeGroupReq},
+		ssoLeaveGroup:         {sysMCAST_LEAVE_GROUP, ssoTypeGroupReq},
+		ssoJoinSourceGroup:    {sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq},
+		ssoLeaveSourceGroup:   {sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq},
+		ssoBlockSourceGroup:   {sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq},
+		ssoUnblockSourceGroup: {sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq},
+	}
+)
+
+func init() {
+	freebsdVersion, _ = syscall.SysctlUint32("kern.osreldate")
+	if freebsdVersion >= 1000000 {
+		sockOpts[ssoMulticastInterface].typ = ssoTypeIPMreqn
+	}
+	if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" {
+		archs, _ := syscall.Sysctl("kern.supported_archs")
+		for _, s := range strings.Fields(archs) {
+			if s == "amd64" {
+				freebsd32o64 = true
+				break
+			}
+		}
+	}
+}
+
+func (gr *groupReq) setGroup(grp net.IP) {
+	sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group))
+	sa.Len = sizeofSockaddrInet
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], grp)
+}
+
+func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {
+	sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group))
+	sa.Len = sizeofSockaddrInet
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], grp)
+	sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source))
+	sa.Len = sizeofSockaddrInet
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], src)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sys_linux.go b/harvester/vendor/golang.org/x/net/ipv4/sys_linux.go
new file mode 100644
index 0000000..c6c2a50
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sys_linux.go
@@ -0,0 +1,55 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"net"
+	"syscall"
+	"unsafe"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlTTL:        {sysIP_TTL, 1, marshalTTL, parseTTL},
+		ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo},
+	}
+
+	sockOpts = [ssoMax]sockOpt{
+		ssoTOS:                {sysIP_TOS, ssoTypeInt},
+		ssoTTL:                {sysIP_TTL, ssoTypeInt},
+		ssoMulticastTTL:       {sysIP_MULTICAST_TTL, ssoTypeInt},
+		ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeIPMreqn},
+		ssoMulticastLoopback:  {sysIP_MULTICAST_LOOP, ssoTypeInt},
+		ssoReceiveTTL:         {sysIP_RECVTTL, ssoTypeInt},
+		ssoPacketInfo:         {sysIP_PKTINFO, ssoTypeInt},
+		ssoHeaderPrepend:      {sysIP_HDRINCL, ssoTypeInt},
+		ssoICMPFilter:         {sysICMP_FILTER, ssoTypeICMPFilter},
+		ssoJoinGroup:          {sysMCAST_JOIN_GROUP, ssoTypeGroupReq},
+		ssoLeaveGroup:         {sysMCAST_LEAVE_GROUP, ssoTypeGroupReq},
+		ssoJoinSourceGroup:    {sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq},
+		ssoLeaveSourceGroup:   {sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq},
+		ssoBlockSourceGroup:   {sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq},
+		ssoUnblockSourceGroup: {sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq},
+	}
+)
+
+func (pi *inetPktinfo) setIfindex(i int) {
+	pi.Ifindex = int32(i)
+}
+
+func (gr *groupReq) setGroup(grp net.IP) {
+	sa := (*sockaddrInet)(unsafe.Pointer(&gr.Group))
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], grp)
+}
+
+func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {
+	sa := (*sockaddrInet)(unsafe.Pointer(&gsr.Group))
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], grp)
+	sa = (*sockaddrInet)(unsafe.Pointer(&gsr.Source))
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], src)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sys_linux_386.s b/harvester/vendor/golang.org/x/net/ipv4/sys_linux_386.s
new file mode 100644
index 0000000..b85551a
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sys_linux_386.s
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT	·socketcall(SB),NOSPLIT,$0-36
+	JMP	syscall·socketcall(SB)
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sys_openbsd.go b/harvester/vendor/golang.org/x/net/ipv4/sys_openbsd.go
new file mode 100644
index 0000000..d78083a
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sys_openbsd.go
@@ -0,0 +1,32 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"net"
+	"syscall"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlTTL:       {sysIP_RECVTTL, 1, marshalTTL, parseTTL},
+		ctlDst:       {sysIP_RECVDSTADDR, net.IPv4len, marshalDst, parseDst},
+		ctlInterface: {sysIP_RECVIF, syscall.SizeofSockaddrDatalink, marshalInterface, parseInterface},
+	}
+
+	sockOpts = [ssoMax]sockOpt{
+		ssoTOS:                {sysIP_TOS, ssoTypeInt},
+		ssoTTL:                {sysIP_TTL, ssoTypeInt},
+		ssoMulticastTTL:       {sysIP_MULTICAST_TTL, ssoTypeByte},
+		ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface},
+		ssoMulticastLoopback:  {sysIP_MULTICAST_LOOP, ssoTypeByte},
+		ssoReceiveTTL:         {sysIP_RECVTTL, ssoTypeInt},
+		ssoReceiveDst:         {sysIP_RECVDSTADDR, ssoTypeInt},
+		ssoReceiveInterface:   {sysIP_RECVIF, ssoTypeInt},
+		ssoHeaderPrepend:      {sysIP_HDRINCL, ssoTypeInt},
+		ssoJoinGroup:          {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq},
+		ssoLeaveGroup:         {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq},
+	}
+)
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sys_solaris.go b/harvester/vendor/golang.org/x/net/ipv4/sys_solaris.go
new file mode 100644
index 0000000..879f39e
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sys_solaris.go
@@ -0,0 +1,54 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"net"
+	"syscall"
+	"unsafe"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlTTL:        {sysIP_RECVTTL, 4, marshalTTL, parseTTL},
+		ctlPacketInfo: {sysIP_PKTINFO, sizeofInetPktinfo, marshalPacketInfo, parsePacketInfo},
+	}
+
+	sockOpts = [ssoMax]sockOpt{
+		ssoTOS:                {sysIP_TOS, ssoTypeInt},
+		ssoTTL:                {sysIP_TTL, ssoTypeInt},
+		ssoMulticastTTL:       {sysIP_MULTICAST_TTL, ssoTypeByte},
+		ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface},
+		ssoMulticastLoopback:  {sysIP_MULTICAST_LOOP, ssoTypeByte},
+		ssoReceiveTTL:         {sysIP_RECVTTL, ssoTypeInt},
+		ssoPacketInfo:         {sysIP_RECVPKTINFO, ssoTypeInt},
+		ssoHeaderPrepend:      {sysIP_HDRINCL, ssoTypeInt},
+		ssoJoinGroup:          {sysMCAST_JOIN_GROUP, ssoTypeGroupReq},
+		ssoLeaveGroup:         {sysMCAST_LEAVE_GROUP, ssoTypeGroupReq},
+		ssoJoinSourceGroup:    {sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq},
+		ssoLeaveSourceGroup:   {sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq},
+		ssoBlockSourceGroup:   {sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq},
+		ssoUnblockSourceGroup: {sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq},
+	}
+)
+
+func (pi *inetPktinfo) setIfindex(i int) {
+	pi.Ifindex = uint32(i)
+}
+
+func (gr *groupReq) setGroup(grp net.IP) {
+	sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4))
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], grp)
+}
+
+func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {
+	sa := (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4))
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], grp)
+	sa = (*sockaddrInet)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260))
+	sa.Family = syscall.AF_INET
+	copy(sa.Addr[:], src)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sys_solaris_amd64.s b/harvester/vendor/golang.org/x/net/ipv4/sys_solaris_amd64.s
new file mode 100644
index 0000000..39d76af
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sys_solaris_amd64.s
@@ -0,0 +1,8 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·sysvicall6(SB),NOSPLIT,$0-88
+	JMP	syscall·sysvicall6(SB)
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sys_stub.go b/harvester/vendor/golang.org/x/net/ipv4/sys_stub.go
new file mode 100644
index 0000000..d6dd812
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sys_stub.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package ipv4
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{}
+
+	sockOpts = [ssoMax]sockOpt{}
+)
diff --git a/harvester/vendor/golang.org/x/net/ipv4/sys_windows.go b/harvester/vendor/golang.org/x/net/ipv4/sys_windows.go
new file mode 100644
index 0000000..fac00bd
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/sys_windows.go
@@ -0,0 +1,62 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+const (
+	// See ws2tcpip.h.
+	sysIP_OPTIONS                = 0x1
+	sysIP_HDRINCL                = 0x2
+	sysIP_TOS                    = 0x3
+	sysIP_TTL                    = 0x4
+	sysIP_MULTICAST_IF           = 0x9
+	sysIP_MULTICAST_TTL          = 0xa
+	sysIP_MULTICAST_LOOP         = 0xb
+	sysIP_ADD_MEMBERSHIP         = 0xc
+	sysIP_DROP_MEMBERSHIP        = 0xd
+	sysIP_DONTFRAGMENT           = 0xe
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0xf
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x10
+	sysIP_PKTINFO                = 0x13
+
+	sizeofInetPktinfo  = 0x8
+	sizeofIPMreq       = 0x8
+	sizeofIPMreqSource = 0xc
+)
+
+type inetPktinfo struct {
+	Addr    [4]byte
+	Ifindex int32
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte
+	Interface [4]byte
+}
+
+type ipMreqSource struct {
+	Multiaddr  [4]byte
+	Sourceaddr [4]byte
+	Interface  [4]byte
+}
+
+// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms738586(v=vs.85).aspx
+var (
+	ctlOpts = [ctlMax]ctlOpt{}
+
+	sockOpts = [ssoMax]sockOpt{
+		ssoTOS:                {sysIP_TOS, ssoTypeInt},
+		ssoTTL:                {sysIP_TTL, ssoTypeInt},
+		ssoMulticastTTL:       {sysIP_MULTICAST_TTL, ssoTypeInt},
+		ssoMulticastInterface: {sysIP_MULTICAST_IF, ssoTypeInterface},
+		ssoMulticastLoopback:  {sysIP_MULTICAST_LOOP, ssoTypeInt},
+		ssoHeaderPrepend:      {sysIP_HDRINCL, ssoTypeInt},
+		ssoJoinGroup:          {sysIP_ADD_MEMBERSHIP, ssoTypeIPMreq},
+		ssoLeaveGroup:         {sysIP_DROP_MEMBERSHIP, ssoTypeIPMreq},
+	}
+)
+
+func (pi *inetPktinfo) setIfindex(i int) {
+	pi.Ifindex = int32(i)
+}
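Note that ctlOpts is empty on Windows, so ReadFrom returns a nil ControlMessage there. Portable callers should guard for that; a minimal sketch:

package main

import (
	"log"

	"golang.org/x/net/ipv4"
)

// readOnce reads a single datagram and tolerates platforms, such as
// Windows, that deliver no per-packet control information.
func readOnce(p *ipv4.PacketConn) error {
	b := make([]byte, 1500)
	n, cm, src, err := p.ReadFrom(b)
	if err != nil {
		return err
	}
	if cm != nil {
		log.Printf("%d bytes from %v on ifindex %d", n, src, cm.IfIndex)
	} else {
		log.Printf("%d bytes from %v (no control message)", n, src)
	}
	return nil
}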
diff --git a/harvester/vendor/golang.org/x/net/ipv4/syscall_linux_386.go b/harvester/vendor/golang.org/x/net/ipv4/syscall_linux_386.go
new file mode 100644
index 0000000..84f60bf
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/syscall_linux_386.go
@@ -0,0 +1,31 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	sysGETSOCKOPT = 0xf
+	sysSETSOCKOPT = 0xe
+)
+
+func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+	if _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 {
+		return error(errno)
+	}
+	return nil
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+	if _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 {
+		return error(errno)
+	}
+	return nil
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/syscall_solaris.go b/harvester/vendor/golang.org/x/net/ipv4/syscall_solaris.go
new file mode 100644
index 0000000..8b0e1e4
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/syscall_solaris.go
@@ -0,0 +1,38 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt "libsocket.so"
+//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so"
+
+//go:linkname procGetsockopt libc___xnet_getsockopt
+//go:linkname procSetsockopt libc_setsockopt
+
+var (
+	procGetsockopt uintptr
+	procSetsockopt uintptr
+)
+
+func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+	_, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procGetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0)
+	if errno != 0 {
+		return error(errno)
+	}
+	return nil
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+	if _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 {
+		return error(errno)
+	}
+	return nil
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/syscall_unix.go b/harvester/vendor/golang.org/x/net/ipv4/syscall_unix.go
new file mode 100644
index 0000000..d952763
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/syscall_unix.go
@@ -0,0 +1,26 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux,!386 netbsd openbsd
+
+package ipv4
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+	if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 {
+		return error(errno)
+	}
+	return nil
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+	if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 {
+		return error(errno)
+	}
+	return nil
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/syscall_windows.go b/harvester/vendor/golang.org/x/net/ipv4/syscall_windows.go
new file mode 100644
index 0000000..0f42d22
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/syscall_windows.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv4
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+	return syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(v), (*int32)(unsafe.Pointer(l)))
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+	return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(v), int32(l))
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_darwin.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_darwin.go
new file mode 100644
index 0000000..c07cc88
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_darwin.go
@@ -0,0 +1,99 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_darwin.go
+
+package ipv4
+
+const (
+	sysIP_OPTIONS     = 0x1
+	sysIP_HDRINCL     = 0x2
+	sysIP_TOS         = 0x3
+	sysIP_TTL         = 0x4
+	sysIP_RECVOPTS    = 0x5
+	sysIP_RECVRETOPTS = 0x6
+	sysIP_RECVDSTADDR = 0x7
+	sysIP_RETOPTS     = 0x8
+	sysIP_RECVIF      = 0x14
+	sysIP_STRIPHDR    = 0x17
+	sysIP_RECVTTL     = 0x18
+	sysIP_BOUND_IF    = 0x19
+	sysIP_PKTINFO     = 0x1a
+	sysIP_RECVPKTINFO = 0x1a
+
+	sysIP_MULTICAST_IF           = 0x9
+	sysIP_MULTICAST_TTL          = 0xa
+	sysIP_MULTICAST_LOOP         = 0xb
+	sysIP_ADD_MEMBERSHIP         = 0xc
+	sysIP_DROP_MEMBERSHIP        = 0xd
+	sysIP_MULTICAST_VIF          = 0xe
+	sysIP_MULTICAST_IFINDEX      = 0x42
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x46
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x47
+	sysIP_BLOCK_SOURCE           = 0x48
+	sysIP_UNBLOCK_SOURCE         = 0x49
+	sysMCAST_JOIN_GROUP          = 0x50
+	sysMCAST_LEAVE_GROUP         = 0x51
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x52
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x53
+	sysMCAST_BLOCK_SOURCE        = 0x54
+	sysMCAST_UNBLOCK_SOURCE      = 0x55
+
+	sizeofSockaddrStorage = 0x80
+	sizeofSockaddrInet    = 0x10
+	sizeofInetPktinfo     = 0xc
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+)
+
+type sockaddrStorage struct {
+	Len         uint8
+	Family      uint8
+	X__ss_pad1  [6]int8
+	X__ss_align int64
+	X__ss_pad2  [112]int8
+}
+
+type sockaddrInet struct {
+	Len    uint8
+	Family uint8
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	Zero   [8]int8
+}
+
+type inetPktinfo struct {
+	Ifindex  uint32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  [4]byte /* in_addr */
+	Sourceaddr [4]byte /* in_addr */
+	Interface  [4]byte /* in_addr */
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [128]byte
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [128]byte
+	Pad_cgo_1 [128]byte
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go
new file mode 100644
index 0000000..c4365e9
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_dragonfly.go
@@ -0,0 +1,31 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_dragonfly.go
+
+package ipv4
+
+const (
+	sysIP_OPTIONS     = 0x1
+	sysIP_HDRINCL     = 0x2
+	sysIP_TOS         = 0x3
+	sysIP_TTL         = 0x4
+	sysIP_RECVOPTS    = 0x5
+	sysIP_RECVRETOPTS = 0x6
+	sysIP_RECVDSTADDR = 0x7
+	sysIP_RETOPTS     = 0x8
+	sysIP_RECVIF      = 0x14
+	sysIP_RECVTTL     = 0x41
+
+	sysIP_MULTICAST_IF    = 0x9
+	sysIP_MULTICAST_TTL   = 0xa
+	sysIP_MULTICAST_LOOP  = 0xb
+	sysIP_MULTICAST_VIF   = 0xe
+	sysIP_ADD_MEMBERSHIP  = 0xc
+	sysIP_DROP_MEMBERSHIP = 0xd
+
+	sizeofIPMreq = 0x8
+)
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go
new file mode 100644
index 0000000..8c4aec9
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_freebsd_386.go
@@ -0,0 +1,93 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package ipv4
+
+const (
+	sysIP_OPTIONS     = 0x1
+	sysIP_HDRINCL     = 0x2
+	sysIP_TOS         = 0x3
+	sysIP_TTL         = 0x4
+	sysIP_RECVOPTS    = 0x5
+	sysIP_RECVRETOPTS = 0x6
+	sysIP_RECVDSTADDR = 0x7
+	sysIP_SENDSRCADDR = 0x7
+	sysIP_RETOPTS     = 0x8
+	sysIP_RECVIF      = 0x14
+	sysIP_ONESBCAST   = 0x17
+	sysIP_BINDANY     = 0x18
+	sysIP_RECVTTL     = 0x41
+	sysIP_MINTTL      = 0x42
+	sysIP_DONTFRAG    = 0x43
+	sysIP_RECVTOS     = 0x44
+
+	sysIP_MULTICAST_IF           = 0x9
+	sysIP_MULTICAST_TTL          = 0xa
+	sysIP_MULTICAST_LOOP         = 0xb
+	sysIP_ADD_MEMBERSHIP         = 0xc
+	sysIP_DROP_MEMBERSHIP        = 0xd
+	sysIP_MULTICAST_VIF          = 0xe
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x46
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x47
+	sysIP_BLOCK_SOURCE           = 0x48
+	sysIP_UNBLOCK_SOURCE         = 0x49
+	sysMCAST_JOIN_GROUP          = 0x50
+	sysMCAST_LEAVE_GROUP         = 0x51
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x52
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x53
+	sysMCAST_BLOCK_SOURCE        = 0x54
+	sysMCAST_UNBLOCK_SOURCE      = 0x55
+
+	sizeofSockaddrStorage = 0x80
+	sizeofSockaddrInet    = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+)
+
+type sockaddrStorage struct {
+	Len         uint8
+	Family      uint8
+	X__ss_pad1  [6]int8
+	X__ss_align int64
+	X__ss_pad2  [112]int8
+}
+
+type sockaddrInet struct {
+	Len    uint8
+	Family uint8
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	Zero   [8]int8
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  [4]byte /* in_addr */
+	Sourceaddr [4]byte /* in_addr */
+	Interface  [4]byte /* in_addr */
+}
+
+type groupReq struct {
+	Interface uint32
+	Group     sockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Group     sockaddrStorage
+	Source    sockaddrStorage
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go
new file mode 100644
index 0000000..4b10b7c
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_freebsd_amd64.go
@@ -0,0 +1,95 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package ipv4
+
+const (
+	sysIP_OPTIONS     = 0x1
+	sysIP_HDRINCL     = 0x2
+	sysIP_TOS         = 0x3
+	sysIP_TTL         = 0x4
+	sysIP_RECVOPTS    = 0x5
+	sysIP_RECVRETOPTS = 0x6
+	sysIP_RECVDSTADDR = 0x7
+	sysIP_SENDSRCADDR = 0x7
+	sysIP_RETOPTS     = 0x8
+	sysIP_RECVIF      = 0x14
+	sysIP_ONESBCAST   = 0x17
+	sysIP_BINDANY     = 0x18
+	sysIP_RECVTTL     = 0x41
+	sysIP_MINTTL      = 0x42
+	sysIP_DONTFRAG    = 0x43
+	sysIP_RECVTOS     = 0x44
+
+	sysIP_MULTICAST_IF           = 0x9
+	sysIP_MULTICAST_TTL          = 0xa
+	sysIP_MULTICAST_LOOP         = 0xb
+	sysIP_ADD_MEMBERSHIP         = 0xc
+	sysIP_DROP_MEMBERSHIP        = 0xd
+	sysIP_MULTICAST_VIF          = 0xe
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x46
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x47
+	sysIP_BLOCK_SOURCE           = 0x48
+	sysIP_UNBLOCK_SOURCE         = 0x49
+	sysMCAST_JOIN_GROUP          = 0x50
+	sysMCAST_LEAVE_GROUP         = 0x51
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x52
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x53
+	sysMCAST_BLOCK_SOURCE        = 0x54
+	sysMCAST_UNBLOCK_SOURCE      = 0x55
+
+	sizeofSockaddrStorage = 0x80
+	sizeofSockaddrInet    = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+)
+
+type sockaddrStorage struct {
+	Len         uint8
+	Family      uint8
+	X__ss_pad1  [6]int8
+	X__ss_align int64
+	X__ss_pad2  [112]int8
+}
+
+type sockaddrInet struct {
+	Len    uint8
+	Family uint8
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	Zero   [8]int8
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  [4]byte /* in_addr */
+	Sourceaddr [4]byte /* in_addr */
+	Interface  [4]byte /* in_addr */
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     sockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     sockaddrStorage
+	Source    sockaddrStorage
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go
new file mode 100644
index 0000000..4b10b7c
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_freebsd_arm.go
@@ -0,0 +1,95 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package ipv4
+
+const (
+	sysIP_OPTIONS     = 0x1
+	sysIP_HDRINCL     = 0x2
+	sysIP_TOS         = 0x3
+	sysIP_TTL         = 0x4
+	sysIP_RECVOPTS    = 0x5
+	sysIP_RECVRETOPTS = 0x6
+	sysIP_RECVDSTADDR = 0x7
+	sysIP_SENDSRCADDR = 0x7
+	sysIP_RETOPTS     = 0x8
+	sysIP_RECVIF      = 0x14
+	sysIP_ONESBCAST   = 0x17
+	sysIP_BINDANY     = 0x18
+	sysIP_RECVTTL     = 0x41
+	sysIP_MINTTL      = 0x42
+	sysIP_DONTFRAG    = 0x43
+	sysIP_RECVTOS     = 0x44
+
+	sysIP_MULTICAST_IF           = 0x9
+	sysIP_MULTICAST_TTL          = 0xa
+	sysIP_MULTICAST_LOOP         = 0xb
+	sysIP_ADD_MEMBERSHIP         = 0xc
+	sysIP_DROP_MEMBERSHIP        = 0xd
+	sysIP_MULTICAST_VIF          = 0xe
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x46
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x47
+	sysIP_BLOCK_SOURCE           = 0x48
+	sysIP_UNBLOCK_SOURCE         = 0x49
+	sysMCAST_JOIN_GROUP          = 0x50
+	sysMCAST_LEAVE_GROUP         = 0x51
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x52
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x53
+	sysMCAST_BLOCK_SOURCE        = 0x54
+	sysMCAST_UNBLOCK_SOURCE      = 0x55
+
+	sizeofSockaddrStorage = 0x80
+	sizeofSockaddrInet    = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+)
+
+type sockaddrStorage struct {
+	Len         uint8
+	Family      uint8
+	X__ss_pad1  [6]int8
+	X__ss_align int64
+	X__ss_pad2  [112]int8
+}
+
+type sockaddrInet struct {
+	Len    uint8
+	Family uint8
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	Zero   [8]int8
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  [4]byte /* in_addr */
+	Sourceaddr [4]byte /* in_addr */
+	Interface  [4]byte /* in_addr */
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     sockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     sockaddrStorage
+	Source    sockaddrStorage
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_386.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_386.go
new file mode 100644
index 0000000..4da6720
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_386.go
@@ -0,0 +1,146 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+	sysIP_TOS             = 0x1
+	sysIP_TTL             = 0x2
+	sysIP_HDRINCL         = 0x3
+	sysIP_OPTIONS         = 0x4
+	sysIP_ROUTER_ALERT    = 0x5
+	sysIP_RECVOPTS        = 0x6
+	sysIP_RETOPTS         = 0x7
+	sysIP_PKTINFO         = 0x8
+	sysIP_PKTOPTIONS      = 0x9
+	sysIP_MTU_DISCOVER    = 0xa
+	sysIP_RECVERR         = 0xb
+	sysIP_RECVTTL         = 0xc
+	sysIP_RECVTOS         = 0xd
+	sysIP_MTU             = 0xe
+	sysIP_FREEBIND        = 0xf
+	sysIP_TRANSPARENT     = 0x13
+	sysIP_RECVRETOPTS     = 0x7
+	sysIP_ORIGDSTADDR     = 0x14
+	sysIP_RECVORIGDSTADDR = 0x14
+	sysIP_MINTTL          = 0x15
+	sysIP_NODEFRAG        = 0x16
+	sysIP_UNICAST_IF      = 0x32
+
+	sysIP_MULTICAST_IF           = 0x20
+	sysIP_MULTICAST_TTL          = 0x21
+	sysIP_MULTICAST_LOOP         = 0x22
+	sysIP_ADD_MEMBERSHIP         = 0x23
+	sysIP_DROP_MEMBERSHIP        = 0x24
+	sysIP_UNBLOCK_SOURCE         = 0x25
+	sysIP_BLOCK_SOURCE           = 0x26
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x27
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+	sysIP_MSFILTER               = 0x29
+	sysMCAST_JOIN_GROUP          = 0x2a
+	sysMCAST_LEAVE_GROUP         = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x2f
+	sysMCAST_BLOCK_SOURCE        = 0x2b
+	sysMCAST_UNBLOCK_SOURCE      = 0x2c
+	sysMCAST_MSFILTER            = 0x30
+	sysIP_MULTICAST_ALL          = 0x31
+
+	sysICMP_FILTER = 0x1
+
+	sysSO_EE_ORIGIN_NONE         = 0x0
+	sysSO_EE_ORIGIN_LOCAL        = 0x1
+	sysSO_EE_ORIGIN_ICMP         = 0x2
+	sysSO_EE_ORIGIN_ICMP6        = 0x3
+	sysSO_EE_ORIGIN_TXSTATUS     = 0x4
+	sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet          = 0x10
+	sizeofInetPktinfo           = 0xc
+	sizeofSockExtendedErr       = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+
+	sizeofICMPFilter = 0x4
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	X__pad [8]uint8
+}
+
+type inetPktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type sockExtendedErr struct {
+	Errno  uint32
+	Origin uint8
+	Type   uint8
+	Code   uint8
+	Pad    uint8
+	Info   uint32
+	Data   uint32
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  uint32
+	Interface  uint32
+	Sourceaddr uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpFilter struct {
+	Data uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [2]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go
new file mode 100644
index 0000000..65945bb
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_amd64.go
@@ -0,0 +1,148 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+	sysIP_TOS             = 0x1
+	sysIP_TTL             = 0x2
+	sysIP_HDRINCL         = 0x3
+	sysIP_OPTIONS         = 0x4
+	sysIP_ROUTER_ALERT    = 0x5
+	sysIP_RECVOPTS        = 0x6
+	sysIP_RETOPTS         = 0x7
+	sysIP_PKTINFO         = 0x8
+	sysIP_PKTOPTIONS      = 0x9
+	sysIP_MTU_DISCOVER    = 0xa
+	sysIP_RECVERR         = 0xb
+	sysIP_RECVTTL         = 0xc
+	sysIP_RECVTOS         = 0xd
+	sysIP_MTU             = 0xe
+	sysIP_FREEBIND        = 0xf
+	sysIP_TRANSPARENT     = 0x13
+	sysIP_RECVRETOPTS     = 0x7
+	sysIP_ORIGDSTADDR     = 0x14
+	sysIP_RECVORIGDSTADDR = 0x14
+	sysIP_MINTTL          = 0x15
+	sysIP_NODEFRAG        = 0x16
+	sysIP_UNICAST_IF      = 0x32
+
+	sysIP_MULTICAST_IF           = 0x20
+	sysIP_MULTICAST_TTL          = 0x21
+	sysIP_MULTICAST_LOOP         = 0x22
+	sysIP_ADD_MEMBERSHIP         = 0x23
+	sysIP_DROP_MEMBERSHIP        = 0x24
+	sysIP_UNBLOCK_SOURCE         = 0x25
+	sysIP_BLOCK_SOURCE           = 0x26
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x27
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+	sysIP_MSFILTER               = 0x29
+	sysMCAST_JOIN_GROUP          = 0x2a
+	sysMCAST_LEAVE_GROUP         = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x2f
+	sysMCAST_BLOCK_SOURCE        = 0x2b
+	sysMCAST_UNBLOCK_SOURCE      = 0x2c
+	sysMCAST_MSFILTER            = 0x30
+	sysIP_MULTICAST_ALL          = 0x31
+
+	sysICMP_FILTER = 0x1
+
+	sysSO_EE_ORIGIN_NONE         = 0x0
+	sysSO_EE_ORIGIN_LOCAL        = 0x1
+	sysSO_EE_ORIGIN_ICMP         = 0x2
+	sysSO_EE_ORIGIN_ICMP6        = 0x3
+	sysSO_EE_ORIGIN_TXSTATUS     = 0x4
+	sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet          = 0x10
+	sizeofInetPktinfo           = 0xc
+	sizeofSockExtendedErr       = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPFilter = 0x4
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	X__pad [8]uint8
+}
+
+type inetPktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type sockExtendedErr struct {
+	Errno  uint32
+	Origin uint8
+	Type   uint8
+	Code   uint8
+	Pad    uint8
+	Info   uint32
+	Data   uint32
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  uint32
+	Interface  uint32
+	Sourceaddr uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpFilter struct {
+	Data uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go
new file mode 100644
index 0000000..4da6720
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_arm.go
@@ -0,0 +1,146 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+	sysIP_TOS             = 0x1
+	sysIP_TTL             = 0x2
+	sysIP_HDRINCL         = 0x3
+	sysIP_OPTIONS         = 0x4
+	sysIP_ROUTER_ALERT    = 0x5
+	sysIP_RECVOPTS        = 0x6
+	sysIP_RETOPTS         = 0x7
+	sysIP_PKTINFO         = 0x8
+	sysIP_PKTOPTIONS      = 0x9
+	sysIP_MTU_DISCOVER    = 0xa
+	sysIP_RECVERR         = 0xb
+	sysIP_RECVTTL         = 0xc
+	sysIP_RECVTOS         = 0xd
+	sysIP_MTU             = 0xe
+	sysIP_FREEBIND        = 0xf
+	sysIP_TRANSPARENT     = 0x13
+	sysIP_RECVRETOPTS     = 0x7
+	sysIP_ORIGDSTADDR     = 0x14
+	sysIP_RECVORIGDSTADDR = 0x14
+	sysIP_MINTTL          = 0x15
+	sysIP_NODEFRAG        = 0x16
+	sysIP_UNICAST_IF      = 0x32
+
+	sysIP_MULTICAST_IF           = 0x20
+	sysIP_MULTICAST_TTL          = 0x21
+	sysIP_MULTICAST_LOOP         = 0x22
+	sysIP_ADD_MEMBERSHIP         = 0x23
+	sysIP_DROP_MEMBERSHIP        = 0x24
+	sysIP_UNBLOCK_SOURCE         = 0x25
+	sysIP_BLOCK_SOURCE           = 0x26
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x27
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+	sysIP_MSFILTER               = 0x29
+	sysMCAST_JOIN_GROUP          = 0x2a
+	sysMCAST_LEAVE_GROUP         = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x2f
+	sysMCAST_BLOCK_SOURCE        = 0x2b
+	sysMCAST_UNBLOCK_SOURCE      = 0x2c
+	sysMCAST_MSFILTER            = 0x30
+	sysIP_MULTICAST_ALL          = 0x31
+
+	sysICMP_FILTER = 0x1
+
+	sysSO_EE_ORIGIN_NONE         = 0x0
+	sysSO_EE_ORIGIN_LOCAL        = 0x1
+	sysSO_EE_ORIGIN_ICMP         = 0x2
+	sysSO_EE_ORIGIN_ICMP6        = 0x3
+	sysSO_EE_ORIGIN_TXSTATUS     = 0x4
+	sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet          = 0x10
+	sizeofInetPktinfo           = 0xc
+	sizeofSockExtendedErr       = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+
+	sizeofICMPFilter = 0x4
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	X__pad [8]uint8
+}
+
+type inetPktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type sockExtendedErr struct {
+	Errno  uint32
+	Origin uint8
+	Type   uint8
+	Code   uint8
+	Pad    uint8
+	Info   uint32
+	Data   uint32
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  uint32
+	Interface  uint32
+	Sourceaddr uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpFilter struct {
+	Data uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [2]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go
new file mode 100644
index 0000000..65945bb
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_arm64.go
@@ -0,0 +1,148 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+	sysIP_TOS             = 0x1
+	sysIP_TTL             = 0x2
+	sysIP_HDRINCL         = 0x3
+	sysIP_OPTIONS         = 0x4
+	sysIP_ROUTER_ALERT    = 0x5
+	sysIP_RECVOPTS        = 0x6
+	sysIP_RETOPTS         = 0x7
+	sysIP_PKTINFO         = 0x8
+	sysIP_PKTOPTIONS      = 0x9
+	sysIP_MTU_DISCOVER    = 0xa
+	sysIP_RECVERR         = 0xb
+	sysIP_RECVTTL         = 0xc
+	sysIP_RECVTOS         = 0xd
+	sysIP_MTU             = 0xe
+	sysIP_FREEBIND        = 0xf
+	sysIP_TRANSPARENT     = 0x13
+	sysIP_RECVRETOPTS     = 0x7
+	sysIP_ORIGDSTADDR     = 0x14
+	sysIP_RECVORIGDSTADDR = 0x14
+	sysIP_MINTTL          = 0x15
+	sysIP_NODEFRAG        = 0x16
+	sysIP_UNICAST_IF      = 0x32
+
+	sysIP_MULTICAST_IF           = 0x20
+	sysIP_MULTICAST_TTL          = 0x21
+	sysIP_MULTICAST_LOOP         = 0x22
+	sysIP_ADD_MEMBERSHIP         = 0x23
+	sysIP_DROP_MEMBERSHIP        = 0x24
+	sysIP_UNBLOCK_SOURCE         = 0x25
+	sysIP_BLOCK_SOURCE           = 0x26
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x27
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+	sysIP_MSFILTER               = 0x29
+	sysMCAST_JOIN_GROUP          = 0x2a
+	sysMCAST_LEAVE_GROUP         = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x2f
+	sysMCAST_BLOCK_SOURCE        = 0x2b
+	sysMCAST_UNBLOCK_SOURCE      = 0x2c
+	sysMCAST_MSFILTER            = 0x30
+	sysIP_MULTICAST_ALL          = 0x31
+
+	sysICMP_FILTER = 0x1
+
+	sysSO_EE_ORIGIN_NONE         = 0x0
+	sysSO_EE_ORIGIN_LOCAL        = 0x1
+	sysSO_EE_ORIGIN_ICMP         = 0x2
+	sysSO_EE_ORIGIN_ICMP6        = 0x3
+	sysSO_EE_ORIGIN_TXSTATUS     = 0x4
+	sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet          = 0x10
+	sizeofInetPktinfo           = 0xc
+	sizeofSockExtendedErr       = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPFilter = 0x4
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	X__pad [8]uint8
+}
+
+type inetPktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type sockExtendedErr struct {
+	Errno  uint32
+	Origin uint8
+	Type   uint8
+	Code   uint8
+	Pad    uint8
+	Info   uint32
+	Data   uint32
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  uint32
+	Interface  uint32
+	Sourceaddr uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpFilter struct {
+	Data uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
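
Aside: these generated files differ between architectures mainly in struct padding. On 32-bit Linux targets such as arm and mips the generated groupReq has no explicit pad and sizeofGroupReq is 0x84, while 64-bit targets insert Pad_cgo_0 [4]byte before the sockaddr storage so that it stays 8-byte aligned in the kernel's view, giving 0x88. A standalone sketch that reproduces the two layouts (the types are copied locally for illustration, since the vendored ones are unexported):

package main

import (
	"fmt"
	"unsafe"
)

type kernelSockaddrStorage struct {
	Family  uint16
	X__data [126]int8
}

// 32-bit Linux layout: the storage follows the interface index directly.
type groupReq32 struct {
	Interface uint32
	Group     kernelSockaddrStorage
}

// 64-bit Linux layout: an explicit 4-byte pad keeps Group aligned the
// way the kernel expects.
type groupReq64 struct {
	Interface uint32
	Pad_cgo_0 [4]byte
	Group     kernelSockaddrStorage
}

func main() {
	// Prints "0x84 0x88", matching the sizeofGroupReq constants above.
	fmt.Printf("0x%x 0x%x\n", unsafe.Sizeof(groupReq32{}), unsafe.Sizeof(groupReq64{}))
}
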
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go
new file mode 100644
index 0000000..4da6720
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_mips.go
@@ -0,0 +1,146 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+	sysIP_TOS             = 0x1
+	sysIP_TTL             = 0x2
+	sysIP_HDRINCL         = 0x3
+	sysIP_OPTIONS         = 0x4
+	sysIP_ROUTER_ALERT    = 0x5
+	sysIP_RECVOPTS        = 0x6
+	sysIP_RETOPTS         = 0x7
+	sysIP_PKTINFO         = 0x8
+	sysIP_PKTOPTIONS      = 0x9
+	sysIP_MTU_DISCOVER    = 0xa
+	sysIP_RECVERR         = 0xb
+	sysIP_RECVTTL         = 0xc
+	sysIP_RECVTOS         = 0xd
+	sysIP_MTU             = 0xe
+	sysIP_FREEBIND        = 0xf
+	sysIP_TRANSPARENT     = 0x13
+	sysIP_RECVRETOPTS     = 0x7
+	sysIP_ORIGDSTADDR     = 0x14
+	sysIP_RECVORIGDSTADDR = 0x14
+	sysIP_MINTTL          = 0x15
+	sysIP_NODEFRAG        = 0x16
+	sysIP_UNICAST_IF      = 0x32
+
+	sysIP_MULTICAST_IF           = 0x20
+	sysIP_MULTICAST_TTL          = 0x21
+	sysIP_MULTICAST_LOOP         = 0x22
+	sysIP_ADD_MEMBERSHIP         = 0x23
+	sysIP_DROP_MEMBERSHIP        = 0x24
+	sysIP_UNBLOCK_SOURCE         = 0x25
+	sysIP_BLOCK_SOURCE           = 0x26
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x27
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+	sysIP_MSFILTER               = 0x29
+	sysMCAST_JOIN_GROUP          = 0x2a
+	sysMCAST_LEAVE_GROUP         = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x2f
+	sysMCAST_BLOCK_SOURCE        = 0x2b
+	sysMCAST_UNBLOCK_SOURCE      = 0x2c
+	sysMCAST_MSFILTER            = 0x30
+	sysIP_MULTICAST_ALL          = 0x31
+
+	sysICMP_FILTER = 0x1
+
+	sysSO_EE_ORIGIN_NONE         = 0x0
+	sysSO_EE_ORIGIN_LOCAL        = 0x1
+	sysSO_EE_ORIGIN_ICMP         = 0x2
+	sysSO_EE_ORIGIN_ICMP6        = 0x3
+	sysSO_EE_ORIGIN_TXSTATUS     = 0x4
+	sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet          = 0x10
+	sizeofInetPktinfo           = 0xc
+	sizeofSockExtendedErr       = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+
+	sizeofICMPFilter = 0x4
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	X__pad [8]uint8
+}
+
+type inetPktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type sockExtendedErr struct {
+	Errno  uint32
+	Origin uint8
+	Type   uint8
+	Code   uint8
+	Pad    uint8
+	Info   uint32
+	Data   uint32
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  uint32
+	Interface  uint32
+	Sourceaddr uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpFilter struct {
+	Data uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [2]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go
new file mode 100644
index 0000000..65945bb
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_mips64.go
@@ -0,0 +1,148 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+	sysIP_TOS             = 0x1
+	sysIP_TTL             = 0x2
+	sysIP_HDRINCL         = 0x3
+	sysIP_OPTIONS         = 0x4
+	sysIP_ROUTER_ALERT    = 0x5
+	sysIP_RECVOPTS        = 0x6
+	sysIP_RETOPTS         = 0x7
+	sysIP_PKTINFO         = 0x8
+	sysIP_PKTOPTIONS      = 0x9
+	sysIP_MTU_DISCOVER    = 0xa
+	sysIP_RECVERR         = 0xb
+	sysIP_RECVTTL         = 0xc
+	sysIP_RECVTOS         = 0xd
+	sysIP_MTU             = 0xe
+	sysIP_FREEBIND        = 0xf
+	sysIP_TRANSPARENT     = 0x13
+	sysIP_RECVRETOPTS     = 0x7
+	sysIP_ORIGDSTADDR     = 0x14
+	sysIP_RECVORIGDSTADDR = 0x14
+	sysIP_MINTTL          = 0x15
+	sysIP_NODEFRAG        = 0x16
+	sysIP_UNICAST_IF      = 0x32
+
+	sysIP_MULTICAST_IF           = 0x20
+	sysIP_MULTICAST_TTL          = 0x21
+	sysIP_MULTICAST_LOOP         = 0x22
+	sysIP_ADD_MEMBERSHIP         = 0x23
+	sysIP_DROP_MEMBERSHIP        = 0x24
+	sysIP_UNBLOCK_SOURCE         = 0x25
+	sysIP_BLOCK_SOURCE           = 0x26
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x27
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+	sysIP_MSFILTER               = 0x29
+	sysMCAST_JOIN_GROUP          = 0x2a
+	sysMCAST_LEAVE_GROUP         = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x2f
+	sysMCAST_BLOCK_SOURCE        = 0x2b
+	sysMCAST_UNBLOCK_SOURCE      = 0x2c
+	sysMCAST_MSFILTER            = 0x30
+	sysIP_MULTICAST_ALL          = 0x31
+
+	sysICMP_FILTER = 0x1
+
+	sysSO_EE_ORIGIN_NONE         = 0x0
+	sysSO_EE_ORIGIN_LOCAL        = 0x1
+	sysSO_EE_ORIGIN_ICMP         = 0x2
+	sysSO_EE_ORIGIN_ICMP6        = 0x3
+	sysSO_EE_ORIGIN_TXSTATUS     = 0x4
+	sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet          = 0x10
+	sizeofInetPktinfo           = 0xc
+	sizeofSockExtendedErr       = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPFilter = 0x4
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	X__pad [8]uint8
+}
+
+type inetPktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type sockExtendedErr struct {
+	Errno  uint32
+	Origin uint8
+	Type   uint8
+	Code   uint8
+	Pad    uint8
+	Info   uint32
+	Data   uint32
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  uint32
+	Interface  uint32
+	Sourceaddr uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpFilter struct {
+	Data uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go
new file mode 100644
index 0000000..65945bb
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_mips64le.go
@@ -0,0 +1,148 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+	sysIP_TOS             = 0x1
+	sysIP_TTL             = 0x2
+	sysIP_HDRINCL         = 0x3
+	sysIP_OPTIONS         = 0x4
+	sysIP_ROUTER_ALERT    = 0x5
+	sysIP_RECVOPTS        = 0x6
+	sysIP_RETOPTS         = 0x7
+	sysIP_PKTINFO         = 0x8
+	sysIP_PKTOPTIONS      = 0x9
+	sysIP_MTU_DISCOVER    = 0xa
+	sysIP_RECVERR         = 0xb
+	sysIP_RECVTTL         = 0xc
+	sysIP_RECVTOS         = 0xd
+	sysIP_MTU             = 0xe
+	sysIP_FREEBIND        = 0xf
+	sysIP_TRANSPARENT     = 0x13
+	sysIP_RECVRETOPTS     = 0x7
+	sysIP_ORIGDSTADDR     = 0x14
+	sysIP_RECVORIGDSTADDR = 0x14
+	sysIP_MINTTL          = 0x15
+	sysIP_NODEFRAG        = 0x16
+	sysIP_UNICAST_IF      = 0x32
+
+	sysIP_MULTICAST_IF           = 0x20
+	sysIP_MULTICAST_TTL          = 0x21
+	sysIP_MULTICAST_LOOP         = 0x22
+	sysIP_ADD_MEMBERSHIP         = 0x23
+	sysIP_DROP_MEMBERSHIP        = 0x24
+	sysIP_UNBLOCK_SOURCE         = 0x25
+	sysIP_BLOCK_SOURCE           = 0x26
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x27
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+	sysIP_MSFILTER               = 0x29
+	sysMCAST_JOIN_GROUP          = 0x2a
+	sysMCAST_LEAVE_GROUP         = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x2f
+	sysMCAST_BLOCK_SOURCE        = 0x2b
+	sysMCAST_UNBLOCK_SOURCE      = 0x2c
+	sysMCAST_MSFILTER            = 0x30
+	sysIP_MULTICAST_ALL          = 0x31
+
+	sysICMP_FILTER = 0x1
+
+	sysSO_EE_ORIGIN_NONE         = 0x0
+	sysSO_EE_ORIGIN_LOCAL        = 0x1
+	sysSO_EE_ORIGIN_ICMP         = 0x2
+	sysSO_EE_ORIGIN_ICMP6        = 0x3
+	sysSO_EE_ORIGIN_TXSTATUS     = 0x4
+	sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet          = 0x10
+	sizeofInetPktinfo           = 0xc
+	sizeofSockExtendedErr       = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPFilter = 0x4
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	X__pad [8]uint8
+}
+
+type inetPktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type sockExtendedErr struct {
+	Errno  uint32
+	Origin uint8
+	Type   uint8
+	Code   uint8
+	Pad    uint8
+	Info   uint32
+	Data   uint32
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  uint32
+	Interface  uint32
+	Sourceaddr uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpFilter struct {
+	Data uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go
new file mode 100644
index 0000000..4da6720
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_mipsle.go
@@ -0,0 +1,146 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+	sysIP_TOS             = 0x1
+	sysIP_TTL             = 0x2
+	sysIP_HDRINCL         = 0x3
+	sysIP_OPTIONS         = 0x4
+	sysIP_ROUTER_ALERT    = 0x5
+	sysIP_RECVOPTS        = 0x6
+	sysIP_RETOPTS         = 0x7
+	sysIP_PKTINFO         = 0x8
+	sysIP_PKTOPTIONS      = 0x9
+	sysIP_MTU_DISCOVER    = 0xa
+	sysIP_RECVERR         = 0xb
+	sysIP_RECVTTL         = 0xc
+	sysIP_RECVTOS         = 0xd
+	sysIP_MTU             = 0xe
+	sysIP_FREEBIND        = 0xf
+	sysIP_TRANSPARENT     = 0x13
+	sysIP_RECVRETOPTS     = 0x7
+	sysIP_ORIGDSTADDR     = 0x14
+	sysIP_RECVORIGDSTADDR = 0x14
+	sysIP_MINTTL          = 0x15
+	sysIP_NODEFRAG        = 0x16
+	sysIP_UNICAST_IF      = 0x32
+
+	sysIP_MULTICAST_IF           = 0x20
+	sysIP_MULTICAST_TTL          = 0x21
+	sysIP_MULTICAST_LOOP         = 0x22
+	sysIP_ADD_MEMBERSHIP         = 0x23
+	sysIP_DROP_MEMBERSHIP        = 0x24
+	sysIP_UNBLOCK_SOURCE         = 0x25
+	sysIP_BLOCK_SOURCE           = 0x26
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x27
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+	sysIP_MSFILTER               = 0x29
+	sysMCAST_JOIN_GROUP          = 0x2a
+	sysMCAST_LEAVE_GROUP         = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x2f
+	sysMCAST_BLOCK_SOURCE        = 0x2b
+	sysMCAST_UNBLOCK_SOURCE      = 0x2c
+	sysMCAST_MSFILTER            = 0x30
+	sysIP_MULTICAST_ALL          = 0x31
+
+	sysICMP_FILTER = 0x1
+
+	sysSO_EE_ORIGIN_NONE         = 0x0
+	sysSO_EE_ORIGIN_LOCAL        = 0x1
+	sysSO_EE_ORIGIN_ICMP         = 0x2
+	sysSO_EE_ORIGIN_ICMP6        = 0x3
+	sysSO_EE_ORIGIN_TXSTATUS     = 0x4
+	sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet          = 0x10
+	sizeofInetPktinfo           = 0xc
+	sizeofSockExtendedErr       = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+
+	sizeofICMPFilter = 0x4
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	X__pad [8]uint8
+}
+
+type inetPktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type sockExtendedErr struct {
+	Errno  uint32
+	Origin uint8
+	Type   uint8
+	Code   uint8
+	Pad    uint8
+	Info   uint32
+	Data   uint32
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  uint32
+	Interface  uint32
+	Sourceaddr uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpFilter struct {
+	Data uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [2]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go
new file mode 100644
index 0000000..b825a18
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_ppc.go
@@ -0,0 +1,146 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+	sysIP_TOS             = 0x1
+	sysIP_TTL             = 0x2
+	sysIP_HDRINCL         = 0x3
+	sysIP_OPTIONS         = 0x4
+	sysIP_ROUTER_ALERT    = 0x5
+	sysIP_RECVOPTS        = 0x6
+	sysIP_RETOPTS         = 0x7
+	sysIP_PKTINFO         = 0x8
+	sysIP_PKTOPTIONS      = 0x9
+	sysIP_MTU_DISCOVER    = 0xa
+	sysIP_RECVERR         = 0xb
+	sysIP_RECVTTL         = 0xc
+	sysIP_RECVTOS         = 0xd
+	sysIP_MTU             = 0xe
+	sysIP_FREEBIND        = 0xf
+	sysIP_TRANSPARENT     = 0x13
+	sysIP_RECVRETOPTS     = 0x7
+	sysIP_ORIGDSTADDR     = 0x14
+	sysIP_RECVORIGDSTADDR = 0x14
+	sysIP_MINTTL          = 0x15
+	sysIP_NODEFRAG        = 0x16
+	sysIP_UNICAST_IF      = 0x32
+
+	sysIP_MULTICAST_IF           = 0x20
+	sysIP_MULTICAST_TTL          = 0x21
+	sysIP_MULTICAST_LOOP         = 0x22
+	sysIP_ADD_MEMBERSHIP         = 0x23
+	sysIP_DROP_MEMBERSHIP        = 0x24
+	sysIP_UNBLOCK_SOURCE         = 0x25
+	sysIP_BLOCK_SOURCE           = 0x26
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x27
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+	sysIP_MSFILTER               = 0x29
+	sysMCAST_JOIN_GROUP          = 0x2a
+	sysMCAST_LEAVE_GROUP         = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x2f
+	sysMCAST_BLOCK_SOURCE        = 0x2b
+	sysMCAST_UNBLOCK_SOURCE      = 0x2c
+	sysMCAST_MSFILTER            = 0x30
+	sysIP_MULTICAST_ALL          = 0x31
+
+	sysICMP_FILTER = 0x1
+
+	sysSO_EE_ORIGIN_NONE         = 0x0
+	sysSO_EE_ORIGIN_LOCAL        = 0x1
+	sysSO_EE_ORIGIN_ICMP         = 0x2
+	sysSO_EE_ORIGIN_ICMP6        = 0x3
+	sysSO_EE_ORIGIN_TXSTATUS     = 0x4
+	sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet          = 0x10
+	sizeofInetPktinfo           = 0xc
+	sizeofSockExtendedErr       = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+
+	sizeofICMPFilter = 0x4
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]uint8
+}
+
+type sockaddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	X__pad [8]uint8
+}
+
+type inetPktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type sockExtendedErr struct {
+	Errno  uint32
+	Origin uint8
+	Type   uint8
+	Code   uint8
+	Pad    uint8
+	Info   uint32
+	Data   uint32
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  uint32
+	Interface  uint32
+	Sourceaddr uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpFilter struct {
+	Data uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [2]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go
new file mode 100644
index 0000000..65945bb
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64.go
@@ -0,0 +1,148 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+	sysIP_TOS             = 0x1
+	sysIP_TTL             = 0x2
+	sysIP_HDRINCL         = 0x3
+	sysIP_OPTIONS         = 0x4
+	sysIP_ROUTER_ALERT    = 0x5
+	sysIP_RECVOPTS        = 0x6
+	sysIP_RETOPTS         = 0x7
+	sysIP_PKTINFO         = 0x8
+	sysIP_PKTOPTIONS      = 0x9
+	sysIP_MTU_DISCOVER    = 0xa
+	sysIP_RECVERR         = 0xb
+	sysIP_RECVTTL         = 0xc
+	sysIP_RECVTOS         = 0xd
+	sysIP_MTU             = 0xe
+	sysIP_FREEBIND        = 0xf
+	sysIP_TRANSPARENT     = 0x13
+	sysIP_RECVRETOPTS     = 0x7
+	sysIP_ORIGDSTADDR     = 0x14
+	sysIP_RECVORIGDSTADDR = 0x14
+	sysIP_MINTTL          = 0x15
+	sysIP_NODEFRAG        = 0x16
+	sysIP_UNICAST_IF      = 0x32
+
+	sysIP_MULTICAST_IF           = 0x20
+	sysIP_MULTICAST_TTL          = 0x21
+	sysIP_MULTICAST_LOOP         = 0x22
+	sysIP_ADD_MEMBERSHIP         = 0x23
+	sysIP_DROP_MEMBERSHIP        = 0x24
+	sysIP_UNBLOCK_SOURCE         = 0x25
+	sysIP_BLOCK_SOURCE           = 0x26
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x27
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+	sysIP_MSFILTER               = 0x29
+	sysMCAST_JOIN_GROUP          = 0x2a
+	sysMCAST_LEAVE_GROUP         = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x2f
+	sysMCAST_BLOCK_SOURCE        = 0x2b
+	sysMCAST_UNBLOCK_SOURCE      = 0x2c
+	sysMCAST_MSFILTER            = 0x30
+	sysIP_MULTICAST_ALL          = 0x31
+
+	sysICMP_FILTER = 0x1
+
+	sysSO_EE_ORIGIN_NONE         = 0x0
+	sysSO_EE_ORIGIN_LOCAL        = 0x1
+	sysSO_EE_ORIGIN_ICMP         = 0x2
+	sysSO_EE_ORIGIN_ICMP6        = 0x3
+	sysSO_EE_ORIGIN_TXSTATUS     = 0x4
+	sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet          = 0x10
+	sizeofInetPktinfo           = 0xc
+	sizeofSockExtendedErr       = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPFilter = 0x4
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	X__pad [8]uint8
+}
+
+type inetPktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type sockExtendedErr struct {
+	Errno  uint32
+	Origin uint8
+	Type   uint8
+	Code   uint8
+	Pad    uint8
+	Info   uint32
+	Data   uint32
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  uint32
+	Interface  uint32
+	Sourceaddr uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpFilter struct {
+	Data uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go
new file mode 100644
index 0000000..65945bb
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_ppc64le.go
@@ -0,0 +1,148 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+	sysIP_TOS             = 0x1
+	sysIP_TTL             = 0x2
+	sysIP_HDRINCL         = 0x3
+	sysIP_OPTIONS         = 0x4
+	sysIP_ROUTER_ALERT    = 0x5
+	sysIP_RECVOPTS        = 0x6
+	sysIP_RETOPTS         = 0x7
+	sysIP_PKTINFO         = 0x8
+	sysIP_PKTOPTIONS      = 0x9
+	sysIP_MTU_DISCOVER    = 0xa
+	sysIP_RECVERR         = 0xb
+	sysIP_RECVTTL         = 0xc
+	sysIP_RECVTOS         = 0xd
+	sysIP_MTU             = 0xe
+	sysIP_FREEBIND        = 0xf
+	sysIP_TRANSPARENT     = 0x13
+	sysIP_RECVRETOPTS     = 0x7
+	sysIP_ORIGDSTADDR     = 0x14
+	sysIP_RECVORIGDSTADDR = 0x14
+	sysIP_MINTTL          = 0x15
+	sysIP_NODEFRAG        = 0x16
+	sysIP_UNICAST_IF      = 0x32
+
+	sysIP_MULTICAST_IF           = 0x20
+	sysIP_MULTICAST_TTL          = 0x21
+	sysIP_MULTICAST_LOOP         = 0x22
+	sysIP_ADD_MEMBERSHIP         = 0x23
+	sysIP_DROP_MEMBERSHIP        = 0x24
+	sysIP_UNBLOCK_SOURCE         = 0x25
+	sysIP_BLOCK_SOURCE           = 0x26
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x27
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+	sysIP_MSFILTER               = 0x29
+	sysMCAST_JOIN_GROUP          = 0x2a
+	sysMCAST_LEAVE_GROUP         = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x2f
+	sysMCAST_BLOCK_SOURCE        = 0x2b
+	sysMCAST_UNBLOCK_SOURCE      = 0x2c
+	sysMCAST_MSFILTER            = 0x30
+	sysIP_MULTICAST_ALL          = 0x31
+
+	sysICMP_FILTER = 0x1
+
+	sysSO_EE_ORIGIN_NONE         = 0x0
+	sysSO_EE_ORIGIN_LOCAL        = 0x1
+	sysSO_EE_ORIGIN_ICMP         = 0x2
+	sysSO_EE_ORIGIN_ICMP6        = 0x3
+	sysSO_EE_ORIGIN_TXSTATUS     = 0x4
+	sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet          = 0x10
+	sizeofInetPktinfo           = 0xc
+	sizeofSockExtendedErr       = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPFilter = 0x4
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	X__pad [8]uint8
+}
+
+type inetPktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type sockExtendedErr struct {
+	Errno  uint32
+	Origin uint8
+	Type   uint8
+	Code   uint8
+	Pad    uint8
+	Info   uint32
+	Data   uint32
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  uint32
+	Interface  uint32
+	Sourceaddr uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpFilter struct {
+	Data uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go
new file mode 100644
index 0000000..65945bb
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_linux_s390x.go
@@ -0,0 +1,148 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv4
+
+const (
+	sysIP_TOS             = 0x1
+	sysIP_TTL             = 0x2
+	sysIP_HDRINCL         = 0x3
+	sysIP_OPTIONS         = 0x4
+	sysIP_ROUTER_ALERT    = 0x5
+	sysIP_RECVOPTS        = 0x6
+	sysIP_RETOPTS         = 0x7
+	sysIP_PKTINFO         = 0x8
+	sysIP_PKTOPTIONS      = 0x9
+	sysIP_MTU_DISCOVER    = 0xa
+	sysIP_RECVERR         = 0xb
+	sysIP_RECVTTL         = 0xc
+	sysIP_RECVTOS         = 0xd
+	sysIP_MTU             = 0xe
+	sysIP_FREEBIND        = 0xf
+	sysIP_TRANSPARENT     = 0x13
+	sysIP_RECVRETOPTS     = 0x7
+	sysIP_ORIGDSTADDR     = 0x14
+	sysIP_RECVORIGDSTADDR = 0x14
+	sysIP_MINTTL          = 0x15
+	sysIP_NODEFRAG        = 0x16
+	sysIP_UNICAST_IF      = 0x32
+
+	sysIP_MULTICAST_IF           = 0x20
+	sysIP_MULTICAST_TTL          = 0x21
+	sysIP_MULTICAST_LOOP         = 0x22
+	sysIP_ADD_MEMBERSHIP         = 0x23
+	sysIP_DROP_MEMBERSHIP        = 0x24
+	sysIP_UNBLOCK_SOURCE         = 0x25
+	sysIP_BLOCK_SOURCE           = 0x26
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x27
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x28
+	sysIP_MSFILTER               = 0x29
+	sysMCAST_JOIN_GROUP          = 0x2a
+	sysMCAST_LEAVE_GROUP         = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP   = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP  = 0x2f
+	sysMCAST_BLOCK_SOURCE        = 0x2b
+	sysMCAST_UNBLOCK_SOURCE      = 0x2c
+	sysMCAST_MSFILTER            = 0x30
+	sysIP_MULTICAST_ALL          = 0x31
+
+	sysICMP_FILTER = 0x1
+
+	sysSO_EE_ORIGIN_NONE         = 0x0
+	sysSO_EE_ORIGIN_LOCAL        = 0x1
+	sysSO_EE_ORIGIN_ICMP         = 0x2
+	sysSO_EE_ORIGIN_ICMP6        = 0x3
+	sysSO_EE_ORIGIN_TXSTATUS     = 0x4
+	sysSO_EE_ORIGIN_TIMESTAMPING = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet          = 0x10
+	sizeofInetPktinfo           = 0xc
+	sizeofSockExtendedErr       = 0x10
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqn        = 0xc
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPFilter = 0x4
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	X__pad [8]uint8
+}
+
+type inetPktinfo struct {
+	Ifindex  int32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type sockExtendedErr struct {
+	Errno  uint32
+	Origin uint8
+	Type   uint8
+	Code   uint8
+	Pad    uint8
+	Info   uint32
+	Data   uint32
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqn struct {
+	Multiaddr [4]byte /* in_addr */
+	Address   [4]byte /* in_addr */
+	Ifindex   int32
+}
+
+type ipMreqSource struct {
+	Multiaddr  uint32
+	Interface  uint32
+	Sourceaddr uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpFilter struct {
+	Data uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_netbsd.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_netbsd.go
new file mode 100644
index 0000000..fd3624d
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_netbsd.go
@@ -0,0 +1,30 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_netbsd.go
+
+package ipv4
+
+const (
+	sysIP_OPTIONS     = 0x1
+	sysIP_HDRINCL     = 0x2
+	sysIP_TOS         = 0x3
+	sysIP_TTL         = 0x4
+	sysIP_RECVOPTS    = 0x5
+	sysIP_RECVRETOPTS = 0x6
+	sysIP_RECVDSTADDR = 0x7
+	sysIP_RETOPTS     = 0x8
+	sysIP_RECVIF      = 0x14
+	sysIP_RECVTTL     = 0x17
+
+	sysIP_MULTICAST_IF    = 0x9
+	sysIP_MULTICAST_TTL   = 0xa
+	sysIP_MULTICAST_LOOP  = 0xb
+	sysIP_ADD_MEMBERSHIP  = 0xc
+	sysIP_DROP_MEMBERSHIP = 0xd
+
+	sizeofIPMreq = 0x8
+)
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_openbsd.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_openbsd.go
new file mode 100644
index 0000000..12f36be
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_openbsd.go
@@ -0,0 +1,30 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_openbsd.go
+
+package ipv4
+
+const (
+	sysIP_OPTIONS     = 0x1
+	sysIP_HDRINCL     = 0x2
+	sysIP_TOS         = 0x3
+	sysIP_TTL         = 0x4
+	sysIP_RECVOPTS    = 0x5
+	sysIP_RECVRETOPTS = 0x6
+	sysIP_RECVDSTADDR = 0x7
+	sysIP_RETOPTS     = 0x8
+	sysIP_RECVIF      = 0x1e
+	sysIP_RECVTTL     = 0x1f
+
+	sysIP_MULTICAST_IF    = 0x9
+	sysIP_MULTICAST_TTL   = 0xa
+	sysIP_MULTICAST_LOOP  = 0xb
+	sysIP_ADD_MEMBERSHIP  = 0xc
+	sysIP_DROP_MEMBERSHIP = 0xd
+
+	sizeofIPMreq = 0x8
+)
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv4/zsys_solaris.go b/harvester/vendor/golang.org/x/net/ipv4/zsys_solaris.go
new file mode 100644
index 0000000..0a3875c
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv4/zsys_solaris.go
@@ -0,0 +1,100 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_solaris.go
+
+package ipv4
+
+const (
+	sysIP_OPTIONS     = 0x1
+	sysIP_HDRINCL     = 0x2
+	sysIP_TOS         = 0x3
+	sysIP_TTL         = 0x4
+	sysIP_RECVOPTS    = 0x5
+	sysIP_RECVRETOPTS = 0x6
+	sysIP_RECVDSTADDR = 0x7
+	sysIP_RETOPTS     = 0x8
+	sysIP_RECVIF      = 0x9
+	sysIP_RECVSLLA    = 0xa
+	sysIP_RECVTTL     = 0xb
+
+	sysIP_MULTICAST_IF           = 0x10
+	sysIP_MULTICAST_TTL          = 0x11
+	sysIP_MULTICAST_LOOP         = 0x12
+	sysIP_ADD_MEMBERSHIP         = 0x13
+	sysIP_DROP_MEMBERSHIP        = 0x14
+	sysIP_BLOCK_SOURCE           = 0x15
+	sysIP_UNBLOCK_SOURCE         = 0x16
+	sysIP_ADD_SOURCE_MEMBERSHIP  = 0x17
+	sysIP_DROP_SOURCE_MEMBERSHIP = 0x18
+	sysIP_NEXTHOP                = 0x19
+
+	sysIP_PKTINFO     = 0x1a
+	sysIP_RECVPKTINFO = 0x1a
+	sysIP_DONTFRAG    = 0x1b
+
+	sysIP_BOUND_IF      = 0x41
+	sysIP_UNSPEC_SRC    = 0x42
+	sysIP_BROADCAST_TTL = 0x43
+	sysIP_DHCPINIT_IF   = 0x45
+
+	sysIP_REUSEADDR = 0x104
+	sysIP_DONTROUTE = 0x105
+	sysIP_BROADCAST = 0x106
+
+	sysMCAST_JOIN_GROUP         = 0x29
+	sysMCAST_LEAVE_GROUP        = 0x2a
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2d
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2e
+
+	sizeofSockaddrStorage = 0x100
+	sizeofSockaddrInet    = 0x10
+	sizeofInetPktinfo     = 0xc
+
+	sizeofIPMreq         = 0x8
+	sizeofIPMreqSource   = 0xc
+	sizeofGroupReq       = 0x104
+	sizeofGroupSourceReq = 0x204
+)
+
+type sockaddrStorage struct {
+	Family     uint16
+	X_ss_pad1  [6]int8
+	X_ss_align float64
+	X_ss_pad2  [240]int8
+}
+
+type sockaddrInet struct {
+	Family uint16
+	Port   uint16
+	Addr   [4]byte /* in_addr */
+	Zero   [8]int8
+}
+
+type inetPktinfo struct {
+	Ifindex  uint32
+	Spec_dst [4]byte /* in_addr */
+	Addr     [4]byte /* in_addr */
+}
+
+type ipMreq struct {
+	Multiaddr [4]byte /* in_addr */
+	Interface [4]byte /* in_addr */
+}
+
+type ipMreqSource struct {
+	Multiaddr  [4]byte /* in_addr */
+	Sourceaddr [4]byte /* in_addr */
+	Interface  [4]byte /* in_addr */
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [256]byte
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [256]byte
+	Pad_cgo_1 [256]byte
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/bpfopt_linux.go b/harvester/vendor/golang.org/x/net/ipv6/bpfopt_linux.go
new file mode 100644
index 0000000..daf7ea8
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/bpfopt_linux.go
@@ -0,0 +1,28 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"os"
+	"unsafe"
+
+	"golang.org/x/net/bpf"
+	"golang.org/x/net/internal/netreflect"
+)
+
+// SetBPF attaches a BPF program to the connection.
+//
+// Only supported on Linux.
+func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error {
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	prog := sockFProg{
+		Len:    uint16(len(filter)),
+		Filter: (*sockFilter)(unsafe.Pointer(&filter[0])),
+	}
+	return os.NewSyscallError("setsockopt", setsockopt(s, sysSOL_SOCKET, sysSO_ATTACH_FILTER, unsafe.Pointer(&prog), uint32(unsafe.Sizeof(prog))))
+}
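
For context, a minimal sketch of how this Linux-only SetBPF hook is exercised from user code, using the public golang.org/x/net/bpf and ipv6 APIs that this change vendors (the listen address and the trivial accept-all filter are illustrative, not part of this change):

package main

import (
	"log"
	"net"

	"golang.org/x/net/bpf"
	"golang.org/x/net/ipv6"
)

func main() {
	// Listen on an arbitrary local UDP/IPv6 port (illustrative address).
	c, err := net.ListenPacket("udp6", "[::1]:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Assemble a trivial classic-BPF program that accepts every packet.
	prog, err := bpf.Assemble([]bpf.Instruction{
		bpf.RetConstant{Val: 0xffffffff},
	})
	if err != nil {
		log.Fatal(err)
	}

	// SetBPF is only supported on Linux; the stub below returns an
	// error on every other platform.
	p := ipv6.NewPacketConn(c)
	if err := p.SetBPF(prog); err != nil {
		log.Fatal(err)
	}
}
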
diff --git a/harvester/vendor/golang.org/x/net/ipv6/bpfopt_stub.go b/harvester/vendor/golang.org/x/net/ipv6/bpfopt_stub.go
new file mode 100644
index 0000000..2e4de5f
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/bpfopt_stub.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !linux
+
+package ipv6
+
+import "golang.org/x/net/bpf"
+
+// SetBPF attaches a BPF program to the connection.
+//
+// Only supported on Linux.
+func (c *dgramOpt) SetBPF(filter []bpf.RawInstruction) error {
+	return errOpNoSupport
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/control.go b/harvester/vendor/golang.org/x/net/ipv6/control.go
new file mode 100644
index 0000000..56303f0
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/control.go
@@ -0,0 +1,85 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"fmt"
+	"net"
+	"sync"
+)
+
+// Note that RFC 3542 obsoletes RFC 2292, but OS X Snow Leopard and
+// earlier still support only RFC 2292. Please be aware that almost
+// all protocol implementations prohibit mixing RFC 2292 and RFC 3542
+// options for practical reasons.
+
+type rawOpt struct {
+	sync.RWMutex
+	cflags ControlFlags
+}
+
+func (c *rawOpt) set(f ControlFlags)        { c.cflags |= f }
+func (c *rawOpt) clear(f ControlFlags)      { c.cflags &^= f }
+func (c *rawOpt) isset(f ControlFlags) bool { return c.cflags&f != 0 }
+
+// A ControlFlags represents per-packet IP-level socket option
+// control flags.
+type ControlFlags uint
+
+const (
+	FlagTrafficClass ControlFlags = 1 << iota // pass the traffic class on the received packet
+	FlagHopLimit                              // pass the hop limit on the received packet
+	FlagSrc                                   // pass the source address on the received packet
+	FlagDst                                   // pass the destination address on the received packet
+	FlagInterface                             // pass the interface index on the received packet
+	FlagPathMTU                               // pass the path MTU on the received packet path
+)
+
+const flagPacketInfo = FlagDst | FlagInterface
+
+// A ControlMessage represents per-packet IP-level socket
+// options.
+type ControlMessage struct {
+	// Receiving socket options: SetControlMessage allows
+	// receiving the options from the protocol stack via the
+	// ReadFrom method of PacketConn.
+	//
+	// Specifying socket options: passing a ControlMessage to
+	// the WriteTo method of PacketConn sends the options to
+	// the protocol stack.
+	//
+	TrafficClass int    // traffic class, must be 1 <= value <= 255 when specifying
+	HopLimit     int    // hop limit, must be 1 <= value <= 255 when specifying
+	Src          net.IP // source address, specifying only
+	Dst          net.IP // destination address, receiving only
+	IfIndex      int    // interface index, must be 1 <= value when specifying
+	NextHop      net.IP // next hop address, specifying only
+	MTU          int    // path MTU, receiving only
+}
+
+func (cm *ControlMessage) String() string {
+	if cm == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("tclass=%#x hoplim=%d src=%v dst=%v ifindex=%d nexthop=%v mtu=%d", cm.TrafficClass, cm.HopLimit, cm.Src, cm.Dst, cm.IfIndex, cm.NextHop, cm.MTU)
+}
+
+// Ancillary data socket options
+const (
+	ctlTrafficClass = iota // header field
+	ctlHopLimit            // header field
+	ctlPacketInfo          // inbound or outbound packet path
+	ctlNextHop             // nexthop
+	ctlPathMTU             // path mtu
+	ctlMax
+)
+
+// A ctlOpt represents a binding for an ancillary data socket option.
+type ctlOpt struct {
+	name    int // option name, must be greater than or equal to 1
+	length  int // option length
+	marshal func([]byte, *ControlMessage) []byte
+	parse   func(*ControlMessage, []byte)
+}
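
For context, the ControlFlags/ControlMessage machinery above is driven through the public ipv6.PacketConn API that this change vendors. A minimal sketch of the receive path (the listen address is illustrative; ReadFrom blocks until a packet arrives):

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("udp6", "[::1]:0") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv6.NewPacketConn(c)
	// Ask the stack to deliver hop limit and interface index per packet.
	if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagInterface, true); err != nil {
		log.Fatal(err)
	}

	b := make([]byte, 1500)
	n, cm, src, err := p.ReadFrom(b)
	if err != nil {
		log.Fatal(err)
	}
	// cm is a *ipv6.ControlMessage carrying the requested fields.
	log.Printf("%d bytes from %v: %v", n, src, cm)
}
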
diff --git a/harvester/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go b/harvester/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go
new file mode 100644
index 0000000..d1693af
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/control_rfc2292_unix.go
@@ -0,0 +1,55 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin
+
+package ipv6
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+)
+
+func marshal2292HopLimit(b []byte, cm *ControlMessage) []byte {
+	m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	m.Level = iana.ProtocolIPv6
+	m.Type = sysIPV6_2292HOPLIMIT
+	m.SetLen(syscall.CmsgLen(4))
+	if cm != nil {
+		data := b[syscall.CmsgLen(0):]
+		nativeEndian.PutUint32(data[:4], uint32(cm.HopLimit))
+	}
+	return b[syscall.CmsgSpace(4):]
+}
+
+func marshal2292PacketInfo(b []byte, cm *ControlMessage) []byte {
+	m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	m.Level = iana.ProtocolIPv6
+	m.Type = sysIPV6_2292PKTINFO
+	m.SetLen(syscall.CmsgLen(sizeofInet6Pktinfo))
+	if cm != nil {
+		pi := (*inet6Pktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)]))
+		if ip := cm.Src.To16(); ip != nil && ip.To4() == nil {
+			copy(pi.Addr[:], ip)
+		}
+		if cm.IfIndex > 0 {
+			pi.setIfindex(cm.IfIndex)
+		}
+	}
+	return b[syscall.CmsgSpace(sizeofInet6Pktinfo):]
+}
+
+func marshal2292NextHop(b []byte, cm *ControlMessage) []byte {
+	m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	m.Level = iana.ProtocolIPv6
+	m.Type = sysIPV6_2292NEXTHOP
+	m.SetLen(syscall.CmsgLen(sizeofSockaddrInet6))
+	if cm != nil {
+		sa := (*sockaddrInet6)(unsafe.Pointer(&b[syscall.CmsgLen(0)]))
+		sa.setSockaddr(cm.NextHop, cm.IfIndex)
+	}
+	return b[syscall.CmsgSpace(sizeofSockaddrInet6):]
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go b/harvester/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go
new file mode 100644
index 0000000..2800df4
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/control_rfc3542_unix.go
@@ -0,0 +1,99 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package ipv6
+
+import (
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+)
+
+func marshalTrafficClass(b []byte, cm *ControlMessage) []byte {
+	m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	m.Level = iana.ProtocolIPv6
+	m.Type = sysIPV6_TCLASS
+	m.SetLen(syscall.CmsgLen(4))
+	if cm != nil {
+		data := b[syscall.CmsgLen(0):]
+		nativeEndian.PutUint32(data[:4], uint32(cm.TrafficClass))
+	}
+	return b[syscall.CmsgSpace(4):]
+}
+
+func parseTrafficClass(cm *ControlMessage, b []byte) {
+	cm.TrafficClass = int(nativeEndian.Uint32(b[:4]))
+}
+
+func marshalHopLimit(b []byte, cm *ControlMessage) []byte {
+	m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	m.Level = iana.ProtocolIPv6
+	m.Type = sysIPV6_HOPLIMIT
+	m.SetLen(syscall.CmsgLen(4))
+	if cm != nil {
+		data := b[syscall.CmsgLen(0):]
+		nativeEndian.PutUint32(data[:4], uint32(cm.HopLimit))
+	}
+	return b[syscall.CmsgSpace(4):]
+}
+
+func parseHopLimit(cm *ControlMessage, b []byte) {
+	cm.HopLimit = int(nativeEndian.Uint32(b[:4]))
+}
+
+func marshalPacketInfo(b []byte, cm *ControlMessage) []byte {
+	m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	m.Level = iana.ProtocolIPv6
+	m.Type = sysIPV6_PKTINFO
+	m.SetLen(syscall.CmsgLen(sizeofInet6Pktinfo))
+	if cm != nil {
+		pi := (*inet6Pktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)]))
+		if ip := cm.Src.To16(); ip != nil && ip.To4() == nil {
+			copy(pi.Addr[:], ip)
+		}
+		if cm.IfIndex > 0 {
+			pi.setIfindex(cm.IfIndex)
+		}
+	}
+	return b[syscall.CmsgSpace(sizeofInet6Pktinfo):]
+}
+
+func parsePacketInfo(cm *ControlMessage, b []byte) {
+	pi := (*inet6Pktinfo)(unsafe.Pointer(&b[0]))
+	cm.Dst = pi.Addr[:]
+	cm.IfIndex = int(pi.Ifindex)
+}
+
+func marshalNextHop(b []byte, cm *ControlMessage) []byte {
+	m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	m.Level = iana.ProtocolIPv6
+	m.Type = sysIPV6_NEXTHOP
+	m.SetLen(syscall.CmsgLen(sizeofSockaddrInet6))
+	if cm != nil {
+		sa := (*sockaddrInet6)(unsafe.Pointer(&b[syscall.CmsgLen(0)]))
+		sa.setSockaddr(cm.NextHop, cm.IfIndex)
+	}
+	return b[syscall.CmsgSpace(sizeofSockaddrInet6):]
+}
+
+func parseNextHop(cm *ControlMessage, b []byte) {
+}
+
+func marshalPathMTU(b []byte, cm *ControlMessage) []byte {
+	m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
+	m.Level = iana.ProtocolIPv6
+	m.Type = sysIPV6_PATHMTU
+	m.SetLen(syscall.CmsgLen(sizeofIPv6Mtuinfo))
+	return b[syscall.CmsgSpace(sizeofIPv6Mtuinfo):]
+}
+
+func parsePathMTU(cm *ControlMessage, b []byte) {
+	mi := (*ipv6Mtuinfo)(unsafe.Pointer(&b[0]))
+	cm.Dst = mi.Addr.Addr[:]
+	cm.IfIndex = int(mi.Addr.Scope_id)
+	cm.MTU = int(mi.Mtu)
+}
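
The marshal helpers above walk the out-of-band buffer one control message at a time, and the pattern hinges on the difference between syscall.CmsgLen (header plus payload, unpadded) and syscall.CmsgSpace (the same, rounded up to the platform's cmsg alignment, i.e. the stride to the next message). A small Unix-only sketch of that arithmetic, with the 4-byte payload mirroring the hop-limit and traffic-class cases:

package main

import (
	"fmt"
	"syscall"
)

func main() {
	// Length recorded in the cmsg header: header plus 4 payload bytes.
	fmt.Println(syscall.CmsgLen(4))
	// Space consumed in the buffer: the same length rounded up to the
	// cmsg alignment, used when advancing to the next control message.
	fmt.Println(syscall.CmsgSpace(4))
}
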
diff --git a/harvester/vendor/golang.org/x/net/ipv6/control_stub.go b/harvester/vendor/golang.org/x/net/ipv6/control_stub.go
new file mode 100644
index 0000000..24b40a8
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/control_stub.go
@@ -0,0 +1,23 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package ipv6
+
+func setControlMessage(s uintptr, opt *rawOpt, cf ControlFlags, on bool) error {
+	return errOpNoSupport
+}
+
+func newControlMessage(opt *rawOpt) (oob []byte) {
+	return nil
+}
+
+func parseControlMessage(b []byte) (*ControlMessage, error) {
+	return nil, errOpNoSupport
+}
+
+func marshalControlMessage(cm *ControlMessage) (oob []byte) {
+	return nil
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/control_unix.go b/harvester/vendor/golang.org/x/net/ipv6/control_unix.go
new file mode 100644
index 0000000..7bd4210
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/control_unix.go
@@ -0,0 +1,153 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package ipv6
+
+import (
+	"os"
+	"syscall"
+
+	"golang.org/x/net/internal/iana"
+)
+
+func setControlMessage(s uintptr, opt *rawOpt, cf ControlFlags, on bool) error {
+	opt.Lock()
+	defer opt.Unlock()
+	if cf&FlagTrafficClass != 0 && sockOpts[ssoReceiveTrafficClass].name > 0 {
+		if err := setInt(s, &sockOpts[ssoReceiveTrafficClass], boolint(on)); err != nil {
+			return err
+		}
+		if on {
+			opt.set(FlagTrafficClass)
+		} else {
+			opt.clear(FlagTrafficClass)
+		}
+	}
+	if cf&FlagHopLimit != 0 && sockOpts[ssoReceiveHopLimit].name > 0 {
+		if err := setInt(s, &sockOpts[ssoReceiveHopLimit], boolint(on)); err != nil {
+			return err
+		}
+		if on {
+			opt.set(FlagHopLimit)
+		} else {
+			opt.clear(FlagHopLimit)
+		}
+	}
+	if cf&flagPacketInfo != 0 && sockOpts[ssoReceivePacketInfo].name > 0 {
+		if err := setInt(s, &sockOpts[ssoReceivePacketInfo], boolint(on)); err != nil {
+			return err
+		}
+		if on {
+			opt.set(cf & flagPacketInfo)
+		} else {
+			opt.clear(cf & flagPacketInfo)
+		}
+	}
+	if cf&FlagPathMTU != 0 && sockOpts[ssoReceivePathMTU].name > 0 {
+		if err := setInt(s, &sockOpts[ssoReceivePathMTU], boolint(on)); err != nil {
+			return err
+		}
+		if on {
+			opt.set(FlagPathMTU)
+		} else {
+			opt.clear(FlagPathMTU)
+		}
+	}
+	return nil
+}
+
+func newControlMessage(opt *rawOpt) (oob []byte) {
+	opt.RLock()
+	var l int
+	if opt.isset(FlagTrafficClass) && ctlOpts[ctlTrafficClass].name > 0 {
+		l += syscall.CmsgSpace(ctlOpts[ctlTrafficClass].length)
+	}
+	if opt.isset(FlagHopLimit) && ctlOpts[ctlHopLimit].name > 0 {
+		l += syscall.CmsgSpace(ctlOpts[ctlHopLimit].length)
+	}
+	if opt.isset(flagPacketInfo) && ctlOpts[ctlPacketInfo].name > 0 {
+		l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length)
+	}
+	if opt.isset(FlagPathMTU) && ctlOpts[ctlPathMTU].name > 0 {
+		l += syscall.CmsgSpace(ctlOpts[ctlPathMTU].length)
+	}
+	if l > 0 {
+		oob = make([]byte, l)
+	}
+	opt.RUnlock()
+	return
+}
+
+func parseControlMessage(b []byte) (*ControlMessage, error) {
+	if len(b) == 0 {
+		return nil, nil
+	}
+	cmsgs, err := syscall.ParseSocketControlMessage(b)
+	if err != nil {
+		return nil, os.NewSyscallError("parse socket control message", err)
+	}
+	cm := &ControlMessage{}
+	for _, m := range cmsgs {
+		if m.Header.Level != iana.ProtocolIPv6 {
+			continue
+		}
+		switch int(m.Header.Type) {
+		case ctlOpts[ctlTrafficClass].name:
+			ctlOpts[ctlTrafficClass].parse(cm, m.Data[:])
+		case ctlOpts[ctlHopLimit].name:
+			ctlOpts[ctlHopLimit].parse(cm, m.Data[:])
+		case ctlOpts[ctlPacketInfo].name:
+			ctlOpts[ctlPacketInfo].parse(cm, m.Data[:])
+		case ctlOpts[ctlPathMTU].name:
+			ctlOpts[ctlPathMTU].parse(cm, m.Data[:])
+		}
+	}
+	return cm, nil
+}
+
+func marshalControlMessage(cm *ControlMessage) (oob []byte) {
+	if cm == nil {
+		return
+	}
+	var l int
+	tclass := false
+	if ctlOpts[ctlTrafficClass].name > 0 && cm.TrafficClass > 0 {
+		tclass = true
+		l += syscall.CmsgSpace(ctlOpts[ctlTrafficClass].length)
+	}
+	hoplimit := false
+	if ctlOpts[ctlHopLimit].name > 0 && cm.HopLimit > 0 {
+		hoplimit = true
+		l += syscall.CmsgSpace(ctlOpts[ctlHopLimit].length)
+	}
+	pktinfo := false
+	if ctlOpts[ctlPacketInfo].name > 0 && (cm.Src.To16() != nil && cm.Src.To4() == nil || cm.IfIndex > 0) {
+		pktinfo = true
+		l += syscall.CmsgSpace(ctlOpts[ctlPacketInfo].length)
+	}
+	nexthop := false
+	if ctlOpts[ctlNextHop].name > 0 && cm.NextHop.To16() != nil && cm.NextHop.To4() == nil {
+		nexthop = true
+		l += syscall.CmsgSpace(ctlOpts[ctlNextHop].length)
+	}
+	if l > 0 {
+		oob = make([]byte, l)
+		b := oob
+		if tclass {
+			b = ctlOpts[ctlTrafficClass].marshal(b, cm)
+		}
+		if hoplimit {
+			b = ctlOpts[ctlHopLimit].marshal(b, cm)
+		}
+		if pktinfo {
+			b = ctlOpts[ctlPacketInfo].marshal(b, cm)
+		}
+		if nexthop {
+			b = ctlOpts[ctlNextHop].marshal(b, cm)
+		}
+	}
+	return
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/control_windows.go b/harvester/vendor/golang.org/x/net/ipv6/control_windows.go
new file mode 100644
index 0000000..feef6ab
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/control_windows.go
@@ -0,0 +1,27 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import "syscall"
+
+func setControlMessage(s uintptr, opt *rawOpt, cf ControlFlags, on bool) error {
+	// TODO(mikio): implement this
+	return syscall.EWINDOWS
+}
+
+func newControlMessage(opt *rawOpt) (oob []byte) {
+	// TODO(mikio): implement this
+	return nil
+}
+
+func parseControlMessage(b []byte) (*ControlMessage, error) {
+	// TODO(mikio): implement this
+	return nil, syscall.EWINDOWS
+}
+
+func marshalControlMessage(cm *ControlMessage) (oob []byte) {
+	// TODO(mikio): implement this
+	return nil
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/defs_darwin.go b/harvester/vendor/golang.org/x/net/ipv6/defs_darwin.go
new file mode 100644
index 0000000..55ddc11
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/defs_darwin.go
@@ -0,0 +1,112 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#define __APPLE_USE_RFC_3542
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+	sysIPV6_UNICAST_HOPS   = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF   = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+	sysIPV6_JOIN_GROUP     = C.IPV6_JOIN_GROUP
+	sysIPV6_LEAVE_GROUP    = C.IPV6_LEAVE_GROUP
+
+	sysIPV6_PORTRANGE    = C.IPV6_PORTRANGE
+	sysICMP6_FILTER      = C.ICMP6_FILTER
+	sysIPV6_2292PKTINFO  = C.IPV6_2292PKTINFO
+	sysIPV6_2292HOPLIMIT = C.IPV6_2292HOPLIMIT
+	sysIPV6_2292NEXTHOP  = C.IPV6_2292NEXTHOP
+	sysIPV6_2292HOPOPTS  = C.IPV6_2292HOPOPTS
+	sysIPV6_2292DSTOPTS  = C.IPV6_2292DSTOPTS
+	sysIPV6_2292RTHDR    = C.IPV6_2292RTHDR
+
+	sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS
+
+	sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+	sysIPV6_V6ONLY   = C.IPV6_V6ONLY
+
+	sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+
+	sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+	sysIPV6_TCLASS     = C.IPV6_TCLASS
+
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+	sysIPV6_RECVPKTINFO = C.IPV6_RECVPKTINFO
+
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_RECVRTHDR    = C.IPV6_RECVRTHDR
+	sysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS
+	sysIPV6_RECVDSTOPTS  = C.IPV6_RECVDSTOPTS
+
+	sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+	sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+
+	sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+	sysIPV6_PKTINFO  = C.IPV6_PKTINFO
+	sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+	sysIPV6_NEXTHOP  = C.IPV6_NEXTHOP
+	sysIPV6_HOPOPTS  = C.IPV6_HOPOPTS
+	sysIPV6_DSTOPTS  = C.IPV6_DSTOPTS
+	sysIPV6_RTHDR    = C.IPV6_RTHDR
+
+	sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
+
+	sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+	sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
+
+	sysIPV6_MSFILTER            = C.IPV6_MSFILTER
+	sysMCAST_JOIN_GROUP         = C.MCAST_JOIN_GROUP
+	sysMCAST_LEAVE_GROUP        = C.MCAST_LEAVE_GROUP
+	sysMCAST_JOIN_SOURCE_GROUP  = C.MCAST_JOIN_SOURCE_GROUP
+	sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+	sysMCAST_BLOCK_SOURCE       = C.MCAST_BLOCK_SOURCE
+	sysMCAST_UNBLOCK_SOURCE     = C.MCAST_UNBLOCK_SOURCE
+
+	sysIPV6_BOUND_IF = C.IPV6_BOUND_IF
+
+	sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+	sysIPV6_PORTRANGE_HIGH    = C.IPV6_PORTRANGE_HIGH
+	sysIPV6_PORTRANGE_LOW     = C.IPV6_PORTRANGE_LOW
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo    = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo     = C.sizeof_struct_ip6_mtuinfo
+
+	sizeofIPv6Mreq       = C.sizeof_struct_ipv6_mreq
+	sizeofGroupReq       = C.sizeof_struct_group_req
+	sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sockaddrStorage C.struct_sockaddr_storage
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type icmpv6Filter C.struct_icmp6_filter
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
diff --git a/harvester/vendor/golang.org/x/net/ipv6/defs_dragonfly.go b/harvester/vendor/golang.org/x/net/ipv6/defs_dragonfly.go
new file mode 100644
index 0000000..a4c383a
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/defs_dragonfly.go
@@ -0,0 +1,84 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/param.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+	sysIPV6_UNICAST_HOPS   = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF   = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+	sysIPV6_JOIN_GROUP     = C.IPV6_JOIN_GROUP
+	sysIPV6_LEAVE_GROUP    = C.IPV6_LEAVE_GROUP
+	sysIPV6_PORTRANGE      = C.IPV6_PORTRANGE
+	sysICMP6_FILTER        = C.ICMP6_FILTER
+
+	sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+	sysIPV6_V6ONLY   = C.IPV6_V6ONLY
+
+	sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+	sysIPV6_RECVPKTINFO  = C.IPV6_RECVPKTINFO
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_RECVRTHDR    = C.IPV6_RECVRTHDR
+	sysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS
+	sysIPV6_RECVDSTOPTS  = C.IPV6_RECVDSTOPTS
+
+	sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+	sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+
+	sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+	sysIPV6_PKTINFO  = C.IPV6_PKTINFO
+	sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+	sysIPV6_NEXTHOP  = C.IPV6_NEXTHOP
+	sysIPV6_HOPOPTS  = C.IPV6_HOPOPTS
+	sysIPV6_DSTOPTS  = C.IPV6_DSTOPTS
+	sysIPV6_RTHDR    = C.IPV6_RTHDR
+
+	sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+
+	sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
+
+	sysIPV6_TCLASS   = C.IPV6_TCLASS
+	sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+	sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
+
+	sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+	sysIPV6_PORTRANGE_HIGH    = C.IPV6_PORTRANGE_HIGH
+	sysIPV6_PORTRANGE_LOW     = C.IPV6_PORTRANGE_LOW
+
+	sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo  = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo   = C.sizeof_struct_ip6_mtuinfo
+
+	sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type icmpv6Filter C.struct_icmp6_filter
diff --git a/harvester/vendor/golang.org/x/net/ipv6/defs_freebsd.go b/harvester/vendor/golang.org/x/net/ipv6/defs_freebsd.go
new file mode 100644
index 0000000..53e6253
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/defs_freebsd.go
@@ -0,0 +1,105 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/param.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+	sysIPV6_UNICAST_HOPS   = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF   = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+	sysIPV6_JOIN_GROUP     = C.IPV6_JOIN_GROUP
+	sysIPV6_LEAVE_GROUP    = C.IPV6_LEAVE_GROUP
+	sysIPV6_PORTRANGE      = C.IPV6_PORTRANGE
+	sysICMP6_FILTER        = C.ICMP6_FILTER
+
+	sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+	sysIPV6_V6ONLY   = C.IPV6_V6ONLY
+
+	sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+	sysIPV6_RECVPKTINFO  = C.IPV6_RECVPKTINFO
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_RECVRTHDR    = C.IPV6_RECVRTHDR
+	sysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS
+	sysIPV6_RECVDSTOPTS  = C.IPV6_RECVDSTOPTS
+
+	sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+	sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+
+	sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+	sysIPV6_PKTINFO  = C.IPV6_PKTINFO
+	sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+	sysIPV6_NEXTHOP  = C.IPV6_NEXTHOP
+	sysIPV6_HOPOPTS  = C.IPV6_HOPOPTS
+	sysIPV6_DSTOPTS  = C.IPV6_DSTOPTS
+	sysIPV6_RTHDR    = C.IPV6_RTHDR
+
+	sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+
+	sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
+
+	sysIPV6_TCLASS   = C.IPV6_TCLASS
+	sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+	sysIPV6_PREFER_TEMPADDR = C.IPV6_PREFER_TEMPADDR
+
+	sysIPV6_BINDANY = C.IPV6_BINDANY
+
+	sysIPV6_MSFILTER = C.IPV6_MSFILTER
+
+	sysMCAST_JOIN_GROUP         = C.MCAST_JOIN_GROUP
+	sysMCAST_LEAVE_GROUP        = C.MCAST_LEAVE_GROUP
+	sysMCAST_JOIN_SOURCE_GROUP  = C.MCAST_JOIN_SOURCE_GROUP
+	sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+	sysMCAST_BLOCK_SOURCE       = C.MCAST_BLOCK_SOURCE
+	sysMCAST_UNBLOCK_SOURCE     = C.MCAST_UNBLOCK_SOURCE
+
+	sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+	sysIPV6_PORTRANGE_HIGH    = C.IPV6_PORTRANGE_HIGH
+	sysIPV6_PORTRANGE_LOW     = C.IPV6_PORTRANGE_LOW
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo    = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo     = C.sizeof_struct_ip6_mtuinfo
+
+	sizeofIPv6Mreq       = C.sizeof_struct_ipv6_mreq
+	sizeofGroupReq       = C.sizeof_struct_group_req
+	sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sockaddrStorage C.struct_sockaddr_storage
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
+
+type icmpv6Filter C.struct_icmp6_filter
diff --git a/harvester/vendor/golang.org/x/net/ipv6/defs_linux.go b/harvester/vendor/golang.org/x/net/ipv6/defs_linux.go
new file mode 100644
index 0000000..8a967fd
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/defs_linux.go
@@ -0,0 +1,145 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/ipv6.h>
+#include <linux/icmpv6.h>
+#include <linux/filter.h>
+#include <sys/socket.h>
+*/
+import "C"
+
+const (
+	sysIPV6_ADDRFORM       = C.IPV6_ADDRFORM
+	sysIPV6_2292PKTINFO    = C.IPV6_2292PKTINFO
+	sysIPV6_2292HOPOPTS    = C.IPV6_2292HOPOPTS
+	sysIPV6_2292DSTOPTS    = C.IPV6_2292DSTOPTS
+	sysIPV6_2292RTHDR      = C.IPV6_2292RTHDR
+	sysIPV6_2292PKTOPTIONS = C.IPV6_2292PKTOPTIONS
+	sysIPV6_CHECKSUM       = C.IPV6_CHECKSUM
+	sysIPV6_2292HOPLIMIT   = C.IPV6_2292HOPLIMIT
+	sysIPV6_NEXTHOP        = C.IPV6_NEXTHOP
+	sysIPV6_FLOWINFO       = C.IPV6_FLOWINFO
+
+	sysIPV6_UNICAST_HOPS        = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF        = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS      = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP      = C.IPV6_MULTICAST_LOOP
+	sysIPV6_ADD_MEMBERSHIP      = C.IPV6_ADD_MEMBERSHIP
+	sysIPV6_DROP_MEMBERSHIP     = C.IPV6_DROP_MEMBERSHIP
+	sysMCAST_JOIN_GROUP         = C.MCAST_JOIN_GROUP
+	sysMCAST_LEAVE_GROUP        = C.MCAST_LEAVE_GROUP
+	sysMCAST_JOIN_SOURCE_GROUP  = C.MCAST_JOIN_SOURCE_GROUP
+	sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+	sysMCAST_BLOCK_SOURCE       = C.MCAST_BLOCK_SOURCE
+	sysMCAST_UNBLOCK_SOURCE     = C.MCAST_UNBLOCK_SOURCE
+	sysMCAST_MSFILTER           = C.MCAST_MSFILTER
+	sysIPV6_ROUTER_ALERT        = C.IPV6_ROUTER_ALERT
+	sysIPV6_MTU_DISCOVER        = C.IPV6_MTU_DISCOVER
+	sysIPV6_MTU                 = C.IPV6_MTU
+	sysIPV6_RECVERR             = C.IPV6_RECVERR
+	sysIPV6_V6ONLY              = C.IPV6_V6ONLY
+	sysIPV6_JOIN_ANYCAST        = C.IPV6_JOIN_ANYCAST
+	sysIPV6_LEAVE_ANYCAST       = C.IPV6_LEAVE_ANYCAST
+
+	//sysIPV6_PMTUDISC_DONT      = C.IPV6_PMTUDISC_DONT
+	//sysIPV6_PMTUDISC_WANT      = C.IPV6_PMTUDISC_WANT
+	//sysIPV6_PMTUDISC_DO        = C.IPV6_PMTUDISC_DO
+	//sysIPV6_PMTUDISC_PROBE     = C.IPV6_PMTUDISC_PROBE
+	//sysIPV6_PMTUDISC_INTERFACE = C.IPV6_PMTUDISC_INTERFACE
+	//sysIPV6_PMTUDISC_OMIT      = C.IPV6_PMTUDISC_OMIT
+
+	sysIPV6_FLOWLABEL_MGR = C.IPV6_FLOWLABEL_MGR
+	sysIPV6_FLOWINFO_SEND = C.IPV6_FLOWINFO_SEND
+
+	sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+	sysIPV6_XFRM_POLICY  = C.IPV6_XFRM_POLICY
+
+	sysIPV6_RECVPKTINFO  = C.IPV6_RECVPKTINFO
+	sysIPV6_PKTINFO      = C.IPV6_PKTINFO
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_HOPLIMIT     = C.IPV6_HOPLIMIT
+	sysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS
+	sysIPV6_HOPOPTS      = C.IPV6_HOPOPTS
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+	sysIPV6_RECVRTHDR    = C.IPV6_RECVRTHDR
+	sysIPV6_RTHDR        = C.IPV6_RTHDR
+	sysIPV6_RECVDSTOPTS  = C.IPV6_RECVDSTOPTS
+	sysIPV6_DSTOPTS      = C.IPV6_DSTOPTS
+	sysIPV6_RECVPATHMTU  = C.IPV6_RECVPATHMTU
+	sysIPV6_PATHMTU      = C.IPV6_PATHMTU
+	sysIPV6_DONTFRAG     = C.IPV6_DONTFRAG
+
+	sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+	sysIPV6_TCLASS     = C.IPV6_TCLASS
+
+	sysIPV6_ADDR_PREFERENCES = C.IPV6_ADDR_PREFERENCES
+
+	sysIPV6_PREFER_SRC_TMP            = C.IPV6_PREFER_SRC_TMP
+	sysIPV6_PREFER_SRC_PUBLIC         = C.IPV6_PREFER_SRC_PUBLIC
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = C.IPV6_PREFER_SRC_PUBTMP_DEFAULT
+	sysIPV6_PREFER_SRC_COA            = C.IPV6_PREFER_SRC_COA
+	sysIPV6_PREFER_SRC_HOME           = C.IPV6_PREFER_SRC_HOME
+	sysIPV6_PREFER_SRC_CGA            = C.IPV6_PREFER_SRC_CGA
+	sysIPV6_PREFER_SRC_NONCGA         = C.IPV6_PREFER_SRC_NONCGA
+
+	sysIPV6_MINHOPCOUNT = C.IPV6_MINHOPCOUNT
+
+	sysIPV6_ORIGDSTADDR     = C.IPV6_ORIGDSTADDR
+	sysIPV6_RECVORIGDSTADDR = C.IPV6_RECVORIGDSTADDR
+	sysIPV6_TRANSPARENT     = C.IPV6_TRANSPARENT
+	sysIPV6_UNICAST_IF      = C.IPV6_UNICAST_IF
+
+	sysICMPV6_FILTER = C.ICMPV6_FILTER
+
+	sysICMPV6_FILTER_BLOCK       = C.ICMPV6_FILTER_BLOCK
+	sysICMPV6_FILTER_PASS        = C.ICMPV6_FILTER_PASS
+	sysICMPV6_FILTER_BLOCKOTHERS = C.ICMPV6_FILTER_BLOCKOTHERS
+	sysICMPV6_FILTER_PASSONLY    = C.ICMPV6_FILTER_PASSONLY
+
+	sysSOL_SOCKET       = C.SOL_SOCKET
+	sysSO_ATTACH_FILTER = C.SO_ATTACH_FILTER
+
+	sizeofKernelSockaddrStorage = C.sizeof_struct___kernel_sockaddr_storage
+	sizeofSockaddrInet6         = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo          = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo           = C.sizeof_struct_ip6_mtuinfo
+	sizeofIPv6FlowlabelReq      = C.sizeof_struct_in6_flowlabel_req
+
+	sizeofIPv6Mreq       = C.sizeof_struct_ipv6_mreq
+	sizeofGroupReq       = C.sizeof_struct_group_req
+	sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type kernelSockaddrStorage C.struct___kernel_sockaddr_storage
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6FlowlabelReq C.struct_in6_flowlabel_req
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
+
+type icmpv6Filter C.struct_icmp6_filter
+
+type sockFProg C.struct_sock_fprog
+
+type sockFilter C.struct_sock_filter
diff --git a/harvester/vendor/golang.org/x/net/ipv6/defs_netbsd.go b/harvester/vendor/golang.org/x/net/ipv6/defs_netbsd.go
new file mode 100644
index 0000000..be9ceb9
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/defs_netbsd.go
@@ -0,0 +1,80 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/param.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+	sysIPV6_UNICAST_HOPS   = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF   = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+	sysIPV6_JOIN_GROUP     = C.IPV6_JOIN_GROUP
+	sysIPV6_LEAVE_GROUP    = C.IPV6_LEAVE_GROUP
+	sysIPV6_PORTRANGE      = C.IPV6_PORTRANGE
+	sysICMP6_FILTER        = C.ICMP6_FILTER
+
+	sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+	sysIPV6_V6ONLY   = C.IPV6_V6ONLY
+
+	sysIPV6_IPSEC_POLICY = C.IPV6_IPSEC_POLICY
+
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+	sysIPV6_RECVPKTINFO  = C.IPV6_RECVPKTINFO
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_RECVRTHDR    = C.IPV6_RECVRTHDR
+	sysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS
+	sysIPV6_RECVDSTOPTS  = C.IPV6_RECVDSTOPTS
+
+	sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+	sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+	sysIPV6_PATHMTU     = C.IPV6_PATHMTU
+
+	sysIPV6_PKTINFO  = C.IPV6_PKTINFO
+	sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+	sysIPV6_NEXTHOP  = C.IPV6_NEXTHOP
+	sysIPV6_HOPOPTS  = C.IPV6_HOPOPTS
+	sysIPV6_DSTOPTS  = C.IPV6_DSTOPTS
+	sysIPV6_RTHDR    = C.IPV6_RTHDR
+
+	sysIPV6_RECVTCLASS = C.IPV6_RECVTCLASS
+
+	sysIPV6_TCLASS   = C.IPV6_TCLASS
+	sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+
+	sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+	sysIPV6_PORTRANGE_HIGH    = C.IPV6_PORTRANGE_HIGH
+	sysIPV6_PORTRANGE_LOW     = C.IPV6_PORTRANGE_LOW
+
+	sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo  = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo   = C.sizeof_struct_ip6_mtuinfo
+
+	sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type icmpv6Filter C.struct_icmp6_filter
diff --git a/harvester/vendor/golang.org/x/net/ipv6/defs_openbsd.go b/harvester/vendor/golang.org/x/net/ipv6/defs_openbsd.go
new file mode 100644
index 0000000..177ddf8
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/defs_openbsd.go
@@ -0,0 +1,89 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/param.h>
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+	sysIPV6_UNICAST_HOPS   = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF   = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+	sysIPV6_JOIN_GROUP     = C.IPV6_JOIN_GROUP
+	sysIPV6_LEAVE_GROUP    = C.IPV6_LEAVE_GROUP
+	sysIPV6_PORTRANGE      = C.IPV6_PORTRANGE
+	sysICMP6_FILTER        = C.ICMP6_FILTER
+
+	sysIPV6_CHECKSUM = C.IPV6_CHECKSUM
+	sysIPV6_V6ONLY   = C.IPV6_V6ONLY
+
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+	sysIPV6_RECVPKTINFO  = C.IPV6_RECVPKTINFO
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_RECVRTHDR    = C.IPV6_RECVRTHDR
+	sysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS
+	sysIPV6_RECVDSTOPTS  = C.IPV6_RECVDSTOPTS
+
+	sysIPV6_USE_MIN_MTU = C.IPV6_USE_MIN_MTU
+	sysIPV6_RECVPATHMTU = C.IPV6_RECVPATHMTU
+
+	sysIPV6_PATHMTU = C.IPV6_PATHMTU
+
+	sysIPV6_PKTINFO  = C.IPV6_PKTINFO
+	sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+	sysIPV6_NEXTHOP  = C.IPV6_NEXTHOP
+	sysIPV6_HOPOPTS  = C.IPV6_HOPOPTS
+	sysIPV6_DSTOPTS  = C.IPV6_DSTOPTS
+	sysIPV6_RTHDR    = C.IPV6_RTHDR
+
+	sysIPV6_AUTH_LEVEL        = C.IPV6_AUTH_LEVEL
+	sysIPV6_ESP_TRANS_LEVEL   = C.IPV6_ESP_TRANS_LEVEL
+	sysIPV6_ESP_NETWORK_LEVEL = C.IPV6_ESP_NETWORK_LEVEL
+	sysIPSEC6_OUTSA           = C.IPSEC6_OUTSA
+	sysIPV6_RECVTCLASS        = C.IPV6_RECVTCLASS
+
+	sysIPV6_AUTOFLOWLABEL = C.IPV6_AUTOFLOWLABEL
+	sysIPV6_IPCOMP_LEVEL  = C.IPV6_IPCOMP_LEVEL
+
+	sysIPV6_TCLASS   = C.IPV6_TCLASS
+	sysIPV6_DONTFRAG = C.IPV6_DONTFRAG
+	sysIPV6_PIPEX    = C.IPV6_PIPEX
+
+	sysIPV6_RTABLE = C.IPV6_RTABLE
+
+	sysIPV6_PORTRANGE_DEFAULT = C.IPV6_PORTRANGE_DEFAULT
+	sysIPV6_PORTRANGE_HIGH    = C.IPV6_PORTRANGE_HIGH
+	sysIPV6_PORTRANGE_LOW     = C.IPV6_PORTRANGE_LOW
+
+	sizeofSockaddrInet6 = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo  = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo   = C.sizeof_struct_ip6_mtuinfo
+
+	sizeofIPv6Mreq = C.sizeof_struct_ipv6_mreq
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type icmpv6Filter C.struct_icmp6_filter
diff --git a/harvester/vendor/golang.org/x/net/ipv6/defs_solaris.go b/harvester/vendor/golang.org/x/net/ipv6/defs_solaris.go
new file mode 100644
index 0000000..0f8ce2b
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/defs_solaris.go
@@ -0,0 +1,114 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// +godefs map struct_in6_addr [16]byte /* in6_addr */
+
+package ipv6
+
+/*
+#include <sys/socket.h>
+
+#include <netinet/in.h>
+#include <netinet/icmp6.h>
+*/
+import "C"
+
+const (
+	sysIPV6_UNICAST_HOPS   = C.IPV6_UNICAST_HOPS
+	sysIPV6_MULTICAST_IF   = C.IPV6_MULTICAST_IF
+	sysIPV6_MULTICAST_HOPS = C.IPV6_MULTICAST_HOPS
+	sysIPV6_MULTICAST_LOOP = C.IPV6_MULTICAST_LOOP
+	sysIPV6_JOIN_GROUP     = C.IPV6_JOIN_GROUP
+	sysIPV6_LEAVE_GROUP    = C.IPV6_LEAVE_GROUP
+
+	sysIPV6_PKTINFO = C.IPV6_PKTINFO
+
+	sysIPV6_HOPLIMIT = C.IPV6_HOPLIMIT
+	sysIPV6_NEXTHOP  = C.IPV6_NEXTHOP
+	sysIPV6_HOPOPTS  = C.IPV6_HOPOPTS
+	sysIPV6_DSTOPTS  = C.IPV6_DSTOPTS
+
+	sysIPV6_RTHDR        = C.IPV6_RTHDR
+	sysIPV6_RTHDRDSTOPTS = C.IPV6_RTHDRDSTOPTS
+
+	sysIPV6_RECVPKTINFO  = C.IPV6_RECVPKTINFO
+	sysIPV6_RECVHOPLIMIT = C.IPV6_RECVHOPLIMIT
+	sysIPV6_RECVHOPOPTS  = C.IPV6_RECVHOPOPTS
+
+	sysIPV6_RECVRTHDR = C.IPV6_RECVRTHDR
+
+	sysIPV6_RECVRTHDRDSTOPTS = C.IPV6_RECVRTHDRDSTOPTS
+
+	sysIPV6_CHECKSUM        = C.IPV6_CHECKSUM
+	sysIPV6_RECVTCLASS      = C.IPV6_RECVTCLASS
+	sysIPV6_USE_MIN_MTU     = C.IPV6_USE_MIN_MTU
+	sysIPV6_DONTFRAG        = C.IPV6_DONTFRAG
+	sysIPV6_SEC_OPT         = C.IPV6_SEC_OPT
+	sysIPV6_SRC_PREFERENCES = C.IPV6_SRC_PREFERENCES
+	sysIPV6_RECVPATHMTU     = C.IPV6_RECVPATHMTU
+	sysIPV6_PATHMTU         = C.IPV6_PATHMTU
+	sysIPV6_TCLASS          = C.IPV6_TCLASS
+	sysIPV6_V6ONLY          = C.IPV6_V6ONLY
+
+	sysIPV6_RECVDSTOPTS = C.IPV6_RECVDSTOPTS
+
+	sysMCAST_JOIN_GROUP         = C.MCAST_JOIN_GROUP
+	sysMCAST_LEAVE_GROUP        = C.MCAST_LEAVE_GROUP
+	sysMCAST_BLOCK_SOURCE       = C.MCAST_BLOCK_SOURCE
+	sysMCAST_UNBLOCK_SOURCE     = C.MCAST_UNBLOCK_SOURCE
+	sysMCAST_JOIN_SOURCE_GROUP  = C.MCAST_JOIN_SOURCE_GROUP
+	sysMCAST_LEAVE_SOURCE_GROUP = C.MCAST_LEAVE_SOURCE_GROUP
+
+	sysIPV6_PREFER_SRC_HOME   = C.IPV6_PREFER_SRC_HOME
+	sysIPV6_PREFER_SRC_COA    = C.IPV6_PREFER_SRC_COA
+	sysIPV6_PREFER_SRC_PUBLIC = C.IPV6_PREFER_SRC_PUBLIC
+	sysIPV6_PREFER_SRC_TMP    = C.IPV6_PREFER_SRC_TMP
+	sysIPV6_PREFER_SRC_NONCGA = C.IPV6_PREFER_SRC_NONCGA
+	sysIPV6_PREFER_SRC_CGA    = C.IPV6_PREFER_SRC_CGA
+
+	sysIPV6_PREFER_SRC_MIPMASK    = C.IPV6_PREFER_SRC_MIPMASK
+	sysIPV6_PREFER_SRC_MIPDEFAULT = C.IPV6_PREFER_SRC_MIPDEFAULT
+	sysIPV6_PREFER_SRC_TMPMASK    = C.IPV6_PREFER_SRC_TMPMASK
+	sysIPV6_PREFER_SRC_TMPDEFAULT = C.IPV6_PREFER_SRC_TMPDEFAULT
+	sysIPV6_PREFER_SRC_CGAMASK    = C.IPV6_PREFER_SRC_CGAMASK
+	sysIPV6_PREFER_SRC_CGADEFAULT = C.IPV6_PREFER_SRC_CGADEFAULT
+
+	sysIPV6_PREFER_SRC_MASK = C.IPV6_PREFER_SRC_MASK
+
+	sysIPV6_PREFER_SRC_DEFAULT = C.IPV6_PREFER_SRC_DEFAULT
+
+	sysIPV6_BOUND_IF   = C.IPV6_BOUND_IF
+	sysIPV6_UNSPEC_SRC = C.IPV6_UNSPEC_SRC
+
+	sysICMP6_FILTER = C.ICMP6_FILTER
+
+	sizeofSockaddrStorage = C.sizeof_struct_sockaddr_storage
+	sizeofSockaddrInet6   = C.sizeof_struct_sockaddr_in6
+	sizeofInet6Pktinfo    = C.sizeof_struct_in6_pktinfo
+	sizeofIPv6Mtuinfo     = C.sizeof_struct_ip6_mtuinfo
+
+	sizeofIPv6Mreq       = C.sizeof_struct_ipv6_mreq
+	sizeofGroupReq       = C.sizeof_struct_group_req
+	sizeofGroupSourceReq = C.sizeof_struct_group_source_req
+
+	sizeofICMPv6Filter = C.sizeof_struct_icmp6_filter
+)
+
+type sockaddrStorage C.struct_sockaddr_storage
+
+type sockaddrInet6 C.struct_sockaddr_in6
+
+type inet6Pktinfo C.struct_in6_pktinfo
+
+type ipv6Mtuinfo C.struct_ip6_mtuinfo
+
+type ipv6Mreq C.struct_ipv6_mreq
+
+type groupReq C.struct_group_req
+
+type groupSourceReq C.struct_group_source_req
+
+type icmpv6Filter C.struct_icmp6_filter
diff --git a/harvester/vendor/golang.org/x/net/ipv6/dgramopt_posix.go b/harvester/vendor/golang.org/x/net/ipv6/dgramopt_posix.go
new file mode 100644
index 0000000..5714308
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/dgramopt_posix.go
@@ -0,0 +1,290 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package ipv6
+
+import (
+	"net"
+	"syscall"
+
+	"golang.org/x/net/internal/netreflect"
+)
+
+// MulticastHopLimit returns the hop limit field value for outgoing
+// multicast packets.
+func (c *dgramOpt) MulticastHopLimit() (int, error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return 0, err
+	}
+	return getInt(s, &sockOpts[ssoMulticastHopLimit])
+}
+
+// SetMulticastHopLimit sets the hop limit field value for future
+// outgoing multicast packets.
+func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	return setInt(s, &sockOpts[ssoMulticastHopLimit], hoplim)
+}
+
+// MulticastInterface returns the default interface for multicast
+// packet transmissions.
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) {
+	if !c.ok() {
+		return nil, syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return nil, err
+	}
+	return getInterface(s, &sockOpts[ssoMulticastInterface])
+}
+
+// SetMulticastInterface sets the default interface for future
+// multicast packet transmissions.
+func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	return setInterface(s, &sockOpts[ssoMulticastInterface], ifi)
+}
+
+// MulticastLoopback reports whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) MulticastLoopback() (bool, error) {
+	if !c.ok() {
+		return false, syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return false, err
+	}
+	on, err := getInt(s, &sockOpts[ssoMulticastLoopback])
+	if err != nil {
+		return false, err
+	}
+	return on == 1, nil
+}
+
+// SetMulticastLoopback sets whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) SetMulticastLoopback(on bool) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	return setInt(s, &sockOpts[ssoMulticastLoopback], boolint(on))
+}
+
+// JoinGroup joins the group address group on the interface ifi.
+// By default all sources that can cast data to group are accepted.
+// It's possible to mute and unmute data transmission from a specific
+// source by using ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup.
+// JoinGroup uses the system assigned multicast interface when ifi is
+// nil, although this is not recommended because the assignment
+// depends on platforms and sometimes it might require routing
+// configuration.
+func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	grp := netAddrToIP16(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	return setGroup(s, &sockOpts[ssoJoinGroup], ifi, grp)
+}
+
+// LeaveGroup leaves the group address group on the interface ifi
+// regardless of whether the group is any-source group or
+// source-specific group.
+func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	grp := netAddrToIP16(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	return setGroup(s, &sockOpts[ssoLeaveGroup], ifi, grp)
+}
+
+// JoinSourceSpecificGroup joins the source-specific group comprising
+// group and source on the interface ifi.
+// JoinSourceSpecificGroup uses the system assigned multicast
+// interface when ifi is nil, although this is not recommended because
+// the assignment depends on platforms and sometimes it might require
+// routing configuration.
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	grp := netAddrToIP16(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	src := netAddrToIP16(source)
+	if src == nil {
+		return errMissingAddress
+	}
+	return setSourceGroup(s, &sockOpts[ssoJoinSourceGroup], ifi, grp, src)
+}
+
+// LeaveSourceSpecificGroup leaves the source-specific group on the
+// interface ifi.
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	grp := netAddrToIP16(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	src := netAddrToIP16(source)
+	if src == nil {
+		return errMissingAddress
+	}
+	return setSourceGroup(s, &sockOpts[ssoLeaveSourceGroup], ifi, grp, src)
+}
+
+// ExcludeSourceSpecificGroup excludes the source-specific group from
+// the already joined any-source groups by JoinGroup on the interface
+// ifi.
+func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	grp := netAddrToIP16(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	src := netAddrToIP16(source)
+	if src == nil {
+		return errMissingAddress
+	}
+	return setSourceGroup(s, &sockOpts[ssoBlockSourceGroup], ifi, grp, src)
+}
+
+// IncludeSourceSpecificGroup includes the excluded source-specific
+// group by ExcludeSourceSpecificGroup again on the interface ifi.
+func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	grp := netAddrToIP16(group)
+	if grp == nil {
+		return errMissingAddress
+	}
+	src := netAddrToIP16(source)
+	if src == nil {
+		return errMissingAddress
+	}
+	return setSourceGroup(s, &sockOpts[ssoUnblockSourceGroup], ifi, grp, src)
+}
+
+// Checksum reports whether the kernel will compute, store or verify a
+// checksum for both incoming and outgoing packets.  If on is true, it
+// returns an offset in bytes into the data of where the checksum
+// field is located.
+func (c *dgramOpt) Checksum() (on bool, offset int, err error) {
+	if !c.ok() {
+		return false, 0, syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return false, 0, err
+	}
+	offset, err = getInt(s, &sockOpts[ssoChecksum])
+	if err != nil {
+		return false, 0, err
+	}
+	if offset < 0 {
+		return false, 0, nil
+	}
+	return true, offset, nil
+}
+
+// SetChecksum enables the kernel checksum processing.  If on is true,
+// the offset should be an offset in bytes into the data of where the
+// checksum field is located.
+func (c *dgramOpt) SetChecksum(on bool, offset int) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	if !on {
+		offset = -1
+	}
+	return setInt(s, &sockOpts[ssoChecksum], offset)
+}
+
+// ICMPFilter returns an ICMP filter.
+func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) {
+	if !c.ok() {
+		return nil, syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return nil, err
+	}
+	return getICMPFilter(s, &sockOpts[ssoICMPFilter])
+}
+
+// SetICMPFilter deploys the ICMP filter.
+func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.PacketConn)
+	if err != nil {
+		return err
+	}
+	return setICMPFilter(s, &sockOpts[ssoICMPFilter], f)
+}
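The checksum options above only apply to raw IP sockets. A minimal sketch in the style of the package's own doc examples (net and golang.org/x/net/ipv6 imported; the protocol number 89 and byte offset 12 are the OSPFv3 conventions from RFC 5340, not anything defined by this change, and opening the socket needs privileges):

	c, err := net.ListenPacket("ip6:89", "::") // raw OSPFv3-style socket
	if err != nil {
		// error handling
	}
	defer c.Close()
	p := ipv6.NewPacketConn(c)
	// Ask the kernel to fill in and verify the checksum at byte
	// offset 12, where the OSPFv3 header keeps it.
	if err := p.SetChecksum(true, 12); err != nil {
		// error handling
	}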
diff --git a/harvester/vendor/golang.org/x/net/ipv6/dgramopt_stub.go b/harvester/vendor/golang.org/x/net/ipv6/dgramopt_stub.go
new file mode 100644
index 0000000..bc3290a
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/dgramopt_stub.go
@@ -0,0 +1,119 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package ipv6
+
+import "net"
+
+// MulticastHopLimit returns the hop limit field value for outgoing
+// multicast packets.
+func (c *dgramOpt) MulticastHopLimit() (int, error) {
+	return 0, errOpNoSupport
+}
+
+// SetMulticastHopLimit sets the hop limit field value for future
+// outgoing multicast packets.
+func (c *dgramOpt) SetMulticastHopLimit(hoplim int) error {
+	return errOpNoSupport
+}
+
+// MulticastInterface returns the default interface for multicast
+// packet transmissions.
+func (c *dgramOpt) MulticastInterface() (*net.Interface, error) {
+	return nil, errOpNoSupport
+}
+
+// SetMulticastInterface sets the default interface for future
+// multicast packet transmissions.
+func (c *dgramOpt) SetMulticastInterface(ifi *net.Interface) error {
+	return errOpNoSupport
+}
+
+// MulticastLoopback reports whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) MulticastLoopback() (bool, error) {
+	return false, errOpNoSupport
+}
+
+// SetMulticastLoopback sets whether transmitted multicast packets
+// should be copied and sent back to the originator.
+func (c *dgramOpt) SetMulticastLoopback(on bool) error {
+	return errOpNoSupport
+}
+
+// JoinGroup joins the group address group on the interface ifi.
+// By default all sources that can cast data to group are accepted.
+// It's possible to mute and unmute data transmission from a specific
+// source by using ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup.
+// JoinGroup uses the system assigned multicast interface when ifi is
+// nil, although this is not recommended because the assignment
+// depends on platforms and sometimes it might require routing
+// configuration.
+func (c *dgramOpt) JoinGroup(ifi *net.Interface, group net.Addr) error {
+	return errOpNoSupport
+}
+
+// LeaveGroup leaves the group address group on the interface ifi
+// regardless of whether the group is any-source group or
+// source-specific group.
+func (c *dgramOpt) LeaveGroup(ifi *net.Interface, group net.Addr) error {
+	return errOpNoSupport
+}
+
+// JoinSourceSpecificGroup joins the source-specific group comprising
+// group and source on the interface ifi.
+// JoinSourceSpecificGroup uses the system assigned multicast
+// interface when ifi is nil, although this is not recommended because
+// the assignment depends on platforms and sometimes it might require
+// routing configuration.
+func (c *dgramOpt) JoinSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	return errOpNoSupport
+}
+
+// LeaveSourceSpecificGroup leaves the source-specific group on the
+// interface ifi.
+func (c *dgramOpt) LeaveSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	return errOpNoSupport
+}
+
+// ExcludeSourceSpecificGroup excludes the source-specific group from
+// the already joined any-source groups by JoinGroup on the interface
+// ifi.
+func (c *dgramOpt) ExcludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	return errOpNoSupport
+}
+
+// IncludeSourceSpecificGroup includes the excluded source-specific
+// group by ExcludeSourceSpecificGroup again on the interface ifi.
+func (c *dgramOpt) IncludeSourceSpecificGroup(ifi *net.Interface, group, source net.Addr) error {
+	return errOpNoSupport
+}
+
+// Checksum reports whether the kernel will compute, store or verify a
+// checksum for both incoming and outgoing packets.  If on is true, it
+// returns an offset in bytes into the data of where the checksum
+// field is located.
+func (c *dgramOpt) Checksum() (on bool, offset int, err error) {
+	return false, 0, errOpNoSupport
+}
+
+// SetChecksum enables the kernel checksum processing.  If on is true,
+// the offset should be an offset in bytes into the data of where the
+// checksum field is located.
+func (c *dgramOpt) SetChecksum(on bool, offset int) error {
+	return errOpNoSupport
+}
+
+// ICMPFilter returns an ICMP filter.
+func (c *dgramOpt) ICMPFilter() (*ICMPFilter, error) {
+	return nil, errOpNoSupport
+}
+
+// SetICMPFilter deploys the ICMP filter.
+func (c *dgramOpt) SetICMPFilter(f *ICMPFilter) error {
+	return errOpNoSupport
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/doc.go b/harvester/vendor/golang.org/x/net/ipv6/doc.go
new file mode 100644
index 0000000..88383e9
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/doc.go
@@ -0,0 +1,243 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ipv6 implements IP-level socket options for the Internet
+// Protocol version 6.
+//
+// The package provides IP-level socket options that allow
+// manipulation of IPv6 facilities.
+//
+// The IPv6 protocol is defined in RFC 2460.
+// Socket interface extensions are defined in RFC 3493, RFC 3542 and
+// RFC 3678.
+// MLDv1 and MLDv2 are defined in RFC 2710 and RFC 3810.
+// Source-specific multicast is defined in RFC 4607.
+//
+// On Darwin, this package requires OS X Mavericks version 10.9 or
+// above, or equivalent.
+//
+//
+// Unicasting
+//
+// The options for unicasting are available for net.TCPConn,
+// net.UDPConn and net.IPConn which are created as network connections
+// that use the IPv6 transport.  When a single TCP connection carrying
+// a data flow of multiple packets needs to indicate the flow is
+// important, Conn is used to set the traffic class field on the IPv6
+// header for each packet.
+//
+//	ln, err := net.Listen("tcp6", "[::]:1024")
+//	if err != nil {
+//		// error handling
+//	}
+//	defer ln.Close()
+//	for {
+//		c, err := ln.Accept()
+//		if err != nil {
+//			// error handling
+//		}
+//		go func(c net.Conn) {
+//			defer c.Close()
+//
+// The outgoing packets will be labeled DiffServ assured forwarding
+// class 1 low drop precedence, known as AF11 packets.
+//
+//			if err := ipv6.NewConn(c).SetTrafficClass(0x28); err != nil {
+//				// error handling
+//			}
+//			if _, err := c.Write(data); err != nil {
+//				// error handling
+//			}
+//		}(c)
+//	}
+//
+//
+// Multicasting
+//
+// The options for multicasting are available for net.UDPConn and
+// net.IPConn which are created as network connections that use the
+// IPv6 transport.  A few network facilities must be prepared before
+// you begin multicasting, at a minimum joining network interfaces and
+// multicast groups.
+//
+//	en0, err := net.InterfaceByName("en0")
+//	if err != nil {
+//		// error handling
+//	}
+//	en1, err := net.InterfaceByIndex(911)
+//	if err != nil {
+//		// error handling
+//	}
+//	group := net.ParseIP("ff02::114")
+//
+// First, an application listens to an appropriate address with an
+// appropriate service port.
+//
+//	c, err := net.ListenPacket("udp6", "[::]:1024")
+//	if err != nil {
+//		// error handling
+//	}
+//	defer c.Close()
+//
+// Second, the application joins multicast groups and starts listening to
+// the groups on the specified network interfaces.  Note that the
+// service port for transport layer protocol does not matter with this
+// operation as joining groups affects only network and link layer
+// protocols, such as IPv6 and Ethernet.
+//
+//	p := ipv6.NewPacketConn(c)
+//	if err := p.JoinGroup(en0, &net.UDPAddr{IP: group}); err != nil {
+//		// error handling
+//	}
+//	if err := p.JoinGroup(en1, &net.UDPAddr{IP: group}); err != nil {
+//		// error handling
+//	}
+//
+// The application can enable per-packet control message exchanges
+// with the protocol stack in the kernel.  When the application
+// needs a destination address on an incoming packet,
+// SetControlMessage of PacketConn is used to enable control message
+// transmissions.
+//
+//	if err := p.SetControlMessage(ipv6.FlagDst, true); err != nil {
+//		// error handling
+//	}
+//
+// The application could identify whether the received packets are
+// of interest by using the control message that contains the
+// destination address of the received packet.
+//
+//	b := make([]byte, 1500)
+//	for {
+//		n, rcm, src, err := p.ReadFrom(b)
+//		if err != nil {
+//			// error handling
+//		}
+//		if rcm.Dst.IsMulticast() {
+//			if rcm.Dst.Equal(group) {
+//				// joined group, do something
+//			} else {
+//				// unknown group, discard
+//				continue
+//			}
+//		}
+//
+// The application can also send both unicast and multicast packets.
+//
+//		p.SetTrafficClass(0x0)
+//		p.SetHopLimit(16)
+//		if _, err := p.WriteTo(data[:n], nil, src); err != nil {
+//			// error handling
+//		}
+//		dst := &net.UDPAddr{IP: group, Port: 1024}
+//		wcm := ipv6.ControlMessage{TrafficClass: 0xe0, HopLimit: 1}
+//		for _, ifi := range []*net.Interface{en0, en1} {
+//			wcm.IfIndex = ifi.Index
+//			if _, err := p.WriteTo(data[:n], &wcm, dst); err != nil {
+//				// error handling
+//			}
+//		}
+//	}
+//
+//
+// More multicasting
+//
+// An application that uses PacketConn may join multiple multicast
+// groups.  For example, a UDP listener with port 1024 might join two
+// different groups across two different network interfaces by
+// using:
+//
+//	c, err := net.ListenPacket("udp6", "[::]:1024")
+//	if err != nil {
+//		// error handling
+//	}
+//	defer c.Close()
+//	p := ipv6.NewPacketConn(c)
+//	if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::1:114")}); err != nil {
+//		// error handling
+//	}
+//	if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil {
+//		// error handling
+//	}
+//	if err := p.JoinGroup(en1, &net.UDPAddr{IP: net.ParseIP("ff02::2:114")}); err != nil {
+//		// error handling
+//	}
+//
+// It is possible for multiple UDP listeners that listen on the same
+// UDP port to join the same multicast group.  The net package will
+// provide a socket that listens to a wildcard address with reusable
+// UDP port when an appropriate multicast address prefix is passed to
+// net.ListenPacket or net.ListenUDP.
+//
+//	c1, err := net.ListenPacket("udp6", "[ff02::]:1024")
+//	if err != nil {
+//		// error handling
+//	}
+//	defer c1.Close()
+//	c2, err := net.ListenPacket("udp6", "[ff02::]:1024")
+//	if err != nil {
+//		// error handling
+//	}
+//	defer c2.Close()
+//	p1 := ipv6.NewPacketConn(c1)
+//	if err := p1.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil {
+//		// error handling
+//	}
+//	p2 := ipv6.NewPacketConn(c2)
+//	if err := p2.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil {
+//		// error handling
+//	}
+//
+// It is also possible for the application to leave or rejoin a
+// multicast group on the network interface.
+//
+//	if err := p.LeaveGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff02::114")}); err != nil {
+//		// error handling
+//	}
+//	if err := p.JoinGroup(en0, &net.UDPAddr{IP: net.ParseIP("ff01::114")}); err != nil {
+//		// error handling
+//	}
+//
+//
+// Source-specific multicasting
+//
+// An application that uses PacketConn on an MLDv2-supported
+// platform is able to join source-specific multicast groups.
+// The application may use JoinSourceSpecificGroup and
+// LeaveSourceSpecificGroup for the operation known as "include" mode,
+//
+//	ssmgroup := net.UDPAddr{IP: net.ParseIP("ff32::8000:9")}
+//	ssmsource := net.UDPAddr{IP: net.ParseIP("fe80::cafe")}
+//	if err := p.JoinSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {
+//		// error handling
+//	}
+//	if err := p.LeaveSourceSpecificGroup(en0, &ssmgroup, &ssmsource); err != nil {
+//		// error handling
+//	}
+//
+// or JoinGroup, ExcludeSourceSpecificGroup,
+// IncludeSourceSpecificGroup and LeaveGroup for the operation known
+// as "exclude" mode.
+//
+//	exclsource := net.UDPAddr{IP: net.ParseIP("fe80::dead")}
+//	if err := p.JoinGroup(en0, &ssmgroup); err != nil {
+//		// error handling
+//	}
+//	if err := p.ExcludeSourceSpecificGroup(en0, &ssmgroup, &exclsource); err != nil {
+//		// error handling
+//	}
+//	if err := p.LeaveGroup(en0, &ssmgroup); err != nil {
+//		// error handling
+//	}
+//
+// Note that what happens when an application running on an
+// MLDv2-unsupported platform uses JoinSourceSpecificGroup and
+// LeaveSourceSpecificGroup depends on the platform implementation.
+// In general the platform tries to fall back to conversations using
+// MLDv1 and starts to listen to multicast traffic.
+// In the fallback case, ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup may return an error.
+package ipv6 // import "golang.org/x/net/ipv6"
+
+// BUG(mikio): This package is not implemented on NaCl and Plan 9.
diff --git a/harvester/vendor/golang.org/x/net/ipv6/endpoint.go b/harvester/vendor/golang.org/x/net/ipv6/endpoint.go
new file mode 100644
index 0000000..f6a68ab
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/endpoint.go
@@ -0,0 +1,130 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"net"
+	"syscall"
+	"time"
+
+	"golang.org/x/net/internal/netreflect"
+)
+
+// BUG(mikio): On Windows, the JoinSourceSpecificGroup,
+// LeaveSourceSpecificGroup, ExcludeSourceSpecificGroup and
+// IncludeSourceSpecificGroup methods of PacketConn are not
+// implemented.
+
+// A Conn represents a network endpoint that uses IPv6 transport.
+// It allows setting basic IP-level socket options such as traffic
+// class and hop limit.
+type Conn struct {
+	genericOpt
+}
+
+type genericOpt struct {
+	net.Conn
+}
+
+func (c *genericOpt) ok() bool { return c != nil && c.Conn != nil }
+
+// PathMTU returns a path MTU value for the destination associated
+// with the endpoint.
+func (c *Conn) PathMTU() (int, error) {
+	if !c.genericOpt.ok() {
+		return 0, syscall.EINVAL
+	}
+	s, err := netreflect.SocketOf(c.genericOpt.Conn)
+	if err != nil {
+		return 0, err
+	}
+	_, mtu, err := getMTUInfo(s, &sockOpts[ssoPathMTU])
+	if err != nil {
+		return 0, err
+	}
+	return mtu, nil
+}
+
+// NewConn returns a new Conn.
+func NewConn(c net.Conn) *Conn {
+	return &Conn{
+		genericOpt: genericOpt{Conn: c},
+	}
+}
+
+// A PacketConn represents a packet network endpoint that uses IPv6
+// transport.  It is used to control several IP-level socket options
+// including IPv6 header manipulation.  It also provides datagram
+// based network I/O methods specific to the IPv6 and higher layer
+// protocols such as OSPF, GRE, and UDP.
+type PacketConn struct {
+	genericOpt
+	dgramOpt
+	payloadHandler
+}
+
+type dgramOpt struct {
+	net.PacketConn
+}
+
+func (c *dgramOpt) ok() bool { return c != nil && c.PacketConn != nil }
+
+// SetControlMessage enables receiving per-packet IP-level socket
+// options.
+func (c *PacketConn) SetControlMessage(cf ControlFlags, on bool) error {
+	if !c.payloadHandler.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.PacketSocketOf(c.dgramOpt.PacketConn)
+	if err != nil {
+		return err
+	}
+	return setControlMessage(s, &c.payloadHandler.rawOpt, cf, on)
+}
+
+// SetDeadline sets the read and write deadlines associated with the
+// endpoint.
+func (c *PacketConn) SetDeadline(t time.Time) error {
+	if !c.payloadHandler.ok() {
+		return syscall.EINVAL
+	}
+	return c.payloadHandler.SetDeadline(t)
+}
+
+// SetReadDeadline sets the read deadline associated with the
+// endpoint.
+func (c *PacketConn) SetReadDeadline(t time.Time) error {
+	if !c.payloadHandler.ok() {
+		return syscall.EINVAL
+	}
+	return c.payloadHandler.SetReadDeadline(t)
+}
+
+// SetWriteDeadline sets the write deadline associated with the
+// endpoint.
+func (c *PacketConn) SetWriteDeadline(t time.Time) error {
+	if !c.payloadHandler.ok() {
+		return syscall.EINVAL
+	}
+	return c.payloadHandler.SetWriteDeadline(t)
+}
+
+// Close closes the endpoint.
+func (c *PacketConn) Close() error {
+	if !c.payloadHandler.ok() {
+		return syscall.EINVAL
+	}
+	return c.payloadHandler.Close()
+}
+
+// NewPacketConn returns a new PacketConn using c as its underlying
+// transport.
+func NewPacketConn(c net.PacketConn) *PacketConn {
+	return &PacketConn{
+		genericOpt:     genericOpt{Conn: c.(net.Conn)},
+		dgramOpt:       dgramOpt{PacketConn: c},
+		payloadHandler: payloadHandler{PacketConn: c},
+	}
+}
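Of the endpoint methods above, PathMTU is the one not exercised by the doc.go examples; a short sketch, assuming net, fmt and golang.org/x/net/ipv6 are imported and using a placeholder documentation address:

	c, err := net.Dial("tcp6", "[2001:db8::1]:80")
	if err != nil {
		// error handling
	}
	defer c.Close()
	// Query the kernel's current path MTU estimate toward the peer.
	mtu, err := ipv6.NewConn(c).PathMTU()
	if err != nil {
		// error handling
	}
	fmt.Println("path MTU:", mtu)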
diff --git a/harvester/vendor/golang.org/x/net/ipv6/gen.go b/harvester/vendor/golang.org/x/net/ipv6/gen.go
new file mode 100644
index 0000000..41886ec
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/gen.go
@@ -0,0 +1,199 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+//go:generate go run gen.go
+
+// This program generates system adaptation constants and types,
+// internet protocol constants and tables by reading template files
+// and IANA protocol registries.
+package main
+
+import (
+	"bytes"
+	"encoding/xml"
+	"fmt"
+	"go/format"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"os"
+	"os/exec"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+func main() {
+	if err := genzsys(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	if err := geniana(); err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+}
+
+func genzsys() error {
+	defs := "defs_" + runtime.GOOS + ".go"
+	f, err := os.Open(defs)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+	f.Close()
+	cmd := exec.Command("go", "tool", "cgo", "-godefs", defs)
+	b, err := cmd.Output()
+	if err != nil {
+		return err
+	}
+	b, err = format.Source(b)
+	if err != nil {
+		return err
+	}
+	zsys := "zsys_" + runtime.GOOS + ".go"
+	switch runtime.GOOS {
+	case "freebsd", "linux":
+		zsys = "zsys_" + runtime.GOOS + "_" + runtime.GOARCH + ".go"
+	}
+	if err := ioutil.WriteFile(zsys, b, 0644); err != nil {
+		return err
+	}
+	return nil
+}
+
+var registries = []struct {
+	url   string
+	parse func(io.Writer, io.Reader) error
+}{
+	{
+		"http://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml",
+		parseICMPv6Parameters,
+	},
+}
+
+func geniana() error {
+	var bb bytes.Buffer
+	fmt.Fprintf(&bb, "// go generate gen.go\n")
+	fmt.Fprintf(&bb, "// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT\n\n")
+	fmt.Fprintf(&bb, "package ipv6\n\n")
+	for _, r := range registries {
+		resp, err := http.Get(r.url)
+		if err != nil {
+			return err
+		}
+		defer resp.Body.Close()
+		if resp.StatusCode != http.StatusOK {
+			return fmt.Errorf("got HTTP status code %v for %v", resp.StatusCode, r.url)
+		}
+		if err := r.parse(&bb, resp.Body); err != nil {
+			return err
+		}
+		fmt.Fprintf(&bb, "\n")
+	}
+	b, err := format.Source(bb.Bytes())
+	if err != nil {
+		return err
+	}
+	if err := ioutil.WriteFile("iana.go", b, 0644); err != nil {
+		return err
+	}
+	return nil
+}
+
+func parseICMPv6Parameters(w io.Writer, r io.Reader) error {
+	dec := xml.NewDecoder(r)
+	var icp icmpv6Parameters
+	if err := dec.Decode(&icp); err != nil {
+		return err
+	}
+	prs := icp.escape()
+	fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
+	fmt.Fprintf(w, "const (\n")
+	for _, pr := range prs {
+		if pr.Name == "" {
+			continue
+		}
+		fmt.Fprintf(w, "ICMPType%s ICMPType = %d", pr.Name, pr.Value)
+		fmt.Fprintf(w, "// %s\n", pr.OrigName)
+	}
+	fmt.Fprintf(w, ")\n\n")
+	fmt.Fprintf(w, "// %s, Updated: %s\n", icp.Title, icp.Updated)
+	fmt.Fprintf(w, "var icmpTypes = map[ICMPType]string{\n")
+	for _, pr := range prs {
+		if pr.Name == "" {
+			continue
+		}
+		fmt.Fprintf(w, "%d: %q,\n", pr.Value, strings.ToLower(pr.OrigName))
+	}
+	fmt.Fprintf(w, "}\n")
+	return nil
+}
+
+type icmpv6Parameters struct {
+	XMLName    xml.Name `xml:"registry"`
+	Title      string   `xml:"title"`
+	Updated    string   `xml:"updated"`
+	Registries []struct {
+		Title   string `xml:"title"`
+		Records []struct {
+			Value string `xml:"value"`
+			Name  string `xml:"name"`
+		} `xml:"record"`
+	} `xml:"registry"`
+}
+
+type canonICMPv6ParamRecord struct {
+	OrigName string
+	Name     string
+	Value    int
+}
+
+func (icp *icmpv6Parameters) escape() []canonICMPv6ParamRecord {
+	id := -1
+	for i, r := range icp.Registries {
+		if strings.Contains(r.Title, "Type") || strings.Contains(r.Title, "type") {
+			id = i
+			break
+		}
+	}
+	if id < 0 {
+		return nil
+	}
+	prs := make([]canonICMPv6ParamRecord, len(icp.Registries[id].Records))
+	sr := strings.NewReplacer(
+		"Messages", "",
+		"Message", "",
+		"ICMP", "",
+		"+", "P",
+		"-", "",
+		"/", "",
+		".", "",
+		" ", "",
+	)
+	for i, pr := range icp.Registries[id].Records {
+		if strings.Contains(pr.Name, "Reserved") ||
+			strings.Contains(pr.Name, "Unassigned") ||
+			strings.Contains(pr.Name, "Deprecated") ||
+			strings.Contains(pr.Name, "Experiment") ||
+			strings.Contains(pr.Name, "experiment") {
+			continue
+		}
+		ss := strings.Split(pr.Name, "\n")
+		if len(ss) > 1 {
+			prs[i].Name = strings.Join(ss, " ")
+		} else {
+			prs[i].Name = ss[0]
+		}
+		s := strings.TrimSpace(prs[i].Name)
+		prs[i].OrigName = s
+		prs[i].Name = sr.Replace(s)
+		prs[i].Value, _ = strconv.Atoi(pr.Value)
+	}
+	return prs
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/genericopt_posix.go b/harvester/vendor/golang.org/x/net/ipv6/genericopt_posix.go
new file mode 100644
index 0000000..0a8d988
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/genericopt_posix.go
@@ -0,0 +1,64 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package ipv6
+
+import (
+	"syscall"
+
+	"golang.org/x/net/internal/netreflect"
+)
+
+// TrafficClass returns the traffic class field value for outgoing
+// packets.
+func (c *genericOpt) TrafficClass() (int, error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	s, err := netreflect.SocketOf(c.Conn)
+	if err != nil {
+		return 0, err
+	}
+	return getInt(s, &sockOpts[ssoTrafficClass])
+}
+
+// SetTrafficClass sets the traffic class field value for future
+// outgoing packets.
+func (c *genericOpt) SetTrafficClass(tclass int) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.SocketOf(c.Conn)
+	if err != nil {
+		return err
+	}
+	return setInt(s, &sockOpts[ssoTrafficClass], tclass)
+}
+
+// HopLimit returns the hop limit field value for outgoing packets.
+func (c *genericOpt) HopLimit() (int, error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	s, err := netreflect.SocketOf(c.Conn)
+	if err != nil {
+		return 0, err
+	}
+	return getInt(s, &sockOpts[ssoHopLimit])
+}
+
+// SetHopLimit sets the hop limit field value for future outgoing
+// packets.
+func (c *genericOpt) SetHopLimit(hoplim int) error {
+	if !c.ok() {
+		return syscall.EINVAL
+	}
+	s, err := netreflect.SocketOf(c.Conn)
+	if err != nil {
+		return err
+	}
+	return setInt(s, &sockOpts[ssoHopLimit], hoplim)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/genericopt_stub.go b/harvester/vendor/golang.org/x/net/ipv6/genericopt_stub.go
new file mode 100644
index 0000000..9dfc57d
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/genericopt_stub.go
@@ -0,0 +1,30 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package ipv6
+
+// TrafficClass returns the traffic class field value for outgoing
+// packets.
+func (c *genericOpt) TrafficClass() (int, error) {
+	return 0, errOpNoSupport
+}
+
+// SetTrafficClass sets the traffic class field value for future
+// outgoing packets.
+func (c *genericOpt) SetTrafficClass(tclass int) error {
+	return errOpNoSupport
+}
+
+// HopLimit returns the hop limit field value for outgoing packets.
+func (c *genericOpt) HopLimit() (int, error) {
+	return 0, errOpNoSupport
+}
+
+// SetHopLimit sets the hop limit field value for future outgoing
+// packets.
+func (c *genericOpt) SetHopLimit(hoplim int) error {
+	return errOpNoSupport
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/header.go b/harvester/vendor/golang.org/x/net/ipv6/header.go
new file mode 100644
index 0000000..e05cb08
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/header.go
@@ -0,0 +1,55 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"encoding/binary"
+	"fmt"
+	"net"
+)
+
+const (
+	Version   = 6  // protocol version
+	HeaderLen = 40 // header length
+)
+
+// A Header represents an IPv6 base header.
+type Header struct {
+	Version      int    // protocol version
+	TrafficClass int    // traffic class
+	FlowLabel    int    // flow label
+	PayloadLen   int    // payload length
+	NextHeader   int    // next header
+	HopLimit     int    // hop limit
+	Src          net.IP // source address
+	Dst          net.IP // destination address
+}
+
+func (h *Header) String() string {
+	if h == nil {
+		return "<nil>"
+	}
+	return fmt.Sprintf("ver=%d tclass=%#x flowlbl=%#x payloadlen=%d nxthdr=%d hoplim=%d src=%v dst=%v", h.Version, h.TrafficClass, h.FlowLabel, h.PayloadLen, h.NextHeader, h.HopLimit, h.Src, h.Dst)
+}
+
+// ParseHeader parses b as an IPv6 base header.
+func ParseHeader(b []byte) (*Header, error) {
+	if len(b) < HeaderLen {
+		return nil, errHeaderTooShort
+	}
+	h := &Header{
+		Version:      int(b[0]) >> 4,
+		TrafficClass: int(b[0]&0x0f)<<4 | int(b[1])>>4,
+		FlowLabel:    int(b[1]&0x0f)<<16 | int(b[2])<<8 | int(b[3]),
+		PayloadLen:   int(binary.BigEndian.Uint16(b[4:6])),
+		NextHeader:   int(b[6]),
+		HopLimit:     int(b[7]),
+	}
+	h.Src = make(net.IP, net.IPv6len)
+	copy(h.Src, b[8:24])
+	h.Dst = make(net.IP, net.IPv6len)
+	copy(h.Dst, b[24:40])
+	return h, nil
+}
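A worked example of the bit layout ParseHeader decodes, using a hand-built 40-byte header (encoding/binary, fmt, net and golang.org/x/net/ipv6 imported; the field values are arbitrary):

	b := make([]byte, ipv6.HeaderLen)
	b[0] = 6 << 4                         // version 6, traffic class 0
	binary.BigEndian.PutUint16(b[4:6], 8) // payload length
	b[6] = 58                             // next header: ICMPv6
	b[7] = 64                             // hop limit
	copy(b[8:24], net.ParseIP("::1"))     // source address
	copy(b[24:40], net.ParseIP("::1"))    // destination address
	h, err := ipv6.ParseHeader(b)
	if err != nil {
		// error handling
	}
	fmt.Println(h) // ver=6 tclass=0x0 flowlbl=0x0 payloadlen=8 nxthdr=58 hoplim=64 src=::1 dst=::1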
diff --git a/harvester/vendor/golang.org/x/net/ipv6/helper.go b/harvester/vendor/golang.org/x/net/ipv6/helper.go
new file mode 100644
index 0000000..7a42e58
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/helper.go
@@ -0,0 +1,53 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"encoding/binary"
+	"errors"
+	"net"
+	"unsafe"
+)
+
+var (
+	errMissingAddress  = errors.New("missing address")
+	errHeaderTooShort  = errors.New("header too short")
+	errInvalidConnType = errors.New("invalid conn type")
+	errOpNoSupport     = errors.New("operation not supported")
+	errNoSuchInterface = errors.New("no such interface")
+
+	nativeEndian binary.ByteOrder
+)
+
+func init() {
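+	// Reinterpret a uint32 through a byte pointer: if the least
+	// significant byte comes first in memory the host is little
+	// endian, otherwise big endian.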
+	i := uint32(1)
+	b := (*[4]byte)(unsafe.Pointer(&i))
+	if b[0] == 1 {
+		nativeEndian = binary.LittleEndian
+	} else {
+		nativeEndian = binary.BigEndian
+	}
+}
+
+func boolint(b bool) int {
+	if b {
+		return 1
+	}
+	return 0
+}
+
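+// netAddrToIP16 returns the 16-byte IPv6 address carried by a, or
+// nil when a holds no address or only an IPv4 one.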
+func netAddrToIP16(a net.Addr) net.IP {
+	switch v := a.(type) {
+	case *net.UDPAddr:
+		if ip := v.IP.To16(); ip != nil && ip.To4() == nil {
+			return ip
+		}
+	case *net.IPAddr:
+		if ip := v.IP.To16(); ip != nil && ip.To4() == nil {
+			return ip
+		}
+	}
+	return nil
+}
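
The To16/To4 pair in netAddrToIP16 is what keeps plain IPv4 and IPv4-mapped addresses out. A standalone rendering of the same check (ip16 is a hypothetical name for this sketch, not part of the package):

package main

import (
	"fmt"
	"net"
)

// ip16 mirrors the unexported netAddrToIP16 test: accept an address
// only when it converts to 16 bytes and has no IPv4 form.
func ip16(ip net.IP) net.IP {
	if v := ip.To16(); v != nil && v.To4() == nil {
		return v
	}
	return nil
}

func main() {
	fmt.Println(ip16(net.ParseIP("2001:db8::1")))      // 2001:db8::1
	fmt.Println(ip16(net.ParseIP("192.0.2.1")))        // <nil>: plain IPv4
	fmt.Println(ip16(net.ParseIP("::ffff:192.0.2.1"))) // <nil>: IPv4-mapped
}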
diff --git a/harvester/vendor/golang.org/x/net/ipv6/iana.go b/harvester/vendor/golang.org/x/net/ipv6/iana.go
new file mode 100644
index 0000000..3c6214f
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/iana.go
@@ -0,0 +1,82 @@
+// go generate gen.go
+// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+package ipv6
+
+// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07
+const (
+	ICMPTypeDestinationUnreachable                ICMPType = 1   // Destination Unreachable
+	ICMPTypePacketTooBig                          ICMPType = 2   // Packet Too Big
+	ICMPTypeTimeExceeded                          ICMPType = 3   // Time Exceeded
+	ICMPTypeParameterProblem                      ICMPType = 4   // Parameter Problem
+	ICMPTypeEchoRequest                           ICMPType = 128 // Echo Request
+	ICMPTypeEchoReply                             ICMPType = 129 // Echo Reply
+	ICMPTypeMulticastListenerQuery                ICMPType = 130 // Multicast Listener Query
+	ICMPTypeMulticastListenerReport               ICMPType = 131 // Multicast Listener Report
+	ICMPTypeMulticastListenerDone                 ICMPType = 132 // Multicast Listener Done
+	ICMPTypeRouterSolicitation                    ICMPType = 133 // Router Solicitation
+	ICMPTypeRouterAdvertisement                   ICMPType = 134 // Router Advertisement
+	ICMPTypeNeighborSolicitation                  ICMPType = 135 // Neighbor Solicitation
+	ICMPTypeNeighborAdvertisement                 ICMPType = 136 // Neighbor Advertisement
+	ICMPTypeRedirect                              ICMPType = 137 // Redirect Message
+	ICMPTypeRouterRenumbering                     ICMPType = 138 // Router Renumbering
+	ICMPTypeNodeInformationQuery                  ICMPType = 139 // ICMP Node Information Query
+	ICMPTypeNodeInformationResponse               ICMPType = 140 // ICMP Node Information Response
+	ICMPTypeInverseNeighborDiscoverySolicitation  ICMPType = 141 // Inverse Neighbor Discovery Solicitation Message
+	ICMPTypeInverseNeighborDiscoveryAdvertisement ICMPType = 142 // Inverse Neighbor Discovery Advertisement Message
+	ICMPTypeVersion2MulticastListenerReport       ICMPType = 143 // Version 2 Multicast Listener Report
+	ICMPTypeHomeAgentAddressDiscoveryRequest      ICMPType = 144 // Home Agent Address Discovery Request Message
+	ICMPTypeHomeAgentAddressDiscoveryReply        ICMPType = 145 // Home Agent Address Discovery Reply Message
+	ICMPTypeMobilePrefixSolicitation              ICMPType = 146 // Mobile Prefix Solicitation
+	ICMPTypeMobilePrefixAdvertisement             ICMPType = 147 // Mobile Prefix Advertisement
+	ICMPTypeCertificationPathSolicitation         ICMPType = 148 // Certification Path Solicitation Message
+	ICMPTypeCertificationPathAdvertisement        ICMPType = 149 // Certification Path Advertisement Message
+	ICMPTypeMulticastRouterAdvertisement          ICMPType = 151 // Multicast Router Advertisement
+	ICMPTypeMulticastRouterSolicitation           ICMPType = 152 // Multicast Router Solicitation
+	ICMPTypeMulticastRouterTermination            ICMPType = 153 // Multicast Router Termination
+	ICMPTypeFMIPv6                                ICMPType = 154 // FMIPv6 Messages
+	ICMPTypeRPLControl                            ICMPType = 155 // RPL Control Message
+	ICMPTypeILNPv6LocatorUpdate                   ICMPType = 156 // ILNPv6 Locator Update Message
+	ICMPTypeDuplicateAddressRequest               ICMPType = 157 // Duplicate Address Request
+	ICMPTypeDuplicateAddressConfirmation          ICMPType = 158 // Duplicate Address Confirmation
+	ICMPTypeMPLControl                            ICMPType = 159 // MPL Control Message
+)
+
+// Internet Control Message Protocol version 6 (ICMPv6) Parameters, Updated: 2015-07-07
+var icmpTypes = map[ICMPType]string{
+	1:   "destination unreachable",
+	2:   "packet too big",
+	3:   "time exceeded",
+	4:   "parameter problem",
+	128: "echo request",
+	129: "echo reply",
+	130: "multicast listener query",
+	131: "multicast listener report",
+	132: "multicast listener done",
+	133: "router solicitation",
+	134: "router advertisement",
+	135: "neighbor solicitation",
+	136: "neighbor advertisement",
+	137: "redirect message",
+	138: "router renumbering",
+	139: "icmp node information query",
+	140: "icmp node information response",
+	141: "inverse neighbor discovery solicitation message",
+	142: "inverse neighbor discovery advertisement message",
+	143: "version 2 multicast listener report",
+	144: "home agent address discovery request message",
+	145: "home agent address discovery reply message",
+	146: "mobile prefix solicitation",
+	147: "mobile prefix advertisement",
+	148: "certification path solicitation message",
+	149: "certification path advertisement message",
+	151: "multicast router advertisement",
+	152: "multicast router solicitation",
+	153: "multicast router termination",
+	154: "fmipv6 messages",
+	155: "rpl control message",
+	156: "ilnpv6 locator update message",
+	157: "duplicate address request",
+	158: "duplicate address confirmation",
+	159: "mpl control message",
+}
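
The generated constants and the name table line up by value, and Protocol() always reports the ICMPv6 protocol number; a two-line check (output assumes the standard value 58):

package main

import (
	"fmt"

	"golang.org/x/net/ipv6"
)

func main() {
	fmt.Println(ipv6.ICMPTypeEchoRequest)            // echo request
	fmt.Println(ipv6.ICMPTypeEchoRequest.Protocol()) // 58
}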
diff --git a/harvester/vendor/golang.org/x/net/ipv6/icmp.go b/harvester/vendor/golang.org/x/net/ipv6/icmp.go
new file mode 100644
index 0000000..ff21d10
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/icmp.go
@@ -0,0 +1,60 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import "golang.org/x/net/internal/iana"
+
+// BUG(mikio): On Windows, methods related to ICMPFilter are not
+// implemented.
+
+// An ICMPType represents a type of ICMP message.
+type ICMPType int
+
+func (typ ICMPType) String() string {
+	s, ok := icmpTypes[typ]
+	if !ok {
+		return "<nil>"
+	}
+	return s
+}
+
+// Protocol returns the ICMPv6 protocol number.
+func (typ ICMPType) Protocol() int {
+	return iana.ProtocolIPv6ICMP
+}
+
+// An ICMPFilter represents an ICMP message filter for incoming
+// packets. The filter belongs to a packet delivery path on a host and
+// it cannot interact with forwarding packets or tunnel-outer packets.
+//
+// Note: RFC 2460 defines a reasonable role model. A node means a
+// device that implements IP. A router means a node that forwards IP
+// packets not explicitly addressed to itself, and a host means a node
+// that is not a router.
+type ICMPFilter struct {
+	icmpv6Filter
+}
+
+// Accept accepts incoming ICMP packets whose type field value is
+// typ.
+func (f *ICMPFilter) Accept(typ ICMPType) {
+	f.accept(typ)
+}
+
+// Block blocks incoming ICMP packets whose type field value is
+// typ.
+func (f *ICMPFilter) Block(typ ICMPType) {
+	f.block(typ)
+}
+
+// SetAll sets the filter action, block or pass, for all ICMP types.
+func (f *ICMPFilter) SetAll(block bool) {
+	f.setAll(block)
+}
+
+// WillBlock reports whether the ICMP type will be blocked.
+func (f *ICMPFilter) WillBlock(typ ICMPType) bool {
+	return f.willBlock(typ)
+}
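
A typical whitelist use of the filter through the package's public PacketConn, sketched here: the raw ICMPv6 socket generally needs elevated privileges, and per the BUG note above the filter calls are not implemented on Windows.

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("ip6:ipv6-icmp", "::") // raw socket: usually requires root
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv6.NewPacketConn(c)
	var f ipv6.ICMPFilter
	f.SetAll(true)                   // block every ICMPv6 type...
	f.Accept(ipv6.ICMPTypeEchoReply) // ...except echo replies
	if err := p.SetICMPFilter(&f); err != nil {
		log.Fatal(err)
	}
}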
diff --git a/harvester/vendor/golang.org/x/net/ipv6/icmp_bsd.go b/harvester/vendor/golang.org/x/net/ipv6/icmp_bsd.go
new file mode 100644
index 0000000..e1a791d
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/icmp_bsd.go
@@ -0,0 +1,29 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd netbsd openbsd
+
+package ipv6
+
+func (f *icmpv6Filter) accept(typ ICMPType) {
+	f.Filt[typ>>5] |= 1 << (uint32(typ) & 31)
+}
+
+func (f *icmpv6Filter) block(typ ICMPType) {
+	f.Filt[typ>>5] &^= 1 << (uint32(typ) & 31)
+}
+
+func (f *icmpv6Filter) setAll(block bool) {
+	for i := range f.Filt {
+		if block {
+			f.Filt[i] = 0
+		} else {
+			f.Filt[i] = 1<<32 - 1
+		}
+	}
+}
+
+func (f *icmpv6Filter) willBlock(typ ICMPType) bool {
+	return f.Filt[typ>>5]&(1<<(uint32(typ)&31)) == 0
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/icmp_linux.go b/harvester/vendor/golang.org/x/net/ipv6/icmp_linux.go
new file mode 100644
index 0000000..647f6b4
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/icmp_linux.go
@@ -0,0 +1,27 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+func (f *icmpv6Filter) accept(typ ICMPType) {
+	f.Data[typ>>5] &^= 1 << (uint32(typ) & 31)
+}
+
+func (f *icmpv6Filter) block(typ ICMPType) {
+	f.Data[typ>>5] |= 1 << (uint32(typ) & 31)
+}
+
+func (f *icmpv6Filter) setAll(block bool) {
+	for i := range f.Data {
+		if block {
+			f.Data[i] = 1<<32 - 1
+		} else {
+			f.Data[i] = 0
+		}
+	}
+}
+
+func (f *icmpv6Filter) willBlock(typ ICMPType) bool {
+	return f.Data[typ>>5]&(1<<(uint32(typ)&31)) != 0
+}
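
Note the inverted polarity between the two implementations: the BSD bitmap above marks pass with a set bit, while the Linux one marks block, which is why willBlock tests == 0 on BSD and != 0 here. Both index the same 256-bit map as eight 32-bit words; the arithmetic, worked for one type:

package main

import "fmt"

func main() {
	typ := 129 // ICMPv6 echo reply
	word, bit := typ>>5, uint32(typ)&31
	fmt.Printf("type %d -> word %d, bit %d\n", typ, word, bit) // type 129 -> word 4, bit 1
}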
diff --git a/harvester/vendor/golang.org/x/net/ipv6/icmp_solaris.go b/harvester/vendor/golang.org/x/net/ipv6/icmp_solaris.go
new file mode 100644
index 0000000..7c23bb1
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/icmp_solaris.go
@@ -0,0 +1,27 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+func (f *icmpv6Filter) accept(typ ICMPType) {
+	f.X__icmp6_filt[typ>>5] |= 1 << (uint32(typ) & 31)
+}
+
+func (f *icmpv6Filter) block(typ ICMPType) {
+	f.X__icmp6_filt[typ>>5] &^= 1 << (uint32(typ) & 31)
+}
+
+func (f *icmpv6Filter) setAll(block bool) {
+	for i := range f.X__icmp6_filt {
+		if block {
+			f.X__icmp6_filt[i] = 0
+		} else {
+			f.X__icmp6_filt[i] = 1<<32 - 1
+		}
+	}
+}
+
+func (f *icmpv6Filter) willBlock(typ ICMPType) bool {
+	return f.X__icmp6_filt[typ>>5]&(1<<(uint32(typ)&31)) == 0
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/icmp_stub.go b/harvester/vendor/golang.org/x/net/ipv6/icmp_stub.go
new file mode 100644
index 0000000..3cd84e1
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/icmp_stub.go
@@ -0,0 +1,23 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package ipv6
+
+type icmpv6Filter struct {
+}
+
+func (f *icmpv6Filter) accept(typ ICMPType) {
+}
+
+func (f *icmpv6Filter) block(typ ICMPType) {
+}
+
+func (f *icmpv6Filter) setAll(block bool) {
+}
+
+func (f *icmpv6Filter) willBlock(typ ICMPType) bool {
+	return false
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/icmp_windows.go b/harvester/vendor/golang.org/x/net/ipv6/icmp_windows.go
new file mode 100644
index 0000000..443cd07
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/icmp_windows.go
@@ -0,0 +1,22 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+func (f *icmpv6Filter) accept(typ ICMPType) {
+	// TODO(mikio): implement this
+}
+
+func (f *icmpv6Filter) block(typ ICMPType) {
+	// TODO(mikio): implement this
+}
+
+func (f *icmpv6Filter) setAll(block bool) {
+	// TODO(mikio): implement this
+}
+
+func (f *icmpv6Filter) willBlock(typ ICMPType) bool {
+	// TODO(mikio): implement this
+	return false
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/payload.go b/harvester/vendor/golang.org/x/net/ipv6/payload.go
new file mode 100644
index 0000000..d9f8225
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/payload.go
@@ -0,0 +1,18 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import "net"
+
+// BUG(mikio): On Windows, the ControlMessage for ReadFrom and WriteTo
+// methods of PacketConn is not implemented.
+
+// A payloadHandler represents the IPv6 datagram payload handler.
+type payloadHandler struct {
+	net.PacketConn
+	rawOpt
+}
+
+func (c *payloadHandler) ok() bool { return c != nil && c.PacketConn != nil }
diff --git a/harvester/vendor/golang.org/x/net/ipv6/payload_cmsg.go b/harvester/vendor/golang.org/x/net/ipv6/payload_cmsg.go
new file mode 100644
index 0000000..3a33585
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/payload_cmsg.go
@@ -0,0 +1,70 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !nacl,!plan9,!windows
+
+package ipv6
+
+import (
+	"net"
+	"syscall"
+)
+
+// ReadFrom reads a payload of the received IPv6 datagram, from the
+// endpoint c, copying the payload into b.  It returns the number of
+// bytes copied into b, the control message cm and the source address
+// src of the received datagram.
+func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {
+	if !c.ok() {
+		return 0, nil, nil, syscall.EINVAL
+	}
+	oob := newControlMessage(&c.rawOpt)
+	var oobn int
+	switch c := c.PacketConn.(type) {
+	case *net.UDPConn:
+		if n, oobn, _, src, err = c.ReadMsgUDP(b, oob); err != nil {
+			return 0, nil, nil, err
+		}
+	case *net.IPConn:
+		if n, oobn, _, src, err = c.ReadMsgIP(b, oob); err != nil {
+			return 0, nil, nil, err
+		}
+	default:
+		return 0, nil, nil, errInvalidConnType
+	}
+	if cm, err = parseControlMessage(oob[:oobn]); err != nil {
+		return 0, nil, nil, err
+	}
+	if cm != nil {
+		cm.Src = netAddrToIP16(src)
+	}
+	return
+}
+
+// WriteTo writes a payload of the IPv6 datagram, to the destination
+// address dst through the endpoint c, copying the payload from b.  It
+// returns the number of bytes written.  The control message cm allows
+// the IPv6 header fields and the datagram path to be specified.  The
+// cm may be nil if control of the outgoing datagram is not required.
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	oob := marshalControlMessage(cm)
+	if dst == nil {
+		return 0, errMissingAddress
+	}
+	switch c := c.PacketConn.(type) {
+	case *net.UDPConn:
+		n, _, err = c.WriteMsgUDP(b, oob, dst.(*net.UDPAddr))
+	case *net.IPConn:
+		n, _, err = c.WriteMsgIP(b, oob, dst.(*net.IPAddr))
+	default:
+		return 0, errInvalidConnType
+	}
+	if err != nil {
+		return 0, err
+	}
+	return
+}
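
In practice the payload handler is reached through ipv6.PacketConn. A minimal receive sketch, assuming a local UDPv6 listener (ReadFrom blocks until a datagram arrives):

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("udp6", "[::1]:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv6.NewPacketConn(c)
	// Request hop-limit and source info as control messages; on
	// nacl/plan9/windows this falls through to payload_nocmsg.go.
	if err := p.SetControlMessage(ipv6.FlagHopLimit|ipv6.FlagSrc, true); err != nil {
		log.Println(err)
	}
	b := make([]byte, 1500)
	n, cm, src, err := p.ReadFrom(b)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%d bytes from %v, cm=%v", n, src, cm)
}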
diff --git a/harvester/vendor/golang.org/x/net/ipv6/payload_nocmsg.go b/harvester/vendor/golang.org/x/net/ipv6/payload_nocmsg.go
new file mode 100644
index 0000000..9731cba
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/payload_nocmsg.go
@@ -0,0 +1,41 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9 windows
+
+package ipv6
+
+import (
+	"net"
+	"syscall"
+)
+
+// ReadFrom reads a payload of the received IPv6 datagram, from the
+// endpoint c, copying the payload into b.  It returns the number of
+// bytes copied into b, the control message cm and the source address
+// src of the received datagram.
+func (c *payloadHandler) ReadFrom(b []byte) (n int, cm *ControlMessage, src net.Addr, err error) {
+	if !c.ok() {
+		return 0, nil, nil, syscall.EINVAL
+	}
+	if n, src, err = c.PacketConn.ReadFrom(b); err != nil {
+		return 0, nil, nil, err
+	}
+	return
+}
+
+// WriteTo writes a payload of the IPv6 datagram, to the destination
+// address dst through the endpoint c, copying the payload from b.  It
+// returns the number of bytes written.  The control message cm allows
+// the IPv6 header fields and the datagram path to be specified.  The
+// cm may be nil if control of the outgoing datagram is not required.
+func (c *payloadHandler) WriteTo(b []byte, cm *ControlMessage, dst net.Addr) (n int, err error) {
+	if !c.ok() {
+		return 0, syscall.EINVAL
+	}
+	if dst == nil {
+		return 0, errMissingAddress
+	}
+	return c.PacketConn.WriteTo(b, dst)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sockopt.go b/harvester/vendor/golang.org/x/net/ipv6/sockopt.go
new file mode 100644
index 0000000..f0cfc2f
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sockopt.go
@@ -0,0 +1,46 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+// Sticky socket options
+const (
+	ssoTrafficClass        = iota // header field for unicast packet, RFC 3542
+	ssoHopLimit                   // header field for unicast packet, RFC 3493
+	ssoMulticastInterface         // outbound interface for multicast packet, RFC 3493
+	ssoMulticastHopLimit          // header field for multicast packet, RFC 3493
+	ssoMulticastLoopback          // loopback for multicast packet, RFC 3493
+	ssoReceiveTrafficClass        // header field on received packet, RFC 3542
+	ssoReceiveHopLimit            // header field on received packet, RFC 2292 or 3542
+	ssoReceivePacketInfo          // inbound or outbound packet path, RFC 2292 or 3542
+	ssoReceivePathMTU             // path mtu, RFC 3542
+	ssoPathMTU                    // path mtu, RFC 3542
+	ssoChecksum                   // packet checksum, RFC 2292 or 3542
+	ssoICMPFilter                 // icmp filter, RFC 2292 or 3542
+	ssoJoinGroup                  // any-source multicast, RFC 3493
+	ssoLeaveGroup                 // any-source multicast, RFC 3493
+	ssoJoinSourceGroup            // source-specific multicast
+	ssoLeaveSourceGroup           // source-specific multicast
+	ssoBlockSourceGroup           // any-source or source-specific multicast
+	ssoUnblockSourceGroup         // any-source or source-specific multicast
+	ssoMax
+)
+
+// Sticky socket option value types
+const (
+	ssoTypeInt = iota + 1
+	ssoTypeInterface
+	ssoTypeICMPFilter
+	ssoTypeMTUInfo
+	ssoTypeIPMreq
+	ssoTypeGroupReq
+	ssoTypeGroupSourceReq
+)
+
+// A sockOpt represents a binding for sticky socket option.
+type sockOpt struct {
+	level int // option level
+	name  int // option name, must be equal to or greater than 1
+	typ   int // option value type, must be equal to or greater than 1
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sockopt_asmreq_posix.go b/harvester/vendor/golang.org/x/net/ipv6/sockopt_asmreq_posix.go
new file mode 100644
index 0000000..cd36739
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sockopt_asmreq_posix.go
@@ -0,0 +1,22 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package ipv6
+
+import (
+	"net"
+	"os"
+	"unsafe"
+)
+
+func setsockoptIPMreq(s uintptr, opt *sockOpt, ifi *net.Interface, grp net.IP) error {
+	var mreq ipv6Mreq
+	copy(mreq.Multiaddr[:], grp)
+	if ifi != nil {
+		mreq.setIfindex(ifi.Index)
+	}
+	return os.NewSyscallError("setsockopt", setsockopt(s, opt.level, opt.name, unsafe.Pointer(&mreq), sizeofIPv6Mreq))
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sockopt_posix.go b/harvester/vendor/golang.org/x/net/ipv6/sockopt_posix.go
new file mode 100644
index 0000000..e0a3fa6
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sockopt_posix.go
@@ -0,0 +1,122 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris windows
+
+package ipv6
+
+import (
+	"net"
+	"os"
+	"unsafe"
+)
+
+func getInt(s uintptr, opt *sockOpt) (int, error) {
+	if opt.name < 1 || opt.typ != ssoTypeInt {
+		return 0, errOpNoSupport
+	}
+	var i int32
+	l := uint32(4)
+	if err := getsockopt(s, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil {
+		return 0, os.NewSyscallError("getsockopt", err)
+	}
+	return int(i), nil
+}
+
+func setInt(s uintptr, opt *sockOpt, v int) error {
+	if opt.name < 1 || opt.typ != ssoTypeInt {
+		return errOpNoSupport
+	}
+	i := int32(v)
+	return os.NewSyscallError("setsockopt", setsockopt(s, opt.level, opt.name, unsafe.Pointer(&i), 4))
+}
+
+func getInterface(s uintptr, opt *sockOpt) (*net.Interface, error) {
+	if opt.name < 1 || opt.typ != ssoTypeInterface {
+		return nil, errOpNoSupport
+	}
+	var i int32
+	l := uint32(4)
+	if err := getsockopt(s, opt.level, opt.name, unsafe.Pointer(&i), &l); err != nil {
+		return nil, os.NewSyscallError("getsockopt", err)
+	}
+	if i == 0 {
+		return nil, nil
+	}
+	ifi, err := net.InterfaceByIndex(int(i))
+	if err != nil {
+		return nil, err
+	}
+	return ifi, nil
+}
+
+func setInterface(s uintptr, opt *sockOpt, ifi *net.Interface) error {
+	if opt.name < 1 || opt.typ != ssoTypeInterface {
+		return errOpNoSupport
+	}
+	var i int32
+	if ifi != nil {
+		i = int32(ifi.Index)
+	}
+	return os.NewSyscallError("setsockopt", setsockopt(s, opt.level, opt.name, unsafe.Pointer(&i), 4))
+}
+
+func getICMPFilter(s uintptr, opt *sockOpt) (*ICMPFilter, error) {
+	if opt.name < 1 || opt.typ != ssoTypeICMPFilter {
+		return nil, errOpNoSupport
+	}
+	var f ICMPFilter
+	l := uint32(sizeofICMPv6Filter)
+	if err := getsockopt(s, opt.level, opt.name, unsafe.Pointer(&f.icmpv6Filter), &l); err != nil {
+		return nil, os.NewSyscallError("getsockopt", err)
+	}
+	return &f, nil
+}
+
+func setICMPFilter(s uintptr, opt *sockOpt, f *ICMPFilter) error {
+	if opt.name < 1 || opt.typ != ssoTypeICMPFilter {
+		return errOpNoSupport
+	}
+	return os.NewSyscallError("setsockopt", setsockopt(s, opt.level, opt.name, unsafe.Pointer(&f.icmpv6Filter), sizeofICMPv6Filter))
+}
+
+func getMTUInfo(s uintptr, opt *sockOpt) (*net.Interface, int, error) {
+	if opt.name < 1 || opt.typ != ssoTypeMTUInfo {
+		return nil, 0, errOpNoSupport
+	}
+	var mi ipv6Mtuinfo
+	l := uint32(sizeofIPv6Mtuinfo)
+	if err := getsockopt(s, opt.level, opt.name, unsafe.Pointer(&mi), &l); err != nil {
+		return nil, 0, os.NewSyscallError("getsockopt", err)
+	}
+	if mi.Addr.Scope_id == 0 {
+		return nil, int(mi.Mtu), nil
+	}
+	ifi, err := net.InterfaceByIndex(int(mi.Addr.Scope_id))
+	if err != nil {
+		return nil, 0, err
+	}
+	return ifi, int(mi.Mtu), nil
+}
+
+func setGroup(s uintptr, opt *sockOpt, ifi *net.Interface, grp net.IP) error {
+	if opt.name < 1 {
+		return errOpNoSupport
+	}
+	switch opt.typ {
+	case ssoTypeIPMreq:
+		return setsockoptIPMreq(s, opt, ifi, grp)
+	case ssoTypeGroupReq:
+		return setsockoptGroupReq(s, opt, ifi, grp)
+	default:
+		return errOpNoSupport
+	}
+}
+
+func setSourceGroup(s uintptr, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error {
+	if opt.name < 1 || opt.typ != ssoTypeGroupSourceReq {
+		return errOpNoSupport
+	}
+	return setsockoptGroupSourceReq(s, opt, ifi, grp, src)
+}
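
These getters and setters back the exported sticky-option methods. One round trip through setInt/getInt via the public API, as a sketch:

package main

import (
	"log"
	"net"

	"golang.org/x/net/ipv6"
)

func main() {
	c, err := net.ListenPacket("udp6", "[::1]:0")
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	p := ipv6.NewPacketConn(c)
	if err := p.SetHopLimit(32); err != nil { // setInt on ssoHopLimit
		log.Fatal(err)
	}
	hl, err := p.HopLimit() // getInt on ssoHopLimit
	if err != nil {
		log.Fatal(err)
	}
	log.Println("unicast hop limit:", hl) // 32
}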
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go b/harvester/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go
new file mode 100644
index 0000000..1a88290
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_stub.go
@@ -0,0 +1,17 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !darwin,!freebsd,!linux,!solaris
+
+package ipv6
+
+import "net"
+
+func setsockoptGroupReq(s uintptr, opt *sockOpt, ifi *net.Interface, grp net.IP) error {
+	return errOpNoSupport
+}
+
+func setsockoptGroupSourceReq(s uintptr, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error {
+	return errOpNoSupport
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go b/harvester/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go
new file mode 100644
index 0000000..f3668ae
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sockopt_ssmreq_unix.go
@@ -0,0 +1,59 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd linux solaris
+
+package ipv6
+
+import (
+	"net"
+	"os"
+	"unsafe"
+)
+
+var freebsd32o64 bool
+
+func setsockoptGroupReq(s uintptr, opt *sockOpt, ifi *net.Interface, grp net.IP) error {
+	var gr groupReq
+	if ifi != nil {
+		gr.Interface = uint32(ifi.Index)
+	}
+	gr.setGroup(grp)
+	var p unsafe.Pointer
+	var l uint32
+	if freebsd32o64 {
+		var d [sizeofGroupReq + 4]byte
+		s := (*[sizeofGroupReq]byte)(unsafe.Pointer(&gr))
+		copy(d[:4], s[:4])
+		copy(d[8:], s[4:])
+		p = unsafe.Pointer(&d[0])
+		l = sizeofGroupReq + 4
+	} else {
+		p = unsafe.Pointer(&gr)
+		l = sizeofGroupReq
+	}
+	return os.NewSyscallError("setsockopt", setsockopt(s, opt.level, opt.name, p, l))
+}
+
+func setsockoptGroupSourceReq(s uintptr, opt *sockOpt, ifi *net.Interface, grp, src net.IP) error {
+	var gsr groupSourceReq
+	if ifi != nil {
+		gsr.Interface = uint32(ifi.Index)
+	}
+	gsr.setSourceGroup(grp, src)
+	var p unsafe.Pointer
+	var l uint32
+	if freebsd32o64 {
+		var d [sizeofGroupSourceReq + 4]byte
+		s := (*[sizeofGroupSourceReq]byte)(unsafe.Pointer(&gsr))
+		copy(d[:4], s[:4])
+		copy(d[8:], s[4:])
+		p = unsafe.Pointer(&d[0])
+		l = sizeofGroupSourceReq + 4
+	} else {
+		p = unsafe.Pointer(&gsr)
+		l = sizeofGroupSourceReq
+	}
+	return os.NewSyscallError("setsockopt", setsockopt(s, opt.level, opt.name, p, l))
+}
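
The freebsd32o64 branch compensates for an ABI mismatch when a 32-bit binary runs on a 64-bit FreeBSD kernel: the kernel aligns the embedded sockaddr_storage to 8 bytes, while the 32-bit struct packs it at offset 4, so the request is rebuilt with 4 pad bytes. A layout sketch, assuming the 386 sizes (sizeofGroupReq = 0x84 = 132):

// 32-bit userland group_req          64-bit kernel group_req
// [0:4]   Interface                  [0:4]   Interface
// [4:132] Group (sockaddr_storage)   [4:8]   implicit padding
//                                    [8:136] Group (sockaddr_storage)
//
// copy(d[:4], s[:4]) keeps Interface; copy(d[8:], s[4:]) shifts Group.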
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sockopt_stub.go b/harvester/vendor/golang.org/x/net/ipv6/sockopt_stub.go
new file mode 100644
index 0000000..6d59a00
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sockopt_stub.go
@@ -0,0 +1,13 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package ipv6
+
+import "net"
+
+func getMTUInfo(s uintptr, opt *sockOpt) (*net.Interface, int, error) {
+	return nil, 0, errOpNoSupport
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sys_bsd.go b/harvester/vendor/golang.org/x/net/ipv6/sys_bsd.go
new file mode 100644
index 0000000..c22f8ac
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sys_bsd.go
@@ -0,0 +1,56 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build dragonfly netbsd openbsd
+
+package ipv6
+
+import (
+	"net"
+	"syscall"
+
+	"golang.org/x/net/internal/iana"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},
+		ctlHopLimit:     {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},
+		ctlPacketInfo:   {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},
+		ctlNextHop:      {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop},
+		ctlPathMTU:      {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},
+	}
+
+	sockOpts = [ssoMax]sockOpt{
+		ssoTrafficClass:        {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt},
+		ssoHopLimit:            {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt},
+		ssoMulticastInterface:  {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface},
+		ssoMulticastHopLimit:   {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt},
+		ssoMulticastLoopback:   {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt},
+		ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt},
+		ssoReceiveHopLimit:     {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt},
+		ssoReceivePacketInfo:   {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt},
+		ssoReceivePathMTU:      {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt},
+		ssoPathMTU:             {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo},
+		ssoChecksum:            {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt},
+		ssoICMPFilter:          {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter},
+		ssoJoinGroup:           {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq},
+		ssoLeaveGroup:          {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq},
+	}
+)
+
+func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) {
+	sa.Len = sizeofSockaddrInet6
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], ip)
+	sa.Scope_id = uint32(i)
+}
+
+func (pi *inet6Pktinfo) setIfindex(i int) {
+	pi.Ifindex = uint32(i)
+}
+
+func (mreq *ipv6Mreq) setIfindex(i int) {
+	mreq.Interface = uint32(i)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sys_darwin.go b/harvester/vendor/golang.org/x/net/ipv6/sys_darwin.go
new file mode 100644
index 0000000..ffcc9d4
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sys_darwin.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"net"
+	"strconv"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlHopLimit:   {sysIPV6_2292HOPLIMIT, 4, marshal2292HopLimit, parseHopLimit},
+		ctlPacketInfo: {sysIPV6_2292PKTINFO, sizeofInet6Pktinfo, marshal2292PacketInfo, parsePacketInfo},
+	}
+
+	sockOpts = [ssoMax]sockOpt{
+		ssoHopLimit:           {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt},
+		ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface},
+		ssoMulticastHopLimit:  {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt},
+		ssoMulticastLoopback:  {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt},
+		ssoReceiveHopLimit:    {iana.ProtocolIPv6, sysIPV6_2292HOPLIMIT, ssoTypeInt},
+		ssoReceivePacketInfo:  {iana.ProtocolIPv6, sysIPV6_2292PKTINFO, ssoTypeInt},
+		ssoChecksum:           {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt},
+		ssoICMPFilter:         {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter},
+		ssoJoinGroup:          {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq},
+		ssoLeaveGroup:         {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq},
+	}
+)
+
+func init() {
+	// kern.osreldate appears to be hidden on recent OS X releases,
+	// so kern.osrelease is used instead.
+	s, err := syscall.Sysctl("kern.osrelease")
+	if err != nil {
+		return
+	}
+	ss := strings.Split(s, ".")
+	if len(ss) == 0 {
+		return
+	}
+	// The IP_PKTINFO and protocol-independent multicast API were
+	// introduced in OS X 10.7 (Darwin 11). But it looks like
+	// those features require OS X 10.8 (Darwin 12) or above.
+	// See http://support.apple.com/kb/HT1633.
+	if mjver, err := strconv.Atoi(ss[0]); err != nil || mjver < 12 {
+		return
+	}
+	ctlOpts[ctlTrafficClass] = ctlOpt{sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass}
+	ctlOpts[ctlHopLimit] = ctlOpt{sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit}
+	ctlOpts[ctlPacketInfo] = ctlOpt{sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo}
+	ctlOpts[ctlNextHop] = ctlOpt{sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop}
+	ctlOpts[ctlPathMTU] = ctlOpt{sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU}
+	sockOpts[ssoTrafficClass] = sockOpt{iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt}
+	sockOpts[ssoReceiveTrafficClass] = sockOpt{iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt}
+	sockOpts[ssoReceiveHopLimit] = sockOpt{iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt}
+	sockOpts[ssoReceivePacketInfo] = sockOpt{iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt}
+	sockOpts[ssoReceivePathMTU] = sockOpt{iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt}
+	sockOpts[ssoPathMTU] = sockOpt{iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo}
+	sockOpts[ssoJoinGroup] = sockOpt{iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq}
+	sockOpts[ssoLeaveGroup] = sockOpt{iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq}
+	sockOpts[ssoJoinSourceGroup] = sockOpt{iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq}
+	sockOpts[ssoLeaveSourceGroup] = sockOpt{iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq}
+	sockOpts[ssoBlockSourceGroup] = sockOpt{iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq}
+	sockOpts[ssoUnblockSourceGroup] = sockOpt{iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq}
+}
+
+func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) {
+	sa.Len = sizeofSockaddrInet6
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], ip)
+	sa.Scope_id = uint32(i)
+}
+
+func (pi *inet6Pktinfo) setIfindex(i int) {
+	pi.Ifindex = uint32(i)
+}
+
+func (mreq *ipv6Mreq) setIfindex(i int) {
+	mreq.Interface = uint32(i)
+}
+
+func (gr *groupReq) setGroup(grp net.IP) {
+	sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4))
+	sa.Len = sizeofSockaddrInet6
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], grp)
+}
+
+func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {
+	sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4))
+	sa.Len = sizeofSockaddrInet6
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], grp)
+	sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 132))
+	sa.Len = sizeofSockaddrInet6
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], src)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sys_freebsd.go b/harvester/vendor/golang.org/x/net/ipv6/sys_freebsd.go
new file mode 100644
index 0000000..fd5204b
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sys_freebsd.go
@@ -0,0 +1,91 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"net"
+	"runtime"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},
+		ctlHopLimit:     {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},
+		ctlPacketInfo:   {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},
+		ctlNextHop:      {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop},
+		ctlPathMTU:      {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},
+	}
+
+	sockOpts = [ssoMax]sockOpt{
+		ssoTrafficClass:        {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt},
+		ssoHopLimit:            {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt},
+		ssoMulticastInterface:  {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface},
+		ssoMulticastHopLimit:   {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt},
+		ssoMulticastLoopback:   {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt},
+		ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt},
+		ssoReceiveHopLimit:     {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt},
+		ssoReceivePacketInfo:   {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt},
+		ssoReceivePathMTU:      {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt},
+		ssoPathMTU:             {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo},
+		ssoChecksum:            {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt},
+		ssoICMPFilter:          {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter},
+		ssoJoinGroup:           {iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq},
+		ssoLeaveGroup:          {iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq},
+		ssoJoinSourceGroup:     {iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq},
+		ssoLeaveSourceGroup:    {iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq},
+		ssoBlockSourceGroup:    {iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq},
+		ssoUnblockSourceGroup:  {iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq},
+	}
+)
+
+func init() {
+	if runtime.GOOS == "freebsd" && runtime.GOARCH == "386" {
+		archs, _ := syscall.Sysctl("kern.supported_archs")
+		for _, s := range strings.Fields(archs) {
+			if s == "amd64" {
+				freebsd32o64 = true
+				break
+			}
+		}
+	}
+}
+
+func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) {
+	sa.Len = sizeofSockaddrInet6
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], ip)
+	sa.Scope_id = uint32(i)
+}
+
+func (pi *inet6Pktinfo) setIfindex(i int) {
+	pi.Ifindex = uint32(i)
+}
+
+func (mreq *ipv6Mreq) setIfindex(i int) {
+	mreq.Interface = uint32(i)
+}
+
+func (gr *groupReq) setGroup(grp net.IP) {
+	sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group))
+	sa.Len = sizeofSockaddrInet6
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], grp)
+}
+
+func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {
+	sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group))
+	sa.Len = sizeofSockaddrInet6
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], grp)
+	sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source))
+	sa.Len = sizeofSockaddrInet6
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], src)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sys_linux.go b/harvester/vendor/golang.org/x/net/ipv6/sys_linux.go
new file mode 100644
index 0000000..42f5f78
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sys_linux.go
@@ -0,0 +1,72 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"net"
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},
+		ctlHopLimit:     {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},
+		ctlPacketInfo:   {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},
+		ctlPathMTU:      {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},
+	}
+
+	sockOpts = [ssoMax]sockOpt{
+		ssoTrafficClass:        {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt},
+		ssoHopLimit:            {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt},
+		ssoMulticastInterface:  {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface},
+		ssoMulticastHopLimit:   {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt},
+		ssoMulticastLoopback:   {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt},
+		ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt},
+		ssoReceiveHopLimit:     {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt},
+		ssoReceivePacketInfo:   {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt},
+		ssoReceivePathMTU:      {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt},
+		ssoPathMTU:             {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo},
+		ssoChecksum:            {iana.ProtocolReserved, sysIPV6_CHECKSUM, ssoTypeInt},
+		ssoICMPFilter:          {iana.ProtocolIPv6ICMP, sysICMPV6_FILTER, ssoTypeICMPFilter},
+		ssoJoinGroup:           {iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq},
+		ssoLeaveGroup:          {iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq},
+		ssoJoinSourceGroup:     {iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq},
+		ssoLeaveSourceGroup:    {iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq},
+		ssoBlockSourceGroup:    {iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq},
+		ssoUnblockSourceGroup:  {iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq},
+	}
+)
+
+func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) {
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], ip)
+	sa.Scope_id = uint32(i)
+}
+
+func (pi *inet6Pktinfo) setIfindex(i int) {
+	pi.Ifindex = int32(i)
+}
+
+func (mreq *ipv6Mreq) setIfindex(i int) {
+	mreq.Ifindex = int32(i)
+}
+
+func (gr *groupReq) setGroup(grp net.IP) {
+	sa := (*sockaddrInet6)(unsafe.Pointer(&gr.Group))
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], grp)
+}
+
+func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {
+	sa := (*sockaddrInet6)(unsafe.Pointer(&gsr.Group))
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], grp)
+	sa = (*sockaddrInet6)(unsafe.Pointer(&gsr.Source))
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], src)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sys_linux_386.s b/harvester/vendor/golang.org/x/net/ipv6/sys_linux_386.s
new file mode 100644
index 0000000..b85551a
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sys_linux_386.s
@@ -0,0 +1,8 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT	·socketcall(SB),NOSPLIT,$0-36
+	JMP	syscall·socketcall(SB)
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sys_solaris.go b/harvester/vendor/golang.org/x/net/ipv6/sys_solaris.go
new file mode 100644
index 0000000..9bd2d66
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sys_solaris.go
@@ -0,0 +1,73 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"net"
+	"syscall"
+	"unsafe"
+
+	"golang.org/x/net/internal/iana"
+)
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{
+		ctlTrafficClass: {sysIPV6_TCLASS, 4, marshalTrafficClass, parseTrafficClass},
+		ctlHopLimit:     {sysIPV6_HOPLIMIT, 4, marshalHopLimit, parseHopLimit},
+		ctlPacketInfo:   {sysIPV6_PKTINFO, sizeofInet6Pktinfo, marshalPacketInfo, parsePacketInfo},
+		ctlNextHop:      {sysIPV6_NEXTHOP, sizeofSockaddrInet6, marshalNextHop, parseNextHop},
+		ctlPathMTU:      {sysIPV6_PATHMTU, sizeofIPv6Mtuinfo, marshalPathMTU, parsePathMTU},
+	}
+
+	sockOpts = [ssoMax]sockOpt{
+		ssoTrafficClass:        {iana.ProtocolIPv6, sysIPV6_TCLASS, ssoTypeInt},
+		ssoHopLimit:            {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt},
+		ssoMulticastInterface:  {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface},
+		ssoMulticastHopLimit:   {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt},
+		ssoMulticastLoopback:   {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt},
+		ssoReceiveTrafficClass: {iana.ProtocolIPv6, sysIPV6_RECVTCLASS, ssoTypeInt},
+		ssoReceiveHopLimit:     {iana.ProtocolIPv6, sysIPV6_RECVHOPLIMIT, ssoTypeInt},
+		ssoReceivePacketInfo:   {iana.ProtocolIPv6, sysIPV6_RECVPKTINFO, ssoTypeInt},
+		ssoReceivePathMTU:      {iana.ProtocolIPv6, sysIPV6_RECVPATHMTU, ssoTypeInt},
+		ssoPathMTU:             {iana.ProtocolIPv6, sysIPV6_PATHMTU, ssoTypeMTUInfo},
+		ssoChecksum:            {iana.ProtocolIPv6, sysIPV6_CHECKSUM, ssoTypeInt},
+		ssoICMPFilter:          {iana.ProtocolIPv6ICMP, sysICMP6_FILTER, ssoTypeICMPFilter},
+		ssoJoinGroup:           {iana.ProtocolIPv6, sysMCAST_JOIN_GROUP, ssoTypeGroupReq},
+		ssoLeaveGroup:          {iana.ProtocolIPv6, sysMCAST_LEAVE_GROUP, ssoTypeGroupReq},
+		ssoJoinSourceGroup:     {iana.ProtocolIPv6, sysMCAST_JOIN_SOURCE_GROUP, ssoTypeGroupSourceReq},
+		ssoLeaveSourceGroup:    {iana.ProtocolIPv6, sysMCAST_LEAVE_SOURCE_GROUP, ssoTypeGroupSourceReq},
+		ssoBlockSourceGroup:    {iana.ProtocolIPv6, sysMCAST_BLOCK_SOURCE, ssoTypeGroupSourceReq},
+		ssoUnblockSourceGroup:  {iana.ProtocolIPv6, sysMCAST_UNBLOCK_SOURCE, ssoTypeGroupSourceReq},
+	}
+)
+
+func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) {
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], ip)
+	sa.Scope_id = uint32(i)
+}
+
+func (pi *inet6Pktinfo) setIfindex(i int) {
+	pi.Ifindex = uint32(i)
+}
+
+func (mreq *ipv6Mreq) setIfindex(i int) {
+	mreq.Interface = uint32(i)
+}
+
+func (gr *groupReq) setGroup(grp net.IP) {
+	sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gr)) + 4))
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], grp)
+}
+
+func (gsr *groupSourceReq) setSourceGroup(grp, src net.IP) {
+	sa := (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 4))
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], grp)
+	sa = (*sockaddrInet6)(unsafe.Pointer(uintptr(unsafe.Pointer(gsr)) + 260))
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], src)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sys_solaris_amd64.s b/harvester/vendor/golang.org/x/net/ipv6/sys_solaris_amd64.s
new file mode 100644
index 0000000..39d76af
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sys_solaris_amd64.s
@@ -0,0 +1,8 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·sysvicall6(SB),NOSPLIT,$0-88
+	JMP	syscall·sysvicall6(SB)
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sys_stub.go b/harvester/vendor/golang.org/x/net/ipv6/sys_stub.go
new file mode 100644
index 0000000..7663bfc
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sys_stub.go
@@ -0,0 +1,13 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build nacl plan9
+
+package ipv6
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{}
+
+	sockOpts = [ssoMax]sockOpt{}
+)
diff --git a/harvester/vendor/golang.org/x/net/ipv6/sys_windows.go b/harvester/vendor/golang.org/x/net/ipv6/sys_windows.go
new file mode 100644
index 0000000..003c507
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/sys_windows.go
@@ -0,0 +1,74 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"net"
+	"syscall"
+
+	"golang.org/x/net/internal/iana"
+)
+
+const (
+	// See ws2tcpip.h.
+	sysIPV6_UNICAST_HOPS   = 0x4
+	sysIPV6_MULTICAST_IF   = 0x9
+	sysIPV6_MULTICAST_HOPS = 0xa
+	sysIPV6_MULTICAST_LOOP = 0xb
+	sysIPV6_JOIN_GROUP     = 0xc
+	sysIPV6_LEAVE_GROUP    = 0xd
+	sysIPV6_PKTINFO        = 0x13
+
+	sizeofSockaddrInet6 = 0x1c
+
+	sizeofIPv6Mreq     = 0x14
+	sizeofIPv6Mtuinfo  = 0x20
+	sizeofICMPv6Filter = 0
+)
+
+type sockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type icmpv6Filter struct {
+	// TODO(mikio): implement this
+}
+
+var (
+	ctlOpts = [ctlMax]ctlOpt{}
+
+	sockOpts = [ssoMax]sockOpt{
+		ssoHopLimit:           {iana.ProtocolIPv6, sysIPV6_UNICAST_HOPS, ssoTypeInt},
+		ssoMulticastInterface: {iana.ProtocolIPv6, sysIPV6_MULTICAST_IF, ssoTypeInterface},
+		ssoMulticastHopLimit:  {iana.ProtocolIPv6, sysIPV6_MULTICAST_HOPS, ssoTypeInt},
+		ssoMulticastLoopback:  {iana.ProtocolIPv6, sysIPV6_MULTICAST_LOOP, ssoTypeInt},
+		ssoJoinGroup:          {iana.ProtocolIPv6, sysIPV6_JOIN_GROUP, ssoTypeIPMreq},
+		ssoLeaveGroup:         {iana.ProtocolIPv6, sysIPV6_LEAVE_GROUP, ssoTypeIPMreq},
+	}
+)
+
+func (sa *sockaddrInet6) setSockaddr(ip net.IP, i int) {
+	sa.Family = syscall.AF_INET6
+	copy(sa.Addr[:], ip)
+	sa.Scope_id = uint32(i)
+}
+
+func (mreq *ipv6Mreq) setIfindex(i int) {
+	mreq.Interface = uint32(i)
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/syscall_linux_386.go b/harvester/vendor/golang.org/x/net/ipv6/syscall_linux_386.go
new file mode 100644
index 0000000..5184dbe
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/syscall_linux_386.go
@@ -0,0 +1,31 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const (
+	sysGETSOCKOPT = 0xf
+	sysSETSOCKOPT = 0xe
+)
+
+func socketcall(call int, a0, a1, a2, a3, a4, a5 uintptr) (int, syscall.Errno)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+	if _, errno := socketcall(sysGETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 {
+		return error(errno)
+	}
+	return nil
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+	if _, errno := socketcall(sysSETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 {
+		return error(errno)
+	}
+	return nil
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/syscall_solaris.go b/harvester/vendor/golang.org/x/net/ipv6/syscall_solaris.go
new file mode 100644
index 0000000..2a5c8ee
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/syscall_solaris.go
@@ -0,0 +1,38 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+//go:cgo_import_dynamic libc___xnet_getsockopt __xnet_getsockopt "libsocket.so"
+//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so"
+
+//go:linkname procGetsockopt libc___xnet_getsockopt
+//go:linkname procSetsockopt libc_setsockopt
+
+var (
+	procGetsockopt uintptr
+	procSetsockopt uintptr
+)
+
+func sysvicall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (uintptr, uintptr, syscall.Errno)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+	_, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procGetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0)
+	if errno != 0 {
+		return error(errno)
+	}
+	return nil
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+	if _, _, errno := sysvicall6(uintptr(unsafe.Pointer(&procSetsockopt)), 5, s, uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 {
+		return error(errno)
+	}
+	return nil
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/syscall_unix.go b/harvester/vendor/golang.org/x/net/ipv6/syscall_unix.go
new file mode 100644
index 0000000..58a75b5
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/syscall_unix.go
@@ -0,0 +1,26 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin dragonfly freebsd linux,!386 netbsd openbsd
+
+package ipv6
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+	if _, _, errno := syscall.Syscall6(syscall.SYS_GETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(unsafe.Pointer(l)), 0); errno != 0 {
+		return error(errno)
+	}
+	return nil
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+	if _, _, errno := syscall.Syscall6(syscall.SYS_SETSOCKOPT, s, uintptr(level), uintptr(name), uintptr(v), uintptr(l), 0); errno != 0 {
+		return error(errno)
+	}
+	return nil
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/syscall_windows.go b/harvester/vendor/golang.org/x/net/ipv6/syscall_windows.go
new file mode 100644
index 0000000..c1f649d
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/syscall_windows.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ipv6
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+func getsockopt(s uintptr, level, name int, v unsafe.Pointer, l *uint32) error {
+	return syscall.Getsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(v), (*int32)(unsafe.Pointer(l)))
+}
+
+func setsockopt(s uintptr, level, name int, v unsafe.Pointer, l uint32) error {
+	return syscall.Setsockopt(syscall.Handle(s), int32(level), int32(name), (*byte)(v), int32(l))
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_darwin.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_darwin.go
new file mode 100644
index 0000000..6aab1df
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_darwin.go
@@ -0,0 +1,131 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_darwin.go
+
+package ipv6
+
+const (
+	sysIPV6_UNICAST_HOPS   = 0x4
+	sysIPV6_MULTICAST_IF   = 0x9
+	sysIPV6_MULTICAST_HOPS = 0xa
+	sysIPV6_MULTICAST_LOOP = 0xb
+	sysIPV6_JOIN_GROUP     = 0xc
+	sysIPV6_LEAVE_GROUP    = 0xd
+
+	sysIPV6_PORTRANGE    = 0xe
+	sysICMP6_FILTER      = 0x12
+	sysIPV6_2292PKTINFO  = 0x13
+	sysIPV6_2292HOPLIMIT = 0x14
+	sysIPV6_2292NEXTHOP  = 0x15
+	sysIPV6_2292HOPOPTS  = 0x16
+	sysIPV6_2292DSTOPTS  = 0x17
+	sysIPV6_2292RTHDR    = 0x18
+
+	sysIPV6_2292PKTOPTIONS = 0x19
+
+	sysIPV6_CHECKSUM = 0x1a
+	sysIPV6_V6ONLY   = 0x1b
+
+	sysIPV6_IPSEC_POLICY = 0x1c
+
+	sysIPV6_RECVTCLASS = 0x23
+	sysIPV6_TCLASS     = 0x24
+
+	sysIPV6_RTHDRDSTOPTS = 0x39
+
+	sysIPV6_RECVPKTINFO = 0x3d
+
+	sysIPV6_RECVHOPLIMIT = 0x25
+	sysIPV6_RECVRTHDR    = 0x26
+	sysIPV6_RECVHOPOPTS  = 0x27
+	sysIPV6_RECVDSTOPTS  = 0x28
+
+	sysIPV6_USE_MIN_MTU = 0x2a
+	sysIPV6_RECVPATHMTU = 0x2b
+
+	sysIPV6_PATHMTU = 0x2c
+
+	sysIPV6_PKTINFO  = 0x2e
+	sysIPV6_HOPLIMIT = 0x2f
+	sysIPV6_NEXTHOP  = 0x30
+	sysIPV6_HOPOPTS  = 0x31
+	sysIPV6_DSTOPTS  = 0x32
+	sysIPV6_RTHDR    = 0x33
+
+	sysIPV6_AUTOFLOWLABEL = 0x3b
+
+	sysIPV6_DONTFRAG = 0x3e
+
+	sysIPV6_PREFER_TEMPADDR = 0x3f
+
+	sysIPV6_MSFILTER            = 0x4a
+	sysMCAST_JOIN_GROUP         = 0x50
+	sysMCAST_LEAVE_GROUP        = 0x51
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x52
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x53
+	sysMCAST_BLOCK_SOURCE       = 0x54
+	sysMCAST_UNBLOCK_SOURCE     = 0x55
+
+	sysIPV6_BOUND_IF = 0x7d
+
+	sysIPV6_PORTRANGE_DEFAULT = 0x0
+	sysIPV6_PORTRANGE_HIGH    = 0x1
+	sysIPV6_PORTRANGE_LOW     = 0x2
+
+	sizeofSockaddrStorage = 0x80
+	sizeofSockaddrInet6   = 0x1c
+	sizeofInet6Pktinfo    = 0x14
+	sizeofIPv6Mtuinfo     = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type sockaddrStorage struct {
+	Len         uint8
+	Family      uint8
+	X__ss_pad1  [6]int8
+	X__ss_align int64
+	X__ss_pad2  [112]int8
+}
+
+type sockaddrInet6 struct {
+	Len      uint8
+	Family   uint8
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type icmpv6Filter struct {
+	Filt [8]uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [128]byte
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [128]byte
+	Pad_cgo_1 [128]byte
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go
new file mode 100644
index 0000000..d2de804
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_dragonfly.go
@@ -0,0 +1,88 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_dragonfly.go
+
+package ipv6
+
+const (
+	sysIPV6_UNICAST_HOPS   = 0x4
+	sysIPV6_MULTICAST_IF   = 0x9
+	sysIPV6_MULTICAST_HOPS = 0xa
+	sysIPV6_MULTICAST_LOOP = 0xb
+	sysIPV6_JOIN_GROUP     = 0xc
+	sysIPV6_LEAVE_GROUP    = 0xd
+	sysIPV6_PORTRANGE      = 0xe
+	sysICMP6_FILTER        = 0x12
+
+	sysIPV6_CHECKSUM = 0x1a
+	sysIPV6_V6ONLY   = 0x1b
+
+	sysIPV6_IPSEC_POLICY = 0x1c
+
+	sysIPV6_RTHDRDSTOPTS = 0x23
+	sysIPV6_RECVPKTINFO  = 0x24
+	sysIPV6_RECVHOPLIMIT = 0x25
+	sysIPV6_RECVRTHDR    = 0x26
+	sysIPV6_RECVHOPOPTS  = 0x27
+	sysIPV6_RECVDSTOPTS  = 0x28
+
+	sysIPV6_USE_MIN_MTU = 0x2a
+	sysIPV6_RECVPATHMTU = 0x2b
+
+	sysIPV6_PATHMTU = 0x2c
+
+	sysIPV6_PKTINFO  = 0x2e
+	sysIPV6_HOPLIMIT = 0x2f
+	sysIPV6_NEXTHOP  = 0x30
+	sysIPV6_HOPOPTS  = 0x31
+	sysIPV6_DSTOPTS  = 0x32
+	sysIPV6_RTHDR    = 0x33
+
+	sysIPV6_RECVTCLASS = 0x39
+
+	sysIPV6_AUTOFLOWLABEL = 0x3b
+
+	sysIPV6_TCLASS   = 0x3d
+	sysIPV6_DONTFRAG = 0x3e
+
+	sysIPV6_PREFER_TEMPADDR = 0x3f
+
+	sysIPV6_PORTRANGE_DEFAULT = 0x0
+	sysIPV6_PORTRANGE_HIGH    = 0x1
+	sysIPV6_PORTRANGE_LOW     = 0x2
+
+	sizeofSockaddrInet6 = 0x1c
+	sizeofInet6Pktinfo  = 0x14
+	sizeofIPv6Mtuinfo   = 0x20
+
+	sizeofIPv6Mreq = 0x14
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type sockaddrInet6 struct {
+	Len      uint8
+	Family   uint8
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type icmpv6Filter struct {
+	Filt [8]uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go
new file mode 100644
index 0000000..919e572
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_freebsd_386.go
@@ -0,0 +1,122 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package ipv6
+
+const (
+	sysIPV6_UNICAST_HOPS   = 0x4
+	sysIPV6_MULTICAST_IF   = 0x9
+	sysIPV6_MULTICAST_HOPS = 0xa
+	sysIPV6_MULTICAST_LOOP = 0xb
+	sysIPV6_JOIN_GROUP     = 0xc
+	sysIPV6_LEAVE_GROUP    = 0xd
+	sysIPV6_PORTRANGE      = 0xe
+	sysICMP6_FILTER        = 0x12
+
+	sysIPV6_CHECKSUM = 0x1a
+	sysIPV6_V6ONLY   = 0x1b
+
+	sysIPV6_IPSEC_POLICY = 0x1c
+
+	sysIPV6_RTHDRDSTOPTS = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x24
+	sysIPV6_RECVHOPLIMIT = 0x25
+	sysIPV6_RECVRTHDR    = 0x26
+	sysIPV6_RECVHOPOPTS  = 0x27
+	sysIPV6_RECVDSTOPTS  = 0x28
+
+	sysIPV6_USE_MIN_MTU = 0x2a
+	sysIPV6_RECVPATHMTU = 0x2b
+
+	sysIPV6_PATHMTU = 0x2c
+
+	sysIPV6_PKTINFO  = 0x2e
+	sysIPV6_HOPLIMIT = 0x2f
+	sysIPV6_NEXTHOP  = 0x30
+	sysIPV6_HOPOPTS  = 0x31
+	sysIPV6_DSTOPTS  = 0x32
+	sysIPV6_RTHDR    = 0x33
+
+	sysIPV6_RECVTCLASS = 0x39
+
+	sysIPV6_AUTOFLOWLABEL = 0x3b
+
+	sysIPV6_TCLASS   = 0x3d
+	sysIPV6_DONTFRAG = 0x3e
+
+	sysIPV6_PREFER_TEMPADDR = 0x3f
+
+	sysIPV6_BINDANY = 0x40
+
+	sysIPV6_MSFILTER = 0x4a
+
+	sysMCAST_JOIN_GROUP         = 0x50
+	sysMCAST_LEAVE_GROUP        = 0x51
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x52
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x53
+	sysMCAST_BLOCK_SOURCE       = 0x54
+	sysMCAST_UNBLOCK_SOURCE     = 0x55
+
+	sysIPV6_PORTRANGE_DEFAULT = 0x0
+	sysIPV6_PORTRANGE_HIGH    = 0x1
+	sysIPV6_PORTRANGE_LOW     = 0x2
+
+	sizeofSockaddrStorage = 0x80
+	sizeofSockaddrInet6   = 0x1c
+	sizeofInet6Pktinfo    = 0x14
+	sizeofIPv6Mtuinfo     = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type sockaddrStorage struct {
+	Len         uint8
+	Family      uint8
+	X__ss_pad1  [6]int8
+	X__ss_align int64
+	X__ss_pad2  [112]int8
+}
+
+type sockaddrInet6 struct {
+	Len      uint8
+	Family   uint8
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Group     sockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Group     sockaddrStorage
+	Source    sockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Filt [8]uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go
new file mode 100644
index 0000000..cb8141f
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_freebsd_amd64.go
@@ -0,0 +1,124 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package ipv6
+
+const (
+	sysIPV6_UNICAST_HOPS   = 0x4
+	sysIPV6_MULTICAST_IF   = 0x9
+	sysIPV6_MULTICAST_HOPS = 0xa
+	sysIPV6_MULTICAST_LOOP = 0xb
+	sysIPV6_JOIN_GROUP     = 0xc
+	sysIPV6_LEAVE_GROUP    = 0xd
+	sysIPV6_PORTRANGE      = 0xe
+	sysICMP6_FILTER        = 0x12
+
+	sysIPV6_CHECKSUM = 0x1a
+	sysIPV6_V6ONLY   = 0x1b
+
+	sysIPV6_IPSEC_POLICY = 0x1c
+
+	sysIPV6_RTHDRDSTOPTS = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x24
+	sysIPV6_RECVHOPLIMIT = 0x25
+	sysIPV6_RECVRTHDR    = 0x26
+	sysIPV6_RECVHOPOPTS  = 0x27
+	sysIPV6_RECVDSTOPTS  = 0x28
+
+	sysIPV6_USE_MIN_MTU = 0x2a
+	sysIPV6_RECVPATHMTU = 0x2b
+
+	sysIPV6_PATHMTU = 0x2c
+
+	sysIPV6_PKTINFO  = 0x2e
+	sysIPV6_HOPLIMIT = 0x2f
+	sysIPV6_NEXTHOP  = 0x30
+	sysIPV6_HOPOPTS  = 0x31
+	sysIPV6_DSTOPTS  = 0x32
+	sysIPV6_RTHDR    = 0x33
+
+	sysIPV6_RECVTCLASS = 0x39
+
+	sysIPV6_AUTOFLOWLABEL = 0x3b
+
+	sysIPV6_TCLASS   = 0x3d
+	sysIPV6_DONTFRAG = 0x3e
+
+	sysIPV6_PREFER_TEMPADDR = 0x3f
+
+	sysIPV6_BINDANY = 0x40
+
+	sysIPV6_MSFILTER = 0x4a
+
+	sysMCAST_JOIN_GROUP         = 0x50
+	sysMCAST_LEAVE_GROUP        = 0x51
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x52
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x53
+	sysMCAST_BLOCK_SOURCE       = 0x54
+	sysMCAST_UNBLOCK_SOURCE     = 0x55
+
+	sysIPV6_PORTRANGE_DEFAULT = 0x0
+	sysIPV6_PORTRANGE_HIGH    = 0x1
+	sysIPV6_PORTRANGE_LOW     = 0x2
+
+	sizeofSockaddrStorage = 0x80
+	sizeofSockaddrInet6   = 0x1c
+	sizeofInet6Pktinfo    = 0x14
+	sizeofIPv6Mtuinfo     = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type sockaddrStorage struct {
+	Len         uint8
+	Family      uint8
+	X__ss_pad1  [6]int8
+	X__ss_align int64
+	X__ss_pad2  [112]int8
+}
+
+type sockaddrInet6 struct {
+	Len      uint8
+	Family   uint8
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     sockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     sockaddrStorage
+	Source    sockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Filt [8]uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go
new file mode 100644
index 0000000..cb8141f
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_freebsd_arm.go
@@ -0,0 +1,124 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_freebsd.go
+
+package ipv6
+
+const (
+	sysIPV6_UNICAST_HOPS   = 0x4
+	sysIPV6_MULTICAST_IF   = 0x9
+	sysIPV6_MULTICAST_HOPS = 0xa
+	sysIPV6_MULTICAST_LOOP = 0xb
+	sysIPV6_JOIN_GROUP     = 0xc
+	sysIPV6_LEAVE_GROUP    = 0xd
+	sysIPV6_PORTRANGE      = 0xe
+	sysICMP6_FILTER        = 0x12
+
+	sysIPV6_CHECKSUM = 0x1a
+	sysIPV6_V6ONLY   = 0x1b
+
+	sysIPV6_IPSEC_POLICY = 0x1c
+
+	sysIPV6_RTHDRDSTOPTS = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x24
+	sysIPV6_RECVHOPLIMIT = 0x25
+	sysIPV6_RECVRTHDR    = 0x26
+	sysIPV6_RECVHOPOPTS  = 0x27
+	sysIPV6_RECVDSTOPTS  = 0x28
+
+	sysIPV6_USE_MIN_MTU = 0x2a
+	sysIPV6_RECVPATHMTU = 0x2b
+
+	sysIPV6_PATHMTU = 0x2c
+
+	sysIPV6_PKTINFO  = 0x2e
+	sysIPV6_HOPLIMIT = 0x2f
+	sysIPV6_NEXTHOP  = 0x30
+	sysIPV6_HOPOPTS  = 0x31
+	sysIPV6_DSTOPTS  = 0x32
+	sysIPV6_RTHDR    = 0x33
+
+	sysIPV6_RECVTCLASS = 0x39
+
+	sysIPV6_AUTOFLOWLABEL = 0x3b
+
+	sysIPV6_TCLASS   = 0x3d
+	sysIPV6_DONTFRAG = 0x3e
+
+	sysIPV6_PREFER_TEMPADDR = 0x3f
+
+	sysIPV6_BINDANY = 0x40
+
+	sysIPV6_MSFILTER = 0x4a
+
+	sysMCAST_JOIN_GROUP         = 0x50
+	sysMCAST_LEAVE_GROUP        = 0x51
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x52
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x53
+	sysMCAST_BLOCK_SOURCE       = 0x54
+	sysMCAST_UNBLOCK_SOURCE     = 0x55
+
+	sysIPV6_PORTRANGE_DEFAULT = 0x0
+	sysIPV6_PORTRANGE_HIGH    = 0x1
+	sysIPV6_PORTRANGE_LOW     = 0x2
+
+	sizeofSockaddrStorage = 0x80
+	sizeofSockaddrInet6   = 0x1c
+	sizeofInet6Pktinfo    = 0x14
+	sizeofIPv6Mtuinfo     = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type sockaddrStorage struct {
+	Len         uint8
+	Family      uint8
+	X__ss_pad1  [6]int8
+	X__ss_align int64
+	X__ss_pad2  [112]int8
+}
+
+type sockaddrInet6 struct {
+	Len      uint8
+	Family   uint8
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     sockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     sockaddrStorage
+	Source    sockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Filt [8]uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_386.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_386.go
new file mode 100644
index 0000000..f5a4109
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_386.go
@@ -0,0 +1,168 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+	sysIPV6_ADDRFORM       = 0x1
+	sysIPV6_2292PKTINFO    = 0x2
+	sysIPV6_2292HOPOPTS    = 0x3
+	sysIPV6_2292DSTOPTS    = 0x4
+	sysIPV6_2292RTHDR      = 0x5
+	sysIPV6_2292PKTOPTIONS = 0x6
+	sysIPV6_CHECKSUM       = 0x7
+	sysIPV6_2292HOPLIMIT   = 0x8
+	sysIPV6_NEXTHOP        = 0x9
+	sysIPV6_FLOWINFO       = 0xb
+
+	sysIPV6_UNICAST_HOPS        = 0x10
+	sysIPV6_MULTICAST_IF        = 0x11
+	sysIPV6_MULTICAST_HOPS      = 0x12
+	sysIPV6_MULTICAST_LOOP      = 0x13
+	sysIPV6_ADD_MEMBERSHIP      = 0x14
+	sysIPV6_DROP_MEMBERSHIP     = 0x15
+	sysMCAST_JOIN_GROUP         = 0x2a
+	sysMCAST_LEAVE_GROUP        = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_MSFILTER           = 0x30
+	sysIPV6_ROUTER_ALERT        = 0x16
+	sysIPV6_MTU_DISCOVER        = 0x17
+	sysIPV6_MTU                 = 0x18
+	sysIPV6_RECVERR             = 0x19
+	sysIPV6_V6ONLY              = 0x1a
+	sysIPV6_JOIN_ANYCAST        = 0x1b
+	sysIPV6_LEAVE_ANYCAST       = 0x1c
+
+	sysIPV6_FLOWLABEL_MGR = 0x20
+	sysIPV6_FLOWINFO_SEND = 0x21
+
+	sysIPV6_IPSEC_POLICY = 0x22
+	sysIPV6_XFRM_POLICY  = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x31
+	sysIPV6_PKTINFO      = 0x32
+	sysIPV6_RECVHOPLIMIT = 0x33
+	sysIPV6_HOPLIMIT     = 0x34
+	sysIPV6_RECVHOPOPTS  = 0x35
+	sysIPV6_HOPOPTS      = 0x36
+	sysIPV6_RTHDRDSTOPTS = 0x37
+	sysIPV6_RECVRTHDR    = 0x38
+	sysIPV6_RTHDR        = 0x39
+	sysIPV6_RECVDSTOPTS  = 0x3a
+	sysIPV6_DSTOPTS      = 0x3b
+	sysIPV6_RECVPATHMTU  = 0x3c
+	sysIPV6_PATHMTU      = 0x3d
+	sysIPV6_DONTFRAG     = 0x3e
+
+	sysIPV6_RECVTCLASS = 0x42
+	sysIPV6_TCLASS     = 0x43
+
+	sysIPV6_ADDR_PREFERENCES = 0x48
+
+	sysIPV6_PREFER_SRC_TMP            = 0x1
+	sysIPV6_PREFER_SRC_PUBLIC         = 0x2
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+	sysIPV6_PREFER_SRC_COA            = 0x4
+	sysIPV6_PREFER_SRC_HOME           = 0x400
+	sysIPV6_PREFER_SRC_CGA            = 0x8
+	sysIPV6_PREFER_SRC_NONCGA         = 0x800
+
+	sysIPV6_MINHOPCOUNT = 0x49
+
+	sysIPV6_ORIGDSTADDR     = 0x4a
+	sysIPV6_RECVORIGDSTADDR = 0x4a
+	sysIPV6_TRANSPARENT     = 0x4b
+	sysIPV6_UNICAST_IF      = 0x4c
+
+	sysICMPV6_FILTER = 0x1
+
+	sysICMPV6_FILTER_BLOCK       = 0x1
+	sysICMPV6_FILTER_PASS        = 0x2
+	sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+	sysICMPV6_FILTER_PASSONLY    = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet6         = 0x1c
+	sizeofInet6Pktinfo          = 0x14
+	sizeofIPv6Mtuinfo           = 0x20
+	sizeofIPv6FlowlabelReq      = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex int32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6FlowlabelReq struct {
+	Dst        [16]byte /* in6_addr */
+	Label      uint32
+	Action     uint8
+	Share      uint8
+	Flags      uint16
+	Expires    uint16
+	Linger     uint16
+	X__flr_pad uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Ifindex   int32
+}
+
+type groupReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Data [8]uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [2]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go
new file mode 100644
index 0000000..f9376b6
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_amd64.go
@@ -0,0 +1,170 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+	sysIPV6_ADDRFORM       = 0x1
+	sysIPV6_2292PKTINFO    = 0x2
+	sysIPV6_2292HOPOPTS    = 0x3
+	sysIPV6_2292DSTOPTS    = 0x4
+	sysIPV6_2292RTHDR      = 0x5
+	sysIPV6_2292PKTOPTIONS = 0x6
+	sysIPV6_CHECKSUM       = 0x7
+	sysIPV6_2292HOPLIMIT   = 0x8
+	sysIPV6_NEXTHOP        = 0x9
+	sysIPV6_FLOWINFO       = 0xb
+
+	sysIPV6_UNICAST_HOPS        = 0x10
+	sysIPV6_MULTICAST_IF        = 0x11
+	sysIPV6_MULTICAST_HOPS      = 0x12
+	sysIPV6_MULTICAST_LOOP      = 0x13
+	sysIPV6_ADD_MEMBERSHIP      = 0x14
+	sysIPV6_DROP_MEMBERSHIP     = 0x15
+	sysMCAST_JOIN_GROUP         = 0x2a
+	sysMCAST_LEAVE_GROUP        = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_MSFILTER           = 0x30
+	sysIPV6_ROUTER_ALERT        = 0x16
+	sysIPV6_MTU_DISCOVER        = 0x17
+	sysIPV6_MTU                 = 0x18
+	sysIPV6_RECVERR             = 0x19
+	sysIPV6_V6ONLY              = 0x1a
+	sysIPV6_JOIN_ANYCAST        = 0x1b
+	sysIPV6_LEAVE_ANYCAST       = 0x1c
+
+	sysIPV6_FLOWLABEL_MGR = 0x20
+	sysIPV6_FLOWINFO_SEND = 0x21
+
+	sysIPV6_IPSEC_POLICY = 0x22
+	sysIPV6_XFRM_POLICY  = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x31
+	sysIPV6_PKTINFO      = 0x32
+	sysIPV6_RECVHOPLIMIT = 0x33
+	sysIPV6_HOPLIMIT     = 0x34
+	sysIPV6_RECVHOPOPTS  = 0x35
+	sysIPV6_HOPOPTS      = 0x36
+	sysIPV6_RTHDRDSTOPTS = 0x37
+	sysIPV6_RECVRTHDR    = 0x38
+	sysIPV6_RTHDR        = 0x39
+	sysIPV6_RECVDSTOPTS  = 0x3a
+	sysIPV6_DSTOPTS      = 0x3b
+	sysIPV6_RECVPATHMTU  = 0x3c
+	sysIPV6_PATHMTU      = 0x3d
+	sysIPV6_DONTFRAG     = 0x3e
+
+	sysIPV6_RECVTCLASS = 0x42
+	sysIPV6_TCLASS     = 0x43
+
+	sysIPV6_ADDR_PREFERENCES = 0x48
+
+	sysIPV6_PREFER_SRC_TMP            = 0x1
+	sysIPV6_PREFER_SRC_PUBLIC         = 0x2
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+	sysIPV6_PREFER_SRC_COA            = 0x4
+	sysIPV6_PREFER_SRC_HOME           = 0x400
+	sysIPV6_PREFER_SRC_CGA            = 0x8
+	sysIPV6_PREFER_SRC_NONCGA         = 0x800
+
+	sysIPV6_MINHOPCOUNT = 0x49
+
+	sysIPV6_ORIGDSTADDR     = 0x4a
+	sysIPV6_RECVORIGDSTADDR = 0x4a
+	sysIPV6_TRANSPARENT     = 0x4b
+	sysIPV6_UNICAST_IF      = 0x4c
+
+	sysICMPV6_FILTER = 0x1
+
+	sysICMPV6_FILTER_BLOCK       = 0x1
+	sysICMPV6_FILTER_PASS        = 0x2
+	sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+	sysICMPV6_FILTER_PASSONLY    = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet6         = 0x1c
+	sizeofInet6Pktinfo          = 0x14
+	sizeofIPv6Mtuinfo           = 0x20
+	sizeofIPv6FlowlabelReq      = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex int32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6FlowlabelReq struct {
+	Dst        [16]byte /* in6_addr */
+	Label      uint32
+	Action     uint8
+	Share      uint8
+	Flags      uint16
+	Expires    uint16
+	Linger     uint16
+	X__flr_pad uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Ifindex   int32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Data [8]uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go
new file mode 100644
index 0000000..f5a4109
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_arm.go
@@ -0,0 +1,168 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+	sysIPV6_ADDRFORM       = 0x1
+	sysIPV6_2292PKTINFO    = 0x2
+	sysIPV6_2292HOPOPTS    = 0x3
+	sysIPV6_2292DSTOPTS    = 0x4
+	sysIPV6_2292RTHDR      = 0x5
+	sysIPV6_2292PKTOPTIONS = 0x6
+	sysIPV6_CHECKSUM       = 0x7
+	sysIPV6_2292HOPLIMIT   = 0x8
+	sysIPV6_NEXTHOP        = 0x9
+	sysIPV6_FLOWINFO       = 0xb
+
+	sysIPV6_UNICAST_HOPS        = 0x10
+	sysIPV6_MULTICAST_IF        = 0x11
+	sysIPV6_MULTICAST_HOPS      = 0x12
+	sysIPV6_MULTICAST_LOOP      = 0x13
+	sysIPV6_ADD_MEMBERSHIP      = 0x14
+	sysIPV6_DROP_MEMBERSHIP     = 0x15
+	sysMCAST_JOIN_GROUP         = 0x2a
+	sysMCAST_LEAVE_GROUP        = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_MSFILTER           = 0x30
+	sysIPV6_ROUTER_ALERT        = 0x16
+	sysIPV6_MTU_DISCOVER        = 0x17
+	sysIPV6_MTU                 = 0x18
+	sysIPV6_RECVERR             = 0x19
+	sysIPV6_V6ONLY              = 0x1a
+	sysIPV6_JOIN_ANYCAST        = 0x1b
+	sysIPV6_LEAVE_ANYCAST       = 0x1c
+
+	sysIPV6_FLOWLABEL_MGR = 0x20
+	sysIPV6_FLOWINFO_SEND = 0x21
+
+	sysIPV6_IPSEC_POLICY = 0x22
+	sysIPV6_XFRM_POLICY  = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x31
+	sysIPV6_PKTINFO      = 0x32
+	sysIPV6_RECVHOPLIMIT = 0x33
+	sysIPV6_HOPLIMIT     = 0x34
+	sysIPV6_RECVHOPOPTS  = 0x35
+	sysIPV6_HOPOPTS      = 0x36
+	sysIPV6_RTHDRDSTOPTS = 0x37
+	sysIPV6_RECVRTHDR    = 0x38
+	sysIPV6_RTHDR        = 0x39
+	sysIPV6_RECVDSTOPTS  = 0x3a
+	sysIPV6_DSTOPTS      = 0x3b
+	sysIPV6_RECVPATHMTU  = 0x3c
+	sysIPV6_PATHMTU      = 0x3d
+	sysIPV6_DONTFRAG     = 0x3e
+
+	sysIPV6_RECVTCLASS = 0x42
+	sysIPV6_TCLASS     = 0x43
+
+	sysIPV6_ADDR_PREFERENCES = 0x48
+
+	sysIPV6_PREFER_SRC_TMP            = 0x1
+	sysIPV6_PREFER_SRC_PUBLIC         = 0x2
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+	sysIPV6_PREFER_SRC_COA            = 0x4
+	sysIPV6_PREFER_SRC_HOME           = 0x400
+	sysIPV6_PREFER_SRC_CGA            = 0x8
+	sysIPV6_PREFER_SRC_NONCGA         = 0x800
+
+	sysIPV6_MINHOPCOUNT = 0x49
+
+	sysIPV6_ORIGDSTADDR     = 0x4a
+	sysIPV6_RECVORIGDSTADDR = 0x4a
+	sysIPV6_TRANSPARENT     = 0x4b
+	sysIPV6_UNICAST_IF      = 0x4c
+
+	sysICMPV6_FILTER = 0x1
+
+	sysICMPV6_FILTER_BLOCK       = 0x1
+	sysICMPV6_FILTER_PASS        = 0x2
+	sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+	sysICMPV6_FILTER_PASSONLY    = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet6         = 0x1c
+	sizeofInet6Pktinfo          = 0x14
+	sizeofIPv6Mtuinfo           = 0x20
+	sizeofIPv6FlowlabelReq      = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex int32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6FlowlabelReq struct {
+	Dst        [16]byte /* in6_addr */
+	Label      uint32
+	Action     uint8
+	Share      uint8
+	Flags      uint16
+	Expires    uint16
+	Linger     uint16
+	X__flr_pad uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Ifindex   int32
+}
+
+type groupReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Data [8]uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [2]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go
new file mode 100644
index 0000000..f9376b6
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_arm64.go
@@ -0,0 +1,170 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+	sysIPV6_ADDRFORM       = 0x1
+	sysIPV6_2292PKTINFO    = 0x2
+	sysIPV6_2292HOPOPTS    = 0x3
+	sysIPV6_2292DSTOPTS    = 0x4
+	sysIPV6_2292RTHDR      = 0x5
+	sysIPV6_2292PKTOPTIONS = 0x6
+	sysIPV6_CHECKSUM       = 0x7
+	sysIPV6_2292HOPLIMIT   = 0x8
+	sysIPV6_NEXTHOP        = 0x9
+	sysIPV6_FLOWINFO       = 0xb
+
+	sysIPV6_UNICAST_HOPS        = 0x10
+	sysIPV6_MULTICAST_IF        = 0x11
+	sysIPV6_MULTICAST_HOPS      = 0x12
+	sysIPV6_MULTICAST_LOOP      = 0x13
+	sysIPV6_ADD_MEMBERSHIP      = 0x14
+	sysIPV6_DROP_MEMBERSHIP     = 0x15
+	sysMCAST_JOIN_GROUP         = 0x2a
+	sysMCAST_LEAVE_GROUP        = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_MSFILTER           = 0x30
+	sysIPV6_ROUTER_ALERT        = 0x16
+	sysIPV6_MTU_DISCOVER        = 0x17
+	sysIPV6_MTU                 = 0x18
+	sysIPV6_RECVERR             = 0x19
+	sysIPV6_V6ONLY              = 0x1a
+	sysIPV6_JOIN_ANYCAST        = 0x1b
+	sysIPV6_LEAVE_ANYCAST       = 0x1c
+
+	sysIPV6_FLOWLABEL_MGR = 0x20
+	sysIPV6_FLOWINFO_SEND = 0x21
+
+	sysIPV6_IPSEC_POLICY = 0x22
+	sysIPV6_XFRM_POLICY  = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x31
+	sysIPV6_PKTINFO      = 0x32
+	sysIPV6_RECVHOPLIMIT = 0x33
+	sysIPV6_HOPLIMIT     = 0x34
+	sysIPV6_RECVHOPOPTS  = 0x35
+	sysIPV6_HOPOPTS      = 0x36
+	sysIPV6_RTHDRDSTOPTS = 0x37
+	sysIPV6_RECVRTHDR    = 0x38
+	sysIPV6_RTHDR        = 0x39
+	sysIPV6_RECVDSTOPTS  = 0x3a
+	sysIPV6_DSTOPTS      = 0x3b
+	sysIPV6_RECVPATHMTU  = 0x3c
+	sysIPV6_PATHMTU      = 0x3d
+	sysIPV6_DONTFRAG     = 0x3e
+
+	sysIPV6_RECVTCLASS = 0x42
+	sysIPV6_TCLASS     = 0x43
+
+	sysIPV6_ADDR_PREFERENCES = 0x48
+
+	sysIPV6_PREFER_SRC_TMP            = 0x1
+	sysIPV6_PREFER_SRC_PUBLIC         = 0x2
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+	sysIPV6_PREFER_SRC_COA            = 0x4
+	sysIPV6_PREFER_SRC_HOME           = 0x400
+	sysIPV6_PREFER_SRC_CGA            = 0x8
+	sysIPV6_PREFER_SRC_NONCGA         = 0x800
+
+	sysIPV6_MINHOPCOUNT = 0x49
+
+	sysIPV6_ORIGDSTADDR     = 0x4a
+	sysIPV6_RECVORIGDSTADDR = 0x4a
+	sysIPV6_TRANSPARENT     = 0x4b
+	sysIPV6_UNICAST_IF      = 0x4c
+
+	sysICMPV6_FILTER = 0x1
+
+	sysICMPV6_FILTER_BLOCK       = 0x1
+	sysICMPV6_FILTER_PASS        = 0x2
+	sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+	sysICMPV6_FILTER_PASSONLY    = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet6         = 0x1c
+	sizeofInet6Pktinfo          = 0x14
+	sizeofIPv6Mtuinfo           = 0x20
+	sizeofIPv6FlowlabelReq      = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex int32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6FlowlabelReq struct {
+	Dst        [16]byte /* in6_addr */
+	Label      uint32
+	Action     uint8
+	Share      uint8
+	Flags      uint16
+	Expires    uint16
+	Linger     uint16
+	X__flr_pad uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Ifindex   int32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Data [8]uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go
new file mode 100644
index 0000000..f5a4109
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_mips.go
@@ -0,0 +1,168 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+	sysIPV6_ADDRFORM       = 0x1
+	sysIPV6_2292PKTINFO    = 0x2
+	sysIPV6_2292HOPOPTS    = 0x3
+	sysIPV6_2292DSTOPTS    = 0x4
+	sysIPV6_2292RTHDR      = 0x5
+	sysIPV6_2292PKTOPTIONS = 0x6
+	sysIPV6_CHECKSUM       = 0x7
+	sysIPV6_2292HOPLIMIT   = 0x8
+	sysIPV6_NEXTHOP        = 0x9
+	sysIPV6_FLOWINFO       = 0xb
+
+	sysIPV6_UNICAST_HOPS        = 0x10
+	sysIPV6_MULTICAST_IF        = 0x11
+	sysIPV6_MULTICAST_HOPS      = 0x12
+	sysIPV6_MULTICAST_LOOP      = 0x13
+	sysIPV6_ADD_MEMBERSHIP      = 0x14
+	sysIPV6_DROP_MEMBERSHIP     = 0x15
+	sysMCAST_JOIN_GROUP         = 0x2a
+	sysMCAST_LEAVE_GROUP        = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_MSFILTER           = 0x30
+	sysIPV6_ROUTER_ALERT        = 0x16
+	sysIPV6_MTU_DISCOVER        = 0x17
+	sysIPV6_MTU                 = 0x18
+	sysIPV6_RECVERR             = 0x19
+	sysIPV6_V6ONLY              = 0x1a
+	sysIPV6_JOIN_ANYCAST        = 0x1b
+	sysIPV6_LEAVE_ANYCAST       = 0x1c
+
+	sysIPV6_FLOWLABEL_MGR = 0x20
+	sysIPV6_FLOWINFO_SEND = 0x21
+
+	sysIPV6_IPSEC_POLICY = 0x22
+	sysIPV6_XFRM_POLICY  = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x31
+	sysIPV6_PKTINFO      = 0x32
+	sysIPV6_RECVHOPLIMIT = 0x33
+	sysIPV6_HOPLIMIT     = 0x34
+	sysIPV6_RECVHOPOPTS  = 0x35
+	sysIPV6_HOPOPTS      = 0x36
+	sysIPV6_RTHDRDSTOPTS = 0x37
+	sysIPV6_RECVRTHDR    = 0x38
+	sysIPV6_RTHDR        = 0x39
+	sysIPV6_RECVDSTOPTS  = 0x3a
+	sysIPV6_DSTOPTS      = 0x3b
+	sysIPV6_RECVPATHMTU  = 0x3c
+	sysIPV6_PATHMTU      = 0x3d
+	sysIPV6_DONTFRAG     = 0x3e
+
+	sysIPV6_RECVTCLASS = 0x42
+	sysIPV6_TCLASS     = 0x43
+
+	sysIPV6_ADDR_PREFERENCES = 0x48
+
+	sysIPV6_PREFER_SRC_TMP            = 0x1
+	sysIPV6_PREFER_SRC_PUBLIC         = 0x2
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+	sysIPV6_PREFER_SRC_COA            = 0x4
+	sysIPV6_PREFER_SRC_HOME           = 0x400
+	sysIPV6_PREFER_SRC_CGA            = 0x8
+	sysIPV6_PREFER_SRC_NONCGA         = 0x800
+
+	sysIPV6_MINHOPCOUNT = 0x49
+
+	sysIPV6_ORIGDSTADDR     = 0x4a
+	sysIPV6_RECVORIGDSTADDR = 0x4a
+	sysIPV6_TRANSPARENT     = 0x4b
+	sysIPV6_UNICAST_IF      = 0x4c
+
+	sysICMPV6_FILTER = 0x1
+
+	sysICMPV6_FILTER_BLOCK       = 0x1
+	sysICMPV6_FILTER_PASS        = 0x2
+	sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+	sysICMPV6_FILTER_PASSONLY    = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet6         = 0x1c
+	sizeofInet6Pktinfo          = 0x14
+	sizeofIPv6Mtuinfo           = 0x20
+	sizeofIPv6FlowlabelReq      = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex int32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6FlowlabelReq struct {
+	Dst        [16]byte /* in6_addr */
+	Label      uint32
+	Action     uint8
+	Share      uint8
+	Flags      uint16
+	Expires    uint16
+	Linger     uint16
+	X__flr_pad uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Ifindex   int32
+}
+
+type groupReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Data [8]uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [2]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go
new file mode 100644
index 0000000..f9376b6
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_mips64.go
@@ -0,0 +1,170 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+	sysIPV6_ADDRFORM       = 0x1
+	sysIPV6_2292PKTINFO    = 0x2
+	sysIPV6_2292HOPOPTS    = 0x3
+	sysIPV6_2292DSTOPTS    = 0x4
+	sysIPV6_2292RTHDR      = 0x5
+	sysIPV6_2292PKTOPTIONS = 0x6
+	sysIPV6_CHECKSUM       = 0x7
+	sysIPV6_2292HOPLIMIT   = 0x8
+	sysIPV6_NEXTHOP        = 0x9
+	sysIPV6_FLOWINFO       = 0xb
+
+	sysIPV6_UNICAST_HOPS        = 0x10
+	sysIPV6_MULTICAST_IF        = 0x11
+	sysIPV6_MULTICAST_HOPS      = 0x12
+	sysIPV6_MULTICAST_LOOP      = 0x13
+	sysIPV6_ADD_MEMBERSHIP      = 0x14
+	sysIPV6_DROP_MEMBERSHIP     = 0x15
+	sysMCAST_JOIN_GROUP         = 0x2a
+	sysMCAST_LEAVE_GROUP        = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_MSFILTER           = 0x30
+	sysIPV6_ROUTER_ALERT        = 0x16
+	sysIPV6_MTU_DISCOVER        = 0x17
+	sysIPV6_MTU                 = 0x18
+	sysIPV6_RECVERR             = 0x19
+	sysIPV6_V6ONLY              = 0x1a
+	sysIPV6_JOIN_ANYCAST        = 0x1b
+	sysIPV6_LEAVE_ANYCAST       = 0x1c
+
+	sysIPV6_FLOWLABEL_MGR = 0x20
+	sysIPV6_FLOWINFO_SEND = 0x21
+
+	sysIPV6_IPSEC_POLICY = 0x22
+	sysIPV6_XFRM_POLICY  = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x31
+	sysIPV6_PKTINFO      = 0x32
+	sysIPV6_RECVHOPLIMIT = 0x33
+	sysIPV6_HOPLIMIT     = 0x34
+	sysIPV6_RECVHOPOPTS  = 0x35
+	sysIPV6_HOPOPTS      = 0x36
+	sysIPV6_RTHDRDSTOPTS = 0x37
+	sysIPV6_RECVRTHDR    = 0x38
+	sysIPV6_RTHDR        = 0x39
+	sysIPV6_RECVDSTOPTS  = 0x3a
+	sysIPV6_DSTOPTS      = 0x3b
+	sysIPV6_RECVPATHMTU  = 0x3c
+	sysIPV6_PATHMTU      = 0x3d
+	sysIPV6_DONTFRAG     = 0x3e
+
+	sysIPV6_RECVTCLASS = 0x42
+	sysIPV6_TCLASS     = 0x43
+
+	sysIPV6_ADDR_PREFERENCES = 0x48
+
+	sysIPV6_PREFER_SRC_TMP            = 0x1
+	sysIPV6_PREFER_SRC_PUBLIC         = 0x2
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+	sysIPV6_PREFER_SRC_COA            = 0x4
+	sysIPV6_PREFER_SRC_HOME           = 0x400
+	sysIPV6_PREFER_SRC_CGA            = 0x8
+	sysIPV6_PREFER_SRC_NONCGA         = 0x800
+
+	sysIPV6_MINHOPCOUNT = 0x49
+
+	sysIPV6_ORIGDSTADDR     = 0x4a
+	sysIPV6_RECVORIGDSTADDR = 0x4a
+	sysIPV6_TRANSPARENT     = 0x4b
+	sysIPV6_UNICAST_IF      = 0x4c
+
+	sysICMPV6_FILTER = 0x1
+
+	sysICMPV6_FILTER_BLOCK       = 0x1
+	sysICMPV6_FILTER_PASS        = 0x2
+	sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+	sysICMPV6_FILTER_PASSONLY    = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet6         = 0x1c
+	sizeofInet6Pktinfo          = 0x14
+	sizeofIPv6Mtuinfo           = 0x20
+	sizeofIPv6FlowlabelReq      = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex int32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6FlowlabelReq struct {
+	Dst        [16]byte /* in6_addr */
+	Label      uint32
+	Action     uint8
+	Share      uint8
+	Flags      uint16
+	Expires    uint16
+	Linger     uint16
+	X__flr_pad uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Ifindex   int32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Data [8]uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go
new file mode 100644
index 0000000..f9376b6
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_mips64le.go
@@ -0,0 +1,170 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+	sysIPV6_ADDRFORM       = 0x1
+	sysIPV6_2292PKTINFO    = 0x2
+	sysIPV6_2292HOPOPTS    = 0x3
+	sysIPV6_2292DSTOPTS    = 0x4
+	sysIPV6_2292RTHDR      = 0x5
+	sysIPV6_2292PKTOPTIONS = 0x6
+	sysIPV6_CHECKSUM       = 0x7
+	sysIPV6_2292HOPLIMIT   = 0x8
+	sysIPV6_NEXTHOP        = 0x9
+	sysIPV6_FLOWINFO       = 0xb
+
+	sysIPV6_UNICAST_HOPS        = 0x10
+	sysIPV6_MULTICAST_IF        = 0x11
+	sysIPV6_MULTICAST_HOPS      = 0x12
+	sysIPV6_MULTICAST_LOOP      = 0x13
+	sysIPV6_ADD_MEMBERSHIP      = 0x14
+	sysIPV6_DROP_MEMBERSHIP     = 0x15
+	sysMCAST_JOIN_GROUP         = 0x2a
+	sysMCAST_LEAVE_GROUP        = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_MSFILTER           = 0x30
+	sysIPV6_ROUTER_ALERT        = 0x16
+	sysIPV6_MTU_DISCOVER        = 0x17
+	sysIPV6_MTU                 = 0x18
+	sysIPV6_RECVERR             = 0x19
+	sysIPV6_V6ONLY              = 0x1a
+	sysIPV6_JOIN_ANYCAST        = 0x1b
+	sysIPV6_LEAVE_ANYCAST       = 0x1c
+
+	sysIPV6_FLOWLABEL_MGR = 0x20
+	sysIPV6_FLOWINFO_SEND = 0x21
+
+	sysIPV6_IPSEC_POLICY = 0x22
+	sysIPV6_XFRM_POLICY  = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x31
+	sysIPV6_PKTINFO      = 0x32
+	sysIPV6_RECVHOPLIMIT = 0x33
+	sysIPV6_HOPLIMIT     = 0x34
+	sysIPV6_RECVHOPOPTS  = 0x35
+	sysIPV6_HOPOPTS      = 0x36
+	sysIPV6_RTHDRDSTOPTS = 0x37
+	sysIPV6_RECVRTHDR    = 0x38
+	sysIPV6_RTHDR        = 0x39
+	sysIPV6_RECVDSTOPTS  = 0x3a
+	sysIPV6_DSTOPTS      = 0x3b
+	sysIPV6_RECVPATHMTU  = 0x3c
+	sysIPV6_PATHMTU      = 0x3d
+	sysIPV6_DONTFRAG     = 0x3e
+
+	sysIPV6_RECVTCLASS = 0x42
+	sysIPV6_TCLASS     = 0x43
+
+	sysIPV6_ADDR_PREFERENCES = 0x48
+
+	sysIPV6_PREFER_SRC_TMP            = 0x1
+	sysIPV6_PREFER_SRC_PUBLIC         = 0x2
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+	sysIPV6_PREFER_SRC_COA            = 0x4
+	sysIPV6_PREFER_SRC_HOME           = 0x400
+	sysIPV6_PREFER_SRC_CGA            = 0x8
+	sysIPV6_PREFER_SRC_NONCGA         = 0x800
+
+	sysIPV6_MINHOPCOUNT = 0x49
+
+	sysIPV6_ORIGDSTADDR     = 0x4a
+	sysIPV6_RECVORIGDSTADDR = 0x4a
+	sysIPV6_TRANSPARENT     = 0x4b
+	sysIPV6_UNICAST_IF      = 0x4c
+
+	sysICMPV6_FILTER = 0x1
+
+	sysICMPV6_FILTER_BLOCK       = 0x1
+	sysICMPV6_FILTER_PASS        = 0x2
+	sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+	sysICMPV6_FILTER_PASSONLY    = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet6         = 0x1c
+	sizeofInet6Pktinfo          = 0x14
+	sizeofIPv6Mtuinfo           = 0x20
+	sizeofIPv6FlowlabelReq      = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex int32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6FlowlabelReq struct {
+	Dst        [16]byte /* in6_addr */
+	Label      uint32
+	Action     uint8
+	Share      uint8
+	Flags      uint16
+	Expires    uint16
+	Linger     uint16
+	X__flr_pad uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Ifindex   int32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Data [8]uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go
new file mode 100644
index 0000000..f5a4109
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_mipsle.go
@@ -0,0 +1,168 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+	sysIPV6_ADDRFORM       = 0x1
+	sysIPV6_2292PKTINFO    = 0x2
+	sysIPV6_2292HOPOPTS    = 0x3
+	sysIPV6_2292DSTOPTS    = 0x4
+	sysIPV6_2292RTHDR      = 0x5
+	sysIPV6_2292PKTOPTIONS = 0x6
+	sysIPV6_CHECKSUM       = 0x7
+	sysIPV6_2292HOPLIMIT   = 0x8
+	sysIPV6_NEXTHOP        = 0x9
+	sysIPV6_FLOWINFO       = 0xb
+
+	sysIPV6_UNICAST_HOPS        = 0x10
+	sysIPV6_MULTICAST_IF        = 0x11
+	sysIPV6_MULTICAST_HOPS      = 0x12
+	sysIPV6_MULTICAST_LOOP      = 0x13
+	sysIPV6_ADD_MEMBERSHIP      = 0x14
+	sysIPV6_DROP_MEMBERSHIP     = 0x15
+	sysMCAST_JOIN_GROUP         = 0x2a
+	sysMCAST_LEAVE_GROUP        = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_MSFILTER           = 0x30
+	sysIPV6_ROUTER_ALERT        = 0x16
+	sysIPV6_MTU_DISCOVER        = 0x17
+	sysIPV6_MTU                 = 0x18
+	sysIPV6_RECVERR             = 0x19
+	sysIPV6_V6ONLY              = 0x1a
+	sysIPV6_JOIN_ANYCAST        = 0x1b
+	sysIPV6_LEAVE_ANYCAST       = 0x1c
+
+	sysIPV6_FLOWLABEL_MGR = 0x20
+	sysIPV6_FLOWINFO_SEND = 0x21
+
+	sysIPV6_IPSEC_POLICY = 0x22
+	sysIPV6_XFRM_POLICY  = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x31
+	sysIPV6_PKTINFO      = 0x32
+	sysIPV6_RECVHOPLIMIT = 0x33
+	sysIPV6_HOPLIMIT     = 0x34
+	sysIPV6_RECVHOPOPTS  = 0x35
+	sysIPV6_HOPOPTS      = 0x36
+	sysIPV6_RTHDRDSTOPTS = 0x37
+	sysIPV6_RECVRTHDR    = 0x38
+	sysIPV6_RTHDR        = 0x39
+	sysIPV6_RECVDSTOPTS  = 0x3a
+	sysIPV6_DSTOPTS      = 0x3b
+	sysIPV6_RECVPATHMTU  = 0x3c
+	sysIPV6_PATHMTU      = 0x3d
+	sysIPV6_DONTFRAG     = 0x3e
+
+	sysIPV6_RECVTCLASS = 0x42
+	sysIPV6_TCLASS     = 0x43
+
+	sysIPV6_ADDR_PREFERENCES = 0x48
+
+	sysIPV6_PREFER_SRC_TMP            = 0x1
+	sysIPV6_PREFER_SRC_PUBLIC         = 0x2
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+	sysIPV6_PREFER_SRC_COA            = 0x4
+	sysIPV6_PREFER_SRC_HOME           = 0x400
+	sysIPV6_PREFER_SRC_CGA            = 0x8
+	sysIPV6_PREFER_SRC_NONCGA         = 0x800
+
+	sysIPV6_MINHOPCOUNT = 0x49
+
+	sysIPV6_ORIGDSTADDR     = 0x4a
+	sysIPV6_RECVORIGDSTADDR = 0x4a
+	sysIPV6_TRANSPARENT     = 0x4b
+	sysIPV6_UNICAST_IF      = 0x4c
+
+	sysICMPV6_FILTER = 0x1
+
+	sysICMPV6_FILTER_BLOCK       = 0x1
+	sysICMPV6_FILTER_PASS        = 0x2
+	sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+	sysICMPV6_FILTER_PASSONLY    = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet6         = 0x1c
+	sizeofInet6Pktinfo          = 0x14
+	sizeofIPv6Mtuinfo           = 0x20
+	sizeofIPv6FlowlabelReq      = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex int32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6FlowlabelReq struct {
+	Dst        [16]byte /* in6_addr */
+	Label      uint32
+	Action     uint8
+	Share      uint8
+	Flags      uint16
+	Expires    uint16
+	Linger     uint16
+	X__flr_pad uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Ifindex   int32
+}
+
+type groupReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Data [8]uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [2]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go
new file mode 100644
index 0000000..be2dbd6
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_ppc.go
@@ -0,0 +1,168 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+	sysIPV6_ADDRFORM       = 0x1
+	sysIPV6_2292PKTINFO    = 0x2
+	sysIPV6_2292HOPOPTS    = 0x3
+	sysIPV6_2292DSTOPTS    = 0x4
+	sysIPV6_2292RTHDR      = 0x5
+	sysIPV6_2292PKTOPTIONS = 0x6
+	sysIPV6_CHECKSUM       = 0x7
+	sysIPV6_2292HOPLIMIT   = 0x8
+	sysIPV6_NEXTHOP        = 0x9
+	sysIPV6_FLOWINFO       = 0xb
+
+	sysIPV6_UNICAST_HOPS        = 0x10
+	sysIPV6_MULTICAST_IF        = 0x11
+	sysIPV6_MULTICAST_HOPS      = 0x12
+	sysIPV6_MULTICAST_LOOP      = 0x13
+	sysIPV6_ADD_MEMBERSHIP      = 0x14
+	sysIPV6_DROP_MEMBERSHIP     = 0x15
+	sysMCAST_JOIN_GROUP         = 0x2a
+	sysMCAST_LEAVE_GROUP        = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_MSFILTER           = 0x30
+	sysIPV6_ROUTER_ALERT        = 0x16
+	sysIPV6_MTU_DISCOVER        = 0x17
+	sysIPV6_MTU                 = 0x18
+	sysIPV6_RECVERR             = 0x19
+	sysIPV6_V6ONLY              = 0x1a
+	sysIPV6_JOIN_ANYCAST        = 0x1b
+	sysIPV6_LEAVE_ANYCAST       = 0x1c
+
+	sysIPV6_FLOWLABEL_MGR = 0x20
+	sysIPV6_FLOWINFO_SEND = 0x21
+
+	sysIPV6_IPSEC_POLICY = 0x22
+	sysIPV6_XFRM_POLICY  = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x31
+	sysIPV6_PKTINFO      = 0x32
+	sysIPV6_RECVHOPLIMIT = 0x33
+	sysIPV6_HOPLIMIT     = 0x34
+	sysIPV6_RECVHOPOPTS  = 0x35
+	sysIPV6_HOPOPTS      = 0x36
+	sysIPV6_RTHDRDSTOPTS = 0x37
+	sysIPV6_RECVRTHDR    = 0x38
+	sysIPV6_RTHDR        = 0x39
+	sysIPV6_RECVDSTOPTS  = 0x3a
+	sysIPV6_DSTOPTS      = 0x3b
+	sysIPV6_RECVPATHMTU  = 0x3c
+	sysIPV6_PATHMTU      = 0x3d
+	sysIPV6_DONTFRAG     = 0x3e
+
+	sysIPV6_RECVTCLASS = 0x42
+	sysIPV6_TCLASS     = 0x43
+
+	sysIPV6_ADDR_PREFERENCES = 0x48
+
+	sysIPV6_PREFER_SRC_TMP            = 0x1
+	sysIPV6_PREFER_SRC_PUBLIC         = 0x2
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+	sysIPV6_PREFER_SRC_COA            = 0x4
+	sysIPV6_PREFER_SRC_HOME           = 0x400
+	sysIPV6_PREFER_SRC_CGA            = 0x8
+	sysIPV6_PREFER_SRC_NONCGA         = 0x800
+
+	sysIPV6_MINHOPCOUNT = 0x49
+
+	sysIPV6_ORIGDSTADDR     = 0x4a
+	sysIPV6_RECVORIGDSTADDR = 0x4a
+	sysIPV6_TRANSPARENT     = 0x4b
+	sysIPV6_UNICAST_IF      = 0x4c
+
+	sysICMPV6_FILTER = 0x1
+
+	sysICMPV6_FILTER_BLOCK       = 0x1
+	sysICMPV6_FILTER_PASS        = 0x2
+	sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+	sysICMPV6_FILTER_PASSONLY    = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet6         = 0x1c
+	sizeofInet6Pktinfo          = 0x14
+	sizeofIPv6Mtuinfo           = 0x20
+	sizeofIPv6FlowlabelReq      = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x84
+	sizeofGroupSourceReq = 0x104
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]uint8
+}
+
+type sockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex int32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6FlowlabelReq struct {
+	Dst        [16]byte /* in6_addr */
+	Label      uint32
+	Action     uint8
+	Share      uint8
+	Flags      uint16
+	Expires    uint16
+	Linger     uint16
+	X__flr_pad uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Ifindex   int32
+}
+
+type groupReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Data [8]uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [2]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go
new file mode 100644
index 0000000..f9376b6
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64.go
@@ -0,0 +1,170 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+	sysIPV6_ADDRFORM       = 0x1
+	sysIPV6_2292PKTINFO    = 0x2
+	sysIPV6_2292HOPOPTS    = 0x3
+	sysIPV6_2292DSTOPTS    = 0x4
+	sysIPV6_2292RTHDR      = 0x5
+	sysIPV6_2292PKTOPTIONS = 0x6
+	sysIPV6_CHECKSUM       = 0x7
+	sysIPV6_2292HOPLIMIT   = 0x8
+	sysIPV6_NEXTHOP        = 0x9
+	sysIPV6_FLOWINFO       = 0xb
+
+	sysIPV6_UNICAST_HOPS        = 0x10
+	sysIPV6_MULTICAST_IF        = 0x11
+	sysIPV6_MULTICAST_HOPS      = 0x12
+	sysIPV6_MULTICAST_LOOP      = 0x13
+	sysIPV6_ADD_MEMBERSHIP      = 0x14
+	sysIPV6_DROP_MEMBERSHIP     = 0x15
+	sysMCAST_JOIN_GROUP         = 0x2a
+	sysMCAST_LEAVE_GROUP        = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_MSFILTER           = 0x30
+	sysIPV6_ROUTER_ALERT        = 0x16
+	sysIPV6_MTU_DISCOVER        = 0x17
+	sysIPV6_MTU                 = 0x18
+	sysIPV6_RECVERR             = 0x19
+	sysIPV6_V6ONLY              = 0x1a
+	sysIPV6_JOIN_ANYCAST        = 0x1b
+	sysIPV6_LEAVE_ANYCAST       = 0x1c
+
+	sysIPV6_FLOWLABEL_MGR = 0x20
+	sysIPV6_FLOWINFO_SEND = 0x21
+
+	sysIPV6_IPSEC_POLICY = 0x22
+	sysIPV6_XFRM_POLICY  = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x31
+	sysIPV6_PKTINFO      = 0x32
+	sysIPV6_RECVHOPLIMIT = 0x33
+	sysIPV6_HOPLIMIT     = 0x34
+	sysIPV6_RECVHOPOPTS  = 0x35
+	sysIPV6_HOPOPTS      = 0x36
+	sysIPV6_RTHDRDSTOPTS = 0x37
+	sysIPV6_RECVRTHDR    = 0x38
+	sysIPV6_RTHDR        = 0x39
+	sysIPV6_RECVDSTOPTS  = 0x3a
+	sysIPV6_DSTOPTS      = 0x3b
+	sysIPV6_RECVPATHMTU  = 0x3c
+	sysIPV6_PATHMTU      = 0x3d
+	sysIPV6_DONTFRAG     = 0x3e
+
+	sysIPV6_RECVTCLASS = 0x42
+	sysIPV6_TCLASS     = 0x43
+
+	sysIPV6_ADDR_PREFERENCES = 0x48
+
+	sysIPV6_PREFER_SRC_TMP            = 0x1
+	sysIPV6_PREFER_SRC_PUBLIC         = 0x2
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+	sysIPV6_PREFER_SRC_COA            = 0x4
+	sysIPV6_PREFER_SRC_HOME           = 0x400
+	sysIPV6_PREFER_SRC_CGA            = 0x8
+	sysIPV6_PREFER_SRC_NONCGA         = 0x800
+
+	sysIPV6_MINHOPCOUNT = 0x49
+
+	sysIPV6_ORIGDSTADDR     = 0x4a
+	sysIPV6_RECVORIGDSTADDR = 0x4a
+	sysIPV6_TRANSPARENT     = 0x4b
+	sysIPV6_UNICAST_IF      = 0x4c
+
+	sysICMPV6_FILTER = 0x1
+
+	sysICMPV6_FILTER_BLOCK       = 0x1
+	sysICMPV6_FILTER_PASS        = 0x2
+	sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+	sysICMPV6_FILTER_PASSONLY    = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet6         = 0x1c
+	sizeofInet6Pktinfo          = 0x14
+	sizeofIPv6Mtuinfo           = 0x20
+	sizeofIPv6FlowlabelReq      = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex int32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6FlowlabelReq struct {
+	Dst        [16]byte /* in6_addr */
+	Label      uint32
+	Action     uint8
+	Share      uint8
+	Flags      uint16
+	Expires    uint16
+	Linger     uint16
+	X__flr_pad uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Ifindex   int32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Data [8]uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go
new file mode 100644
index 0000000..f9376b6
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_ppc64le.go
@@ -0,0 +1,170 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+	sysIPV6_ADDRFORM       = 0x1
+	sysIPV6_2292PKTINFO    = 0x2
+	sysIPV6_2292HOPOPTS    = 0x3
+	sysIPV6_2292DSTOPTS    = 0x4
+	sysIPV6_2292RTHDR      = 0x5
+	sysIPV6_2292PKTOPTIONS = 0x6
+	sysIPV6_CHECKSUM       = 0x7
+	sysIPV6_2292HOPLIMIT   = 0x8
+	sysIPV6_NEXTHOP        = 0x9
+	sysIPV6_FLOWINFO       = 0xb
+
+	sysIPV6_UNICAST_HOPS        = 0x10
+	sysIPV6_MULTICAST_IF        = 0x11
+	sysIPV6_MULTICAST_HOPS      = 0x12
+	sysIPV6_MULTICAST_LOOP      = 0x13
+	sysIPV6_ADD_MEMBERSHIP      = 0x14
+	sysIPV6_DROP_MEMBERSHIP     = 0x15
+	sysMCAST_JOIN_GROUP         = 0x2a
+	sysMCAST_LEAVE_GROUP        = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_MSFILTER           = 0x30
+	sysIPV6_ROUTER_ALERT        = 0x16
+	sysIPV6_MTU_DISCOVER        = 0x17
+	sysIPV6_MTU                 = 0x18
+	sysIPV6_RECVERR             = 0x19
+	sysIPV6_V6ONLY              = 0x1a
+	sysIPV6_JOIN_ANYCAST        = 0x1b
+	sysIPV6_LEAVE_ANYCAST       = 0x1c
+
+	sysIPV6_FLOWLABEL_MGR = 0x20
+	sysIPV6_FLOWINFO_SEND = 0x21
+
+	sysIPV6_IPSEC_POLICY = 0x22
+	sysIPV6_XFRM_POLICY  = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x31
+	sysIPV6_PKTINFO      = 0x32
+	sysIPV6_RECVHOPLIMIT = 0x33
+	sysIPV6_HOPLIMIT     = 0x34
+	sysIPV6_RECVHOPOPTS  = 0x35
+	sysIPV6_HOPOPTS      = 0x36
+	sysIPV6_RTHDRDSTOPTS = 0x37
+	sysIPV6_RECVRTHDR    = 0x38
+	sysIPV6_RTHDR        = 0x39
+	sysIPV6_RECVDSTOPTS  = 0x3a
+	sysIPV6_DSTOPTS      = 0x3b
+	sysIPV6_RECVPATHMTU  = 0x3c
+	sysIPV6_PATHMTU      = 0x3d
+	sysIPV6_DONTFRAG     = 0x3e
+
+	sysIPV6_RECVTCLASS = 0x42
+	sysIPV6_TCLASS     = 0x43
+
+	sysIPV6_ADDR_PREFERENCES = 0x48
+
+	sysIPV6_PREFER_SRC_TMP            = 0x1
+	sysIPV6_PREFER_SRC_PUBLIC         = 0x2
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+	sysIPV6_PREFER_SRC_COA            = 0x4
+	sysIPV6_PREFER_SRC_HOME           = 0x400
+	sysIPV6_PREFER_SRC_CGA            = 0x8
+	sysIPV6_PREFER_SRC_NONCGA         = 0x800
+
+	sysIPV6_MINHOPCOUNT = 0x49
+
+	sysIPV6_ORIGDSTADDR     = 0x4a
+	sysIPV6_RECVORIGDSTADDR = 0x4a
+	sysIPV6_TRANSPARENT     = 0x4b
+	sysIPV6_UNICAST_IF      = 0x4c
+
+	sysICMPV6_FILTER = 0x1
+
+	sysICMPV6_FILTER_BLOCK       = 0x1
+	sysICMPV6_FILTER_PASS        = 0x2
+	sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+	sysICMPV6_FILTER_PASSONLY    = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet6         = 0x1c
+	sizeofInet6Pktinfo          = 0x14
+	sizeofIPv6Mtuinfo           = 0x20
+	sizeofIPv6FlowlabelReq      = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex int32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6FlowlabelReq struct {
+	Dst        [16]byte /* in6_addr */
+	Label      uint32
+	Action     uint8
+	Share      uint8
+	Flags      uint16
+	Expires    uint16
+	Linger     uint16
+	X__flr_pad uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Ifindex   int32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Data [8]uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go
new file mode 100644
index 0000000..f9376b6
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_linux_s390x.go
@@ -0,0 +1,170 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_linux.go
+
+package ipv6
+
+const (
+	sysIPV6_ADDRFORM       = 0x1
+	sysIPV6_2292PKTINFO    = 0x2
+	sysIPV6_2292HOPOPTS    = 0x3
+	sysIPV6_2292DSTOPTS    = 0x4
+	sysIPV6_2292RTHDR      = 0x5
+	sysIPV6_2292PKTOPTIONS = 0x6
+	sysIPV6_CHECKSUM       = 0x7
+	sysIPV6_2292HOPLIMIT   = 0x8
+	sysIPV6_NEXTHOP        = 0x9
+	sysIPV6_FLOWINFO       = 0xb
+
+	sysIPV6_UNICAST_HOPS        = 0x10
+	sysIPV6_MULTICAST_IF        = 0x11
+	sysIPV6_MULTICAST_HOPS      = 0x12
+	sysIPV6_MULTICAST_LOOP      = 0x13
+	sysIPV6_ADD_MEMBERSHIP      = 0x14
+	sysIPV6_DROP_MEMBERSHIP     = 0x15
+	sysMCAST_JOIN_GROUP         = 0x2a
+	sysMCAST_LEAVE_GROUP        = 0x2d
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2e
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2f
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_MSFILTER           = 0x30
+	sysIPV6_ROUTER_ALERT        = 0x16
+	sysIPV6_MTU_DISCOVER        = 0x17
+	sysIPV6_MTU                 = 0x18
+	sysIPV6_RECVERR             = 0x19
+	sysIPV6_V6ONLY              = 0x1a
+	sysIPV6_JOIN_ANYCAST        = 0x1b
+	sysIPV6_LEAVE_ANYCAST       = 0x1c
+
+	sysIPV6_FLOWLABEL_MGR = 0x20
+	sysIPV6_FLOWINFO_SEND = 0x21
+
+	sysIPV6_IPSEC_POLICY = 0x22
+	sysIPV6_XFRM_POLICY  = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x31
+	sysIPV6_PKTINFO      = 0x32
+	sysIPV6_RECVHOPLIMIT = 0x33
+	sysIPV6_HOPLIMIT     = 0x34
+	sysIPV6_RECVHOPOPTS  = 0x35
+	sysIPV6_HOPOPTS      = 0x36
+	sysIPV6_RTHDRDSTOPTS = 0x37
+	sysIPV6_RECVRTHDR    = 0x38
+	sysIPV6_RTHDR        = 0x39
+	sysIPV6_RECVDSTOPTS  = 0x3a
+	sysIPV6_DSTOPTS      = 0x3b
+	sysIPV6_RECVPATHMTU  = 0x3c
+	sysIPV6_PATHMTU      = 0x3d
+	sysIPV6_DONTFRAG     = 0x3e
+
+	sysIPV6_RECVTCLASS = 0x42
+	sysIPV6_TCLASS     = 0x43
+
+	sysIPV6_ADDR_PREFERENCES = 0x48
+
+	sysIPV6_PREFER_SRC_TMP            = 0x1
+	sysIPV6_PREFER_SRC_PUBLIC         = 0x2
+	sysIPV6_PREFER_SRC_PUBTMP_DEFAULT = 0x100
+	sysIPV6_PREFER_SRC_COA            = 0x4
+	sysIPV6_PREFER_SRC_HOME           = 0x400
+	sysIPV6_PREFER_SRC_CGA            = 0x8
+	sysIPV6_PREFER_SRC_NONCGA         = 0x800
+
+	sysIPV6_MINHOPCOUNT = 0x49
+
+	sysIPV6_ORIGDSTADDR     = 0x4a
+	sysIPV6_RECVORIGDSTADDR = 0x4a
+	sysIPV6_TRANSPARENT     = 0x4b
+	sysIPV6_UNICAST_IF      = 0x4c
+
+	sysICMPV6_FILTER = 0x1
+
+	sysICMPV6_FILTER_BLOCK       = 0x1
+	sysICMPV6_FILTER_PASS        = 0x2
+	sysICMPV6_FILTER_BLOCKOTHERS = 0x3
+	sysICMPV6_FILTER_PASSONLY    = 0x4
+
+	sysSOL_SOCKET       = 0x1
+	sysSO_ATTACH_FILTER = 0x1a
+
+	sizeofKernelSockaddrStorage = 0x80
+	sizeofSockaddrInet6         = 0x1c
+	sizeofInet6Pktinfo          = 0x14
+	sizeofIPv6Mtuinfo           = 0x20
+	sizeofIPv6FlowlabelReq      = 0x20
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x88
+	sizeofGroupSourceReq = 0x108
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type kernelSockaddrStorage struct {
+	Family  uint16
+	X__data [126]int8
+}
+
+type sockaddrInet6 struct {
+	Family   uint16
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex int32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6FlowlabelReq struct {
+	Dst        [16]byte /* in6_addr */
+	Label      uint32
+	Action     uint8
+	Share      uint8
+	Flags      uint16
+	Expires    uint16
+	Linger     uint16
+	X__flr_pad uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Ifindex   int32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [4]byte
+	Group     kernelSockaddrStorage
+	Source    kernelSockaddrStorage
+}
+
+type icmpv6Filter struct {
+	Data [8]uint32
+}
+
+type sockFProg struct {
+	Len       uint16
+	Pad_cgo_0 [6]byte
+	Filter    *sockFilter
+}
+
+type sockFilter struct {
+	Code uint16
+	Jt   uint8
+	Jf   uint8
+	K    uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_netbsd.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_netbsd.go
new file mode 100644
index 0000000..bcada13
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_netbsd.go
@@ -0,0 +1,84 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_netbsd.go
+
+package ipv6
+
+const (
+	sysIPV6_UNICAST_HOPS   = 0x4
+	sysIPV6_MULTICAST_IF   = 0x9
+	sysIPV6_MULTICAST_HOPS = 0xa
+	sysIPV6_MULTICAST_LOOP = 0xb
+	sysIPV6_JOIN_GROUP     = 0xc
+	sysIPV6_LEAVE_GROUP    = 0xd
+	sysIPV6_PORTRANGE      = 0xe
+	sysICMP6_FILTER        = 0x12
+
+	sysIPV6_CHECKSUM = 0x1a
+	sysIPV6_V6ONLY   = 0x1b
+
+	sysIPV6_IPSEC_POLICY = 0x1c
+
+	sysIPV6_RTHDRDSTOPTS = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x24
+	sysIPV6_RECVHOPLIMIT = 0x25
+	sysIPV6_RECVRTHDR    = 0x26
+	sysIPV6_RECVHOPOPTS  = 0x27
+	sysIPV6_RECVDSTOPTS  = 0x28
+
+	sysIPV6_USE_MIN_MTU = 0x2a
+	sysIPV6_RECVPATHMTU = 0x2b
+	sysIPV6_PATHMTU     = 0x2c
+
+	sysIPV6_PKTINFO  = 0x2e
+	sysIPV6_HOPLIMIT = 0x2f
+	sysIPV6_NEXTHOP  = 0x30
+	sysIPV6_HOPOPTS  = 0x31
+	sysIPV6_DSTOPTS  = 0x32
+	sysIPV6_RTHDR    = 0x33
+
+	sysIPV6_RECVTCLASS = 0x39
+
+	sysIPV6_TCLASS   = 0x3d
+	sysIPV6_DONTFRAG = 0x3e
+
+	sysIPV6_PORTRANGE_DEFAULT = 0x0
+	sysIPV6_PORTRANGE_HIGH    = 0x1
+	sysIPV6_PORTRANGE_LOW     = 0x2
+
+	sizeofSockaddrInet6 = 0x1c
+	sizeofInet6Pktinfo  = 0x14
+	sizeofIPv6Mtuinfo   = 0x20
+
+	sizeofIPv6Mreq = 0x14
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type sockaddrInet6 struct {
+	Len      uint8
+	Family   uint8
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type icmpv6Filter struct {
+	Filt [8]uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_openbsd.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_openbsd.go
new file mode 100644
index 0000000..86cf3c6
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_openbsd.go
@@ -0,0 +1,93 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_openbsd.go
+
+package ipv6
+
+const (
+	sysIPV6_UNICAST_HOPS   = 0x4
+	sysIPV6_MULTICAST_IF   = 0x9
+	sysIPV6_MULTICAST_HOPS = 0xa
+	sysIPV6_MULTICAST_LOOP = 0xb
+	sysIPV6_JOIN_GROUP     = 0xc
+	sysIPV6_LEAVE_GROUP    = 0xd
+	sysIPV6_PORTRANGE      = 0xe
+	sysICMP6_FILTER        = 0x12
+
+	sysIPV6_CHECKSUM = 0x1a
+	sysIPV6_V6ONLY   = 0x1b
+
+	sysIPV6_RTHDRDSTOPTS = 0x23
+
+	sysIPV6_RECVPKTINFO  = 0x24
+	sysIPV6_RECVHOPLIMIT = 0x25
+	sysIPV6_RECVRTHDR    = 0x26
+	sysIPV6_RECVHOPOPTS  = 0x27
+	sysIPV6_RECVDSTOPTS  = 0x28
+
+	sysIPV6_USE_MIN_MTU = 0x2a
+	sysIPV6_RECVPATHMTU = 0x2b
+
+	sysIPV6_PATHMTU = 0x2c
+
+	sysIPV6_PKTINFO  = 0x2e
+	sysIPV6_HOPLIMIT = 0x2f
+	sysIPV6_NEXTHOP  = 0x30
+	sysIPV6_HOPOPTS  = 0x31
+	sysIPV6_DSTOPTS  = 0x32
+	sysIPV6_RTHDR    = 0x33
+
+	sysIPV6_AUTH_LEVEL        = 0x35
+	sysIPV6_ESP_TRANS_LEVEL   = 0x36
+	sysIPV6_ESP_NETWORK_LEVEL = 0x37
+	sysIPSEC6_OUTSA           = 0x38
+	sysIPV6_RECVTCLASS        = 0x39
+
+	sysIPV6_AUTOFLOWLABEL = 0x3b
+	sysIPV6_IPCOMP_LEVEL  = 0x3c
+
+	sysIPV6_TCLASS   = 0x3d
+	sysIPV6_DONTFRAG = 0x3e
+	sysIPV6_PIPEX    = 0x3f
+
+	sysIPV6_RTABLE = 0x1021
+
+	sysIPV6_PORTRANGE_DEFAULT = 0x0
+	sysIPV6_PORTRANGE_HIGH    = 0x1
+	sysIPV6_PORTRANGE_LOW     = 0x2
+
+	sizeofSockaddrInet6 = 0x1c
+	sizeofInet6Pktinfo  = 0x14
+	sizeofIPv6Mtuinfo   = 0x20
+
+	sizeofIPv6Mreq = 0x14
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type sockaddrInet6 struct {
+	Len      uint8
+	Family   uint8
+	Port     uint16
+	Flowinfo uint32
+	Addr     [16]byte /* in6_addr */
+	Scope_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type icmpv6Filter struct {
+	Filt [8]uint32
+}
diff --git a/harvester/vendor/golang.org/x/net/ipv6/zsys_solaris.go b/harvester/vendor/golang.org/x/net/ipv6/zsys_solaris.go
new file mode 100644
index 0000000..cf1837d
--- /dev/null
+++ b/harvester/vendor/golang.org/x/net/ipv6/zsys_solaris.go
@@ -0,0 +1,131 @@
+// Created by cgo -godefs - DO NOT EDIT
+// cgo -godefs defs_solaris.go
+
+package ipv6
+
+const (
+	sysIPV6_UNICAST_HOPS   = 0x5
+	sysIPV6_MULTICAST_IF   = 0x6
+	sysIPV6_MULTICAST_HOPS = 0x7
+	sysIPV6_MULTICAST_LOOP = 0x8
+	sysIPV6_JOIN_GROUP     = 0x9
+	sysIPV6_LEAVE_GROUP    = 0xa
+
+	sysIPV6_PKTINFO = 0xb
+
+	sysIPV6_HOPLIMIT = 0xc
+	sysIPV6_NEXTHOP  = 0xd
+	sysIPV6_HOPOPTS  = 0xe
+	sysIPV6_DSTOPTS  = 0xf
+
+	sysIPV6_RTHDR        = 0x10
+	sysIPV6_RTHDRDSTOPTS = 0x11
+
+	sysIPV6_RECVPKTINFO  = 0x12
+	sysIPV6_RECVHOPLIMIT = 0x13
+	sysIPV6_RECVHOPOPTS  = 0x14
+
+	sysIPV6_RECVRTHDR = 0x16
+
+	sysIPV6_RECVRTHDRDSTOPTS = 0x17
+
+	sysIPV6_CHECKSUM        = 0x18
+	sysIPV6_RECVTCLASS      = 0x19
+	sysIPV6_USE_MIN_MTU     = 0x20
+	sysIPV6_DONTFRAG        = 0x21
+	sysIPV6_SEC_OPT         = 0x22
+	sysIPV6_SRC_PREFERENCES = 0x23
+	sysIPV6_RECVPATHMTU     = 0x24
+	sysIPV6_PATHMTU         = 0x25
+	sysIPV6_TCLASS          = 0x26
+	sysIPV6_V6ONLY          = 0x27
+
+	sysIPV6_RECVDSTOPTS = 0x28
+
+	sysMCAST_JOIN_GROUP         = 0x29
+	sysMCAST_LEAVE_GROUP        = 0x2a
+	sysMCAST_BLOCK_SOURCE       = 0x2b
+	sysMCAST_UNBLOCK_SOURCE     = 0x2c
+	sysMCAST_JOIN_SOURCE_GROUP  = 0x2d
+	sysMCAST_LEAVE_SOURCE_GROUP = 0x2e
+
+	sysIPV6_PREFER_SRC_HOME   = 0x1
+	sysIPV6_PREFER_SRC_COA    = 0x2
+	sysIPV6_PREFER_SRC_PUBLIC = 0x4
+	sysIPV6_PREFER_SRC_TMP    = 0x8
+	sysIPV6_PREFER_SRC_NONCGA = 0x10
+	sysIPV6_PREFER_SRC_CGA    = 0x20
+
+	sysIPV6_PREFER_SRC_MIPMASK    = 0x3
+	sysIPV6_PREFER_SRC_MIPDEFAULT = 0x1
+	sysIPV6_PREFER_SRC_TMPMASK    = 0xc
+	sysIPV6_PREFER_SRC_TMPDEFAULT = 0x4
+	sysIPV6_PREFER_SRC_CGAMASK    = 0x30
+	sysIPV6_PREFER_SRC_CGADEFAULT = 0x10
+
+	sysIPV6_PREFER_SRC_MASK = 0x3f
+
+	sysIPV6_PREFER_SRC_DEFAULT = 0x15
+
+	sysIPV6_BOUND_IF   = 0x41
+	sysIPV6_UNSPEC_SRC = 0x42
+
+	sysICMP6_FILTER = 0x1
+
+	sizeofSockaddrStorage = 0x100
+	sizeofSockaddrInet6   = 0x20
+	sizeofInet6Pktinfo    = 0x14
+	sizeofIPv6Mtuinfo     = 0x24
+
+	sizeofIPv6Mreq       = 0x14
+	sizeofGroupReq       = 0x104
+	sizeofGroupSourceReq = 0x204
+
+	sizeofICMPv6Filter = 0x20
+)
+
+type sockaddrStorage struct {
+	Family     uint16
+	X_ss_pad1  [6]int8
+	X_ss_align float64
+	X_ss_pad2  [240]int8
+}
+
+type sockaddrInet6 struct {
+	Family         uint16
+	Port           uint16
+	Flowinfo       uint32
+	Addr           [16]byte /* in6_addr */
+	Scope_id       uint32
+	X__sin6_src_id uint32
+}
+
+type inet6Pktinfo struct {
+	Addr    [16]byte /* in6_addr */
+	Ifindex uint32
+}
+
+type ipv6Mtuinfo struct {
+	Addr sockaddrInet6
+	Mtu  uint32
+}
+
+type ipv6Mreq struct {
+	Multiaddr [16]byte /* in6_addr */
+	Interface uint32
+}
+
+type groupReq struct {
+	Interface uint32
+	Pad_cgo_0 [256]byte
+}
+
+type groupSourceReq struct {
+	Interface uint32
+	Pad_cgo_0 [256]byte
+	Pad_cgo_1 [256]byte
+}
+
+type icmpv6Filter struct {
+	X__icmp6_filt [8]uint32
+}
diff --git a/harvester/vendor/vendor.json b/harvester/vendor/vendor.json
new file mode 100644
index 0000000..55e4162
--- /dev/null
+++ b/harvester/vendor/vendor.json
@@ -0,0 +1,67 @@
+{
+	"comment": "",
+	"ignore": "test",
+	"package": [
+		{
+			"checksumSHA1": "dGXnnR7ZhsrZNnEqFimk6q7YCqs=",
+			"path": "github.com/Sirupsen/logrus",
+			"revision": "61e43dc76f7ee59a82bdf3d71033dc12bea4c77d",
+			"revisionTime": "2017-01-13T01:19:11Z"
+		},
+		{
+			"checksumSHA1": "F5dR3/i70EhSIMZfeIV+H8/PtvM=",
+			"path": "github.com/gorilla/mux",
+			"revision": "392c28fe23e1c45ddba891b0320b3b5df220beea",
+			"revisionTime": "2017-01-18T13:43:44Z"
+		},
+		{
+			"checksumSHA1": "pNria08/hqW7nnWb0erRBMdJU3M=",
+			"path": "github.com/kelseyhightower/envconfig",
+			"revision": "4069f29f08928c54bcb1fdf632b31b1bb54b4fdb",
+			"revisionTime": "2017-01-13T19:16:37Z"
+		},
+		{
+			"checksumSHA1": "DpsruMRlfaXitPiELMUatEOMh1k=",
+			"path": "github.com/tatsushid/go-fastping",
+			"revision": "d7bb493dee3e090e2ffb6914adddf17c1e7c026c",
+			"revisionTime": "2016-01-08T17:53:29Z"
+		},
+		{
+			"checksumSHA1": "9Pcc1IiRqPxEcxU2KMpkrcEGb3k=",
+			"path": "golang.org/x/net/bpf",
+			"revision": "f2499483f923065a842d38eb4c7f1927e6fc6e6d",
+			"revisionTime": "2017-01-14T04:22:49Z"
+		},
+		{
+			"checksumSHA1": "fsavl2xrqJNp2rnCPrmj7wVbyqo=",
+			"path": "golang.org/x/net/icmp",
+			"revision": "f2499483f923065a842d38eb4c7f1927e6fc6e6d",
+			"revisionTime": "2017-01-14T04:22:49Z"
+		},
+		{
+			"checksumSHA1": "ph4zMZHCWvSjVOZwoxbks+nG0/M=",
+			"path": "golang.org/x/net/internal/iana",
+			"revision": "f2499483f923065a842d38eb4c7f1927e6fc6e6d",
+			"revisionTime": "2017-01-14T04:22:49Z"
+		},
+		{
+			"checksumSHA1": "LVY0kRV5iwxDS3XQaBWxdzPemyc=",
+			"path": "golang.org/x/net/internal/netreflect",
+			"revision": "f2499483f923065a842d38eb4c7f1927e6fc6e6d",
+			"revisionTime": "2017-01-14T04:22:49Z"
+		},
+		{
+			"checksumSHA1": "825oQJv9jmZu0T9YSXnDHtmeNUY=",
+			"path": "golang.org/x/net/ipv4",
+			"revision": "f2499483f923065a842d38eb4c7f1927e6fc6e6d",
+			"revisionTime": "2017-01-14T04:22:49Z"
+		},
+		{
+			"checksumSHA1": "O58psCWTm25Kf+VCFLoamNACZ8Q=",
+			"path": "golang.org/x/net/ipv6",
+			"revision": "f2499483f923065a842d38eb4c7f1927e6fc6e6d",
+			"revisionTime": "2017-01-14T04:22:49Z"
+		}
+	],
+	"rootPath": "gerrit.opencord.org/maas/harvester"
+}
diff --git a/ip-allocator/Dockerfile b/ip-allocator/Dockerfile
index cac402b..27167f1 100644
--- a/ip-allocator/Dockerfile
+++ b/ip-allocator/Dockerfile
@@ -11,21 +11,12 @@
 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ## See the License for the specific language governing permissions and
 ## limitations under the License.
-FROM golang:alpine
+FROM golang:1.7-alpine
 MAINTAINER Open Networking Laboratory <info@onlab.us>
 
-RUN apk --update add git
-
 WORKDIR /go
-RUN go get github.com/tools/godep
 ADD . /go/src/gerrit.opencord.org/maas/cord-ip-allocator
-
-WORKDIR /go/src/gerrit.opencord.org/maas/cord-ip-allocator
-RUN /go/bin/godep restore || true
-
-WORKDIR /go
-
-RUN go install gerrit.opencord.org/maas/cord-ip-allocator
+RUN mkdir /service
+RUN go build -o /service/entry-point gerrit.opencord.org/maas/cord-ip-allocator
 
 LABEL org.label-schema.name="allocator" \
       org.label-schema.description="Provides IP address allocation for fabric interfaces" \
@@ -33,4 +24,5 @@
       org.label-schema.vendor="Open Networking Laboratory" \
       org.label-schema.schema-version="1.0"
 
-ENTRYPOINT ["/go/bin/cord-ip-allocator"]
+WORKDIR /service
+ENTRYPOINT ["/service/entry-point"]
diff --git a/ip-allocator/Dockerfile.release b/ip-allocator/Dockerfile.release
new file mode 100644
index 0000000..3dc00f1
--- /dev/null
+++ b/ip-allocator/Dockerfile.release
@@ -0,0 +1,27 @@
+## Copyright 2017 Open Networking Laboratory
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+FROM alpine:3.5
+MAINTAINER Open Networking Laboratory <info@onlab.us>
+
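+## The entry-point binary is not built in this image; the Makefile "package"
+## target copies it out of the build image before this release image is built.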
+RUN mkdir /service
+ADD entry-point /service/entry-point
+
+LABEL org.label-schema.name="allocator" \
+      org.label-schema.description="Provides IP address allocation for fabric interfaces" \
+      org.label-schema.vcs-url="https://gerrit.opencord.org/maas" \
+      org.label-schema.vendor="Open Networking Laboratory" \
+      org.label-schema.schema-version="1.0"
+
+WORKDIR /service
+ENTRYPOINT ["/service/entry-point"]
diff --git a/ip-allocator/Godeps/Godeps.json b/ip-allocator/Godeps/Godeps.json
deleted file mode 100644
index 8b2d186..0000000
--- a/ip-allocator/Godeps/Godeps.json
+++ /dev/null
@@ -1,29 +0,0 @@
-{
-	"ImportPath": "_/Users/dbainbri/src/maas/ip-allocator",
-	"GoVersion": "go1.6",
-	"Packages": [
-		"github.com/kelseyhightower/envconfig",
-		"github.com/gorilla/mux"
-	],
-	"Deps": [
-		{
-			"ImportPath": "github.com/gorilla/context",
-			"Comment": "v1.1-4-gaed02d1",
-			"Rev": "aed02d124ae4a0e94fea4541c8effd05bf0c8296"
-		},
-		{
-			"ImportPath": "github.com/gorilla/mux",
-			"Comment": "v1.1-9-gbd09be0",
-			"Rev": "bd09be08ed4377796d312df0a45314e11b8f5dc1"
-		},
-		{
-			"ImportPath": "github.com/kelseyhightower/envconfig",
-			"Comment": "1.1.0-17-g91921eb",
-			"Rev": "91921eb4cf999321cdbeebdba5a03555800d493b"
-		},
-                {
-                        "ImportPath": "github.com/Sirupsen/logrus",
-                        "Rev": "f3cfb454f4c209e6668c95216c4744b8fddb2356"
-                }
-	]
-}
diff --git a/ip-allocator/Makefile b/ip-allocator/Makefile
new file mode 100644
index 0000000..ad1834c
--- /dev/null
+++ b/ip-allocator/Makefile
@@ -0,0 +1,38 @@
+IMAGE_NAME=cord-ip-allocator
+BINARY=entry-point
+BUILD_TAG=build
+PACKAGE_TAG=candidate
+
+.PHONY: help
+help:
+	@echo "build     - create the binary"
+	@echo "package   - package the binary into a docker container"
+	@echo "clean     - remove temporary files and build artifacts"
+	@echo "help      - this message"
+
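+# Build metadata stamped into the packaged image via label-schema labels.
+# VERSION is the manifest branch, marked "[modified]" when the workspace has
+# local branches or uncommitted changes (as reported by the repo tool).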
+BUILD_DATE=$(shell date -u +%Y-%m-%dT%TZ)
+VCS_REF=$(shell git log --pretty=format:%H -n 1)
+VCS_REF_DATE=$(shell git log --pretty=format:%cd --date=format:%FT%T%z -n 1)
+BRANCHES=$(shell repo --color=never --no-pager branches 2>/dev/null | wc -l)
+STATUS=$(shell repo --color=never --no-pager status . | tail -n +2 | wc -l)
+MODIFIED=$(shell test $(BRANCHES) -eq 0 && test $(STATUS) -eq 0 || echo "[modified]")
+BRANCH=$(shell repo --color=never --no-pager info -l -o | grep 'Manifest branch:' | awk '{print $$NF}')
+VERSION=$(BRANCH)$(MODIFIED)
+
+.PHONY: build
+build:
+	docker build -t $(IMAGE_NAME):$(BUILD_TAG) .
+
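+# package: extract the compiled binary from the build image, then assemble a
+# minimal runtime image from Dockerfile.release, stamping it with the build
+# metadata computed above.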
+.PHONY: package
+package:
+	$(eval BUILD_ID := $(shell docker create $(IMAGE_NAME):$(BUILD_TAG)))
+	$(eval BINDIR := $(shell mktemp -d))
+	docker cp $(BUILD_ID):/service/$(BINARY) $(BINDIR)/$(BINARY)
+	cp Dockerfile.release $(BINDIR)
+	docker build -f $(BINDIR)/Dockerfile.release -t $(IMAGE_NAME):$(PACKAGE_TAG) --label org.label-schema.build-date=$(BUILD_DATE) --label org.label-schema.vcs-ref=$(VCS_REF) --label org.label-schema.vcs-ref-date=$(VCS_REF_DATE) --label org.label-schema.version=$(VERSION) $(BINDIR)
+	docker rm -f $(BUILD_ID)
+	rm -r $(BINDIR)
+
+.PHONY: clean
+clean:
+	@echo ""
diff --git a/ip-allocator/allocator.go b/ip-allocator/allocator.go
index 03dc9e8..590585b 100644
--- a/ip-allocator/allocator.go
+++ b/ip-allocator/allocator.go
@@ -14,21 +14,25 @@
 package main
 
 import (
+	"flag"
 	"fmt"
 	"github.com/Sirupsen/logrus"
 	"github.com/gorilla/mux"
 	"github.com/kelseyhightower/envconfig"
 	"net/http"
+	"os"
 )
 
+const appName = "ALLOCATE"
+
 type Config struct {
-	Port      int    `default:"4242"`
-	Listen    string `default:"0.0.0.0"`
-	Network   string `default:"10.0.0.0/24"`
-	RangeLow  string `default:"10.0.0.2" envconfig:"RANGE_LOW"`
-	RangeHigh string `default:"10.0.0.253" envconfig:"RANGE_HIGH"`
-	LogLevel  string `default:"warning" envconfig:"LOG_LEVEL"`
-	LogFormat string `default:"text" envconfig:"LOG_FORMAT"`
+	Port      int    `default:"4242" desc:"port on which to listen for requests"`
+	Listen    string `default:"0.0.0.0" desc:"IP on which to listen for requests"`
+	Network   string `default:"10.0.0.0/24" desc:"subnet to allocate via requests"`
+	RangeLow  string `default:"10.0.0.2" envconfig:"RANGE_LOW" desc:"low value in range to allocate"`
+	RangeHigh string `default:"10.0.0.253" envconfig:"RANGE_HIGH" desc:"high value in range to allocate"`
+	LogLevel  string `default:"warning" envconfig:"LOG_LEVEL" desc:"detail level for logging"`
+	LogFormat string `default:"text" envconfig:"LOG_FORMAT" desc:"log output format, text or json"`
 }
 
 type Context struct {
@@ -36,12 +40,23 @@
 }
 
 var log = logrus.New()
+var appFlags = flag.NewFlagSet("", flag.ContinueOnError)
 
 func main() {
 	context := &Context{}
 
 	config := Config{}
-	err := envconfig.Process("ALLOCATE", &config)
+	appFlags.Usage = func() {
+		envconfig.Usage(appName, &config)
+	}
+	if err := appFlags.Parse(os.Args[1:]); err != nil {
+		if err != flag.ErrHelp {
+			os.Exit(1)
+		} else {
+			return
+		}
+	}
+	err := envconfig.Process(appName, &config)
 	if err != nil {
 		log.Fatalf("Unable to parse configuration options : %s", err)
 	}
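
For context on the envconfig pattern above: Process upper-cases each field name
and prefixes it with the given app name, so the Config struct is populated from
variables such as ALLOCATE_RANGE_LOW, and the new desc tags feed the usage text
that envconfig.Usage prints when -h is passed. A minimal, illustrative sketch
of that mapping (not part of this change):

    package main

    import (
    	"fmt"
    	"os"

    	"github.com/kelseyhightower/envconfig"
    )

    type Config struct {
    	RangeLow string `default:"10.0.0.2" envconfig:"RANGE_LOW" desc:"low value in range to allocate"`
    }

    func main() {
    	os.Setenv("ALLOCATE_RANGE_LOW", "10.0.0.10") // as a deployment would
    	var c Config
    	if err := envconfig.Process("ALLOCATE", &c); err != nil {
    		fmt.Fprintln(os.Stderr, err)
    		os.Exit(1)
    	}
    	fmt.Println(c.RangeLow)         // -> 10.0.0.10
    	envconfig.Usage("ALLOCATE", &c) // prints the desc-based help table
    }
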
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/ip-allocator/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..f2c2bc2
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,66 @@
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/LICENSE b/ip-allocator/vendor/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/README.md b/ip-allocator/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000..206c746
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,433 @@
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
+
+**Seeing weird case-sensitive problems?** See [this
+issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
+This change has been reverted. I apologize for causing this. I greatly
+underestimated the impact this would have. Logrus strives for stability and
+backwards compatibility and failed to provide that.
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+  }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+  "os"
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // Log as JSON instead of the default ASCII formatter.
+  log.SetFormatter(&log.JSONFormatter{})
+
+  // Output to stdout instead of the default stderr, could also be a file.
+  log.SetOutput(os.Stdout)
+
+  // Only log the warning severity or above.
+  log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 122,
+  }).Warn("The group's number increased tremendously!")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 100,
+  }).Fatal("The ice breaks!")
+
+  // A common pattern is to re-use fields between logging statements by re-using
+  // the logrus.Entry returned from WithFields()
+  contextLogger := log.WithFields(log.Fields{
+    "common": "this is a common field",
+    "other": "I also should be logged always",
+  })
+
+  contextLogger.Info("I'll be logged with common and other field")
+  contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+  // The API for setting attributes is a little different than the package level
+  // exported logger. See Godoc.
+  log.Out = os.Stderr
+
+  log.WithFields(logrus.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint that you should add a field; however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
+Note: the Syslog hook also supports connecting to a local syslog socket (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, see the [syslog hook README](hooks/syslog/README.md).
+
+| Hook  | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
+
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
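+
+For example, a call like the following (illustrative, mirroring the doc.go
+sample output) produces all three automatic fields plus the user field:
+
+```go
+log.WithField("animal", "walrus").Info("A walrus appears")
+// time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus
+```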
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is the default; you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force no colored output even when there is a TTY, set
+    the `DisableColors` field to `true`.
+* `logrus.JSONFormatter`. Logs fields as JSON.
+
+Third party logging formatters:
+
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+
+// then, during setup:
+log.SetFormatter(new(MyJSONFormatter))
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+    // create a stdlib log.Logger that writes to
+    // logrus.Logger.
+    ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper arround Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+logger, hook := NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default the Logger is protected by a mutex for concurrent writes; this mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+
+Situations when locking is not needed include:
+
+* You have no hooks registered, or hooks calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+     (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
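
Tying this vendored logger back to the services in this change: the LogLevel
and LogFormat options in each service's Config map onto logrus via ParseLevel
and the formatter types described above. A sketch of that wiring (illustrative
only; the services' actual setup code is not shown in this diff, and
applyLogConfig is a hypothetical helper):

    package main

    import "github.com/Sirupsen/logrus"

    var log = logrus.New()

    // applyLogConfig is hypothetical; it applies a LogLevel/LogFormat pair.
    func applyLogConfig(level, format string) {
    	l, err := logrus.ParseLevel(level)
    	if err != nil {
    		l = logrus.WarnLevel // matches the documented default of "warning"
    	}
    	log.Level = l
    	if format == "json" {
    		log.Formatter = &logrus.JSONFormatter{}
    	} else {
    		log.Formatter = &logrus.TextFormatter{}
    	}
    }

    func main() {
    	applyLogConfig("debug", "json")
    	log.WithFields(logrus.Fields{"service": "ip-allocator"}).Debug("configured")
    }
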
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/alt_exit.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..b4c9e84
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://bitbucket.org/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to shut down gracefully. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/doc.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 0000000..dddd5f8
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+  package main
+
+  import (
+    log "github.com/Sirupsen/logrus"
+  )
+
+  func main() {
+    log.WithFields(log.Fields{
+      "animal": "walrus",
+      "number": 1,
+      "size":   10,
+    }).Info("A walrus appears")
+  }
+
+Output:
+  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/entry.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000..4edbe7a
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,275 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"sync"
+	"time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+	Logger *Logger
+
+	// Contains all the fields set by the user.
+	Data Fields
+
+	// Time at which the log entry was created
+	Time time.Time
+
+	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+	Level Level
+
+	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
+	Message string
+
+	// When formatter is called in entry.log(), a Buffer may be set to the entry
+	Buffer *bytes.Buffer
+}
+
+func NewEntry(logger *Logger) *Entry {
+	return &Entry{
+		Logger: logger,
+		// Default is three fields, give a little extra room
+		Data: make(Fields, 5),
+	}
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		return "", err
+	}
+	str := string(serialized)
+	return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+	return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+	data := make(Fields, len(entry.Data)+len(fields))
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	for k, v := range fields {
+		data[k] = v
+	}
+	return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+	var buffer *bytes.Buffer
+	entry.Time = time.Now()
+	entry.Level = level
+	entry.Message = msg
+
+	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+		entry.Logger.mu.Unlock()
+	}
+	buffer = bufferPool.Get().(*bytes.Buffer)
+	buffer.Reset()
+	defer bufferPool.Put(buffer)
+	entry.Buffer = buffer
+	serialized, err := entry.Logger.Formatter.Format(&entry)
+	entry.Buffer = nil
+	if err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+		entry.Logger.mu.Unlock()
+	} else {
+		entry.Logger.mu.Lock()
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+		entry.Logger.mu.Unlock()
+	}
+
+	// To avoid Entry#log() returning a value that only would make sense for
+	// panic() to use in Entry#Panic(), we avoid the allocation by checking
+	// directly here.
+	if level <= PanicLevel {
+		panic(&entry)
+	}
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.log(DebugLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+	entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.log(InfoLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.log(WarnLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+	entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.log(ErrorLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.log(FatalLevel, fmt.Sprint(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.log(PanicLevel, fmt.Sprint(args...))
+	}
+	panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.Debug(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.Info(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+	entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.Warn(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+	entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.Error(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.Fatal(fmt.Sprintf(format, args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.Panic(fmt.Sprintf(format, args...))
+	}
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.Debug(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.Info(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+	entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.Warn(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+	entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.Error(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.Fatal(entry.sprintlnn(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.Panic(entry.sprintlnn(args...))
+	}
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+	msg := fmt.Sprintln(args...)
+	return msg[:len(msg)-1]
+}
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/exported.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000..9a0120a
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+	"io"
+)
+
+var (
+	// std is the name of the standard logger in stdlib `log`
+	std = New()
+)
+
+func StandardLogger() *Logger {
+	return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+	return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+	return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+	return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+	std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+	std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+	std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+	std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+	std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+	std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+	std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+	std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+	std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+	std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+	std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+	std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+	std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+	std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+	std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+	std.Fatalln(args...)
+}
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/formatter.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..b5fbe93
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is to avoid silently overwriting the `time`, `msg` and `level` fields
+// when dumping an entry. If this code weren't here, doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+	if t, ok := data["time"]; ok {
+		data["fields.time"] = t
+	}
+
+	if m, ok := data["msg"]; ok {
+		data["fields.msg"] = m
+	}
+
+	if l, ok := data["level"]; ok {
+		data["fields.level"] = l
+	}
+}
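+
+// Illustrative sketch (not part of the vendored source): a minimal custom
+// Formatter that emits only the message, assuming just the Entry type defined
+// in this package:
+//
+//	type plainFormatter struct{}
+//
+//	func (plainFormatter) Format(e *Entry) ([]byte, error) {
+//		// Return the raw message followed by a newline; fields are ignored.
+//		return append([]byte(e.Message), '\n'), nil
+//	}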
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/hooks.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers, you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+	Levels() []Level
+	Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+	for _, level := range hook.Levels() {
+		hooks[level] = append(hooks[level], hook)
+	}
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+	for _, hook := range hooks[level] {
+		if err := hook.Fire(entry); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
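+
+// Illustrative sketch (not part of the vendored source), assuming the Level
+// constants declared in this package: a Hook that counts entries at Error
+// level and above, registered with `log.Hooks.Add(&countingHook{})`:
+//
+//	type countingHook struct{ n int }
+//
+//	func (h *countingHook) Levels() []Level {
+//		return []Level{PanicLevel, FatalLevel, ErrorLevel}
+//	}
+//
+//	func (h *countingHook) Fire(e *Entry) error {
+//		h.n++ // fired synchronously for every matching entry
+//		return nil
+//	}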
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/json_formatter.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..266554e
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,74 @@
+package logrus
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+type fieldKey string
+type FieldMap map[fieldKey]string
+
+const (
+	FieldKeyMsg   = "msg"
+	FieldKeyLevel = "level"
+	FieldKeyTime  = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// FieldMap allows users to customize the names of keys for various fields.
+	// As an example:
+	// formatter := &JSONFormatter{
+	//   	FieldMap: FieldMap{
+	// 		 FieldKeyTime: "@timestamp",
+	// 		 FieldKeyLevel: "@level",
+	// 		 FieldKeyMsg: "@message",
+	//    },
+	// }
+	FieldMap FieldMap
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields, len(entry.Data)+3)
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/Sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+	prefixFieldClashes(data)
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+
+	serialized, err := json.Marshal(data)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+	}
+	return append(serialized, '\n'), nil
+}
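+
+// Usage sketch (illustrative, assumes the time package is imported; the field
+// name is hypothetical): attach the formatter to a logger so entries are
+// emitted as JSON lines:
+//
+//	log := New()
+//	log.Formatter = &JSONFormatter{TimestampFormat: time.RFC3339}
+//	log.WithField("service", "ip-allocator").Info("started")
+//	// -> {"level":"info","msg":"started","service":"ip-allocator","time":"..."}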
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/logger.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000..b769f3d
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,308 @@
+package logrus
+
+import (
+	"io"
+	"os"
+	"sync"
+)
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it default which is `os.Stderr`. You can also set this to
+	// something more adventurous, such as logging to Kafka.
+	Out io.Writer
+	// Hooks for the logger instance. These allow firing events based on logging
+	// levels and log entries. For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before logged to Out. The
+	// included formatters are `TextFormatter` and `JSONFormatter` for which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but to a file it wouldn't. You can easily implement your
+	// own that implements the `Formatter` interface, see the `README` or included
+	// formatters for examples.
+	Formatter Formatter
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged. `logrus.Debug` is useful in a development environment, where very
+	// verbose logging is desired.
+	Level Level
+	// Used to sync writing to the log. Locking is enabled by default.
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+}
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//    var log = &Logger{
+//      Out: os.Stderr,
+//      Formatter: new(JSONFormatter),
+//      Hooks: make(LevelHooks),
+//      Level: logrus.DebugLevel,
+//    }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:       os.Stderr,
+		Formatter: new(TextFormatter),
+		Hooks:     make(LevelHooks),
+		Level:     InfoLevel,
+	}
+}
+
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry, note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
+}
+
+// Add an error as single field to the log entry.  All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infof(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalf(format, args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debug(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Info(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Info(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Error(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatal(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panic(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infoln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalln(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+// When a file is opened in append mode, it's safe to write to it concurrently
+// (for messages under 4k on Linux). In these cases the user can choose to
+// disable the lock.
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
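+
+// Usage sketch (illustrative, assumes the os package is imported; error
+// handling elided for brevity): dropping the lock when Out is an append-mode
+// file:
+//
+//	f, _ := os.OpenFile("app.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+//	logger := New()
+//	logger.Out = f
+//	logger.SetNoLock()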
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/logrus.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..e596691
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+	switch level {
+	case DebugLevel:
+		return "debug"
+	case InfoLevel:
+		return "info"
+	case WarnLevel:
+		return "warning"
+	case ErrorLevel:
+		return "error"
+	case FatalLevel:
+		return "fatal"
+	case PanicLevel:
+		return "panic"
+	}
+
+	return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+	switch strings.ToLower(lvl) {
+	case "panic":
+		return PanicLevel, nil
+	case "fatal":
+		return FatalLevel, nil
+	case "error":
+		return ErrorLevel, nil
+	case "warn", "warning":
+		return WarnLevel, nil
+	case "info":
+		return InfoLevel, nil
+	case "debug":
+		return DebugLevel, nil
+	}
+
+	var l Level
+	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
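+
+// Usage sketch (illustrative; the LOG_LEVEL environment variable is
+// hypothetical, and the os package is assumed to be imported): wiring a
+// textual log-level setting to a logger instance:
+//
+//	logger := New()
+//	if lvl, err := ParseLevel(os.Getenv("LOG_LEVEL")); err == nil {
+//		logger.Level = lvl
+//	}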
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+	PanicLevel,
+	FatalLevel,
+	ErrorLevel,
+	WarnLevel,
+	InfoLevel,
+	DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+	// PanicLevel level, highest level of severity. Logs and then calls panic with the
+	// message passed to Debug, Info, ...
+	PanicLevel Level = iota
+	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+	// logging level is set to Panic.
+	FatalLevel
+	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take, that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface, this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+}
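+
+// Usage sketch (illustrative): because both *Logger and *Entry provide these
+// methods, library code can accept either one through FieldLogger:
+//
+//	func doWork(log FieldLogger) {
+//		log.WithField("step", 1).Info("working")
+//	}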
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
new file mode 100644
index 0000000..1960169
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package logrus
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	return true
+}
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000..5f6be4d
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000..308160c
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..329038f
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,22 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var termios Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 0000000..a3c6f6e
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris,!appengine
+
+package logrus
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+	_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+	return err == nil
+}
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..3727e8a
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,!appengine
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/text_formatter.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..076de5d
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,166 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"sort"
+	"strings"
+	"time"
+)
+
+const (
+	nocolor = 0
+	red     = 31
+	green   = 32
+	yellow  = 33
+	blue    = 34
+	gray    = 37
+)
+
+var (
+	baseTimestamp time.Time
+	isTerminal    bool
+)
+
+func init() {
+	baseTimestamp = time.Now()
+	isTerminal = IsTerminal()
+}
+
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time passed since beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+	DisableSorting bool
+}
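+
+// Usage sketch (illustrative): forcing full timestamps with colors disabled:
+//
+//	log := New()
+//	log.Formatter = &TextFormatter{FullTimestamp: true, DisableColors: true}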
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	var b *bytes.Buffer
+	keys := make([]string, 0, len(entry.Data))
+	for k := range entry.Data {
+		keys = append(keys, k)
+	}
+
+	if !f.DisableSorting {
+		sort.Strings(keys)
+	}
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	prefixFieldClashes(entry.Data)
+
+	isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+	isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+	if isColored {
+		f.printColored(b, entry, keys, timestampFormat)
+	} else {
+		if !f.DisableTimestamp {
+			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+		}
+		f.appendKeyValue(b, "level", entry.Level.String())
+		if entry.Message != "" {
+			f.appendKeyValue(b, "msg", entry.Message)
+		}
+		for _, key := range keys {
+			f.appendKeyValue(b, key, entry.Data[key])
+		}
+	}
+
+	b.WriteByte('\n')
+	return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+	var levelColor int
+	switch entry.Level {
+	case DebugLevel:
+		levelColor = gray
+	case WarnLevel:
+		levelColor = yellow
+	case ErrorLevel, FatalLevel, PanicLevel:
+		levelColor = red
+	default:
+		levelColor = blue
+	}
+
+	levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+	if f.DisableTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+	} else if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+	} else {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+	}
+	for _, k := range keys {
+		v := entry.Data[k]
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+		f.appendValue(b, v)
+	}
+}
+
+func needsQuoting(text string) bool {
+	for _, ch := range text {
+		if !((ch >= 'a' && ch <= 'z') ||
+			(ch >= 'A' && ch <= 'Z') ||
+			(ch >= '0' && ch <= '9') ||
+			ch == '-' || ch == '.') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+	b.WriteString(key)
+	b.WriteByte('=')
+	f.appendValue(b, value)
+	b.WriteByte(' ')
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+	switch value := value.(type) {
+	case string:
+		if !needsQuoting(value) {
+			b.WriteString(value)
+		} else {
+			fmt.Fprintf(b, "%q", value)
+		}
+	case error:
+		errmsg := value.Error()
+		if !needsQuoting(errmsg) {
+			b.WriteString(errmsg)
+		} else {
+			fmt.Fprintf(b, "%q", errmsg)
+		}
+	default:
+		fmt.Fprint(b, value)
+	}
+}
diff --git a/ip-allocator/vendor/github.com/Sirupsen/logrus/writer.go b/ip-allocator/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000..f74d2aa
--- /dev/null
+++ b/ip-allocator/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,53 @@
+package logrus
+
+import (
+	"bufio"
+	"io"
+	"runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+	return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+	reader, writer := io.Pipe()
+
+	var printFunc func(args ...interface{})
+	switch level {
+	case DebugLevel:
+		printFunc = logger.Debug
+	case InfoLevel:
+		printFunc = logger.Info
+	case WarnLevel:
+		printFunc = logger.Warn
+	case ErrorLevel:
+		printFunc = logger.Error
+	case FatalLevel:
+		printFunc = logger.Fatal
+	case PanicLevel:
+		printFunc = logger.Panic
+	default:
+		printFunc = logger.Print
+	}
+
+	go logger.writerScanner(reader, printFunc)
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		printFunc(scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		logger.Errorf("Error while reading from Writer: %s", err)
+	}
+	reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
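+
+// Usage sketch (illustrative, given a *Logger named logger and the stdlib log
+// package imported): routing the standard library logger through logrus at
+// Warn level. The finalizer closes the pipe eventually, but closing it
+// explicitly is cleaner:
+//
+//	w := logger.WriterLevel(WarnLevel)
+//	defer w.Close()
+//	stdlog := log.New(w, "", 0)
+//	stdlog.Println("redirected through logrus")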
diff --git a/config-generator/vendor/github.com/gorilla/context/LICENSE b/ip-allocator/vendor/github.com/gorilla/mux/LICENSE
similarity index 100%
copy from config-generator/vendor/github.com/gorilla/context/LICENSE
copy to ip-allocator/vendor/github.com/gorilla/mux/LICENSE
diff --git a/ip-allocator/vendor/github.com/gorilla/mux/README.md b/ip-allocator/vendor/github.com/gorilla/mux/README.md
new file mode 100644
index 0000000..94d396c
--- /dev/null
+++ b/ip-allocator/vendor/github.com/gorilla/mux/README.md
@@ -0,0 +1,339 @@
+gorilla/mux
+===
+[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
+
+![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
+
+http://www.gorillatoolkit.org/pkg/mux
+
+Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
+their respective handler.
+
+The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
+
+* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
+* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
+* URL hosts and paths can have variables with an optional regular expression.
+* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
+* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
+
+---
+
+* [Install](#install)
+* [Examples](#examples)
+* [Matching Routes](#matching-routes)
+* [Listing Routes](#listing-routes)
+* [Static Files](#static-files)
+* [Registered URLs](#registered-urls)
+* [Full Example](#full-example)
+
+---
+
+## Install
+
+With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
+
+```sh
+go get -u github.com/gorilla/mux
+```
+
+## Examples
+
+Let's start registering a couple of URL paths and handlers:
+
+```go
+func main() {
+	r := mux.NewRouter()
+	r.HandleFunc("/", HomeHandler)
+	r.HandleFunc("/products", ProductsHandler)
+	r.HandleFunc("/articles", ArticlesHandler)
+	http.Handle("/", r)
+}
+```
+
+Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
+
+Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/products/{key}", ProductHandler)
+r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
+
+```go
+func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
+	vars := mux.Vars(r)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintf(w, "Category: %v\n", vars["category"])
+}
+```
+
+And this is all you need to know about the basic usage. More advanced options are explained below.
+
+### Matching Routes
+
+Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
+
+```go
+r := mux.NewRouter()
+// Only matches if domain is "www.example.com".
+r.Host("www.example.com")
+// Matches a dynamic subdomain.
+r.Host("{subdomain:[a-z]+}.domain.com")
+```
+
+There are several other matchers that can be added. To match path prefixes:
+
+```go
+r.PathPrefix("/products/")
+```
+
+...or HTTP methods:
+
+```go
+r.Methods("GET", "POST")
+```
+
+...or URL schemes:
+
+```go
+r.Schemes("https")
+```
+
+...or header values:
+
+```go
+r.Headers("X-Requested-With", "XMLHttpRequest")
+```
+
+...or query values:
+
+```go
+r.Queries("key", "value")
+```
+
+...or to use a custom matcher function:
+
+```go
+r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+	return r.ProtoMajor == 0
+})
+```
+
+...and finally, it is possible to combine several matchers in a single route:
+
+```go
+r.HandleFunc("/products", ProductsHandler).
+  Host("www.example.com").
+  Methods("GET").
+  Schemes("http")
+```
+
+Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
+
+```go
+r := mux.NewRouter()
+s := r.Host("www.example.com").Subrouter()
+```
+
+Then register routes in the subrouter:
+
+```go
+s.HandleFunc("/products/", ProductsHandler)
+s.HandleFunc("/products/{key}", ProductHandler)
+s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register their paths relative to a given subrouter.
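+
+A minimal sketch of that pattern (the handler name here is hypothetical):
+
+```go
+api := r.PathPrefix("/api").Subrouter()
+// elsewhere in the app, paths are registered relative to the subrouter:
+api.HandleFunc("/users", UsersHandler) // matches "/api/users"
+```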
+
+There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
+
+```go
+r := mux.NewRouter()
+s := r.PathPrefix("/products").Subrouter()
+// "/products/"
+s.HandleFunc("/", ProductsHandler)
+// "/products/{key}/"
+s.HandleFunc("/{key}/", ProductHandler)
+// "/products/{key}/details"
+s.HandleFunc("/{key}/details", ProductDetailsHandler)
+```
+
+### Listing Routes
+
+Routes on a mux can be listed using the Router.Walk method—useful for generating documentation:
+
+```go
+package main
+
+import (
+    "fmt"
+    "net/http"
+
+    "github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+    return
+}
+
+func main() {
+    r := mux.NewRouter()
+    r.HandleFunc("/", handler)
+    r.HandleFunc("/products", handler)
+    r.HandleFunc("/articles", handler)
+    r.HandleFunc("/articles/{id}", handler)
+    r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+        t, err := route.GetPathTemplate()
+        if err != nil {
+            return err
+        }
+        fmt.Println(t)
+        return nil
+    })
+    http.Handle("/", r)
+}
+```
+
+### Static Files
+
+Note that the path provided to `PathPrefix()` represents a "wildcard": calling
+`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+```go
+func main() {
+	var dir string
+
+	flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+	flag.Parse()
+	r := mux.NewRouter()
+
+	// This will serve files under http://localhost:8000/static/<filename>
+	r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+	srv := &http.Server{
+		Handler:      r,
+		Addr:         "127.0.0.1:8000",
+		// Good practice: enforce timeouts for servers you create!
+		WriteTimeout: 15 * time.Second,
+		ReadTimeout:  15 * time.Second,
+	}
+
+	log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Registered URLs
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+  Name("article")
+```
+
+To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
+
+```go
+url, err := r.Get("article").URL("category", "technology", "id", "42")
+```
+
+...and the result will be a `url.URL` with the following path:
+
+```
+"/articles/technology/42"
+```
+
+This also works for host variables:
+
+```go
+r := mux.NewRouter()
+r.Host("{subdomain}.domain.com").
+  Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// url.String() will be "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42")
+```
+
+All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+```go
+r.HeadersRegexp("Content-Type", "application/(text|json)")
+```
+
+...and the route will match requests with a Content-Type of `application/json` as well as `application/text`.
+
+There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
+
+```go
+// "http://news.domain.com/"
+host, err := r.Get("article").URLHost("subdomain", "news")
+
+// "/articles/technology/42"
+path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+```
+
+And if you use subrouters, host and path defined separately can be built as well:
+
+```go
+r := mux.NewRouter()
+s := r.Host("{subdomain}.domain.com").Subrouter()
+s.Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42")
+```
+
+## Full Example
+
+Here's a complete, runnable example of a small `mux` based server:
+
+```go
+package main
+
+import (
+	"net/http"
+	"log"
+	"github.com/gorilla/mux"
+)
+
+func YourHandler(w http.ResponseWriter, r *http.Request) {
+	w.Write([]byte("Gorilla!\n"))
+}
+
+func main() {
+	r := mux.NewRouter()
+	// Routes consist of a path and a handler function.
+	r.HandleFunc("/", YourHandler)
+
+	// Bind to a port and pass our router in
+	log.Fatal(http.ListenAndServe(":8000", r))
+}
+```
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/ip-allocator/vendor/github.com/gorilla/mux/context_gorilla.go b/ip-allocator/vendor/github.com/gorilla/mux/context_gorilla.go
new file mode 100644
index 0000000..d7adaa8
--- /dev/null
+++ b/ip-allocator/vendor/github.com/gorilla/mux/context_gorilla.go
@@ -0,0 +1,26 @@
+// +build !go1.7
+
+package mux
+
+import (
+	"net/http"
+
+	"github.com/gorilla/context"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+	return context.Get(r, key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+	if val == nil {
+		return r
+	}
+
+	context.Set(r, key, val)
+	return r
+}
+
+func contextClear(r *http.Request) {
+	context.Clear(r)
+}
diff --git a/ip-allocator/vendor/github.com/gorilla/mux/context_native.go b/ip-allocator/vendor/github.com/gorilla/mux/context_native.go
new file mode 100644
index 0000000..209cbea
--- /dev/null
+++ b/ip-allocator/vendor/github.com/gorilla/mux/context_native.go
@@ -0,0 +1,24 @@
+// +build go1.7
+
+package mux
+
+import (
+	"context"
+	"net/http"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+	return r.Context().Value(key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+	if val == nil {
+		return r
+	}
+
+	return r.WithContext(context.WithValue(r.Context(), key, val))
+}
+
+func contextClear(r *http.Request) {
+	return
+}
diff --git a/ip-allocator/vendor/github.com/gorilla/mux/doc.go b/ip-allocator/vendor/github.com/gorilla/mux/doc.go
new file mode 100644
index 0000000..00daf4a
--- /dev/null
+++ b/ip-allocator/vendor/github.com/gorilla/mux/doc.go
@@ -0,0 +1,240 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+	* Requests can be matched based on URL host, path, path prefix, schemes,
+	  header and query values, HTTP methods or using custom matchers.
+	* URL hosts and paths can have variables with an optional regular
+	  expression.
+	* Registered URLs can be built, or "reversed", which helps maintaining
+	  references to resources.
+	* Routes can be used as subrouters: nested routes are only tested if the
+	  parent route matches. This is useful to define groups of routes that
+	  share common conditions like a host, a path prefix or other repeated
+	  attributes. As a bonus, this optimizes request matching.
+	* It implements the http.Handler interface so it is compatible with the
+	  standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+	func main() {
+		r := mux.NewRouter()
+		r.HandleFunc("/", HomeHandler)
+		r.HandleFunc("/products", ProductsHandler)
+		r.HandleFunc("/articles", ArticlesHandler)
+		http.Handle("/", r)
+	}
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/products/{key}", ProductHandler)
+	r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
+
+	r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
+
+The names are used to create a map of route variables which can be retrieved
+calling mux.Vars():
+
+	vars := mux.Vars(request)
+	category := vars["category"]
+
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+	r := mux.NewRouter()
+	// Only matches if domain is "www.example.com".
+	r.Host("www.example.com")
+	// Matches a dynamic subdomain.
+	r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+	r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+	r.Methods("GET", "POST")
+
+...or URL schemes:
+
+	r.Schemes("https")
+
+...or header values:
+
+	r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+	r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+	r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+		return r.ProtoMajor == 0
+	})
+
+...and finally, it is possible to combine several matchers in a single route:
+
+	r.HandleFunc("/products", ProductsHandler).
+	  Host("www.example.com").
+	  Methods("GET").
+	  Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
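+
+A minimal sketch of that pattern (the handler name here is hypothetical):
+
+	api := r.PathPrefix("/api").Subrouter()
+	// elsewhere in the app, paths are registered relative to the subrouter:
+	api.HandleFunc("/users", UsersHandler) // matches "/api/users"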
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as base for their paths:
+
+	r := mux.NewRouter()
+	s := r.PathPrefix("/products").Subrouter()
+	// "/products/"
+	s.HandleFunc("/", ProductsHandler)
+	// "/products/{key}/"
+	s.HandleFunc("/{key}/", ProductHandler)
+	// "/products/{key}/details"
+	s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+	func main() {
+		var dir string
+
+		flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+		flag.Parse()
+		r := mux.NewRouter()
+
+		// This will serve files under http://localhost:8000/static/<filename>
+		r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+		srv := &http.Server{
+			Handler:      r,
+			Addr:         "127.0.0.1:8000",
+			// Good practice: enforce timeouts for servers you create!
+			WriteTimeout: 15 * time.Second,
+			ReadTimeout:  15 * time.Second,
+		}
+
+		log.Fatal(srv.ListenAndServe())
+	}
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+	  Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+	url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+	"/articles/technology/42"
+
+This also works for host variables:
+
+	r := mux.NewRouter()
+	r.Host("{subdomain}.domain.com").
+	  Path("/articles/{category}/{id:[0-9]+}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// url.String() will be "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+	r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of `application/json` as well as
+`application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+	// "http://news.domain.com/"
+	host, err := r.Get("article").URLHost("subdomain", "news")
+
+	// "/articles/technology/42"
+	path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+	r := mux.NewRouter()
+	s := r.Host("{subdomain}.domain.com").Subrouter()
+	s.Path("/articles/{category}/{id:[0-9]+}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42")
+*/
+package mux
diff --git a/ip-allocator/vendor/github.com/gorilla/mux/mux.go b/ip-allocator/vendor/github.com/gorilla/mux/mux.go
new file mode 100644
index 0000000..d66ec38
--- /dev/null
+++ b/ip-allocator/vendor/github.com/gorilla/mux/mux.go
@@ -0,0 +1,542 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"path"
+	"regexp"
+	"strings"
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+	return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+//     var router = mux.NewRouter()
+//
+//     func main() {
+//         http.Handle("/", router)
+//     }
+//
+// Or, for Google App Engine, register it in a init() function:
+//
+//     func init() {
+//         http.Handle("/", router)
+//     }
+//
+// This will send all incoming requests to the router.
+type Router struct {
+	// Configurable Handler to be used when no route matches.
+	NotFoundHandler http.Handler
+	// Parent route, if this is a subrouter.
+	parent parentRoute
+	// Routes to be matched, in order.
+	routes []*Route
+	// Routes by name for URL building.
+	namedRoutes map[string]*Route
+	// See Router.StrictSlash(). This defines the flag for new routes.
+	strictSlash bool
+	// See Router.SkipClean(). This defines the flag for new routes.
+	skipClean bool
+	// If true, do not clear the request context after handling the request.
+	// This has no effect when go1.7+ is used, since the context is stored
+	// on the request itself.
+	KeepContext bool
+	// see Router.UseEncodedPath(). This defines a flag for all routes.
+	useEncodedPath bool
+}
+
+// Match matches registered routes against the request.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+	for _, route := range r.routes {
+		if route.Match(req, match) {
+			return true
+		}
+	}
+
+	// Closest match for a router (includes sub-routers)
+	if r.NotFoundHandler != nil {
+		match.Handler = r.NotFoundHandler
+		return true
+	}
+	return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	if !r.skipClean {
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = getPath(req)
+		}
+		// Clean path to canonical form and redirect.
+		if p := cleanPath(path); p != path {
+
+			// Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
+			// This matches with fix in go 1.2 r.c. 4 for same problem.  Go Issue:
+			// http://code.google.com/p/go/issues/detail?id=5252
+			url := *req.URL
+			url.Path = p
+			p = url.String()
+
+			w.Header().Set("Location", p)
+			w.WriteHeader(http.StatusMovedPermanently)
+			return
+		}
+	}
+	var match RouteMatch
+	var handler http.Handler
+	if r.Match(req, &match) {
+		handler = match.Handler
+		req = setVars(req, match.Vars)
+		req = setCurrentRoute(req, match.Route)
+	}
+	if handler == nil {
+		handler = http.NotFoundHandler()
+	}
+	if !r.KeepContext {
+		defer contextClear(req)
+	}
+	handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
+func (r *Router) StrictSlash(value bool) *Router {
+	r.strictSlash = value
+	return r
+}
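+
+// Usage sketch (illustrative; `handler` stands for any function with the
+// http.HandlerFunc signature): with StrictSlash enabled, a request for
+// "/path" is redirected to the registered "/path/":
+//
+//	r := NewRouter().StrictSlash(true)
+//	r.HandleFunc("/path/", handler)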
+
+// SkipClean defines the path cleaning behaviour for new routes. The initial
+// value is false. Users should be careful about which routes are not cleaned.
+//
+// When true, if the route path is "/path//to", it will remain with the double
+// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
+//
+// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
+// become /fetch/http/xkcd.com/534
+func (r *Router) SkipClean(value bool) *Router {
+	r.skipClean = value
+	return r
+}
+
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+// This behavior has the drawback of needing to match routes against
+// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix)
+// to r.URL.Path will not affect routing when this flag is on and thus may
+// induce unintended behavior.
+//
+// If not called, the router will match the unencoded path to the routes.
+// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
+func (r *Router) UseEncodedPath() *Router {
+	r.useEncodedPath = true
+	return r
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Router) getNamedRoutes() map[string]*Route {
+	if r.namedRoutes == nil {
+		if r.parent != nil {
+			r.namedRoutes = r.parent.getNamedRoutes()
+		} else {
+			r.namedRoutes = make(map[string]*Route)
+		}
+	}
+	return r.namedRoutes
+}
+
+// getRegexpGroup returns regexp definitions from the parent route, if any.
+func (r *Router) getRegexpGroup() *routeRegexpGroup {
+	if r.parent != nil {
+		return r.parent.getRegexpGroup()
+	}
+	return nil
+}
+
+func (r *Router) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	return m
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+	route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath}
+	r.routes = append(r.routes, route)
+	return route
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+	return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+	*http.Request)) *Route {
+	return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+	return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+	return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+	return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+	return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+	return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+	return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+	return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+	return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+	return r.NewRoute().BuildVarsFunc(f)
+}
+
+// Walk walks the router and all its sub-routers, calling walkFn for each route
+// in the tree. The routes are walked in the order they were added. Sub-routers
+// are explored depth-first.
+func (r *Router) Walk(walkFn WalkFunc) error {
+	return r.walk(walkFn, []*Route{})
+}
+
+// SkipRouter is used as a return value from WalkFuncs to indicate that the
+// router that Walk is about to descend into should be skipped.
+var SkipRouter = errors.New("skip this router")
+
+// WalkFunc is the type of the function called for each route visited by Walk.
+// At every invocation, it is given the current route and router, along with
+// a list of ancestor routes that lead to the current route.
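+//
+// A minimal sketch of a WalkFunc that prints every registered path template
+// (illustrative only; error handling elided):
+//
+//     r.Walk(func(route *Route, router *Router, ancestors []*Route) error {
+//         if tpl, err := route.GetPathTemplate(); err == nil {
+//             fmt.Println(tpl)
+//         }
+//         return nil
+//     })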
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
+
+func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
+	for _, t := range r.routes {
+		if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" {
+			continue
+		}
+
+		err := walkFn(t, r, ancestors)
+		if err == SkipRouter {
+			continue
+		}
+		if err != nil {
+			return err
+		}
+		for _, sr := range t.matchers {
+			if h, ok := sr.(*Router); ok {
+				err := h.walk(walkFn, ancestors)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		if h, ok := t.handler.(*Router); ok {
+			ancestors = append(ancestors, t)
+			err := h.walk(walkFn, ancestors)
+			if err != nil {
+				return err
+			}
+			ancestors = ancestors[:len(ancestors)-1]
+		}
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
+// RouteMatch stores information about a matched route.
+type RouteMatch struct {
+	Route   *Route
+	Handler http.Handler
+	Vars    map[string]string
+}
+
+type contextKey int
+
+const (
+	varsKey contextKey = iota
+	routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
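+//
+// As an illustrative sketch, inside a handler registered for a route such
+// as "/articles/{id}":
+//
+//     vars := mux.Vars(req)
+//     id := vars["id"]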
+func Vars(r *http.Request) map[string]string {
+	if rv := contextGet(r, varsKey); rv != nil {
+		return rv.(map[string]string)
+	}
+	return nil
+}
+
+// CurrentRoute returns the matched route for the current request, if any.
+// This only works when called inside the handler of the matched route
+// because the matched route is stored in the request context which is cleared
+// after the handler returns, unless the KeepContext option is set on the
+// Router.
+func CurrentRoute(r *http.Request) *Route {
+	if rv := contextGet(r, routeKey); rv != nil {
+		return rv.(*Route)
+	}
+	return nil
+}
+
+func setVars(r *http.Request, val interface{}) *http.Request {
+	return contextSet(r, varsKey, val)
+}
+
+func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
+	return contextSet(r, routeKey, val)
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// getPath returns the escaped path if possible, doing what URL.EscapedPath()
+// (added in go1.5) does
+func getPath(req *http.Request) string {
+	if req.RequestURI != "" {
+		// Extract the path from RequestURI (which is escaped unlike URL.Path)
+		// as detailed in https://golang.org/pkg/net/url/#URL
+		// for < 1.5 server side workaround
+		// http://localhost/path/here?v=1 -> /path/here
+		path := req.RequestURI
+		path = strings.TrimPrefix(path, req.URL.Scheme+`://`)
+		path = strings.TrimPrefix(path, req.URL.Host)
+		if i := strings.LastIndex(path, "?"); i > -1 {
+			path = path[:i]
+		}
+		if i := strings.LastIndex(path, "#"); i > -1 {
+			path = path[:i]
+		}
+		return path
+	}
+	return req.URL.Path
+}
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+// Borrowed from the net/http package.
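+// For example, cleanPath("a/b/../c/") yields "/a/c/".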
+func cleanPath(p string) string {
+	if p == "" {
+		return "/"
+	}
+	if p[0] != '/' {
+		p = "/" + p
+	}
+	np := path.Clean(p)
+	// path.Clean removes trailing slash except for root;
+	// put the trailing slash back if necessary.
+	if p[len(p)-1] == '/' && np != "/" {
+		np += "/"
+	}
+
+	return np
+}
+
+// uniqueVars returns an error if two slices contain duplicated strings.
+func uniqueVars(s1, s2 []string) error {
+	for _, v1 := range s1 {
+		for _, v2 := range s2 {
+			if v1 == v2 {
+				return fmt.Errorf("mux: duplicated route variable %q", v2)
+			}
+		}
+	}
+	return nil
+}
+
+// checkPairs returns the count of strings passed in, and an error if
+// the count is not an even number.
+func checkPairs(pairs ...string) (int, error) {
+	length := len(pairs)
+	if length%2 != 0 {
+		return length, fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+	}
+	return length, nil
+}
+
+// mapFromPairsToString converts variadic string parameters to a
+// string to string map.
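+// For example, mapFromPairsToString("k1", "v1", "k2", "v2") yields
+// map[string]string{"k1": "v1", "k2": "v2"}.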
+func mapFromPairsToString(pairs ...string) (map[string]string, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]string, length/2)
+	for i := 0; i < length; i += 2 {
+		m[pairs[i]] = pairs[i+1]
+	}
+	return m, nil
+}
+
+// mapFromPairsToRegex converts variadic string parameters to a
+// string to regex map.
+func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]*regexp.Regexp, length/2)
+	for i := 0; i < length; i += 2 {
+		regex, err := regexp.Compile(pairs[i+1])
+		if err != nil {
+			return nil, err
+		}
+		m[pairs[i]] = regex
+	}
+	return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+	for _, v := range arr {
+		if v == value {
+			return true
+		}
+	}
+	return false
+}
+
+// matchMapWithString returns true if the given key/value pairs exist in a given map.
+func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != "" {
+			// If value was defined as an empty string we only check that the
+			// key exists. Otherwise we also check for equality.
+			valueExists := false
+			for _, value := range values {
+				if v == value {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// matchMapWithRegex returns true if the given key/value pairs exist in a given
+// map, with the values matched against the given regexps.
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != nil {
+			// If the value was defined as nil we only check that the
+			// key exists. Otherwise we also check that some value matches
+			// the regexp.
+			valueExists := false
+			for _, value := range values {
+				if v.MatchString(value) {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
diff --git a/ip-allocator/vendor/github.com/gorilla/mux/regexp.go b/ip-allocator/vendor/github.com/gorilla/mux/regexp.go
new file mode 100644
index 0000000..0189ad3
--- /dev/null
+++ b/ip-allocator/vendor/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,323 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
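+//
+// As a rough illustration of the assembled pattern (an internal detail, not
+// a public contract): the template "/articles/{id:[0-9]+}" compiles to
+// "^/articles/(?P<v0>[0-9]+)$", with "id" recorded as its variable name.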
+func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) {
+	// Check if it is well-formed.
+	idxs, errBraces := braceIndices(tpl)
+	if errBraces != nil {
+		return nil, errBraces
+	}
+	// Backup the original.
+	template := tpl
+	// Now let's parse it.
+	defaultPattern := "[^/]+"
+	if matchQuery {
+		defaultPattern = "[^?&]*"
+	} else if matchHost {
+		defaultPattern = "[^.]+"
+		matchPrefix = false
+	}
+	// Only match strict slash when matching a plain path, not a prefix, host or query.
+	if matchPrefix || matchHost || matchQuery {
+		strictSlash = false
+	}
+	// Set a flag for strictSlash.
+	endSlash := false
+	if strictSlash && strings.HasSuffix(tpl, "/") {
+		tpl = tpl[:len(tpl)-1]
+		endSlash = true
+	}
+	varsN := make([]string, len(idxs)/2)
+	varsR := make([]*regexp.Regexp, len(idxs)/2)
+	pattern := bytes.NewBufferString("")
+	pattern.WriteByte('^')
+	reverse := bytes.NewBufferString("")
+	var end int
+	var err error
+	for i := 0; i < len(idxs); i += 2 {
+		// Set all values we are interested in.
+		raw := tpl[end:idxs[i]]
+		end = idxs[i+1]
+		parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+		name := parts[0]
+		patt := defaultPattern
+		if len(parts) == 2 {
+			patt = parts[1]
+		}
+		// Name or pattern can't be empty.
+		if name == "" || patt == "" {
+			return nil, fmt.Errorf("mux: missing name or pattern in %q",
+				tpl[idxs[i]:end])
+		}
+		// Build the regexp pattern.
+		fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
+
+		// Build the reverse template.
+		fmt.Fprintf(reverse, "%s%%s", raw)
+
+		// Append variable name and compiled pattern.
+		varsN[i/2] = name
+		varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Add the remaining.
+	raw := tpl[end:]
+	pattern.WriteString(regexp.QuoteMeta(raw))
+	if strictSlash {
+		pattern.WriteString("[/]?")
+	}
+	if matchQuery {
+		// Add the default pattern if the query value is empty
+		if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
+			pattern.WriteString(defaultPattern)
+		}
+	}
+	if !matchPrefix {
+		pattern.WriteByte('$')
+	}
+	reverse.WriteString(raw)
+	if endSlash {
+		reverse.WriteByte('/')
+	}
+	// Compile full regexp.
+	reg, errCompile := regexp.Compile(pattern.String())
+	if errCompile != nil {
+		return nil, errCompile
+	}
+
+	// Check for capturing groups which used to work in older versions
+	if reg.NumSubexp() != len(idxs)/2 {
+		panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
+			"Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
+	}
+
+	// Done!
+	return &routeRegexp{
+		template:       template,
+		matchHost:      matchHost,
+		matchQuery:     matchQuery,
+		strictSlash:    strictSlash,
+		useEncodedPath: useEncodedPath,
+		regexp:         reg,
+		reverse:        reverse.String(),
+		varsN:          varsN,
+		varsR:          varsR,
+	}, nil
+}
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+	// The unmodified template.
+	template string
+	// True for host match, false for path or query string match.
+	matchHost bool
+	// True for query string match, false for path and host match.
+	matchQuery bool
+	// The strictSlash value defined on the route, but disabled if PathPrefix was used.
+	strictSlash bool
+	// Determines whether to use encoded path from getPath function or unencoded
+	// req.URL.Path for path matching
+	useEncodedPath bool
+	// Expanded regexp.
+	regexp *regexp.Regexp
+	// Reverse template.
+	reverse string
+	// Variable names.
+	varsN []string
+	// Variable regexps (validators).
+	varsR []*regexp.Regexp
+}
+
+// Match matches the regexp against the URL host or path.
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
+	if !r.matchHost {
+		if r.matchQuery {
+			return r.matchQueryString(req)
+		}
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = getPath(req)
+		}
+		return r.regexp.MatchString(path)
+	}
+
+	return r.regexp.MatchString(getHost(req))
+}
+
+// url builds a URL part using the given values.
+func (r *routeRegexp) url(values map[string]string) (string, error) {
+	urlValues := make([]interface{}, len(r.varsN))
+	for k, v := range r.varsN {
+		value, ok := values[v]
+		if !ok {
+			return "", fmt.Errorf("mux: missing route variable %q", v)
+		}
+		urlValues[k] = value
+	}
+	rv := fmt.Sprintf(r.reverse, urlValues...)
+	if !r.regexp.MatchString(rv) {
+		// The URL is checked against the full regexp, instead of checking
+		// individual variables. This is faster, but to provide a good error
+		// message we check individual variable regexps when the URL doesn't
+		// match.
+		for k, v := range r.varsN {
+			if !r.varsR[k].MatchString(values[v]) {
+				return "", fmt.Errorf(
+					"mux: variable %q doesn't match, expected %q", values[v],
+					r.varsR[k].String())
+			}
+		}
+	}
+	return rv, nil
+}
+
+// getURLQuery returns a single query parameter from a request URL.
+// For a URL with foo=bar&baz=ding, we return only the relevant key
+// value pair for the routeRegexp.
+func (r *routeRegexp) getURLQuery(req *http.Request) string {
+	if !r.matchQuery {
+		return ""
+	}
+	templateKey := strings.SplitN(r.template, "=", 2)[0]
+	for key, vals := range req.URL.Query() {
+		if key == templateKey && len(vals) > 0 {
+			return key + "=" + vals[0]
+		}
+	}
+	return ""
+}
+
+func (r *routeRegexp) matchQueryString(req *http.Request) bool {
+	return r.regexp.MatchString(r.getURLQuery(req))
+}
+
+// braceIndices returns the first level curly brace indices from a string.
+// It returns an error in case of unbalanced braces.
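+// For example, braceIndices("/a/{b}/{c}") yields []int{3, 6, 7, 10}.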
+func braceIndices(s string) ([]int, error) {
+	var level, idx int
+	var idxs []int
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '{':
+			if level++; level == 1 {
+				idx = i
+			}
+		case '}':
+			if level--; level == 0 {
+				idxs = append(idxs, idx, i+1)
+			} else if level < 0 {
+				return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+			}
+		}
+	}
+	if level != 0 {
+		return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+	}
+	return idxs, nil
+}
+
+// varGroupName builds a capturing group name for the indexed variable.
+func varGroupName(idx int) string {
+	return "v" + strconv.Itoa(idx)
+}
+
+// ----------------------------------------------------------------------------
+// routeRegexpGroup
+// ----------------------------------------------------------------------------
+
+// routeRegexpGroup groups the route matchers that carry variables.
+type routeRegexpGroup struct {
+	host    *routeRegexp
+	path    *routeRegexp
+	queries []*routeRegexp
+}
+
+// setMatch extracts the variables from the URL once a route matches.
+func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+	// Store host variables.
+	if v.host != nil {
+		host := getHost(req)
+		matches := v.host.regexp.FindStringSubmatchIndex(host)
+		if len(matches) > 0 {
+			extractVars(host, matches, v.host.varsN, m.Vars)
+		}
+	}
+	path := req.URL.Path
+	if r.useEncodedPath {
+		path = getPath(req)
+	}
+	// Store path variables.
+	if v.path != nil {
+		matches := v.path.regexp.FindStringSubmatchIndex(path)
+		if len(matches) > 0 {
+			extractVars(path, matches, v.path.varsN, m.Vars)
+			// Check if we should redirect.
+			if v.path.strictSlash {
+				p1 := strings.HasSuffix(path, "/")
+				p2 := strings.HasSuffix(v.path.template, "/")
+				if p1 != p2 {
+					u, _ := url.Parse(req.URL.String())
+					if p1 {
+						u.Path = u.Path[:len(u.Path)-1]
+					} else {
+						u.Path += "/"
+					}
+					m.Handler = http.RedirectHandler(u.String(), 301)
+				}
+			}
+		}
+	}
+	// Store query string variables.
+	for _, q := range v.queries {
+		queryURL := q.getURLQuery(req)
+		matches := q.regexp.FindStringSubmatchIndex(queryURL)
+		if len(matches) > 0 {
+			extractVars(queryURL, matches, q.varsN, m.Vars)
+		}
+	}
+}
+
+// getHost tries its best to return the request host.
+func getHost(r *http.Request) string {
+	if r.URL.IsAbs() {
+		return r.URL.Host
+	}
+	host := r.Host
+	// Slice off any port information.
+	if i := strings.Index(host, ":"); i != -1 {
+		host = host[:i]
+	}
+	return host
+
+}
+
+func extractVars(input string, matches []int, names []string, output map[string]string) {
+	for i, name := range names {
+		output[name] = input[matches[2*i+2]:matches[2*i+3]]
+	}
+}
diff --git a/ip-allocator/vendor/github.com/gorilla/mux/route.go b/ip-allocator/vendor/github.com/gorilla/mux/route.go
new file mode 100644
index 0000000..9221915
--- /dev/null
+++ b/ip-allocator/vendor/github.com/gorilla/mux/route.go
@@ -0,0 +1,636 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+)
+
+// Route stores information to match a request and build URLs.
+type Route struct {
+	// Parent where the route was registered (a Router).
+	parent parentRoute
+	// Request handler for the route.
+	handler http.Handler
+	// List of matchers.
+	matchers []matcher
+	// Manager for the variables from host and path.
+	regexp *routeRegexpGroup
+	// If true, when the path pattern is "/path/", accessing "/path" will
+	// redirect to the former and vice versa.
+	strictSlash bool
+	// If true, when the path pattern is "/path//to", accessing "/path//to"
+	// will not redirect
+	skipClean bool
+	// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
+	useEncodedPath bool
+	// If true, this route never matches: it is only used to build URLs.
+	buildOnly bool
+	// The name used to build URLs.
+	name string
+	// Error resulted from building a route.
+	err error
+
+	buildVarsFunc BuildVarsFunc
+}
+
+func (r *Route) SkipClean() bool {
+	return r.skipClean
+}
+
+// Match matches the route against the request.
+func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
+	if r.buildOnly || r.err != nil {
+		return false
+	}
+	// Match everything.
+	for _, m := range r.matchers {
+		if matched := m.Match(req, match); !matched {
+			return false
+		}
+	}
+	// Yay, we have a match. Let's collect some info about it.
+	if match.Route == nil {
+		match.Route = r
+	}
+	if match.Handler == nil {
+		match.Handler = r.handler
+	}
+	if match.Vars == nil {
+		match.Vars = make(map[string]string)
+	}
+	// Set variables.
+	if r.regexp != nil {
+		r.regexp.setMatch(req, match, r)
+	}
+	return true
+}
+
+// ----------------------------------------------------------------------------
+// Route attributes
+// ----------------------------------------------------------------------------
+
+// GetError returns an error resulted from building the route, if any.
+func (r *Route) GetError() error {
+	return r.err
+}
+
+// BuildOnly sets the route to never match: it is only used to build URLs.
+func (r *Route) BuildOnly() *Route {
+	r.buildOnly = true
+	return r
+}
+
+// Handler --------------------------------------------------------------------
+
+// Handler sets a handler for the route.
+func (r *Route) Handler(handler http.Handler) *Route {
+	if r.err == nil {
+		r.handler = handler
+	}
+	return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+	return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+	return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// If the name was registered already it will be overwritten.
+func (r *Route) Name(name string) *Route {
+	if r.name != "" {
+		r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+			r.name, name)
+	}
+	if r.err == nil {
+		r.name = name
+		r.getNamedRoutes()[name] = r
+	}
+	return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+	return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+	Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+func (r *Route) addMatcher(m matcher) *Route {
+	if r.err == nil {
+		r.matchers = append(r.matchers, m)
+	}
+	return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
+	if r.err != nil {
+		return r.err
+	}
+	r.regexp = r.getRegexpGroup()
+	if !matchHost && !matchQuery {
+		if tpl == "/" && (len(tpl) == 0 || tpl[0] != '/') {
+			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+		}
+		if r.regexp.path != nil {
+			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+		}
+	}
+	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath)
+	if err != nil {
+		return err
+	}
+	for _, q := range r.regexp.queries {
+		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+			return err
+		}
+	}
+	if matchHost {
+		if r.regexp.path != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+				return err
+			}
+		}
+		r.regexp.host = rr
+	} else {
+		if r.regexp.host != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+				return err
+			}
+		}
+		if matchQuery {
+			r.regexp.queries = append(r.regexp.queries, rr)
+		} else {
+			r.regexp.path = rr
+		}
+	}
+	r.addMatcher(rr)
+	return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+//     r := mux.NewRouter()
+//     r.Headers("Content-Type", "application/json",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]string
+		headers, r.err = mapFromPairsToString(pairs...)
+		return r.addMatcher(headerMatcher(headers))
+	}
+	return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for the header
+type headerRegexMatcher map[string]*regexp.Regexp
+
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
+// support. For example:
+//
+//     r := mux.NewRouter()
+//     r.HeadersRegexp("Content-Type", "application/(text|json)",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match their
+// respective regular expressions.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]*regexp.Regexp
+		headers, r.err = mapFromPairsToRegex(pairs...)
+		return r.addMatcher(headerRegexMatcher(headers))
+	}
+	return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Host("www.example.com")
+//     r.Host("{subdomain}.domain.com")
+//     r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, true, false, false)
+	return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+// Match returns the match for a given request.
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+	return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+	return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+	for k, v := range methods {
+		methods[k] = strings.ToUpper(v)
+	}
+	return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Path("/products/").Handler(ProductsHandler)
+//     r.Path("/products/{key}").Handler(ProductsHandler)
+//     r.Path("/articles/{category}/{id:[0-9]+}").
+//       Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, false, false)
+	return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, true, false)
+	return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined queries
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+	length := len(pairs)
+	if length%2 != 0 {
+		r.err = fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+		return nil
+	}
+	for i := 0; i < length; i += 2 {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
+			return r
+		}
+	}
+
+	return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.URL.Scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+func (r *Route) Schemes(schemes ...string) *Route {
+	for k, v := range schemes {
+		schemes[k] = strings.ToLower(v)
+	}
+	return r.addMatcher(schemeMatcher(schemes))
+}
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+	r.buildVarsFunc = f
+	return r
+}
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+//     r := mux.NewRouter()
+//     s := r.Host("www.example.com").Subrouter()
+//     s.HandleFunc("/products/", ProductsHandler)
+//     s.HandleFunc("/products/{key}", ProductHandler)
+//     s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+	router := &Router{parent: r, strictSlash: r.strictSlash}
+	r.addMatcher(router)
+	return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Name("article")
+//
+// ...a URL for it can be built using:
+//
+//     url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return an url.URL with the following path:
+//
+//     "/articles/technology/42"
+//
+// This also works for host variables:
+//
+//     r := mux.NewRouter()
+//     r.Host("{subdomain}.domain.com").
+//       HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Name("article")
+//
+//     // url.String() will be "http://news.domain.com/articles/technology/42"
+//     url, err := r.Get("article").URL("subdomain", "news",
+//                                      "category", "technology",
+//                                      "id", "42")
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil {
+		return nil, errors.New("mux: route doesn't have a host or path")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	var scheme, host, path string
+	if r.regexp.host != nil {
+		// Set a default scheme.
+		scheme = "http"
+		if host, err = r.regexp.host.url(values); err != nil {
+			return nil, err
+		}
+	}
+	if r.regexp.path != nil {
+		if path, err = r.regexp.path.url(values); err != nil {
+			return nil, err
+		}
+	}
+	return &url.URL{
+		Scheme: scheme,
+		Host:   host,
+		Path:   path,
+	}, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil || r.regexp.host == nil {
+		return nil, errors.New("mux: route doesn't have a host")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	host, err := r.regexp.host.url(values)
+	if err != nil {
+		return nil, err
+	}
+	return &url.URL{
+		Scheme: "http",
+		Host:   host,
+	}, nil
+}
+
+// URLPath builds the path part of the URL for a route. See Route.URL().
+//
+// The route must have a path defined.
+func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil || r.regexp.path == nil {
+		return nil, errors.New("mux: route doesn't have a path")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	path, err := r.regexp.path.url(values)
+	if err != nil {
+		return nil, err
+	}
+	return &url.URL{
+		Path: path,
+	}, nil
+}
+
+// GetPathTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp == nil || r.regexp.path == nil {
+		return "", errors.New("mux: route doesn't have a path")
+	}
+	return r.regexp.path.template, nil
+}
+
+// GetHostTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a host.
+func (r *Route) GetHostTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp == nil || r.regexp.host == nil {
+		return "", errors.New("mux: route doesn't have a host")
+	}
+	return r.regexp.host.template, nil
+}
+
+// prepareVars converts the route variable pairs into a map. If the route has a
+// BuildVarsFunc, it is invoked.
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
+	m, err := mapFromPairsToString(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	return r.buildVars(m), nil
+}
+
+func (r *Route) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	if r.buildVarsFunc != nil {
+		m = r.buildVarsFunc(m)
+	}
+	return m
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// parentRoute allows routes to know about parent host and path definitions.
+type parentRoute interface {
+	getNamedRoutes() map[string]*Route
+	getRegexpGroup() *routeRegexpGroup
+	buildVars(map[string]string) map[string]string
+}
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Route) getNamedRoutes() map[string]*Route {
+	if r.parent == nil {
+		// During tests router is not always set.
+		r.parent = NewRouter()
+	}
+	return r.parent.getNamedRoutes()
+}
+
+// getRegexpGroup returns regexp definitions from this route.
+func (r *Route) getRegexpGroup() *routeRegexpGroup {
+	if r.regexp == nil {
+		if r.parent == nil {
+			// During tests router is not always set.
+			r.parent = NewRouter()
+		}
+		regexp := r.parent.getRegexpGroup()
+		if regexp == nil {
+			r.regexp = new(routeRegexpGroup)
+		} else {
+			// Copy.
+			r.regexp = &routeRegexpGroup{
+				host:    regexp.host,
+				path:    regexp.path,
+				queries: regexp.queries,
+			}
+		}
+	}
+	return r.regexp
+}
diff --git a/ip-allocator/vendor/github.com/kelseyhightower/envconfig/LICENSE b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/LICENSE
new file mode 100644
index 0000000..4bfa7a8
--- /dev/null
+++ b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Kelsey Hightower
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ip-allocator/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
new file mode 100644
index 0000000..6527a9f
--- /dev/null
+++ b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
@@ -0,0 +1,2 @@
+Kelsey Hightower kelsey.hightower@gmail.com github.com/kelseyhightower
+Travis Parker    travis.parker@gmail.com    github.com/teepark
diff --git a/ip-allocator/vendor/github.com/kelseyhightower/envconfig/README.md b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/README.md
new file mode 100644
index 0000000..09de74b
--- /dev/null
+++ b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/README.md
@@ -0,0 +1,175 @@
+# envconfig
+
+[![Build Status](https://travis-ci.org/kelseyhightower/envconfig.png)](https://travis-ci.org/kelseyhightower/envconfig)
+
+```Go
+import "github.com/kelseyhightower/envconfig"
+```
+
+## Documentation
+
+See [godoc](http://godoc.org/github.com/kelseyhightower/envconfig)
+
+## Usage
+
+Set some environment variables:
+
+```Bash
+export MYAPP_DEBUG=false
+export MYAPP_PORT=8080
+export MYAPP_USER=Kelsey
+export MYAPP_RATE="0.5"
+export MYAPP_TIMEOUT="3m"
+export MYAPP_USERS="rob,ken,robert"
+```
+
+Write some code:
+
+```Go
+package main
+
+import (
+    "fmt"
+    "log"
+    "time"
+
+    "github.com/kelseyhightower/envconfig"
+)
+
+type Specification struct {
+    Debug   bool
+    Port    int
+    User    string
+    Users   []string
+    Rate    float32
+    Timeout time.Duration
+}
+
+func main() {
+    var s Specification
+    err := envconfig.Process("myapp", &s)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+    format := "Debug: %v\nPort: %d\nUser: %s\nRate: %f\nTimeout: %s\n"
+    _, err = fmt.Printf(format, s.Debug, s.Port, s.User, s.Rate, s.Timeout)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+
+    fmt.Println("Users:")
+    for _, u := range s.Users {
+        fmt.Printf("  %s\n", u)
+    }
+}
+```
+
+Results:
+
+```Bash
+Debug: false
+Port: 8080
+User: Kelsey
+Rate: 0.500000
+Timeout: 3m0s
+Users:
+  rob
+  ken
+  robert
+```
+
+## Struct Tag Support
+
+Envconfig supports the use of struct tags to specify alternate, default, and required
+environment variables.
+
+For example, consider the following struct:
+
+```Go
+type Specification struct {
+    ManualOverride1 string `envconfig:"manual_override_1"`
+    DefaultVar      string `default:"foobar"`
+    RequiredVar     string `required:"true"`
+    IgnoredVar      string `ignored:"true"`
+    AutoSplitVar    string `split_words:"true"`
+}
+```
+
+Envconfig has automatic support for CamelCased struct elements when the
+`split_words:"true"` tag is supplied. Without this tag, `AutoSplitVar` above
+would look for an environment variable called `MYAPP_AUTOSPLITVAR`. With the
+setting applied it will look for `MYAPP_AUTO_SPLIT_VAR`. Note that numbers
+will get globbed into the previous word. If the setting does not do the
+right thing, you may use a manual override.
+
+Envconfig will process value for `ManualOverride1` by populating it with the
+value for `MYAPP_MANUAL_OVERRIDE_1`. Without this struct tag, it would have
+instead looked up `MYAPP_MANUALOVERRIDE1`. With the `split_words:"true"` tag
+it would have looked up `MYAPP_MANUAL_OVERRIDE1`.
+
+```Bash
+export MYAPP_MANUAL_OVERRIDE_1="this will be the value"
+
+# export MYAPP_MANUALOVERRIDE1="and this will not"
+```
+
+If envconfig can't find an environment variable value for `MYAPP_DEFAULTVAR`,
+it will populate it with "foobar" as a default value.
+
+If envconfig can't find an environment variable value for `MYAPP_REQUIREDVAR`,
+it will return an error when asked to process the struct.
+
+If envconfig can't find an environment variable in the form `PREFIX_MYVAR`, and there
+is a struct tag defined, it will try to populate your variable with an environment
+variable that directly matches the envconfig tag in your struct definition:
+
+```shell
+export SERVICE_HOST=127.0.0.1
+export MYAPP_DEBUG=true
+```
+```Go
+type Specification struct {
+    ServiceHost string `envconfig:"SERVICE_HOST"`
+    Debug       bool
+}
+```
+
+Envconfig won't process a field with the "ignored" tag set to "true", even if a corresponding
+environment variable is set.
+
+## Supported Struct Field Types
+
+envconfig supports these struct field types:
+
+  * string
+  * int, int8, int16, int32, int64
+  * uint, uint8, uint16, uint32, uint64
+  * bool
+  * float32, float64
+  * time.Duration
+  * slices of any supported type (from comma-separated values)
+  * [encoding.TextUnmarshaler](https://golang.org/pkg/encoding/#TextUnmarshaler)
+
+Embedded structs using these fields are also supported.
+
+## Custom Decoders
+
+Any field whose type (or pointer-to-type) implements `envconfig.Decoder` can
+control its own deserialization:
+
+```Bash
+export DNS_SERVER=8.8.8.8
+```
+
+```Go
+type IPDecoder net.IP
+
+func (ipd *IPDecoder) Decode(value string) error {
+    *ipd = IPDecoder(net.ParseIP(value))
+    return nil
+}
+
+type DNSConfig struct {
+    Address IPDecoder `envconfig:"DNS_SERVER"`
+}
+```
+
+Also, envconfig will use a `Set(string) error` method like from the
+[flag.Value](https://godoc.org/flag#Value) interface if implemented.
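+
+As a hedged sketch (the `Port` type here is hypothetical, and `strconv` is
+assumed to be imported):
+
+```Go
+type Port int
+
+func (p *Port) Set(value string) error {
+    v, err := strconv.Atoi(value)
+    if err != nil {
+        return err
+    }
+    *p = Port(v)
+    return nil
+}
+```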
diff --git a/ip-allocator/vendor/github.com/kelseyhightower/envconfig/doc.go b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/doc.go
new file mode 100644
index 0000000..f28561c
--- /dev/null
+++ b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/doc.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+// Package envconfig implements decoding of environment variables based on a user
+// defined specification. A typical use is using environment variables for
+// configuration settings.
+package envconfig
diff --git a/ip-allocator/vendor/github.com/kelseyhightower/envconfig/env_os.go b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/env_os.go
new file mode 100644
index 0000000..a6a014a
--- /dev/null
+++ b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/env_os.go
@@ -0,0 +1,7 @@
+// +build appengine
+
+package envconfig
+
+import "os"
+
+var lookupEnv = os.LookupEnv
diff --git a/ip-allocator/vendor/github.com/kelseyhightower/envconfig/env_syscall.go b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
new file mode 100644
index 0000000..9d98085
--- /dev/null
+++ b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
@@ -0,0 +1,7 @@
+// +build !appengine
+
+package envconfig
+
+import "syscall"
+
+var lookupEnv = syscall.Getenv
diff --git a/ip-allocator/vendor/github.com/kelseyhightower/envconfig/envconfig.go b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/envconfig.go
new file mode 100644
index 0000000..3ad5e7d
--- /dev/null
+++ b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/envconfig.go
@@ -0,0 +1,298 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// ErrInvalidSpecification indicates that a specification is of the wrong type.
+var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
+
+// A ParseError occurs when an environment variable cannot be converted to
+// the type required by a struct field during assignment.
+type ParseError struct {
+	KeyName   string
+	FieldName string
+	TypeName  string
+	Value     string
+	Err       error
+}
+
+// Decoder has the same semantics as Setter, but takes higher precedence.
+// It is provided for historical compatibility.
+type Decoder interface {
+	Decode(value string) error
+}
+
+// Setter is implemented by types that can self-deserialize values.
+// Any type that implements flag.Value also implements Setter.
+type Setter interface {
+	Set(value string) error
+}
+
+func (e *ParseError) Error() string {
+	return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s. details: %[5]s", e.KeyName, e.FieldName, e.Value, e.TypeName, e.Err)
+}
+
+// varInfo maintains information about the configuration variable
+type varInfo struct {
+	Name  string
+	Alt   string
+	Key   string
+	Field reflect.Value
+	Tags  reflect.StructTag
+}
+
+// gatherInfo gathers information about the specified struct
+func gatherInfo(prefix string, spec interface{}) ([]varInfo, error) {
+	expr := regexp.MustCompile("([^A-Z]+|[A-Z][^A-Z]+|[A-Z]+)")
+	s := reflect.ValueOf(spec)
+
+	if s.Kind() != reflect.Ptr {
+		return nil, ErrInvalidSpecification
+	}
+	s = s.Elem()
+	if s.Kind() != reflect.Struct {
+		return nil, ErrInvalidSpecification
+	}
+	typeOfSpec := s.Type()
+
+	// over-allocate an info array; we will extend it if needed later
+	infos := make([]varInfo, 0, s.NumField())
+	for i := 0; i < s.NumField(); i++ {
+		f := s.Field(i)
+		ftype := typeOfSpec.Field(i)
+		if !f.CanSet() || ftype.Tag.Get("ignored") == "true" {
+			continue
+		}
+
+		for f.Kind() == reflect.Ptr {
+			if f.IsNil() {
+				if f.Type().Elem().Kind() != reflect.Struct {
+					// nil pointer to a non-struct: leave it alone
+					break
+				}
+				// nil pointer to struct: create a zero instance
+				f.Set(reflect.New(f.Type().Elem()))
+			}
+			f = f.Elem()
+		}
+
+		// Capture information about the config variable
+		info := varInfo{
+			Name:  ftype.Name,
+			Field: f,
+			Tags:  ftype.Tag,
+			Alt:   strings.ToUpper(ftype.Tag.Get("envconfig")),
+		}
+
+		// Default to the field name as the env var name (will be upcased)
+		info.Key = info.Name
+
+		// Best effort to un-pick camel casing as separate words
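+		// (for example, a field named "AutoSplitVar" is split into
+		// "Auto", "Split" and "Var", yielding the key AUTO_SPLIT_VAR)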
+		if ftype.Tag.Get("split_words") == "true" {
+			words := expr.FindAllStringSubmatch(ftype.Name, -1)
+			if len(words) > 0 {
+				var name []string
+				for _, words := range words {
+					name = append(name, words[0])
+				}
+
+				info.Key = strings.Join(name, "_")
+			}
+		}
+		if info.Alt != "" {
+			info.Key = info.Alt
+		}
+		if prefix != "" {
+			info.Key = fmt.Sprintf("%s_%s", prefix, info.Key)
+		}
+		info.Key = strings.ToUpper(info.Key)
+		infos = append(infos, info)
+
+		if f.Kind() == reflect.Struct {
+			// honor Decode if present
+			if decoderFrom(f) == nil && setterFrom(f) == nil && textUnmarshaler(f) == nil {
+				innerPrefix := prefix
+				if !ftype.Anonymous {
+					innerPrefix = info.Key
+				}
+
+				embeddedPtr := f.Addr().Interface()
+				embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr)
+				if err != nil {
+					return nil, err
+				}
+				infos = append(infos[:len(infos)-1], embeddedInfos...)
+
+				continue
+			}
+		}
+	}
+	return infos, nil
+}
+
+// Process populates the specified struct based on environment variables
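+//
+// A minimal sketch of typical use from client code (Specification being any
+// struct pointer, as in the package README):
+//
+//	var s Specification
+//	err := envconfig.Process("myapp", &s)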
+func Process(prefix string, spec interface{}) error {
+	infos, err := gatherInfo(prefix, spec)
+
+	for _, info := range infos {
+
+		// `os.Getenv` cannot differentiate between an explicitly set empty value
+		// and an unset value. `os.LookupEnv` is preferred to `syscall.Getenv`,
+		// but it is only available in go1.5 or newer. We're using Go build tags
+		// here to use os.LookupEnv for >=go1.5
+		value, ok := lookupEnv(info.Key)
+		if !ok && info.Alt != "" {
+			value, ok = lookupEnv(info.Alt)
+		}
+
+		def := info.Tags.Get("default")
+		if def != "" && !ok {
+			value = def
+		}
+
+		req := info.Tags.Get("required")
+		if !ok && def == "" {
+			if req == "true" {
+				return fmt.Errorf("required key %s missing value", info.Key)
+			}
+			continue
+		}
+
+		err := processField(value, info.Field)
+		if err != nil {
+			return &ParseError{
+				KeyName:   info.Key,
+				FieldName: info.Name,
+				TypeName:  info.Field.Type().String(),
+				Value:     value,
+				Err:       err,
+			}
+		}
+	}
+
+	return err
+}
+
+// MustProcess is the same as Process but panics if an error occurs
+func MustProcess(prefix string, spec interface{}) {
+	if err := Process(prefix, spec); err != nil {
+		panic(err)
+	}
+}
+
+func processField(value string, field reflect.Value) error {
+	typ := field.Type()
+
+	decoder := decoderFrom(field)
+	if decoder != nil {
+		return decoder.Decode(value)
+	}
+	// look for Set method if Decode not defined
+	setter := setterFrom(field)
+	if setter != nil {
+		return setter.Set(value)
+	}
+
+	if t := textUnmarshaler(field); t != nil {
+		return t.UnmarshalText([]byte(value))
+	}
+
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		if field.IsNil() {
+			field.Set(reflect.New(typ))
+		}
+		field = field.Elem()
+	}
+
+	switch typ.Kind() {
+	case reflect.String:
+		field.SetString(value)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		var (
+			val int64
+			err error
+		)
+		if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" {
+			var d time.Duration
+			d, err = time.ParseDuration(value)
+			val = int64(d)
+		} else {
+			val, err = strconv.ParseInt(value, 0, typ.Bits())
+		}
+		if err != nil {
+			return err
+		}
+
+		field.SetInt(val)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		val, err := strconv.ParseUint(value, 0, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetUint(val)
+	case reflect.Bool:
+		val, err := strconv.ParseBool(value)
+		if err != nil {
+			return err
+		}
+		field.SetBool(val)
+	case reflect.Float32, reflect.Float64:
+		val, err := strconv.ParseFloat(value, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetFloat(val)
+	case reflect.Slice:
+		vals := strings.Split(value, ",")
+		sl := reflect.MakeSlice(typ, len(vals), len(vals))
+		for i, val := range vals {
+			err := processField(val, sl.Index(i))
+			if err != nil {
+				return err
+			}
+		}
+		field.Set(sl)
+	}
+
+	return nil
+}
+
+func interfaceFrom(field reflect.Value, fn func(interface{}, *bool)) {
+	// it may be impossible for a struct field to fail this check
+	if !field.CanInterface() {
+		return
+	}
+	var ok bool
+	fn(field.Interface(), &ok)
+	if !ok && field.CanAddr() {
+		fn(field.Addr().Interface(), &ok)
+	}
+}
+
+func decoderFrom(field reflect.Value) (d Decoder) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { d, *ok = v.(Decoder) })
+	return d
+}
+
+func setterFrom(field reflect.Value) (s Setter) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { s, *ok = v.(Setter) })
+	return s
+}
+
+func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) })
+	return t
+}
diff --git a/ip-allocator/vendor/github.com/kelseyhightower/envconfig/usage.go b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/usage.go
new file mode 100644
index 0000000..4870237
--- /dev/null
+++ b/ip-allocator/vendor/github.com/kelseyhightower/envconfig/usage.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2016 Kelsey Hightower and others. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"text/template"
+)
+
+const (
+	// DefaultListFormat constant to use to display usage in a list format
+	DefaultListFormat = `This application is configured via the environment. The following environment
+variables can be used:
+{{range .}}
+{{usage_key .}}
+  [description] {{usage_description .}}
+  [type]        {{usage_type .}}
+  [default]     {{usage_default .}}
+  [required]    {{usage_required .}}{{end}}
+`
+	// DefaultTableFormat constant to use to display usage in a tabular format
+	DefaultTableFormat = `This application is configured via the environment. The following environment
+variables can be used:
+
+KEY	TYPE	DEFAULT	REQUIRED	DESCRIPTION
+{{range .}}{{usage_key .}}	{{usage_type .}}	{{usage_default .}}	{{usage_required .}}	{{usage_description .}}
+{{end}}`
+)
+
+var (
+	decoderType     = reflect.TypeOf((*Decoder)(nil)).Elem()
+	setterType      = reflect.TypeOf((*Setter)(nil)).Elem()
+	unmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+func implementsInterface(t reflect.Type) bool {
+	return t.Implements(decoderType) ||
+		reflect.PtrTo(t).Implements(decoderType) ||
+		t.Implements(setterType) ||
+		reflect.PtrTo(t).Implements(setterType) ||
+		t.Implements(unmarshalerType) ||
+		reflect.PtrTo(t).Implements(unmarshalerType)
+}
+
+// toTypeDescription converts Go types into a human readable description
+func toTypeDescription(t reflect.Type) string {
+	switch t.Kind() {
+	case reflect.Array, reflect.Slice:
+		return fmt.Sprintf("Comma-separated list of %s", toTypeDescription(t.Elem()))
+	case reflect.Ptr:
+		return toTypeDescription(t.Elem())
+	case reflect.Struct:
+		if implementsInterface(t) && t.Name() != "" {
+			return t.Name()
+		}
+		return ""
+	case reflect.String:
+		name := t.Name()
+		if name != "" && name != "string" {
+			return name
+		}
+		return "String"
+	case reflect.Bool:
+		name := t.Name()
+		if name != "" && name != "bool" {
+			return name
+		}
+		return "True or False"
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "int") {
+			return name
+		}
+		return "Integer"
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "uint") {
+			return name
+		}
+		return "Unsigned Integer"
+	case reflect.Float32, reflect.Float64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "float") {
+			return name
+		}
+		return "Float"
+	}
+	return fmt.Sprintf("%+v", t)
+}
+
+// Usage writes usage information to stdout using the default header and table format
+func Usage(prefix string, spec interface{}) error {
+	// The default is to output the usage information as a table
+	// Create tabwriter instance to support table output
+	tabs := tabwriter.NewWriter(os.Stdout, 1, 0, 4, ' ', 0)
+
+	err := Usagef(prefix, spec, tabs, DefaultTableFormat)
+	tabs.Flush()
+	return err
+}
+
+// Usagef writes usage information to the specified io.Writer using the specified template specification
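+//
+// An illustrative sketch (s as in the README's Specification example):
+//
+//	err := envconfig.Usagef("myapp", &s, os.Stderr, envconfig.DefaultListFormat)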
+func Usagef(prefix string, spec interface{}, out io.Writer, format string) error {
+
+	// Specify the default usage template functions
+	functions := template.FuncMap{
+		"usage_key":         func(v varInfo) string { return v.Key },
+		"usage_description": func(v varInfo) string { return v.Tags.Get("desc") },
+		"usage_type":        func(v varInfo) string { return toTypeDescription(v.Field.Type()) },
+		"usage_default":     func(v varInfo) string { return v.Tags.Get("default") },
+		"usage_required": func(v varInfo) (string, error) {
+			req := v.Tags.Get("required")
+			if req != "" {
+				reqB, err := strconv.ParseBool(req)
+				if err != nil {
+					return "", err
+				}
+				if reqB {
+					req = "true"
+				}
+			}
+			return req, nil
+		},
+	}
+
+	tmpl, err := template.New("envconfig").Funcs(functions).Parse(format)
+	if err != nil {
+		return err
+	}
+
+	return Usaget(prefix, spec, out, tmpl)
+}
+
+// Usaget writes usage information to the specified io.Writer using the specified template
+func Usaget(prefix string, spec interface{}, out io.Writer, tmpl *template.Template) error {
+	// gather first
+	infos, err := gatherInfo(prefix, spec)
+	if err != nil {
+		return err
+	}
+
+	return tmpl.Execute(out, infos)
+}
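+
+// Example (illustrative sketch, not part of the upstream package): driving
+// Usage and Usagef from application code. The Config struct and the "MYAPP"
+// prefix below are assumptions made for the example.
+//
+//	type Config struct {
+//		Port  int  `default:"8080" desc:"port on which to listen"`
+//		Debug bool `default:"false" desc:"enable debug output"`
+//	}
+//
+//	var cfg Config
+//	_ = Usage("MYAPP", &cfg)                                // table format to stdout
+//	_ = Usagef("MYAPP", &cfg, os.Stderr, DefaultListFormat) // list format to stderr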
diff --git a/ip-allocator/vendor/vendor.json b/ip-allocator/vendor/vendor.json
new file mode 100644
index 0000000..900d1e0
--- /dev/null
+++ b/ip-allocator/vendor/vendor.json
@@ -0,0 +1,25 @@
+{
+	"comment": "",
+	"ignore": "test",
+	"package": [
+		{
+			"checksumSHA1": "dGXnnR7ZhsrZNnEqFimk6q7YCqs=",
+			"path": "github.com/Sirupsen/logrus",
+			"revision": "61e43dc76f7ee59a82bdf3d71033dc12bea4c77d",
+			"revisionTime": "2017-01-13T01:19:11Z"
+		},
+		{
+			"checksumSHA1": "F5dR3/i70EhSIMZfeIV+H8/PtvM=",
+			"path": "github.com/gorilla/mux",
+			"revision": "392c28fe23e1c45ddba891b0320b3b5df220beea",
+			"revisionTime": "2017-01-18T13:43:44Z"
+		},
+		{
+			"checksumSHA1": "pNria08/hqW7nnWb0erRBMdJU3M=",
+			"path": "github.com/kelseyhightower/envconfig",
+			"revision": "4069f29f08928c54bcb1fdf632b31b1bb54b4fdb",
+			"revisionTime": "2017-01-13T19:16:37Z"
+		}
+	],
+	"rootPath": "gerrit.opencord.org/maas/cord-ip-allocator"
+}
diff --git a/provisioner/Dockerfile b/provisioner/Dockerfile
index c1922d6..91f697b 100644
--- a/provisioner/Dockerfile
+++ b/provisioner/Dockerfile
@@ -11,52 +11,15 @@
 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ## See the License for the specific language governing permissions and
 ## limitations under the License.
-FROM ubuntu:14.04
+FROM golang:1.7-alpine
 MAINTAINER Open Networking Laboratory <info@onlab.us>
 
-# Base image information borrowed by official golang wheezy Dockerfile
-RUN apt-get update && apt-get install -y --no-install-recommends \
-		g++ \
-		gcc \
-		libc6-dev \
-		make \
-                curl \
-	&& rm -rf /var/lib/apt/lists/*
-
-ENV GOLANG_VERSION 1.6.2
-ENV GOLANG_DOWNLOAD_URL https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz
-ENV GOLANG_DOWNLOAD_SHA256 e40c36ae71756198478624ed1bb4ce17597b3c19d243f3f0899bb5740d56212a
-
-RUN curl -kfsSL "$GOLANG_DOWNLOAD_URL" -o golang.tar.gz \
-	&& echo "$GOLANG_DOWNLOAD_SHA256  golang.tar.gz" | sha256sum -c - \
-	&& tar -C /usr/local -xzf golang.tar.gz \
-	&& rm golang.tar.gz
-
-ENV GOPATH /go
-ENV PATH $GOPATH/bin:/usr/local/go/bin:$PATH
-
-RUN mkdir -p "$GOPATH/src" "$GOPATH/bin" && chmod -R 777 "$GOPATH"
-
-# CORD Provisioner Dockerfile
-WORKDIR /go
-
-RUN apt-get update && \
-	apt-get install -y  software-properties-common && \
-	apt-add-repository ppa:ansible/ansible && \
-	apt-get update -y  -m && \
-	apt-get install -y git ansible python-netaddr
-
-RUN mkdir -p /root/.ssh
+RUN mkdir -p /root/.ssh /service
 COPY ssh-config /root/.ssh/config
-
-RUN go get github.com/tools/godep
 ADD . /go/src/gerrit.opencord.org/maas/cord-provisioner
 
-WORKDIR /go/src/gerrit.opencord.org/maas/cord-provisioner
-RUN $GOPATH/bin/godep restore || true
-
-WORKDIR $GOPATH
-RUN go install gerrit.opencord.org/maas/cord-provisioner
+WORKDIR /go
+RUN go build -o /service/entry-point gerrit.opencord.org/maas/cord-provisioner
 
 LABEL org.label-schema.name="provisioner" \
       org.label-schema.description="Provides provisioning of compute and switch nodes for CORD" \
@@ -64,4 +27,4 @@
       org.label-schema.vendor="Open Networking Laboratory" \
       org.label-schema.schema-version="1.0"
 
-ENTRYPOINT ["/go/bin/cord-provisioner"]
+ENTRYPOINT ["/service/entry-point"]
diff --git a/provisioner/Dockerfile.release b/provisioner/Dockerfile.release
new file mode 100644
index 0000000..f266c8b
--- /dev/null
+++ b/provisioner/Dockerfile.release
@@ -0,0 +1,30 @@
+## Copyright 2017 Open Networking Laboratory
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+FROM alpine:3.5
+MAINTAINER Open Networking Laboratory <info@onlab.us>
+
+# Runtime dependencies for the provisioning scripts (Ansible, SSH, rsync)
+RUN apk --update add ansible openssh-client sshpass curl py2-netaddr rsync
+RUN mkdir -p /root/.ssh /service
+COPY ssh-config /root/.ssh/config
+ADD entry-point /service/entry-point
+
+LABEL org.label-schema.name="provisioner" \
+      org.label-schema.description="Provides provisioning of compute and switch nodes for CORD" \
+      org.label-schema.vcs-url="https://gerrit.opencord.org/maas" \
+      org.label-schema.vendor="Open Networking Laboratory" \
+      org.label-schema.schema-version="1.0"
+
+WORKDIR /service
+ENTRYPOINT ["/service/entry-point"]
diff --git a/provisioner/Godeps/Godeps.json b/provisioner/Godeps/Godeps.json
deleted file mode 100644
index 2870953..0000000
--- a/provisioner/Godeps/Godeps.json
+++ /dev/null
@@ -1,31 +0,0 @@
-{
-	"ImportPath": "gerrit.opencord.org/maas/cord-provisioner",
-	"GoVersion": "go1.6",
-	"GodepVersion": "v72",
-	"Deps": [
-		{
-			"ImportPath": "github.com/gorilla/context",
-			"Comment": "v1.1-4-gaed02d1",
-			"Rev": "aed02d124ae4a0e94fea4541c8effd05bf0c8296"
-		},
-		{
-			"ImportPath": "github.com/gorilla/mux",
-			"Comment": "v1.1-13-g9fa818a",
-			"Rev": "9fa818a44c2bf1396a17f9d5a3c0f6dd39d2ff8e"
-		},
-		{
-                        "ImportPath": "github.com/hashicorp/consul",
-                        "Comment": "v0.6.4-341-g22938ab",
-                        "Rev": "22938ab8b3ca069e490a4897a8ec13308ac61605"
-                },
-		{
-			"ImportPath": "github.com/kelseyhightower/envconfig",
-			"Comment": "1.1.0-17-g91921eb",
-			"Rev": "91921eb4cf999321cdbeebdba5a03555800d493b"
-		},
-                {
-                        "ImportPath": "github.com/Sirupsen/logrus",
-                        "Rev": "f3cfb454f4c209e6668c95216c4744b8fddb2356"
-                }
-	]
-}
diff --git a/provisioner/Makefile b/provisioner/Makefile
new file mode 100644
index 0000000..13e9d83
--- /dev/null
+++ b/provisioner/Makefile
@@ -0,0 +1,38 @@
+IMAGE_NAME=cord-provisioner
+BINARY=entry-point
+BUILD_TAG=build
+PACKAGE_TAG=candidate
+
+.PHONY: help
+help:
+	@echo "build     - create the binary"
+	@echo "package   - package the binary into a docker container"
+	@echo "clean     - remove temporary files and build artifacts"
+	@echo "help      - this message"
+
+BUILD_DATE=$(shell date -u +%Y-%m-%dT%TZ)
+VCS_REF=$(shell git log --pretty=format:%H -n 1)
+VCS_REF_DATE=$(shell git log --pretty=format:%cd --date=format:%FT%T%z -n 1)
+BRANCHES=$(shell repo --color=never --no-pager branches 2>/dev/null | wc -l)
+STATUS=$(shell repo --color=never --no-pager status . | tail -n +2 | wc -l)
+MODIFIED=$(shell test $(BRANCHES) -eq 0 && test $(STATUS) -eq 0 || echo "[modified]")
+BRANCH=$(shell repo --color=never --no-pager info -l -o | grep 'Manifest branch:' | awk '{print $$NF}')
+VERSION=$(BRANCH)$(MODIFIED)
+
+.PHONY: build
+build:
+	docker build -t $(IMAGE_NAME):$(BUILD_TAG) .
+
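+# package extracts the compiled binary from the build image and bakes it into
+# a slim release image: create a container from the build image, docker-cp the
+# binary out to a temp dir, then build Dockerfile.release from that dir.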
+.PHONY: package
+package:
+	$(eval BUILD_ID := $(shell docker create $(IMAGE_NAME):$(BUILD_TAG)))
+	$(eval BINDIR := $(shell mktemp -d))
+	docker cp $(BUILD_ID):/service/$(BINARY) $(BINDIR)/$(BINARY)
+	cp Dockerfile.release ssh-config $(BINDIR)
+	docker build -f $(BINDIR)/Dockerfile.release -t $(IMAGE_NAME):$(PACKAGE_TAG) --label org.label-schema.build-date=$(BUILD_DATE) --label org.label-schema.vcs-ref=$(VCS_REF) --label org.label-schema.vcs-ref-date=$(VCS_REF_DATE) --label org.label-schema.version=$(VERSION) $(BINDIR)
+	docker rm -f $(BUILD_ID)
+	rm -r $(BINDIR)
+
+.PHONY: clean
+clean:
+	@echo ""
diff --git a/provisioner/provisioner.go b/provisioner/provisioner.go
index 73d0d1a..e949895 100644
--- a/provisioner/provisioner.go
+++ b/provisioner/provisioner.go
@@ -14,23 +14,27 @@
 package main
 
 import (
+	"flag"
 	"fmt"
 	"github.com/Sirupsen/logrus"
 	"github.com/gorilla/mux"
 	"github.com/kelseyhightower/envconfig"
 	"net/http"
+	"os"
 )
 
+const appName = "PROVISION"
+
 type Config struct {
-	Port            int    `default:"4243"`
-	Listen          string `default:"0.0.0.0"`
-	RoleSelectorURL string `default:"" envconfig:"ROLE_SELECTOR_URL"`
-	DefaultRole     string `default:"compute-node" envconfig:"DEFAULT_ROLE"`
-	Script          string `default:"do-ansible"`
-	StorageURL      string `default:"memory:" envconfig:"STORAGE_URL"`
-	NumberOfWorkers int    `default:"5" envconfig:"NUMBER_OF_WORKERS"`
-	LogLevel        string `default:"warning" envconfig:"LOG_LEVEL"`
-	LogFormat       string `default:"text" envconfig:"LOG_FORMAT"`
+	Port            int    `default:"4243" desc:"port on which to listen for requests"`
+	Listen          string `default:"0.0.0.0" desc:"IP on which to listen for requests"`
+	RoleSelectorURL string `default:"" envconfig:"ROLE_SELECTOR_URL" desc:"connection string to query role for device"`
+	DefaultRole     string `default:"compute-node" envconfig:"DEFAULT_ROLE" desc:"default role for device"`
+	Script          string `default:"do-ansible" desc:"default script to execute to provision device"`
+	StorageURL      string `default:"memory:" envconfig:"STORAGE_URL" desc:"connection string to persistence implementation"`
+	NumberOfWorkers int    `default:"5" envconfig:"NUMBER_OF_WORKERS" desc:"number of concurrent provisioning workers"`
+	LogLevel        string `default:"warning" envconfig:"LOG_LEVEL" desc:"detail level for logging"`
+	LogFormat       string `default:"text" envconfig:"LOG_FORMAT" desc:"log output format, text or json"`
 }
 
 type Context struct {
@@ -41,11 +45,22 @@
 }
 
 var log = logrus.New()
+var appFlags = flag.NewFlagSet("", flag.ContinueOnError)
 
 func main() {
 	context := &Context{}
 
-	err := envconfig.Process("PROVISION", &(context.config))
+	appFlags.Usage = func() {
+		envconfig.Usage(appName, &(context.config))
+	}
+	if err := appFlags.Parse(os.Args[1:]); err != nil {
+		if err != flag.ErrHelp {
+			os.Exit(1)
+		} else {
+			return
+		}
+	}
+	err := envconfig.Process(appName, &(context.config))
 	if err != nil {
 		log.Fatalf("[ERRO] Unable to parse configuration options : %s", err)
 	}
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/provisioner/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..f2c2bc2
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,66 @@
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/LICENSE b/provisioner/vendor/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/README.md b/provisioner/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000..206c746
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,433 @@
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
+
+**Seeing weird case-sensitive problems?** See [this
+issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
+This change has been reverted. I apologize for causing this. I greatly
+underestimated the impact this would have. Logrus strives for stability and
+backwards compatibility and failed to provide that.
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is the package-level exported logger:
+
+```go
+package main
+
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+  }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+  "os"
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // Log as JSON instead of the default ASCII formatter.
+  log.SetFormatter(&log.JSONFormatter{})
+
+  // Output to stdout instead of the default stderr, could also be a file.
+  log.SetOutput(os.Stdout)
+
+  // Only log the warning severity or above.
+  log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 122,
+  }).Warn("The group's number increased tremendously!")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 100,
+  }).Fatal("The ice breaks!")
+
+  // A common pattern is to re-use fields between logging statements by re-using
+  // the logrus.Entry returned from WithFields()
+  contextLogger := log.WithFields(log.Fields{
+    "common": "this is a common field",
+    "other": "I also should be logged always",
+  })
+
+  contextLogger.Info("I'll be logged with common and other field")
+  contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+  // The API for setting attributes is a little different than the package level
+  // exported logger. See Godoc.
+  log.Out = os.Stderr
+
+  log.WithFields(logrus.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field. However, you can still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
+Note: the syslog hook also supports connecting to a local syslog socket (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"). For details, see the [syslog hook README](hooks/syslog/README.md).
+
+| Hook  | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
+
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
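+For example, a minimal sketch (the DEBUG environment variable here is an
+assumption for illustration, not from the upstream README):
+
+```go
+if os.Getenv("DEBUG") != "" {
+  log.SetLevel(log.DebugLevel)
+}
+```
+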
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
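+For illustration (a sketch, not from the upstream README), a single call with
+one custom field emits the automatic fields alongside your own:
+
+```go
+log.WithField("animal", "walrus").Info("A walrus appears")
+// time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus
+```
+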
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force non-colored output even if there is a TTY, set the
+    `DisableColors` field to `true`.
+* `logrus.JSONFormatter`. Logs fields as JSON.
+
+Third party logging formatters:
+
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+    // create a stdlib log.Logger that writes to
+    // logrus.Logger.
+    ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool that manages Logrus loggers: you can initialize a logger's level, hooks, and formatter from a config file, so loggers are generated with different configurations in different environments.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs, and that simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+logger, hook := NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```go
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default, Logger is protected by a mutex for concurrent writes; the mutex is
+held while calling hooks and writing logs. If you are sure such locking is not
+needed, you can call logger.SetNoLock() to disable it.
+
+Situations when locking is not needed include:
+
+* You have no hooks registered, or calling the hooks is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+     (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
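+
+A minimal sketch (illustrative, not from the upstream README) of disabling the
+lock under the append-only-file condition described above:
+
+```go
+package main
+
+import (
+  "os"
+
+  log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+  logger := log.New()
+  // Assumption: an O_APPEND file handle with small writes, so each write is
+  // atomic and the logger's own mutex is unnecessary.
+  out, err := os.OpenFile("app.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+  if err != nil {
+    logger.Fatal(err)
+  }
+  logger.Out = out
+  logger.SetNoLock()
+  logger.Info("lock-free logging enabled")
+}
+```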
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/alt_exit.go b/provisioner/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..b4c9e84
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://bitbucket.org/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to gracefully shutdown. An example usecase could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
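+
+// Example (illustrative sketch, not part of the upstream file): registering a
+// cleanup handler that runs before the process exits on a Fatal log entry.
+// The db value below is hypothetical.
+//
+//	logrus.RegisterExitHandler(func() { db.Close() })
+//	logrus.Fatal("terminating") // handlers run, then os.Exit(1)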
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/doc.go b/provisioner/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 0000000..dddd5f8
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is the package-level exported logger:
+
+  package main
+
+  import (
+    log "github.com/Sirupsen/logrus"
+  )
+
+  func main() {
+    log.WithFields(log.Fields{
+      "animal": "walrus",
+      "number": 1,
+      "size":   10,
+    }).Info("A walrus appears")
+  }
+
+Output:
+  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/entry.go b/provisioner/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000..4edbe7a
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,275 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"sync"
+	"time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+	Logger *Logger
+
+	// Contains all the fields set by the user.
+	Data Fields
+
+	// Time at which the log entry was created
+	Time time.Time
+
+	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+	Level Level
+
+	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
+	Message string
+
+	// When formatter is called in entry.log(), a Buffer may be set on the entry
+	Buffer *bytes.Buffer
+}
+
+func NewEntry(logger *Logger) *Entry {
+	return &Entry{
+		Logger: logger,
+		// Default is three fields, give a little extra room
+		Data: make(Fields, 5),
+	}
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		return "", err
+	}
+	str := string(serialized)
+	return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+	return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+	data := make(Fields, len(entry.Data)+len(fields))
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	for k, v := range fields {
+		data[k] = v
+	}
+	return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+	var buffer *bytes.Buffer
+	entry.Time = time.Now()
+	entry.Level = level
+	entry.Message = msg
+
+	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+		entry.Logger.mu.Unlock()
+	}
+	buffer = bufferPool.Get().(*bytes.Buffer)
+	buffer.Reset()
+	defer bufferPool.Put(buffer)
+	entry.Buffer = buffer
+	serialized, err := entry.Logger.Formatter.Format(&entry)
+	entry.Buffer = nil
+	if err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+		entry.Logger.mu.Unlock()
+	} else {
+		entry.Logger.mu.Lock()
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+		entry.Logger.mu.Unlock()
+	}
+
+	// To avoid Entry#log() returning a value that only would make sense for
+	// panic() to use in Entry#Panic(), we avoid the allocation by checking
+	// directly here.
+	if level <= PanicLevel {
+		panic(&entry)
+	}
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.log(DebugLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+	entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.log(InfoLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.log(WarnLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+	entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.log(ErrorLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.log(FatalLevel, fmt.Sprint(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.log(PanicLevel, fmt.Sprint(args...))
+	}
+	panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.Debug(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.Info(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+	entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.Warn(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+	entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.Error(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.Fatal(fmt.Sprintf(format, args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.Panic(fmt.Sprintf(format, args...))
+	}
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.Debug(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.Info(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+	entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.Warn(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+	entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.Error(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.Fatal(entry.sprintlnn(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.Panic(entry.sprintlnn(args...))
+	}
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands, regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+	msg := fmt.Sprintln(args...)
+	return msg[:len(msg)-1]
+}
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/exported.go b/provisioner/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000..9a0120a
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+	"io"
+)
+
+var (
+	// std is the name of the standard logger in stdlib `log`
+	std = New()
+)
+
+func StandardLogger() *Logger {
+	return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+	return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+	return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+	return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+	std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+	std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+	std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+	std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+	std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+	std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+	std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+	std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+	std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+	std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+	std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+	std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+	std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+	std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+	std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+	std.Fatalln(args...)
+}
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/formatter.go b/provisioner/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..b5fbe93
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code wasn't there doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would just silently drop the user-provided level. Instead, with this code
+// it'll be logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+	if t, ok := data["time"]; ok {
+		data["fields.time"] = t
+	}
+
+	if m, ok := data["msg"]; ok {
+		data["fields.msg"] = m
+	}
+
+	if l, ok := data["level"]; ok {
+		data["fields.level"] = l
+	}
+}
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/hooks.go b/provisioner/vendor/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers; you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+	Levels() []Level
+	Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+	for _, level := range hook.Levels() {
+		hooks[level] = append(hooks[level], hook)
+	}
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+	for _, hook := range hooks[level] {
+		if err := hook.Fire(entry); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/json_formatter.go b/provisioner/vendor/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..266554e
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,74 @@
+package logrus
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+type fieldKey string
+type FieldMap map[fieldKey]string
+
+const (
+	FieldKeyMsg   = "msg"
+	FieldKeyLevel = "level"
+	FieldKeyTime  = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// FieldMap allows users to customize the names of keys for various fields.
+	// As an example:
+	// formatter := &JSONFormatter{
+	//   	FieldMap: FieldMap{
+	// 		 FieldKeyTime: "@timestamp",
+	// 		 FieldKeyLevel: "@level",
+	// 		 FieldKeyMsg: "@message",
+	//    },
+	// }
+	FieldMap FieldMap
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields, len(entry.Data)+3)
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/Sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+	prefixFieldClashes(data)
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+
+	serialized, err := json.Marshal(data)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+	}
+	return append(serialized, '\n'), nil
+}
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/logger.go b/provisioner/vendor/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000..b769f3d
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,308 @@
+package logrus
+
+import (
+	"io"
+	"os"
+	"sync"
+)
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it default which is `os.Stderr`. You can also set this to
+	// something more adventurous, such as logging to Kafka.
+	Out io.Writer
+	// Hooks for the logger instance. These allow firing events based on logging
+	// levels and log entries. For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before being logged to Out. The
+	// included formatters are `TextFormatter` and `JSONFormatter` for which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but to a file it wouldn't. You can easily implement your
+	// own that implements the `Formatter` interface, see the `README` or included
+	// formatters for examples.
+	Formatter Formatter
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged. `logrus.Debug` is useful in development and debugging.
+	Level Level
+	// Used to sync writing to the log. Locking is enabled by default.
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+}
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//    var log = &Logger{
+//      Out: os.Stderr,
+//      Formatter: new(JSONFormatter),
+//      Hooks: make(LevelHooks),
+//      Level: logrus.DebugLevel,
+//    }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:       os.Stderr,
+		Formatter: new(TextFormatter),
+		Hooks:     make(LevelHooks),
+		Level:     InfoLevel,
+	}
+}
+
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry. Note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
+}
+
+// Add an error as a single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infof(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalf(format, args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debug(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Info(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Info(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Error(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatal(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panic(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infoln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalln(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+// When a file is opened in append mode, it is safe to write to it
+// concurrently (for messages under 4k on Linux). In these cases the user
+// can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
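+
+// Illustrative sketch (not part of the upstream library): SetNoLock is
+// intended for outputs like append-mode files; the file name below is an
+// assumption for the example.
+//
+//	f, err := os.OpenFile("app.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+//	if err == nil {
+//		logger := New()
+//		logger.Out = f
+//		logger.SetNoLock()
+//	}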
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/logrus.go b/provisioner/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..e596691
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+	switch level {
+	case DebugLevel:
+		return "debug"
+	case InfoLevel:
+		return "info"
+	case WarnLevel:
+		return "warning"
+	case ErrorLevel:
+		return "error"
+	case FatalLevel:
+		return "fatal"
+	case PanicLevel:
+		return "panic"
+	}
+
+	return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+	switch strings.ToLower(lvl) {
+	case "panic":
+		return PanicLevel, nil
+	case "fatal":
+		return FatalLevel, nil
+	case "error":
+		return ErrorLevel, nil
+	case "warn", "warning":
+		return WarnLevel, nil
+	case "info":
+		return InfoLevel, nil
+	case "debug":
+		return DebugLevel, nil
+	}
+
+	var l Level
+	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
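+
+// Illustrative sketch (not part of the upstream library): ParseLevel is
+// typically fed from configuration; the LOG_LEVEL environment variable is an
+// assumption for the example.
+//
+//	if lvl, err := ParseLevel(os.Getenv("LOG_LEVEL")); err == nil {
+//		logger.Level = lvl
+//	}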
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+	PanicLevel,
+	FatalLevel,
+	ErrorLevel,
+	WarnLevel,
+	InfoLevel,
+	DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+	// PanicLevel level, highest level of severity. Logs and then calls panic with the
+	// message passed to Debug, Info, ...
+	PanicLevel Level = iota
+	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+	// logging level is set to Panic.
+	FatalLevel
+	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+)
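+
+// Illustrative sketch (not part of the upstream library): because PanicLevel
+// is 0 and severities grow downward, "should this be logged?" checks compare
+// with >=, mirroring the guards in Logger's methods.
+//
+//	if logger.Level >= DebugLevel {
+//		logger.Debug("only emitted when the level is Debug")
+//	}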
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface; this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+}
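+
+// Illustrative sketch (not part of the upstream library): accepting a
+// FieldLogger lets a function work with both a *Logger and an *Entry, so
+// callers can pass a logger pre-seeded with fields.
+//
+//	func process(log FieldLogger) {
+//		log.WithField("step", "start").Info("processing")
+//	}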
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/provisioner/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
new file mode 100644
index 0000000..1960169
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package logrus
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	return true
+}
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/provisioner/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000..5f6be4d
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/provisioner/vendor/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000..308160c
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/provisioner/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..329038f
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,22 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var termios Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/provisioner/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 0000000..a3c6f6e
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris,!appengine
+
+package logrus
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal() bool {
+	_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+	return err == nil
+}
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/provisioner/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..3727e8a
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,!appengine
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/text_formatter.go b/provisioner/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..076de5d
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,166 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"sort"
+	"strings"
+	"time"
+)
+
+const (
+	nocolor = 0
+	red     = 31
+	green   = 32
+	yellow  = 33
+	blue    = 34
+	gray    = 37
+)
+
+var (
+	baseTimestamp time.Time
+	isTerminal    bool
+)
+
+func init() {
+	baseTimestamp = time.Now()
+	isTerminal = IsTerminal()
+}
+
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a
+	// logging system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time passed since beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+	DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	var b *bytes.Buffer
+	keys := make([]string, 0, len(entry.Data))
+	for k := range entry.Data {
+		keys = append(keys, k)
+	}
+
+	if !f.DisableSorting {
+		sort.Strings(keys)
+	}
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	prefixFieldClashes(entry.Data)
+
+	isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+	isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+	if isColored {
+		f.printColored(b, entry, keys, timestampFormat)
+	} else {
+		if !f.DisableTimestamp {
+			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+		}
+		f.appendKeyValue(b, "level", entry.Level.String())
+		if entry.Message != "" {
+			f.appendKeyValue(b, "msg", entry.Message)
+		}
+		for _, key := range keys {
+			f.appendKeyValue(b, key, entry.Data[key])
+		}
+	}
+
+	b.WriteByte('\n')
+	return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+	var levelColor int
+	switch entry.Level {
+	case DebugLevel:
+		levelColor = gray
+	case WarnLevel:
+		levelColor = yellow
+	case ErrorLevel, FatalLevel, PanicLevel:
+		levelColor = red
+	default:
+		levelColor = blue
+	}
+
+	levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+	if f.DisableTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+	} else if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+	} else {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+	}
+	for _, k := range keys {
+		v := entry.Data[k]
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+		f.appendValue(b, v)
+	}
+}
+
+func needsQuoting(text string) bool {
+	for _, ch := range text {
+		if !((ch >= 'a' && ch <= 'z') ||
+			(ch >= 'A' && ch <= 'Z') ||
+			(ch >= '0' && ch <= '9') ||
+			ch == '-' || ch == '.') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+	b.WriteString(key)
+	b.WriteByte('=')
+	f.appendValue(b, value)
+	b.WriteByte(' ')
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+	switch value := value.(type) {
+	case string:
+		if !needsQuoting(value) {
+			b.WriteString(value)
+		} else {
+			fmt.Fprintf(b, "%q", value)
+		}
+	case error:
+		errmsg := value.Error()
+		if !needsQuoting(errmsg) {
+			b.WriteString(errmsg)
+		} else {
+			fmt.Fprintf(b, "%q", errmsg)
+		}
+	default:
+		fmt.Fprint(b, value)
+	}
+}
diff --git a/provisioner/vendor/github.com/Sirupsen/logrus/writer.go b/provisioner/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000..f74d2aa
--- /dev/null
+++ b/provisioner/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,53 @@
+package logrus
+
+import (
+	"bufio"
+	"io"
+	"runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+	return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+	reader, writer := io.Pipe()
+
+	var printFunc func(args ...interface{})
+	switch level {
+	case DebugLevel:
+		printFunc = logger.Debug
+	case InfoLevel:
+		printFunc = logger.Info
+	case WarnLevel:
+		printFunc = logger.Warn
+	case ErrorLevel:
+		printFunc = logger.Error
+	case FatalLevel:
+		printFunc = logger.Fatal
+	case PanicLevel:
+		printFunc = logger.Panic
+	default:
+		printFunc = logger.Print
+	}
+
+	go logger.writerScanner(reader, printFunc)
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		printFunc(scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		logger.Errorf("Error while reading from Writer: %s", err)
+	}
+	reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
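+
+// Illustrative sketch (not part of the upstream library): WriterLevel lets
+// other writers, such as the standard library logger, emit through logrus at
+// a chosen level.
+//
+//	w := logger.WriterLevel(WarnLevel)
+//	defer w.Close()
+//	log.SetOutput(w)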
diff --git a/config-generator/vendor/github.com/gorilla/context/LICENSE b/provisioner/vendor/github.com/gorilla/mux/LICENSE
similarity index 100%
copy from config-generator/vendor/github.com/gorilla/context/LICENSE
copy to provisioner/vendor/github.com/gorilla/mux/LICENSE
diff --git a/provisioner/vendor/github.com/gorilla/mux/README.md b/provisioner/vendor/github.com/gorilla/mux/README.md
new file mode 100644
index 0000000..94d396c
--- /dev/null
+++ b/provisioner/vendor/github.com/gorilla/mux/README.md
@@ -0,0 +1,339 @@
+gorilla/mux
+===
+[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
+
+![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
+
+http://www.gorillatoolkit.org/pkg/mux
+
+Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
+their respective handler.
+
+The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
+
+* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
+* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
+* URL hosts and paths can have variables with an optional regular expression.
+* Registered URLs can be built, or "reversed", which helps maintain references to resources.
+* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
+
+---
+
+* [Install](#install)
+* [Examples](#examples)
+* [Matching Routes](#matching-routes)
+* [Listing Routes](#listing-routes)
+* [Static Files](#static-files)
+* [Registered URLs](#registered-urls)
+* [Full Example](#full-example)
+
+---
+
+## Install
+
+With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
+
+```sh
+go get -u github.com/gorilla/mux
+```
+
+## Examples
+
+Let's start registering a couple of URL paths and handlers:
+
+```go
+func main() {
+	r := mux.NewRouter()
+	r.HandleFunc("/", HomeHandler)
+	r.HandleFunc("/products", ProductsHandler)
+	r.HandleFunc("/articles", ArticlesHandler)
+	http.Handle("/", r)
+}
+```
+
+Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
+
+Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/products/{key}", ProductHandler)
+r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
+
+```go
+func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
+	vars := mux.Vars(r)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintf(w, "Category: %v\n", vars["category"])
+}
+```
+
+And this is all you need to know about the basic usage. More advanced options are explained below.
+
+### Matching Routes
+
+Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
+
+```go
+r := mux.NewRouter()
+// Only matches if domain is "www.example.com".
+r.Host("www.example.com")
+// Matches a dynamic subdomain.
+r.Host("{subdomain:[a-z]+}.domain.com")
+```
+
+There are several other matchers that can be added. To match path prefixes:
+
+```go
+r.PathPrefix("/products/")
+```
+
+...or HTTP methods:
+
+```go
+r.Methods("GET", "POST")
+```
+
+...or URL schemes:
+
+```go
+r.Schemes("https")
+```
+
+...or header values:
+
+```go
+r.Headers("X-Requested-With", "XMLHttpRequest")
+```
+
+...or query values:
+
+```go
+r.Queries("key", "value")
+```
+
+...or to use a custom matcher function:
+
+```go
+r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+	return r.ProtoMajor == 0
+})
+```
+
+...and finally, it is possible to combine several matchers in a single route:
+
+```go
+r.HandleFunc("/products", ProductsHandler).
+  Host("www.example.com").
+  Methods("GET").
+  Schemes("http")
+```
+
+Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
+
+```go
+r := mux.NewRouter()
+s := r.Host("www.example.com").Subrouter()
+```
+
+Then register routes in the subrouter:
+
+```go
+s.HandleFunc("/products/", ProductsHandler)
+s.HandleFunc("/products/{key}", ProductHandler)
+s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register their paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as the base for their paths:
+
+```go
+r := mux.NewRouter()
+s := r.PathPrefix("/products").Subrouter()
+// "/products/"
+s.HandleFunc("/", ProductsHandler)
+// "/products/{key}/"
+s.HandleFunc("/{key}/", ProductHandler)
+// "/products/{key}/details"
+s.HandleFunc("/{key}/details", ProductDetailsHandler)
+```
+
+### Listing Routes
+
+Routes on a mux can be listed using the Router.Walk method, which is useful for generating documentation:
+
+```go
+package main
+
+import (
+    "fmt"
+    "net/http"
+
+    "github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+    return
+}
+
+func main() {
+    r := mux.NewRouter()
+    r.HandleFunc("/", handler)
+    r.HandleFunc("/products", handler)
+    r.HandleFunc("/articles", handler)
+    r.HandleFunc("/articles/{id}", handler)
+    r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+        t, err := route.GetPathTemplate()
+        if err != nil {
+            return err
+        }
+        fmt.Println(t)
+        return nil
+    })
+    http.Handle("/", r)
+}
+```
+
+### Static Files
+
+Note that the path provided to `PathPrefix()` represents a "wildcard": calling
+`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+```go
+func main() {
+	var dir string
+
+	flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+	flag.Parse()
+	r := mux.NewRouter()
+
+	// This will serve files under http://localhost:8000/static/<filename>
+	r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+	srv := &http.Server{
+		Handler:      r,
+		Addr:         "127.0.0.1:8000",
+		// Good practice: enforce timeouts for servers you create!
+		WriteTimeout: 15 * time.Second,
+		ReadTimeout:  15 * time.Second,
+	}
+
+	log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Registered URLs
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+  Name("article")
+```
+
+To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
+
+```go
+url, err := r.Get("article").URL("category", "technology", "id", "42")
+```
+
+...and the result will be a `url.URL` with the following path:
+
+```
+"/articles/technology/42"
+```
+
+This also works for host variables:
+
+```go
+r := mux.NewRouter()
+r.Host("{subdomain}.domain.com").
+  Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// url.String() will be "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42")
+```
+
+All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+```go
+r.HeadersRegexp("Content-Type", "application/(text|json)")
+```
+
+...and the route will match requests with a Content-Type of `application/json` as well as `application/text`.
+
+There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
+
+```go
+// "http://news.domain.com/"
+host, err := r.Get("article").URLHost("subdomain", "news")
+
+// "/articles/technology/42"
+path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+```
+
+And if you use subrouters, host and path defined separately can be built as well:
+
+```go
+r := mux.NewRouter()
+s := r.Host("{subdomain}.domain.com").Subrouter()
+s.Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42")
+```
+
+## Full Example
+
+Here's a complete, runnable example of a small `mux` based server:
+
+```go
+package main
+
+import (
+	"net/http"
+	"log"
+	"github.com/gorilla/mux"
+)
+
+func YourHandler(w http.ResponseWriter, r *http.Request) {
+	w.Write([]byte("Gorilla!\n"))
+}
+
+func main() {
+	r := mux.NewRouter()
+	// Routes consist of a path and a handler function.
+	r.HandleFunc("/", YourHandler)
+
+	// Bind to a port and pass our router in
+	log.Fatal(http.ListenAndServe(":8000", r))
+}
+```
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/provisioner/vendor/github.com/gorilla/mux/context_gorilla.go b/provisioner/vendor/github.com/gorilla/mux/context_gorilla.go
new file mode 100644
index 0000000..d7adaa8
--- /dev/null
+++ b/provisioner/vendor/github.com/gorilla/mux/context_gorilla.go
@@ -0,0 +1,26 @@
+// +build !go1.7
+
+package mux
+
+import (
+	"net/http"
+
+	"github.com/gorilla/context"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+	return context.Get(r, key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+	if val == nil {
+		return r
+	}
+
+	context.Set(r, key, val)
+	return r
+}
+
+func contextClear(r *http.Request) {
+	context.Clear(r)
+}
diff --git a/provisioner/vendor/github.com/gorilla/mux/context_native.go b/provisioner/vendor/github.com/gorilla/mux/context_native.go
new file mode 100644
index 0000000..209cbea
--- /dev/null
+++ b/provisioner/vendor/github.com/gorilla/mux/context_native.go
@@ -0,0 +1,24 @@
+// +build go1.7
+
+package mux
+
+import (
+	"context"
+	"net/http"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+	return r.Context().Value(key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+	if val == nil {
+		return r
+	}
+
+	return r.WithContext(context.WithValue(r.Context(), key, val))
+}
+
+func contextClear(r *http.Request) {
+	return
+}
diff --git a/provisioner/vendor/github.com/gorilla/mux/doc.go b/provisioner/vendor/github.com/gorilla/mux/doc.go
new file mode 100644
index 0000000..00daf4a
--- /dev/null
+++ b/provisioner/vendor/github.com/gorilla/mux/doc.go
@@ -0,0 +1,240 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+	* Requests can be matched based on URL host, path, path prefix, schemes,
+	  header and query values, HTTP methods or using custom matchers.
+	* URL hosts and paths can have variables with an optional regular
+	  expression.
+	* Registered URLs can be built, or "reversed", which helps maintain
+	  references to resources.
+	* Routes can be used as subrouters: nested routes are only tested if the
+	  parent route matches. This is useful to define groups of routes that
+	  share common conditions like a host, a path prefix or other repeated
+	  attributes. As a bonus, this optimizes request matching.
+	* It implements the http.Handler interface so it is compatible with the
+	  standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+	func main() {
+		r := mux.NewRouter()
+		r.HandleFunc("/", HomeHandler)
+		r.HandleFunc("/products", ProductsHandler)
+		r.HandleFunc("/articles", ArticlesHandler)
+		http.Handle("/", r)
+	}
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/products/{key}", ProductHandler)
+	r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
+
+	r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
+
+The names are used to create a map of route variables which can be retrieved
+calling mux.Vars():
+
+	vars := mux.Vars(request)
+	category := vars["category"]
+
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+	r := mux.NewRouter()
+	// Only matches if domain is "www.example.com".
+	r.Host("www.example.com")
+	// Matches a dynamic subdomain.
+	r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+	r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+	r.Methods("GET", "POST")
+
+...or URL schemes:
+
+	r.Schemes("https")
+
+...or header values:
+
+	r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+	r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+	r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+		return r.ProtoMajor == 0
+	})
+
+...and finally, it is possible to combine several matchers in a single route:
+
+	r.HandleFunc("/products", ProductsHandler).
+	  Host("www.example.com").
+	  Methods("GET").
+	  Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as the base for their paths:
+
+	r := mux.NewRouter()
+	s := r.PathPrefix("/products").Subrouter()
+	// "/products/"
+	s.HandleFunc("/", ProductsHandler)
+	// "/products/{key}/"
+	s.HandleFunc("/{key}/", ProductHandler)
+	// "/products/{key}/details"
+	s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+	func main() {
+		var dir string
+
+		flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+		flag.Parse()
+		r := mux.NewRouter()
+
+		// This will serve files under http://localhost:8000/static/<filename>
+		r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+		srv := &http.Server{
+			Handler:      r,
+			Addr:         "127.0.0.1:8000",
+			// Good practice: enforce timeouts for servers you create!
+			WriteTimeout: 15 * time.Second,
+			ReadTimeout:  15 * time.Second,
+		}
+
+		log.Fatal(srv.ListenAndServe())
+	}
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+	  Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+	url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+	"/articles/technology/42"
+
+This also works for host variables:
+
+	r := mux.NewRouter()
+	r.Host("{subdomain}.domain.com").
+	  Path("/articles/{category}/{id:[0-9]+}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// url.String() will be "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+	r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of `application/json` as well as
+`application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+	// "http://news.domain.com/"
+	host, err := r.Get("article").URLHost("subdomain", "news")
+
+	// "/articles/technology/42"
+	path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+	r := mux.NewRouter()
+	s := r.Host("{subdomain}.domain.com").Subrouter()
+	s.Path("/articles/{category}/{id:[0-9]+}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42")
+*/
+package mux
diff --git a/provisioner/vendor/github.com/gorilla/mux/mux.go b/provisioner/vendor/github.com/gorilla/mux/mux.go
new file mode 100644
index 0000000..d66ec38
--- /dev/null
+++ b/provisioner/vendor/github.com/gorilla/mux/mux.go
@@ -0,0 +1,542 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"path"
+	"regexp"
+	"strings"
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+	return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+//     var router = mux.NewRouter()
+//
+//     func main() {
+//         http.Handle("/", router)
+//     }
+//
+// Or, for Google App Engine, register it in an init() function:
+//
+//     func init() {
+//         http.Handle("/", router)
+//     }
+//
+// This will send all incoming requests to the router.
+type Router struct {
+	// Configurable Handler to be used when no route matches.
+	NotFoundHandler http.Handler
+	// Parent route, if this is a subrouter.
+	parent parentRoute
+	// Routes to be matched, in order.
+	routes []*Route
+	// Routes by name for URL building.
+	namedRoutes map[string]*Route
+	// See Router.StrictSlash(). This defines the flag for new routes.
+	strictSlash bool
+	// See Router.SkipClean(). This defines the flag for new routes.
+	skipClean bool
+	// If true, do not clear the request context after handling the request.
+	// This has no effect when go1.7+ is used, since the context is stored
+	// on the request itself.
+	KeepContext bool
+	// see Router.UseEncodedPath(). This defines a flag for all routes.
+	useEncodedPath bool
+}
+
+// Match matches registered routes against the request.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+	for _, route := range r.routes {
+		if route.Match(req, match) {
+			return true
+		}
+	}
+
+	// Closest match for a router (includes sub-routers)
+	if r.NotFoundHandler != nil {
+		match.Handler = r.NotFoundHandler
+		return true
+	}
+	return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	if !r.skipClean {
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = getPath(req)
+		}
+		// Clean path to canonical form and redirect.
+		if p := cleanPath(path); p != path {
+
+			// Added 3 lines (Philip Schlump) - the redirect was dropping the
+			// query string and fragment from the URL. This matches the fix in
+			// go 1.2 rc 4 for the same problem. Go issue:
+			// http://code.google.com/p/go/issues/detail?id=5252
+			url := *req.URL
+			url.Path = p
+			p = url.String()
+
+			w.Header().Set("Location", p)
+			w.WriteHeader(http.StatusMovedPermanently)
+			return
+		}
+	}
+	var match RouteMatch
+	var handler http.Handler
+	if r.Match(req, &match) {
+		handler = match.Handler
+		req = setVars(req, match.Vars)
+		req = setCurrentRoute(req, match.Route)
+	}
+	if handler == nil {
+		handler = http.NotFoundHandler()
+	}
+	if !r.KeepContext {
+		defer contextClear(req)
+	}
+	handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
+func (r *Router) StrictSlash(value bool) *Router {
+	r.strictSlash = value
+	return r
+}
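+
+// Illustrative sketch (not part of the upstream library): with StrictSlash
+// enabled, a request for "/products" is redirected to the registered
+// "/products/" route (and vice versa).
+//
+//	r := NewRouter().StrictSlash(true)
+//	r.HandleFunc("/products/", ProductsHandler)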
+
+// SkipClean defines the path cleaning behaviour for new routes. The initial
+// value is false. Users should be careful about which routes are not cleaned.
+//
+// When true, if the route path is "/path//to", it will remain with the double
+// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
+//
+// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
+// become /fetch/http/xkcd.com/534
+func (r *Router) SkipClean(value bool) *Router {
+	r.skipClean = value
+	return r
+}
+
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// E.g. "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+// This behavior has the drawback of needing to match routes against
+// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix)
+// to r.URL.Path will not affect routing when this flag is on and thus may
+// induce unintended behavior.
+//
+// If not called, the router will match the unencoded path to the routes.
+// E.g. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to".
+func (r *Router) UseEncodedPath() *Router {
+	r.useEncodedPath = true
+	return r
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Router) getNamedRoutes() map[string]*Route {
+	if r.namedRoutes == nil {
+		if r.parent != nil {
+			r.namedRoutes = r.parent.getNamedRoutes()
+		} else {
+			r.namedRoutes = make(map[string]*Route)
+		}
+	}
+	return r.namedRoutes
+}
+
+// getRegexpGroup returns regexp definitions from the parent route, if any.
+func (r *Router) getRegexpGroup() *routeRegexpGroup {
+	if r.parent != nil {
+		return r.parent.getRegexpGroup()
+	}
+	return nil
+}
+
+func (r *Router) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	return m
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+	route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath}
+	r.routes = append(r.routes, route)
+	return route
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+	return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+	*http.Request)) *Route {
+	return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+	return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+	return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+	return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+	return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+	return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+	return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+	return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+	return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+	return r.NewRoute().BuildVarsFunc(f)
+}
+
+// Walk walks the router and all its sub-routers, calling walkFn for each route
+// in the tree. The routes are walked in the order they were added. Sub-routers
+// are explored depth-first.
+func (r *Router) Walk(walkFn WalkFunc) error {
+	return r.walk(walkFn, []*Route{})
+}
+
+// SkipRouter is used as a return value from WalkFuncs to indicate that the
+// router that walk is about to descend down to should be skipped.
+var SkipRouter = errors.New("skip this router")
+
+// WalkFunc is the type of the function called for each route visited by Walk.
+// At every invocation, it is given the current route, and the current router,
+// and a list of ancestor routes that lead to the current route.
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
+
+func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
+	for _, t := range r.routes {
+		if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" {
+			continue
+		}
+
+		err := walkFn(t, r, ancestors)
+		if err == SkipRouter {
+			continue
+		}
+		if err != nil {
+			return err
+		}
+		for _, sr := range t.matchers {
+			if h, ok := sr.(*Router); ok {
+				err := h.walk(walkFn, ancestors)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		if h, ok := t.handler.(*Router); ok {
+			ancestors = append(ancestors, t)
+			err := h.walk(walkFn, ancestors)
+			if err != nil {
+				return err
+			}
+			ancestors = ancestors[:len(ancestors)-1]
+		}
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
+// RouteMatch stores information about a matched route.
+type RouteMatch struct {
+	Route   *Route
+	Handler http.Handler
+	Vars    map[string]string
+}
+
+type contextKey int
+
+const (
+	varsKey contextKey = iota
+	routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
+func Vars(r *http.Request) map[string]string {
+	if rv := contextGet(r, varsKey); rv != nil {
+		return rv.(map[string]string)
+	}
+	return nil
+}
+
+// CurrentRoute returns the matched route for the current request, if any.
+// This only works when called inside the handler of the matched route
+// because the matched route is stored in the request context which is cleared
+// after the handler returns, unless the KeepContext option is set on the
+// Router.
+func CurrentRoute(r *http.Request) *Route {
+	if rv := contextGet(r, routeKey); rv != nil {
+		return rv.(*Route)
+	}
+	return nil
+}
+
+func setVars(r *http.Request, val interface{}) *http.Request {
+	return contextSet(r, varsKey, val)
+}
+
+func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
+	return contextSet(r, routeKey, val)
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// getPath returns the escaped path if possible, doing what URL.EscapedPath()
+// (added in go1.5) does.
+func getPath(req *http.Request) string {
+	if req.RequestURI != "" {
+		// Extract the path from RequestURI (which is escaped, unlike URL.Path)
+		// as detailed in https://golang.org/pkg/net/url/#URL,
+		// as a server-side workaround for Go < 1.5:
+		// http://localhost/path/here?v=1 -> /path/here
+		path := req.RequestURI
+		path = strings.TrimPrefix(path, req.URL.Scheme+`://`)
+		path = strings.TrimPrefix(path, req.URL.Host)
+		if i := strings.LastIndex(path, "?"); i > -1 {
+			path = path[:i]
+		}
+		if i := strings.LastIndex(path, "#"); i > -1 {
+			path = path[:i]
+		}
+		return path
+	}
+	return req.URL.Path
+}
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+// Borrowed from the net/http package.
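+// For example (illustrative): cleanPath("/a/b/../c/") returns "/a/c/".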
+func cleanPath(p string) string {
+	if p == "" {
+		return "/"
+	}
+	if p[0] != '/' {
+		p = "/" + p
+	}
+	np := path.Clean(p)
+	// path.Clean removes trailing slash except for root;
+	// put the trailing slash back if necessary.
+	if p[len(p)-1] == '/' && np != "/" {
+		np += "/"
+	}
+
+	return np
+}
+
+// uniqueVars returns an error if two slices contain duplicated strings.
+func uniqueVars(s1, s2 []string) error {
+	for _, v1 := range s1 {
+		for _, v2 := range s2 {
+			if v1 == v2 {
+				return fmt.Errorf("mux: duplicated route variable %q", v2)
+			}
+		}
+	}
+	return nil
+}
+
+// checkPairs returns the count of strings passed in, and an error if
+// the count is not an even number.
+func checkPairs(pairs ...string) (int, error) {
+	length := len(pairs)
+	if length%2 != 0 {
+		return length, fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+	}
+	return length, nil
+}
+
+// mapFromPairsToString converts variadic string parameters to a
+// string to string map.
+func mapFromPairsToString(pairs ...string) (map[string]string, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]string, length/2)
+	for i := 0; i < length; i += 2 {
+		m[pairs[i]] = pairs[i+1]
+	}
+	return m, nil
+}
+
+// mapFromPairsToRegex converts variadic string parameters to a
+// string to regex map.
+func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]*regexp.Regexp, length/2)
+	for i := 0; i < length; i += 2 {
+		regex, err := regexp.Compile(pairs[i+1])
+		if err != nil {
+			return nil, err
+		}
+		m[pairs[i]] = regex
+	}
+	return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+	for _, v := range arr {
+		if v == value {
+			return true
+		}
+	}
+	return false
+}
+
+// matchMapWithString returns true if the given key/value pairs exist in a given map.
+func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != "" {
+			// If value was defined as an empty string we only check that the
+			// key exists. Otherwise we also check for equality.
+			valueExists := false
+			for _, value := range values {
+				if v == value {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
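+
+// Example (illustrative only, not part of the upstream source): matching
+// request headers, where an empty expected value only asserts that the key
+// is present:
+//
+//	hdr := http.Header{"Content-Type": {"application/json"}}
+//	matchMapWithString(map[string]string{"Content-Type": "application/json"}, hdr, true) // true
+//	matchMapWithString(map[string]string{"Content-Type": ""}, hdr, true)                 // true
+//	matchMapWithString(map[string]string{"Accept": ""}, hdr, true)                       // false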
+
+// matchMapWithRegex returns true if the given key/value pairs exist in a given
+// map, with the values matched against the corresponding regex.
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != nil {
+			// If the regexp was defined as nil we only check that the
+			// key exists. Otherwise we also match the values against it.
+			valueExists := false
+			for _, value := range values {
+				if v.MatchString(value) {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
diff --git a/provisioner/vendor/github.com/gorilla/mux/regexp.go b/provisioner/vendor/github.com/gorilla/mux/regexp.go
new file mode 100644
index 0000000..0189ad3
--- /dev/null
+++ b/provisioner/vendor/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,323 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
+func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) {
+	// Check if it is well-formed.
+	idxs, errBraces := braceIndices(tpl)
+	if errBraces != nil {
+		return nil, errBraces
+	}
+	// Backup the original.
+	template := tpl
+	// Now let's parse it.
+	defaultPattern := "[^/]+"
+	if matchQuery {
+		defaultPattern = "[^?&]*"
+	} else if matchHost {
+		defaultPattern = "[^.]+"
+		matchPrefix = false
+	}
+	// Only match a strict slash when matching an exact path (not a prefix,
+	// host or query).
+	if matchPrefix || matchHost || matchQuery {
+		strictSlash = false
+	}
+	// Set a flag for strictSlash.
+	endSlash := false
+	if strictSlash && strings.HasSuffix(tpl, "/") {
+		tpl = tpl[:len(tpl)-1]
+		endSlash = true
+	}
+	varsN := make([]string, len(idxs)/2)
+	varsR := make([]*regexp.Regexp, len(idxs)/2)
+	pattern := bytes.NewBufferString("")
+	pattern.WriteByte('^')
+	reverse := bytes.NewBufferString("")
+	var end int
+	var err error
+	for i := 0; i < len(idxs); i += 2 {
+		// Set all values we are interested in.
+		raw := tpl[end:idxs[i]]
+		end = idxs[i+1]
+		parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+		name := parts[0]
+		patt := defaultPattern
+		if len(parts) == 2 {
+			patt = parts[1]
+		}
+		// Name or pattern can't be empty.
+		if name == "" || patt == "" {
+			return nil, fmt.Errorf("mux: missing name or pattern in %q",
+				tpl[idxs[i]:end])
+		}
+		// Build the regexp pattern.
+		fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
+
+		// Build the reverse template.
+		fmt.Fprintf(reverse, "%s%%s", raw)
+
+		// Append variable name and compiled pattern.
+		varsN[i/2] = name
+		varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Add the remaining.
+	raw := tpl[end:]
+	pattern.WriteString(regexp.QuoteMeta(raw))
+	if strictSlash {
+		pattern.WriteString("[/]?")
+	}
+	if matchQuery {
+		// Add the default pattern if the query value is empty
+		if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
+			pattern.WriteString(defaultPattern)
+		}
+	}
+	if !matchPrefix {
+		pattern.WriteByte('$')
+	}
+	reverse.WriteString(raw)
+	if endSlash {
+		reverse.WriteByte('/')
+	}
+	// Compile full regexp.
+	reg, errCompile := regexp.Compile(pattern.String())
+	if errCompile != nil {
+		return nil, errCompile
+	}
+
+	// Check for capturing groups which used to work in older versions
+	if reg.NumSubexp() != len(idxs)/2 {
+		panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
+			"Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
+	}
+
+	// Done!
+	return &routeRegexp{
+		template:       template,
+		matchHost:      matchHost,
+		matchQuery:     matchQuery,
+		strictSlash:    strictSlash,
+		useEncodedPath: useEncodedPath,
+		regexp:         reg,
+		reverse:        reverse.String(),
+		varsN:          varsN,
+		varsR:          varsR,
+	}, nil
+}
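+
+// Example (illustrative only, not part of the upstream source): for the path
+// template "/articles/{id:[0-9]+}" (matchHost, matchPrefix, matchQuery and
+// strictSlash all false) the function produces:
+//
+//	regexp:  ^/articles/(?P<v0>[0-9]+)$
+//	reverse: /articles/%s
+//	varsN:   ["id"]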
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+	// The unmodified template.
+	template string
+	// True for host match, false for path or query string match.
+	matchHost bool
+	// True for query string match, false for path and host match.
+	matchQuery bool
+	// The strictSlash value defined on the route, but disabled if PathPrefix was used.
+	strictSlash bool
+	// Determines whether to use encoded path from getPath function or unencoded
+	// req.URL.Path for path matching
+	useEncodedPath bool
+	// Expanded regexp.
+	regexp *regexp.Regexp
+	// Reverse template.
+	reverse string
+	// Variable names.
+	varsN []string
+	// Variable regexps (validators).
+	varsR []*regexp.Regexp
+}
+
+// Match matches the regexp against the URL host or path.
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
+	if !r.matchHost {
+		if r.matchQuery {
+			return r.matchQueryString(req)
+		}
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = getPath(req)
+		}
+		return r.regexp.MatchString(path)
+	}
+
+	return r.regexp.MatchString(getHost(req))
+}
+
+// url builds a URL part using the given values.
+func (r *routeRegexp) url(values map[string]string) (string, error) {
+	urlValues := make([]interface{}, len(r.varsN))
+	for k, v := range r.varsN {
+		value, ok := values[v]
+		if !ok {
+			return "", fmt.Errorf("mux: missing route variable %q", v)
+		}
+		urlValues[k] = value
+	}
+	rv := fmt.Sprintf(r.reverse, urlValues...)
+	if !r.regexp.MatchString(rv) {
+		// The URL is checked against the full regexp, instead of checking
+		// individual variables. This is faster but to provide a good error
+		// message, we check individual regexps if the URL doesn't match.
+		for k, v := range r.varsN {
+			if !r.varsR[k].MatchString(values[v]) {
+				return "", fmt.Errorf(
+					"mux: variable %q doesn't match, expected %q", values[v],
+					r.varsR[k].String())
+			}
+		}
+	}
+	return rv, nil
+}
+
+// getURLQuery returns a single query parameter from a request URL.
+// For a URL with foo=bar&baz=ding, we return only the relevant key
+// value pair for the routeRegexp.
+func (r *routeRegexp) getURLQuery(req *http.Request) string {
+	if !r.matchQuery {
+		return ""
+	}
+	templateKey := strings.SplitN(r.template, "=", 2)[0]
+	for key, vals := range req.URL.Query() {
+		if key == templateKey && len(vals) > 0 {
+			return key + "=" + vals[0]
+		}
+	}
+	return ""
+}
+
+func (r *routeRegexp) matchQueryString(req *http.Request) bool {
+	return r.regexp.MatchString(r.getURLQuery(req))
+}
+
+// braceIndices returns the first level curly brace indices from a string.
+// It returns an error in case of unbalanced braces.
+func braceIndices(s string) ([]int, error) {
+	var level, idx int
+	var idxs []int
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '{':
+			if level++; level == 1 {
+				idx = i
+			}
+		case '}':
+			if level--; level == 0 {
+				idxs = append(idxs, idx, i+1)
+			} else if level < 0 {
+				return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+			}
+		}
+	}
+	if level != 0 {
+		return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+	}
+	return idxs, nil
+}
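+
+// Example (illustrative only, not part of the upstream source):
+//
+//	braceIndices("{a}/{b}") // returns []int{0, 3, 4, 7}, nil
+//	braceIndices("{a")      // returns nil and an "unbalanced braces" error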
+
+// varGroupName builds a capturing group name for the indexed variable.
+func varGroupName(idx int) string {
+	return "v" + strconv.Itoa(idx)
+}
+
+// ----------------------------------------------------------------------------
+// routeRegexpGroup
+// ----------------------------------------------------------------------------
+
+// routeRegexpGroup groups the route matchers that carry variables.
+type routeRegexpGroup struct {
+	host    *routeRegexp
+	path    *routeRegexp
+	queries []*routeRegexp
+}
+
+// setMatch extracts the variables from the URL once a route matches.
+func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+	// Store host variables.
+	if v.host != nil {
+		host := getHost(req)
+		matches := v.host.regexp.FindStringSubmatchIndex(host)
+		if len(matches) > 0 {
+			extractVars(host, matches, v.host.varsN, m.Vars)
+		}
+	}
+	path := req.URL.Path
+	if r.useEncodedPath {
+		path = getPath(req)
+	}
+	// Store path variables.
+	if v.path != nil {
+		matches := v.path.regexp.FindStringSubmatchIndex(path)
+		if len(matches) > 0 {
+			extractVars(path, matches, v.path.varsN, m.Vars)
+			// Check if we should redirect.
+			if v.path.strictSlash {
+				p1 := strings.HasSuffix(path, "/")
+				p2 := strings.HasSuffix(v.path.template, "/")
+				if p1 != p2 {
+					u, _ := url.Parse(req.URL.String())
+					if p1 {
+						u.Path = u.Path[:len(u.Path)-1]
+					} else {
+						u.Path += "/"
+					}
+					m.Handler = http.RedirectHandler(u.String(), 301)
+				}
+			}
+		}
+	}
+	// Store query string variables.
+	for _, q := range v.queries {
+		queryURL := q.getURLQuery(req)
+		matches := q.regexp.FindStringSubmatchIndex(queryURL)
+		if len(matches) > 0 {
+			extractVars(queryURL, matches, q.varsN, m.Vars)
+		}
+	}
+}
+
+// getHost tries its best to return the request host.
+func getHost(r *http.Request) string {
+	if r.URL.IsAbs() {
+		return r.URL.Host
+	}
+	host := r.Host
+	// Slice off any port information.
+	if i := strings.Index(host, ":"); i != -1 {
+		host = host[:i]
+	}
+	return host
+}
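+
+// Example (illustrative only, not part of the upstream source): for a
+// non-absolute request URL, getHost strips any port from the Host header:
+//
+//	req.Host == "example.com:8080" // getHost(req) == "example.com"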
+
+func extractVars(input string, matches []int, names []string, output map[string]string) {
+	for i, name := range names {
+		output[name] = input[matches[2*i+2]:matches[2*i+3]]
+	}
+}
diff --git a/provisioner/vendor/github.com/gorilla/mux/route.go b/provisioner/vendor/github.com/gorilla/mux/route.go
new file mode 100644
index 0000000..9221915
--- /dev/null
+++ b/provisioner/vendor/github.com/gorilla/mux/route.go
@@ -0,0 +1,636 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+)
+
+// Route stores information to match a request and build URLs.
+type Route struct {
+	// Parent where the route was registered (a Router).
+	parent parentRoute
+	// Request handler for the route.
+	handler http.Handler
+	// List of matchers.
+	matchers []matcher
+	// Manager for the variables from host and path.
+	regexp *routeRegexpGroup
+	// If true, when the path pattern is "/path/", accessing "/path" will
+	// redirect to the former and vice versa.
+	strictSlash bool
+	// If true, when the path pattern is "/path//to", accessing "/path//to"
+	// will not be cleaned and redirected to "/path/to"
+	skipClean bool
+	// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
+	useEncodedPath bool
+	// If true, this route never matches: it is only used to build URLs.
+	buildOnly bool
+	// The name used to build URLs.
+	name string
+	// Error resulted from building a route.
+	err error
+
+	buildVarsFunc BuildVarsFunc
+}
+
+func (r *Route) SkipClean() bool {
+	return r.skipClean
+}
+
+// Match matches the route against the request.
+func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
+	if r.buildOnly || r.err != nil {
+		return false
+	}
+	// Match everything.
+	for _, m := range r.matchers {
+		if matched := m.Match(req, match); !matched {
+			return false
+		}
+	}
+	// Yay, we have a match. Let's collect some info about it.
+	if match.Route == nil {
+		match.Route = r
+	}
+	if match.Handler == nil {
+		match.Handler = r.handler
+	}
+	if match.Vars == nil {
+		match.Vars = make(map[string]string)
+	}
+	// Set variables.
+	if r.regexp != nil {
+		r.regexp.setMatch(req, match, r)
+	}
+	return true
+}
+
+// ----------------------------------------------------------------------------
+// Route attributes
+// ----------------------------------------------------------------------------
+
+// GetError returns an error resulted from building the route, if any.
+func (r *Route) GetError() error {
+	return r.err
+}
+
+// BuildOnly sets the route to never match: it is only used to build URLs.
+func (r *Route) BuildOnly() *Route {
+	r.buildOnly = true
+	return r
+}
+
+// Handler --------------------------------------------------------------------
+
+// Handler sets a handler for the route.
+func (r *Route) Handler(handler http.Handler) *Route {
+	if r.err == nil {
+		r.handler = handler
+	}
+	return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+	return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+	return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// If the name was registered already it will be overwritten.
+func (r *Route) Name(name string) *Route {
+	if r.name != "" {
+		r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+			r.name, name)
+	}
+	if r.err == nil {
+		r.name = name
+		r.getNamedRoutes()[name] = r
+	}
+	return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+	return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+	Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+func (r *Route) addMatcher(m matcher) *Route {
+	if r.err == nil {
+		r.matchers = append(r.matchers, m)
+	}
+	return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
+	if r.err != nil {
+		return r.err
+	}
+	r.regexp = r.getRegexpGroup()
+	if !matchHost && !matchQuery {
+		if len(tpl) == 0 || tpl[0] != '/' {
+			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+		}
+		if r.regexp.path != nil {
+			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+		}
+	}
+	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath)
+	if err != nil {
+		return err
+	}
+	for _, q := range r.regexp.queries {
+		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+			return err
+		}
+	}
+	if matchHost {
+		if r.regexp.path != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+				return err
+			}
+		}
+		r.regexp.host = rr
+	} else {
+		if r.regexp.host != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+				return err
+			}
+		}
+		if matchQuery {
+			r.regexp.queries = append(r.regexp.queries, rr)
+		} else {
+			r.regexp.path = rr
+		}
+	}
+	r.addMatcher(rr)
+	return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+//     r := mux.NewRouter()
+//     r.Headers("Content-Type", "application/json",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]string
+		headers, r.err = mapFromPairsToString(pairs...)
+		return r.addMatcher(headerMatcher(headers))
+	}
+	return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for the header
+type headerRegexMatcher map[string]*regexp.Regexp
+
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
+// support. For example:
+//
+//     r := mux.NewRouter()
+//     r.HeadersRegexp("Content-Type", "application/(text|json)",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match both
+// regular expressions.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]*regexp.Regexp
+		headers, r.err = mapFromPairsToRegex(pairs...)
+		return r.addMatcher(headerRegexMatcher(headers))
+	}
+	return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Host("www.example.com")
+//     r.Host("{subdomain}.domain.com")
+//     r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// by calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, true, false, false)
+	return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+// Match returns the match for a given request.
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+	return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+	return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+	for k, v := range methods {
+		methods[k] = strings.ToUpper(v)
+	}
+	return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Path("/products/").Handler(ProductsHandler)
+//     r.Path("/products/{key}").Handler(ProductsHandler)
+//     r.Path("/articles/{category}/{id:[0-9]+}").
+//       Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// by calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, false, false)
+	return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, true, false)
+	return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined query
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything except '?' and '&'.
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+	length := len(pairs)
+	if length%2 != 0 {
+		r.err = fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+		return nil
+	}
+	for i := 0; i < length; i += 2 {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
+			return r
+		}
+	}
+
+	return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.URL.Scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+func (r *Route) Schemes(schemes ...string) *Route {
+	for k, v := range schemes {
+		schemes[k] = strings.ToLower(v)
+	}
+	return r.addMatcher(schemeMatcher(schemes))
+}
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+	r.buildVarsFunc = f
+	return r
+}
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+//     r := mux.NewRouter()
+//     s := r.Host("www.example.com").Subrouter()
+//     s.HandleFunc("/products/", ProductsHandler)
+//     s.HandleFunc("/products/{key}", ProductHandler)
+//     s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+	router := &Router{parent: r, strictSlash: r.strictSlash}
+	r.addMatcher(router)
+	return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Name("article")
+//
+// ...a URL for it can be built using:
+//
+//     url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return a url.URL with the following path:
+//
+//     "/articles/technology/42"
+//
+// This also works for host variables:
+//
+//     r := mux.NewRouter()
+//     r.Host("{subdomain}.domain.com").
+//       HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Name("article")
+//
+//     // url.String() will be "http://news.domain.com/articles/technology/42"
+//     url, err := r.Get("article").URL("subdomain", "news",
+//                                      "category", "technology",
+//                                      "id", "42")
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil {
+		return nil, errors.New("mux: route doesn't have a host or path")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	var scheme, host, path string
+	if r.regexp.host != nil {
+		// Set a default scheme.
+		scheme = "http"
+		if host, err = r.regexp.host.url(values); err != nil {
+			return nil, err
+		}
+	}
+	if r.regexp.path != nil {
+		if path, err = r.regexp.path.url(values); err != nil {
+			return nil, err
+		}
+	}
+	return &url.URL{
+		Scheme: scheme,
+		Host:   host,
+		Path:   path,
+	}, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil || r.regexp.host == nil {
+		return nil, errors.New("mux: route doesn't have a host")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	host, err := r.regexp.host.url(values)
+	if err != nil {
+		return nil, err
+	}
+	return &url.URL{
+		Scheme: "http",
+		Host:   host,
+	}, nil
+}
+
+// URLPath builds the path part of the URL for a route. See Route.URL().
+//
+// The route must have a path defined.
+func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil || r.regexp.path == nil {
+		return nil, errors.New("mux: route doesn't have a path")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	path, err := r.regexp.path.url(values)
+	if err != nil {
+		return nil, err
+	}
+	return &url.URL{
+		Path: path,
+	}, nil
+}
+
+// GetPathTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp == nil || r.regexp.path == nil {
+		return "", errors.New("mux: route doesn't have a path")
+	}
+	return r.regexp.path.template, nil
+}
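+
+// Example (illustrative only, not part of the upstream source), assuming a
+// router with a named route as in the URL() documentation above:
+//
+//	tpl, err := r.Get("article").GetPathTemplate()
+//	// tpl == "/articles/{category}/{id:[0-9]+}", err == nil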
+
+// GetHostTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a host.
+func (r *Route) GetHostTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp == nil || r.regexp.host == nil {
+		return "", errors.New("mux: route doesn't have a host")
+	}
+	return r.regexp.host.template, nil
+}
+
+// prepareVars converts the route variable pairs into a map. If the route has a
+// BuildVarsFunc, it is invoked.
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
+	m, err := mapFromPairsToString(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	return r.buildVars(m), nil
+}
+
+func (r *Route) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	if r.buildVarsFunc != nil {
+		m = r.buildVarsFunc(m)
+	}
+	return m
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// parentRoute allows routes to know about parent host and path definitions.
+type parentRoute interface {
+	getNamedRoutes() map[string]*Route
+	getRegexpGroup() *routeRegexpGroup
+	buildVars(map[string]string) map[string]string
+}
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Route) getNamedRoutes() map[string]*Route {
+	if r.parent == nil {
+		// During tests router is not always set.
+		r.parent = NewRouter()
+	}
+	return r.parent.getNamedRoutes()
+}
+
+// getRegexpGroup returns regexp definitions from this route.
+func (r *Route) getRegexpGroup() *routeRegexpGroup {
+	if r.regexp == nil {
+		if r.parent == nil {
+			// During tests router is not always set.
+			r.parent = NewRouter()
+		}
+		regexp := r.parent.getRegexpGroup()
+		if regexp == nil {
+			r.regexp = new(routeRegexpGroup)
+		} else {
+			// Copy.
+			r.regexp = &routeRegexpGroup{
+				host:    regexp.host,
+				path:    regexp.path,
+				queries: regexp.queries,
+			}
+		}
+	}
+	return r.regexp
+}
diff --git a/provisioner/vendor/github.com/hashicorp/consul/LICENSE b/provisioner/vendor/github.com/hashicorp/consul/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of  any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/README.md b/provisioner/vendor/github.com/hashicorp/consul/api/README.md
new file mode 100644
index 0000000..7e64988
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/README.md
@@ -0,0 +1,43 @@
+Consul API client
+=================
+
+The `api` package attempts to provide programmatic access to the
+full Consul API.
+
+Currently, all of the Consul APIs included in version 0.6.0 are supported.
+
+Documentation
+=============
+
+The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api)
+
+Usage
+=====
+
+Below is an example of using the Consul client:
+
+```go
+// Get a new client
+client, err := api.NewClient(api.DefaultConfig())
+if err != nil {
+    panic(err)
+}
+
+// Get a handle to the KV API
+kv := client.KV()
+
+// PUT a new KV pair
+p := &api.KVPair{Key: "foo", Value: []byte("test")}
+_, err = kv.Put(p, nil)
+if err != nil {
+    panic(err)
+}
+
+// Lookup the pair
+pair, _, err := kv.Get("foo", nil)
+if err != nil {
+    panic(err)
+}
+fmt.Printf("KV: %v", pair)
+```
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/acl.go b/provisioner/vendor/github.com/hashicorp/consul/api/acl.go
new file mode 100644
index 0000000..c3fb0d5
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/acl.go
@@ -0,0 +1,140 @@
+package api
+
+const (
+	// ACLClientType is the client type token
+	ACLClientType = "client"
+
+	// ACLManagementType is the management type token
+	ACLManagementType = "management"
+)
+
+// ACLEntry is used to represent an ACL entry
+type ACLEntry struct {
+	CreateIndex uint64
+	ModifyIndex uint64
+	ID          string
+	Name        string
+	Type        string
+	Rules       string
+}
+
+// ACL can be used to query the ACL endpoints
+type ACL struct {
+	c *Client
+}
+
+// ACL returns a handle to the ACL endpoints
+func (c *Client) ACL() *ACL {
+	return &ACL{c}
+}
+
+// Create is used to generate a new token with the given parameters
+func (a *ACL) Create(acl *ACLEntry, q *WriteOptions) (string, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/create")
+	r.setWriteOptions(q)
+	r.obj = acl
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Update is used to update the rules of an existing token
+func (a *ACL) Update(acl *ACLEntry, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/update")
+	r.setWriteOptions(q)
+	r.obj = acl
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// Destroy is used to destroy a given ACL token ID
+func (a *ACL) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/destroy/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	return wm, nil
+}
+
+// Clone is used to return a new token cloned from an existing one
+func (a *ACL) Clone(id string, q *WriteOptions) (string, *WriteMeta, error) {
+	r := a.c.newRequest("PUT", "/v1/acl/clone/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Info is used to query for information about an ACL token
+func (a *ACL) Info(id string, q *QueryOptions) (*ACLEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/info/"+id)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// List is used to get all the ACL tokens
+func (a *ACL) List(q *QueryOptions) ([]*ACLEntry, *QueryMeta, error) {
+	r := a.c.newRequest("GET", "/v1/acl/list")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*ACLEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
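+
+// Example (illustrative only, not part of the upstream source), assuming a
+// reachable local agent with ACLs enabled and a management token in the
+// client configuration:
+//
+//	client, _ := api.NewClient(api.DefaultConfig())
+//	id, _, err := client.ACL().Create(&api.ACLEntry{
+//		Name:  "read-only",
+//		Type:  api.ACLClientType,
+//		Rules: `key "" { policy = "read" }`,
+//	}, nil)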
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/agent.go b/provisioner/vendor/github.com/hashicorp/consul/api/agent.go
new file mode 100644
index 0000000..1893d1c
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/agent.go
@@ -0,0 +1,471 @@
+package api
+
+import (
+	"bufio"
+	"fmt"
+)
+
+// AgentCheck represents a check known to the agent
+type AgentCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+}
+
+// AgentService represents a service known to the agent
+type AgentService struct {
+	ID                string
+	Service           string
+	Tags              []string
+	Port              int
+	Address           string
+	EnableTagOverride bool
+}
+
+// AgentMember represents a cluster member known to the agent
+type AgentMember struct {
+	Name        string
+	Addr        string
+	Port        uint16
+	Tags        map[string]string
+	Status      int
+	ProtocolMin uint8
+	ProtocolMax uint8
+	ProtocolCur uint8
+	DelegateMin uint8
+	DelegateMax uint8
+	DelegateCur uint8
+}
+
+// AgentServiceRegistration is used to register a new service
+type AgentServiceRegistration struct {
+	ID                string   `json:",omitempty"`
+	Name              string   `json:",omitempty"`
+	Tags              []string `json:",omitempty"`
+	Port              int      `json:",omitempty"`
+	Address           string   `json:",omitempty"`
+	EnableTagOverride bool     `json:",omitempty"`
+	Check             *AgentServiceCheck
+	Checks            AgentServiceChecks
+}
+
+// AgentCheckRegistration is used to register a new check
+type AgentCheckRegistration struct {
+	ID        string `json:",omitempty"`
+	Name      string `json:",omitempty"`
+	Notes     string `json:",omitempty"`
+	ServiceID string `json:",omitempty"`
+	AgentServiceCheck
+}
+
+// AgentServiceCheck is used to define a node or service level check
+type AgentServiceCheck struct {
+	Script            string `json:",omitempty"`
+	DockerContainerID string `json:",omitempty"`
+	Shell             string `json:",omitempty"` // Only supported for Docker.
+	Interval          string `json:",omitempty"`
+	Timeout           string `json:",omitempty"`
+	TTL               string `json:",omitempty"`
+	HTTP              string `json:",omitempty"`
+	TCP               string `json:",omitempty"`
+	Status            string `json:",omitempty"`
+	Notes             string `json:",omitempty"`
+	TLSSkipVerify     bool   `json:",omitempty"`
+
+	// In Consul 0.7 and later, checks that are associated with a service
+	// may also contain this optional DeregisterCriticalServiceAfter field,
+	// which is a timeout in the same Go time format as Interval and TTL. If
+	// a check is in the critical state for more than this configured value,
+	// then its associated service (and all of its associated checks) will
+	// automatically be deregistered.
+	DeregisterCriticalServiceAfter string `json:",omitempty"`
+}
+type AgentServiceChecks []*AgentServiceCheck
+
+// Agent can be used to query the Agent endpoints
+type Agent struct {
+	c *Client
+
+	// cache the node name
+	nodeName string
+}
+
+// Agent returns a handle to the agent endpoints
+func (c *Client) Agent() *Agent {
+	return &Agent{c: c}
+}
+
+// Self is used to query the agent we are speaking to for
+// information about itself
+func (a *Agent) Self() (map[string]map[string]interface{}, error) {
+	r := a.c.newRequest("GET", "/v1/agent/self")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]map[string]interface{}
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Reload triggers a configuration reload for the agent we are connected to.
+func (a *Agent) Reload() error {
+	r := a.c.newRequest("PUT", "/v1/agent/reload")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// NodeName is used to get the node name of the agent
+func (a *Agent) NodeName() (string, error) {
+	if a.nodeName != "" {
+		return a.nodeName, nil
+	}
+	info, err := a.Self()
+	if err != nil {
+		return "", err
+	}
+	name := info["Config"]["NodeName"].(string)
+	a.nodeName = name
+	return name, nil
+}
+
+// Checks returns the locally registered checks
+func (a *Agent) Checks() (map[string]*AgentCheck, error) {
+	r := a.c.newRequest("GET", "/v1/agent/checks")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]*AgentCheck
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Services returns the locally registered services
+func (a *Agent) Services() (map[string]*AgentService, error) {
+	r := a.c.newRequest("GET", "/v1/agent/services")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out map[string]*AgentService
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Members returns the known gossip members. The WAN
+// flag can be used to query a server for WAN members.
+func (a *Agent) Members(wan bool) ([]*AgentMember, error) {
+	r := a.c.newRequest("GET", "/v1/agent/members")
+	if wan {
+		r.params.Set("wan", "1")
+	}
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*AgentMember
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// ServiceRegister is used to register a new service with
+// the local agent
+func (a *Agent) ServiceRegister(service *AgentServiceRegistration) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/register")
+	r.obj = service
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
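As a usage sketch (editorial, not part of the vendored source), registering a service together with a TTL check against a local agent looks roughly like this; the service name, ID, port, and reap window are hypothetical:

package main

import (
	"log"

	consul "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consul.NewClient(consul.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	// The TTL check starts out critical until the owning process
	// reports in; DeregisterCriticalServiceAfter reaps the service
	// if it stays critical for a minute.
	reg := &consul.AgentServiceRegistration{
		ID:   "provisioner-1",
		Name: "provisioner",
		Port: 4243,
		Check: &consul.AgentServiceCheck{
			TTL:                            "10s",
			DeregisterCriticalServiceAfter: "1m",
		},
	}
	if err := client.Agent().ServiceRegister(reg); err != nil {
		log.Fatal(err)
	}
}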
+// ServiceDeregister is used to deregister a service with
+// the local agent
+func (a *Agent) ServiceDeregister(serviceID string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/deregister/"+serviceID)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// PassTTL is used to set a TTL check to the passing state.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 or changed to use
+// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
+func (a *Agent) PassTTL(checkID, note string) error {
+	return a.updateTTL(checkID, note, "pass")
+}
+
+// WarnTTL is used to set a TTL check to the warning state.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 or changed to use
+// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
+func (a *Agent) WarnTTL(checkID, note string) error {
+	return a.updateTTL(checkID, note, "warn")
+}
+
+// FailTTL is used to set a TTL check to the failing state.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 or changed to use
+// UpdateTTL()'s endpoint and the server endpoints will be removed in 0.9.
+func (a *Agent) FailTTL(checkID, note string) error {
+	return a.updateTTL(checkID, note, "fail")
+}
+
+// updateTTL is used to update the TTL of a check. This is the internal
+// method that uses the old API that's present in Consul versions prior to
+// 0.6.4. Since Consul didn't have an analogous "update" API before, it seemed
+// ok to break this (former) UpdateTTL in favor of the new UpdateTTL below,
+// but keep the old Pass/Warn/Fail methods using the old API under the hood.
+//
+// DEPRECATION NOTICE: This interface is deprecated in favor of UpdateTTL().
+// The client interface will be removed in 0.8 and the server endpoints will
+// be removed in 0.9.
+func (a *Agent) updateTTL(checkID, note, status string) error {
+	switch status {
+	case "pass":
+	case "warn":
+	case "fail":
+	default:
+		return fmt.Errorf("Invalid status: %s", status)
+	}
+	endpoint := fmt.Sprintf("/v1/agent/check/%s/%s", status, checkID)
+	r := a.c.newRequest("PUT", endpoint)
+	r.params.Set("note", note)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// checkUpdate is the payload for a PUT for a check update.
+type checkUpdate struct {
+	// Status is one of the api.Health* states: HealthPassing
+	// ("passing"), HealthWarning ("warning"), or HealthCritical
+	// ("critical").
+	Status string
+
+	// Output is the information to post to the UI for operators as the
+	// output of the process that decided to hit the TTL check. This is
+	// different from the note field that's associated with the check
+	// itself.
+	Output string
+}
+
+// UpdateTTL is used to update the TTL of a check. This uses the newer API
+// that was introduced in Consul 0.6.4 and later. We translate the old status
+// strings for compatibility (though a newer version of Consul will still be
+// required to use this API).
+func (a *Agent) UpdateTTL(checkID, output, status string) error {
+	switch status {
+	case "pass", HealthPassing:
+		status = HealthPassing
+	case "warn", HealthWarning:
+		status = HealthWarning
+	case "fail", HealthCritical:
+		status = HealthCritical
+	default:
+		return fmt.Errorf("Invalid status: %s", status)
+	}
+
+	endpoint := fmt.Sprintf("/v1/agent/check/update/%s", checkID)
+	r := a.c.newRequest("PUT", endpoint)
+	r.obj = &checkUpdate{
+		Status: status,
+		Output: output,
+	}
+
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
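A TTL check only stays passing while its owner reports in before the TTL lapses. A heartbeat sketch (assuming the hypothetical "provisioner-1" registration above, and Consul's convention of naming a service's check "service:<service-id>"):

import (
	"log"
	"time"

	consul "github.com/hashicorp/consul/api"
)

func heartbeat(agent *consul.Agent, stop <-chan struct{}) {
	ticker := time.NewTicker(5 * time.Second) // comfortably inside the 10s TTL
	defer ticker.Stop()
	for {
		select {
		case <-stop:
			return
		case <-ticker.C:
			if err := agent.UpdateTTL("service:provisioner-1", "ok", "pass"); err != nil {
				log.Printf("TTL update failed: %v", err)
			}
		}
	}
}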
+// CheckRegister is used to register a new check with
+// the local agent
+func (a *Agent) CheckRegister(check *AgentCheckRegistration) error {
+	r := a.c.newRequest("PUT", "/v1/agent/check/register")
+	r.obj = check
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// CheckDeregister is used to deregister a check with
+// the local agent
+func (a *Agent) CheckDeregister(checkID string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/check/deregister/"+checkID)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// Join is used to instruct the agent to attempt a join to
+// another cluster member
+func (a *Agent) Join(addr string, wan bool) error {
+	r := a.c.newRequest("PUT", "/v1/agent/join/"+addr)
+	if wan {
+		r.params.Set("wan", "1")
+	}
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// Leave is used to have the agent gracefully leave the cluster and shutdown
+func (a *Agent) Leave() error {
+	r := a.c.newRequest("PUT", "/v1/agent/leave")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// ForceLeave is used to have the agent eject a failed node
+func (a *Agent) ForceLeave(node string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/force-leave/"+node)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// EnableServiceMaintenance toggles service maintenance mode on
+// for the given service ID.
+func (a *Agent) EnableServiceMaintenance(serviceID, reason string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
+	r.params.Set("enable", "true")
+	r.params.Set("reason", reason)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// DisableServiceMaintenance toggles service maintenance mode off
+// for the given service ID.
+func (a *Agent) DisableServiceMaintenance(serviceID string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/service/maintenance/"+serviceID)
+	r.params.Set("enable", "false")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// EnableNodeMaintenance toggles node maintenance mode on for the
+// agent we are connected to.
+func (a *Agent) EnableNodeMaintenance(reason string) error {
+	r := a.c.newRequest("PUT", "/v1/agent/maintenance")
+	r.params.Set("enable", "true")
+	r.params.Set("reason", reason)
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// DisableNodeMaintenance toggles node maintenance mode off for the
+// agent we are connected to.
+func (a *Agent) DisableNodeMaintenance() error {
+	r := a.c.newRequest("PUT", "/v1/agent/maintenance")
+	r.params.Set("enable", "false")
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// Monitor returns a channel which will receive streaming logs from the agent
+// Providing a non-nil stopCh can be used to close the connection and stop the
+// log stream
+func (a *Agent) Monitor(loglevel string, stopCh chan struct{}, q *QueryOptions) (chan string, error) {
+	r := a.c.newRequest("GET", "/v1/agent/monitor")
+	r.setQueryOptions(q)
+	if loglevel != "" {
+		r.params.Add("loglevel", loglevel)
+	}
+	_, resp, err := requireOK(a.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+
+	logCh := make(chan string, 64)
+	go func() {
+		defer resp.Body.Close()
+
+		scanner := bufio.NewScanner(resp.Body)
+		for {
+			select {
+			case <-stopCh:
+				close(logCh)
+				return
+			default:
+			}
+			if scanner.Scan() {
+				logCh <- scanner.Text()
+			}
+		}
+	}()
+
+	return logCh, nil
+}
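The channel-plus-stop-channel shape of Monitor leads to consumers like this sketch, which tails agent logs for thirty seconds and then closes the stream:

import (
	"fmt"
	"time"

	consul "github.com/hashicorp/consul/api"
)

func tailLogs(agent *consul.Agent) error {
	stop := make(chan struct{})
	logCh, err := agent.Monitor("info", stop, nil)
	if err != nil {
		return err
	}
	deadline := time.After(30 * time.Second)
	for {
		select {
		case line, ok := <-logCh:
			if !ok {
				return nil // reader goroutine closed the channel
			}
			fmt.Println(line)
		case <-deadline:
			close(stop) // signals the reader goroutine to stop and close logCh
			return nil
		}
	}
}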
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/api.go b/provisioner/vendor/github.com/hashicorp/consul/api/api.go
new file mode 100644
index 0000000..9a59b72
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/api.go
@@ -0,0 +1,623 @@
+package api
+
+import (
+	"bytes"
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"log"
+	"net"
+	"net/http"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/hashicorp/go-cleanhttp"
+)
+
+const (
+	// HTTPAddrEnvName defines an environment variable name which sets
+	// the HTTP address if there is no -http-addr specified.
+	HTTPAddrEnvName = "CONSUL_HTTP_ADDR"
+
+	// HTTPTokenEnvName defines an environment variable name which sets
+	// the HTTP token.
+	HTTPTokenEnvName = "CONSUL_HTTP_TOKEN"
+
+	// HTTPAuthEnvName defines an environment variable name which sets
+	// the HTTP authentication header.
+	HTTPAuthEnvName = "CONSUL_HTTP_AUTH"
+
+	// HTTPSSLEnvName defines an environment variable name which sets
+	// whether or not to use HTTPS.
+	HTTPSSLEnvName = "CONSUL_HTTP_SSL"
+
+	// HTTPSSLVerifyEnvName defines an environment variable name which sets
+	// whether or not to disable certificate checking.
+	HTTPSSLVerifyEnvName = "CONSUL_HTTP_SSL_VERIFY"
+)
+
+// QueryOptions are used to parameterize a query
+type QueryOptions struct {
+	// Providing a datacenter overwrites the DC provided
+	// by the Config
+	Datacenter string
+
+	// AllowStale allows any Consul server (non-leader) to service
+	// a read. This allows for lower latency and higher throughput
+	AllowStale bool
+
+	// RequireConsistent forces the read to be fully consistent.
+	// This is more expensive but prevents ever performing a stale
+	// read.
+	RequireConsistent bool
+
+	// WaitIndex is used to enable a blocking query. Waits
+	// until the timeout or the next index is reached
+	WaitIndex uint64
+
+	// WaitTime is used to bound the duration of a wait.
+	// Defaults to that of the Config, but can be overridden.
+	WaitTime time.Duration
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+
+	// Near is used to provide a node name that will sort the results
+	// in ascending order based on the estimated round trip time from
+	// that node. Setting this to "_agent" will use the agent's node
+	// for the sort.
+	Near string
+
+	// NodeMeta is used to filter results by nodes with the given
+	// metadata key/value pairs. Currently, only one key/value pair can
+	// be provided for filtering.
+	NodeMeta map[string]string
+}
+
+// WriteOptions are used to parameterize a write
+type WriteOptions struct {
+	// Providing a datacenter overwrites the DC provided
+	// by the Config
+	Datacenter string
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+}
+
+// QueryMeta is used to return meta data about a query
+type QueryMeta struct {
+	// LastIndex. This can be used as a WaitIndex to perform
+	// a blocking query
+	LastIndex uint64
+
+	// Time of last contact from the leader for the
+	// server servicing the request
+	LastContact time.Duration
+
+	// Is there a known leader
+	KnownLeader bool
+
+	// How long did the request take
+	RequestTime time.Duration
+
+	// Is address translation enabled for HTTP responses on this agent
+	AddressTranslationEnabled bool
+}
+
+// WriteMeta is used to return meta data about a write
+type WriteMeta struct {
+	// How long did the request take
+	RequestTime time.Duration
+}
+
+// HttpBasicAuth is used to authenticate http client with HTTP Basic Authentication
+type HttpBasicAuth struct {
+	// Username to use for HTTP Basic Authentication
+	Username string
+
+	// Password to use for HTTP Basic Authentication
+	Password string
+}
+
+// Config is used to configure the creation of a client
+type Config struct {
+	// Address is the address of the Consul server
+	Address string
+
+	// Scheme is the URI scheme for the Consul server
+	Scheme string
+
+	// Datacenter to use. If not provided, the default agent datacenter is used.
+	Datacenter string
+
+	// HttpClient is the client to use. Default will be
+	// used if not provided.
+	HttpClient *http.Client
+
+	// HttpAuth is the auth info to use for http access.
+	HttpAuth *HttpBasicAuth
+
+	// WaitTime limits how long a Watch will block. If not provided,
+	// the agent default values will be used.
+	WaitTime time.Duration
+
+	// Token is used to provide a per-request ACL token
+	// which overrides the agent's default token.
+	Token string
+}
+
+// TLSConfig is used to generate a TLSClientConfig that's useful for talking to
+// Consul using TLS.
+type TLSConfig struct {
+	// Address is the optional address of the Consul server. The port, if any,
+	// will be removed from here and this will be set to the ServerName of the
+	// resulting config.
+	Address string
+
+	// CAFile is the optional path to the CA certificate used for Consul
+	// communication, defaults to the system bundle if not specified.
+	CAFile string
+
+	// CertFile is the optional path to the certificate for Consul
+	// communication. If this is set then you need to also set KeyFile.
+	CertFile string
+
+	// KeyFile is the optional path to the private key for Consul communication.
+	// If this is set then you need to also set CertFile.
+	KeyFile string
+
+	// InsecureSkipVerify if set to true will disable TLS host verification.
+	InsecureSkipVerify bool
+}
+
+// DefaultConfig returns a default configuration for the client. By default this
+// will pool and reuse idle connections to Consul. If you have a long-lived
+// client object, this is the desired behavior and should make the most efficient
+// use of the connections to Consul. If you don't reuse a client object, which
+// is not recommended, then you may notice idle connections building up over
+// time. To avoid this, use the DefaultNonPooledConfig() instead.
+func DefaultConfig() *Config {
+	return defaultConfig(cleanhttp.DefaultPooledTransport)
+}
+
+// DefaultNonPooledConfig returns a default configuration for the client which
+// does not pool connections. This isn't a recommended configuration because it
+// will reconnect to Consul on every request, but this is useful to avoid the
+// accumulation of idle connections if you make many client objects during the
+// lifetime of your application.
+func DefaultNonPooledConfig() *Config {
+	return defaultConfig(cleanhttp.DefaultTransport)
+}
+
+// defaultConfig returns the default configuration for the client, using the
+// given function to make the transport.
+func defaultConfig(transportFn func() *http.Transport) *Config {
+	config := &Config{
+		Address: "127.0.0.1:8500",
+		Scheme:  "http",
+		HttpClient: &http.Client{
+			Transport: transportFn(),
+		},
+	}
+
+	if addr := os.Getenv(HTTPAddrEnvName); addr != "" {
+		config.Address = addr
+	}
+
+	if token := os.Getenv(HTTPTokenEnvName); token != "" {
+		config.Token = token
+	}
+
+	if auth := os.Getenv(HTTPAuthEnvName); auth != "" {
+		var username, password string
+		if strings.Contains(auth, ":") {
+			split := strings.SplitN(auth, ":", 2)
+			username = split[0]
+			password = split[1]
+		} else {
+			username = auth
+		}
+
+		config.HttpAuth = &HttpBasicAuth{
+			Username: username,
+			Password: password,
+		}
+	}
+
+	if ssl := os.Getenv(HTTPSSLEnvName); ssl != "" {
+		enabled, err := strconv.ParseBool(ssl)
+		if err != nil {
+			log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLEnvName, err)
+		}
+
+		if enabled {
+			config.Scheme = "https"
+		}
+	}
+
+	if verify := os.Getenv(HTTPSSLVerifyEnvName); verify != "" {
+		doVerify, err := strconv.ParseBool(verify)
+		if err != nil {
+			log.Printf("[WARN] client: could not parse %s: %s", HTTPSSLVerifyEnvName, err)
+		}
+
+		if !doVerify {
+			tlsClientConfig, err := SetupTLSConfig(&TLSConfig{
+				InsecureSkipVerify: true,
+			})
+
+			// We don't expect this to fail given that we aren't
+			// parsing any of the input, but we panic just in case
+			// since this doesn't have an error return.
+			if err != nil {
+				panic(err)
+			}
+
+			transport := transportFn()
+			transport.TLSClientConfig = tlsClientConfig
+			config.HttpClient.Transport = transport
+		}
+	}
+
+	return config
+}
+
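Because of the environment handling above, a client can be repointed without code changes. A sketch (address hypothetical; the variables would normally come from the container environment):

package main

import (
	"fmt"
	"os"

	consul "github.com/hashicorp/consul/api"
)

func main() {
	os.Setenv("CONSUL_HTTP_ADDR", "consul.example.com:8500")
	os.Setenv("CONSUL_HTTP_SSL", "true")

	cfg := consul.DefaultConfig()
	fmt.Println(cfg.Address, cfg.Scheme) // consul.example.com:8500 https
}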
+// SetupTLSConfig is used to generate a TLSClientConfig that's useful for
+// talking to Consul using TLS.
+func SetupTLSConfig(tlsConfig *TLSConfig) (*tls.Config, error) {
+	tlsClientConfig := &tls.Config{
+		InsecureSkipVerify: tlsConfig.InsecureSkipVerify,
+	}
+
+	if tlsConfig.Address != "" {
+		server := tlsConfig.Address
+		hasPort := strings.LastIndex(server, ":") > strings.LastIndex(server, "]")
+		if hasPort {
+			var err error
+			server, _, err = net.SplitHostPort(server)
+			if err != nil {
+				return nil, err
+			}
+		}
+		tlsClientConfig.ServerName = server
+	}
+
+	if tlsConfig.CertFile != "" && tlsConfig.KeyFile != "" {
+		tlsCert, err := tls.LoadX509KeyPair(tlsConfig.CertFile, tlsConfig.KeyFile)
+		if err != nil {
+			return nil, err
+		}
+		tlsClientConfig.Certificates = []tls.Certificate{tlsCert}
+	}
+
+	if tlsConfig.CAFile != "" {
+		data, err := ioutil.ReadFile(tlsConfig.CAFile)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read CA file: %v", err)
+		}
+
+		caPool := x509.NewCertPool()
+		if !caPool.AppendCertsFromPEM(data) {
+			return nil, fmt.Errorf("failed to parse CA certificate")
+		}
+		tlsClientConfig.RootCAs = caPool
+	}
+
+	return tlsClientConfig, nil
+}
+
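For an agent that requires mutual TLS, SetupTLSConfig feeds a custom transport; the address and certificate paths below are hypothetical:

import (
	"net/http"

	consul "github.com/hashicorp/consul/api"
	cleanhttp "github.com/hashicorp/go-cleanhttp"
)

func tlsClient() (*consul.Client, error) {
	tlsCfg, err := consul.SetupTLSConfig(&consul.TLSConfig{
		Address:  "consul.example.com:8501",
		CAFile:   "/etc/consul/ca.pem",
		CertFile: "/etc/consul/client.pem",
		KeyFile:  "/etc/consul/client-key.pem",
	})
	if err != nil {
		return nil, err
	}
	transport := cleanhttp.DefaultPooledTransport()
	transport.TLSClientConfig = tlsCfg

	cfg := consul.DefaultConfig()
	cfg.Address = "consul.example.com:8501"
	cfg.Scheme = "https"
	cfg.HttpClient = &http.Client{Transport: transport}
	return consul.NewClient(cfg)
}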
+// Client provides a client to the Consul API
+type Client struct {
+	config Config
+}
+
+// NewClient returns a new client
+func NewClient(config *Config) (*Client, error) {
+	// bootstrap the config
+	defConfig := DefaultConfig()
+
+	if len(config.Address) == 0 {
+		config.Address = defConfig.Address
+	}
+
+	if len(config.Scheme) == 0 {
+		config.Scheme = defConfig.Scheme
+	}
+
+	if config.HttpClient == nil {
+		config.HttpClient = defConfig.HttpClient
+	}
+
+	if parts := strings.SplitN(config.Address, "unix://", 2); len(parts) == 2 {
+		trans := cleanhttp.DefaultTransport()
+		trans.Dial = func(_, _ string) (net.Conn, error) {
+			return net.Dial("unix", parts[1])
+		}
+		config.HttpClient = &http.Client{
+			Transport: trans,
+		}
+		config.Address = parts[1]
+	}
+
+	client := &Client{
+		config: *config,
+	}
+	return client, nil
+}
+
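The unix:// branch above means an agent can also be reached over a local socket; a sketch (socket path hypothetical):

import consul "github.com/hashicorp/consul/api"

func unixClient() (*consul.Client, error) {
	cfg := consul.DefaultConfig()
	// The "unix://" prefix triggers the custom dialer installed in
	// NewClient; everything after it is used as the socket path.
	cfg.Address = "unix:///var/run/consul/consul.sock"
	return consul.NewClient(cfg)
}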
+// request is used to help build up a request
+type request struct {
+	config *Config
+	method string
+	url    *url.URL
+	params url.Values
+	body   io.Reader
+	header http.Header
+	obj    interface{}
+}
+
+// setQueryOptions is used to annotate the request with
+// additional query options
+func (r *request) setQueryOptions(q *QueryOptions) {
+	if q == nil {
+		return
+	}
+	if q.Datacenter != "" {
+		r.params.Set("dc", q.Datacenter)
+	}
+	if q.AllowStale {
+		r.params.Set("stale", "")
+	}
+	if q.RequireConsistent {
+		r.params.Set("consistent", "")
+	}
+	if q.WaitIndex != 0 {
+		r.params.Set("index", strconv.FormatUint(q.WaitIndex, 10))
+	}
+	if q.WaitTime != 0 {
+		r.params.Set("wait", durToMsec(q.WaitTime))
+	}
+	if q.Token != "" {
+		r.header.Set("X-Consul-Token", q.Token)
+	}
+	if q.Near != "" {
+		r.params.Set("near", q.Near)
+	}
+	if len(q.NodeMeta) > 0 {
+		for key, value := range q.NodeMeta {
+			r.params.Add("node-meta", key+":"+value)
+		}
+	}
+}
+
+// durToMsec converts a duration to a millisecond specified string. If the
+// user selected a positive value that rounds to 0 ms, then we will use 1 ms
+// so they get a short delay, otherwise Consul will translate the 0 ms into
+// a huge default delay.
+func durToMsec(dur time.Duration) string {
+	ms := dur / time.Millisecond
+	if dur > 0 && ms == 0 {
+		ms = 1
+	}
+	return fmt.Sprintf("%dms", ms)
+}
+
+// serverError is a string we look for to detect 500 errors.
+const serverError = "Unexpected response code: 500"
+
+// IsServerError returns true for 500 errors from the Consul servers, these are
+// usually retryable at a later time.
+func IsServerError(err error) bool {
+	if err == nil {
+		return false
+	}
+
+	// TODO (slackpad) - Make a real error type here instead of using
+	// a string check.
+	return strings.Contains(err.Error(), serverError)
+}
+
+// setWriteOptions is used to annotate the request with
+// additional write options
+func (r *request) setWriteOptions(q *WriteOptions) {
+	if q == nil {
+		return
+	}
+	if q.Datacenter != "" {
+		r.params.Set("dc", q.Datacenter)
+	}
+	if q.Token != "" {
+		r.header.Set("X-Consul-Token", q.Token)
+	}
+}
+
+// toHTTP converts the request to an HTTP request
+func (r *request) toHTTP() (*http.Request, error) {
+	// Encode the query parameters
+	r.url.RawQuery = r.params.Encode()
+
+	// Check if we should encode the body
+	if r.body == nil && r.obj != nil {
+		if b, err := encodeBody(r.obj); err != nil {
+			return nil, err
+		} else {
+			r.body = b
+		}
+	}
+
+	// Create the HTTP request
+	req, err := http.NewRequest(r.method, r.url.RequestURI(), r.body)
+	if err != nil {
+		return nil, err
+	}
+
+	req.URL.Host = r.url.Host
+	req.URL.Scheme = r.url.Scheme
+	req.Host = r.url.Host
+	req.Header = r.header
+
+	// Setup auth
+	if r.config.HttpAuth != nil {
+		req.SetBasicAuth(r.config.HttpAuth.Username, r.config.HttpAuth.Password)
+	}
+
+	return req, nil
+}
+
+// newRequest is used to create a new request
+func (c *Client) newRequest(method, path string) *request {
+	r := &request{
+		config: &c.config,
+		method: method,
+		url: &url.URL{
+			Scheme: c.config.Scheme,
+			Host:   c.config.Address,
+			Path:   path,
+		},
+		params: make(map[string][]string),
+		header: make(http.Header),
+	}
+	if c.config.Datacenter != "" {
+		r.params.Set("dc", c.config.Datacenter)
+	}
+	if c.config.WaitTime != 0 {
+		r.params.Set("wait", durToMsec(r.config.WaitTime))
+	}
+	if c.config.Token != "" {
+		r.header.Set("X-Consul-Token", r.config.Token)
+	}
+	return r
+}
+
+// doRequest runs a request with our client
+func (c *Client) doRequest(r *request) (time.Duration, *http.Response, error) {
+	req, err := r.toHTTP()
+	if err != nil {
+		return 0, nil, err
+	}
+	start := time.Now()
+	resp, err := c.config.HttpClient.Do(req)
+	diff := time.Since(start)
+	return diff, resp, err
+}
+
+// query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (c *Client) query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+	r := c.newRequest("GET", endpoint)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if err := decodeBody(resp, out); err != nil {
+		return nil, err
+	}
+	return qm, nil
+}
+
+// write is used to do a PUT request against an endpoint
+// and serialize/deserialized using the standard Consul conventions.
+func (c *Client) write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+	r := c.newRequest("PUT", endpoint)
+	r.setWriteOptions(q)
+	r.obj = in
+	rtt, resp, err := requireOK(c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	if out != nil {
+		if err := decodeBody(resp, &out); err != nil {
+			return nil, err
+		}
+	}
+	return wm, nil
+}
+
+// parseQueryMeta is used to help parse query meta-data
+func parseQueryMeta(resp *http.Response, q *QueryMeta) error {
+	header := resp.Header
+
+	// Parse the X-Consul-Index
+	index, err := strconv.ParseUint(header.Get("X-Consul-Index"), 10, 64)
+	if err != nil {
+		return fmt.Errorf("Failed to parse X-Consul-Index: %v", err)
+	}
+	q.LastIndex = index
+
+	// Parse the X-Consul-LastContact
+	last, err := strconv.ParseUint(header.Get("X-Consul-LastContact"), 10, 64)
+	if err != nil {
+		return fmt.Errorf("Failed to parse X-Consul-LastContact: %v", err)
+	}
+	q.LastContact = time.Duration(last) * time.Millisecond
+
+	// Parse the X-Consul-KnownLeader
+	switch header.Get("X-Consul-KnownLeader") {
+	case "true":
+		q.KnownLeader = true
+	default:
+		q.KnownLeader = false
+	}
+
+	// Parse X-Consul-Translate-Addresses
+	switch header.Get("X-Consul-Translate-Addresses") {
+	case "true":
+		q.AddressTranslationEnabled = true
+	default:
+		q.AddressTranslationEnabled = false
+	}
+
+	return nil
+}
+
+// decodeBody is used to JSON decode a body
+func decodeBody(resp *http.Response, out interface{}) error {
+	dec := json.NewDecoder(resp.Body)
+	return dec.Decode(out)
+}
+
+// encodeBody is used to encode a request body
+func encodeBody(obj interface{}) (io.Reader, error) {
+	buf := bytes.NewBuffer(nil)
+	enc := json.NewEncoder(buf)
+	if err := enc.Encode(obj); err != nil {
+		return nil, err
+	}
+	return buf, nil
+}
+
+// requireOK is used to wrap doRequest and check for a 200
+func requireOK(d time.Duration, resp *http.Response, e error) (time.Duration, *http.Response, error) {
+	if e != nil {
+		if resp != nil {
+			resp.Body.Close()
+		}
+		return d, nil, e
+	}
+	if resp.StatusCode != 200 {
+		var buf bytes.Buffer
+		io.Copy(&buf, resp.Body)
+		resp.Body.Close()
+		return d, nil, fmt.Errorf("Unexpected response code: %d (%s)", resp.StatusCode, buf.Bytes())
+	}
+	return d, resp, nil
+}
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/catalog.go b/provisioner/vendor/github.com/hashicorp/consul/api/catalog.go
new file mode 100644
index 0000000..96226f1
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/catalog.go
@@ -0,0 +1,194 @@
+package api
+
+type Node struct {
+	ID              string
+	Node            string
+	Address         string
+	TaggedAddresses map[string]string
+	Meta            map[string]string
+}
+
+type CatalogService struct {
+	ID                       string
+	Node                     string
+	Address                  string
+	TaggedAddresses          map[string]string
+	NodeMeta                 map[string]string
+	ServiceID                string
+	ServiceName              string
+	ServiceAddress           string
+	ServiceTags              []string
+	ServicePort              int
+	ServiceEnableTagOverride bool
+	CreateIndex              uint64
+	ModifyIndex              uint64
+}
+
+type CatalogNode struct {
+	Node     *Node
+	Services map[string]*AgentService
+}
+
+type CatalogRegistration struct {
+	ID              string
+	Node            string
+	Address         string
+	TaggedAddresses map[string]string
+	NodeMeta        map[string]string
+	Datacenter      string
+	Service         *AgentService
+	Check           *AgentCheck
+}
+
+type CatalogDeregistration struct {
+	Node       string
+	Address    string // Obsolete.
+	Datacenter string
+	ServiceID  string
+	CheckID    string
+}
+
+// Catalog can be used to query the Catalog endpoints
+type Catalog struct {
+	c *Client
+}
+
+// Catalog returns a handle to the catalog endpoints
+func (c *Client) Catalog() *Catalog {
+	return &Catalog{c}
+}
+
+func (c *Catalog) Register(reg *CatalogRegistration, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/catalog/register")
+	r.setWriteOptions(q)
+	r.obj = reg
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+func (c *Catalog) Deregister(dereg *CatalogDeregistration, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("PUT", "/v1/catalog/deregister")
+	r.setWriteOptions(q)
+	r.obj = dereg
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	return wm, nil
+}
+
+// Datacenters is used to query for all the known datacenters
+func (c *Catalog) Datacenters() ([]string, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/datacenters")
+	_, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []string
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Nodes is used to query all the known nodes
+func (c *Catalog) Nodes(q *QueryOptions) ([]*Node, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/nodes")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*Node
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Services is used to query for all known services
+func (c *Catalog) Services(q *QueryOptions) (map[string][]string, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/services")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out map[string][]string
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query catalog entries for a given service
+func (c *Catalog) Service(service, tag string, q *QueryOptions) ([]*CatalogService, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/service/"+service)
+	r.setQueryOptions(q)
+	if tag != "" {
+		r.params.Set("tag", tag)
+	}
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CatalogService
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
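Resolving every advertised address for a service from the catalog is then a short helper; the service name is hypothetical, and the fallback to the node address covers services registered without an explicit ServiceAddress:

import (
	"fmt"

	consul "github.com/hashicorp/consul/api"
)

func serviceAddrs(client *consul.Client) ([]string, error) {
	svcs, _, err := client.Catalog().Service("provisioner", "", nil)
	if err != nil {
		return nil, err
	}
	var addrs []string
	for _, s := range svcs {
		addr := s.ServiceAddress
		if addr == "" {
			addr = s.Address // fall back to the node's address
		}
		addrs = append(addrs, fmt.Sprintf("%s:%d", addr, s.ServicePort))
	}
	return addrs, nil
}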
+// Node is used to query for service information about a single node
+func (c *Catalog) Node(node string, q *QueryOptions) (*CatalogNode, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/catalog/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out *CatalogNode
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/coordinate.go b/provisioner/vendor/github.com/hashicorp/consul/api/coordinate.go
new file mode 100644
index 0000000..fdff207
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/coordinate.go
@@ -0,0 +1,66 @@
+package api
+
+import (
+	"github.com/hashicorp/serf/coordinate"
+)
+
+// CoordinateEntry represents a node and its associated network coordinate.
+type CoordinateEntry struct {
+	Node  string
+	Coord *coordinate.Coordinate
+}
+
+// CoordinateDatacenterMap represents a datacenter and its associated WAN
+// nodes and their associated coordinates.
+type CoordinateDatacenterMap struct {
+	Datacenter  string
+	Coordinates []CoordinateEntry
+}
+
+// Coordinate can be used to query the coordinate endpoints
+type Coordinate struct {
+	c *Client
+}
+
+// Coordinate returns a handle to the coordinate endpoints
+func (c *Client) Coordinate() *Coordinate {
+	return &Coordinate{c}
+}
+
+// Datacenters is used to return the coordinates of all the servers in the WAN
+// pool.
+func (c *Coordinate) Datacenters() ([]*CoordinateDatacenterMap, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/datacenters")
+	_, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*CoordinateDatacenterMap
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Nodes is used to return the coordinates of all the nodes in the LAN pool.
+func (c *Coordinate) Nodes(q *QueryOptions) ([]*CoordinateEntry, *QueryMeta, error) {
+	r := c.c.newRequest("GET", "/v1/coordinate/nodes")
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*CoordinateEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/event.go b/provisioner/vendor/github.com/hashicorp/consul/api/event.go
new file mode 100644
index 0000000..85b5b06
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/event.go
@@ -0,0 +1,104 @@
+package api
+
+import (
+	"bytes"
+	"strconv"
+)
+
+// Event can be used to query the Event endpoints
+type Event struct {
+	c *Client
+}
+
+// UserEvent represents an event that was fired by the user
+type UserEvent struct {
+	ID            string
+	Name          string
+	Payload       []byte
+	NodeFilter    string
+	ServiceFilter string
+	TagFilter     string
+	Version       int
+	LTime         uint64
+}
+
+// Event returns a handle to the event endpoints
+func (c *Client) Event() *Event {
+	return &Event{c}
+}
+
+// Fire is used to fire a new user event. Only the Name, Payload and Filters
+// are respected. This returns the ID or an associated error. Cross DC requests
+// are supported.
+func (e *Event) Fire(params *UserEvent, q *WriteOptions) (string, *WriteMeta, error) {
+	r := e.c.newRequest("PUT", "/v1/event/fire/"+params.Name)
+	r.setWriteOptions(q)
+	if params.NodeFilter != "" {
+		r.params.Set("node", params.NodeFilter)
+	}
+	if params.ServiceFilter != "" {
+		r.params.Set("service", params.ServiceFilter)
+	}
+	if params.TagFilter != "" {
+		r.params.Set("tag", params.TagFilter)
+	}
+	if params.Payload != nil {
+		r.body = bytes.NewReader(params.Payload)
+	}
+
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+	var out UserEvent
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// List is used to get the most recent events an agent has received.
+// This list can be optionally filtered by the name. This endpoint supports
+// quasi-blocking queries. The index is not monotonic, nor does it provide
+// LastContact or KnownLeader.
+func (e *Event) List(name string, q *QueryOptions) ([]*UserEvent, *QueryMeta, error) {
+	r := e.c.newRequest("GET", "/v1/event/list")
+	r.setQueryOptions(q)
+	if name != "" {
+		r.params.Set("name", name)
+	}
+	rtt, resp, err := requireOK(e.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var entries []*UserEvent
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// IDToIndex is a bit of a hack. This simulates the index generation to
+// convert an event ID into a WaitIndex.
+func (e *Event) IDToIndex(uuid string) uint64 {
+	lower := uuid[0:8] + uuid[9:13] + uuid[14:18]
+	upper := uuid[19:23] + uuid[24:36]
+	lowVal, err := strconv.ParseUint(lower, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + lower)
+	}
+	highVal, err := strconv.ParseUint(upper, 16, 64)
+	if err != nil {
+		panic("Failed to convert " + upper)
+	}
+	return lowVal ^ highVal
+}
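Fire and IDToIndex combine into a fire-then-watch pattern: fire an event, then block on List for anything newer than it. A sketch (event name and payload hypothetical):

import (
	"log"
	"time"

	consul "github.com/hashicorp/consul/api"
)

func fireAndWatch(client *consul.Client) error {
	ev := client.Event()
	id, _, err := ev.Fire(&consul.UserEvent{
		Name:    "redeploy",
		Payload: []byte("node-42"),
	}, nil)
	if err != nil {
		return err
	}
	// Block (up to 30s) until an event newer than ours arrives.
	events, _, err := ev.List("redeploy", &consul.QueryOptions{
		WaitIndex: ev.IDToIndex(id),
		WaitTime:  30 * time.Second,
	})
	if err != nil {
		return err
	}
	log.Printf("saw %d event(s)", len(events))
	return nil
}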
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/health.go b/provisioner/vendor/github.com/hashicorp/consul/api/health.go
new file mode 100644
index 0000000..8abe239
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/health.go
@@ -0,0 +1,199 @@
+package api
+
+import (
+	"fmt"
+	"strings"
+)
+
+const (
+	// HealthAny is special, and is used as a wild card,
+	// not as a specific state.
+	HealthAny      = "any"
+	HealthPassing  = "passing"
+	HealthWarning  = "warning"
+	HealthCritical = "critical"
+	HealthMaint    = "maintenance"
+)
+
+const (
+	// NodeMaint is the special key set by a node in maintenance mode.
+	NodeMaint = "_node_maintenance"
+
+	// ServiceMaintPrefix is the prefix for a service in maintenance mode.
+	ServiceMaintPrefix = "_service_maintenance:"
+)
+
+// HealthCheck is used to represent a single check
+type HealthCheck struct {
+	Node        string
+	CheckID     string
+	Name        string
+	Status      string
+	Notes       string
+	Output      string
+	ServiceID   string
+	ServiceName string
+}
+
+// HealthChecks is a collection of HealthCheck structs.
+type HealthChecks []*HealthCheck
+
+// AggregatedStatus returns the "best" status for the list of health checks.
+// Because a given entry may have many service and node-level health checks
+// attached, this function determines the best representative of the status as
+// a single string using the following heuristic:
+//
+//  maintenance > critical > warning > passing
+//
+func (c HealthChecks) AggregatedStatus() string {
+	var passing, warning, critical, maintenance bool
+	for _, check := range c {
+		id := string(check.CheckID)
+		if id == NodeMaint || strings.HasPrefix(id, ServiceMaintPrefix) {
+			maintenance = true
+			continue
+		}
+
+		switch check.Status {
+		case HealthPassing:
+			passing = true
+		case HealthWarning:
+			warning = true
+		case HealthCritical:
+			critical = true
+		default:
+			return ""
+		}
+	}
+
+	switch {
+	case maintenance:
+		return HealthMaint
+	case critical:
+		return HealthCritical
+	case warning:
+		return HealthWarning
+	case passing:
+		return HealthPassing
+	default:
+		return HealthPassing
+	}
+}
+
+// ServiceEntry is used for the health service endpoint
+type ServiceEntry struct {
+	Node    *Node
+	Service *AgentService
+	Checks  HealthChecks
+}
+
+// Health can be used to query the Health endpoints
+type Health struct {
+	c *Client
+}
+
+// Health returns a handle to the health endpoints
+func (c *Client) Health() *Health {
+	return &Health{c}
+}
+
+// Node is used to query for checks belonging to a given node
+func (h *Health) Node(node string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/node/"+node)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Checks is used to return the checks associated with a service
+func (h *Health) Checks(service string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/checks/"+service)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Service is used to query health information along with service info
+// for a given service. It can optionally do server-side filtering on a tag
+// or nodes with passing health checks only.
+func (h *Health) Service(service, tag string, passingOnly bool, q *QueryOptions) ([]*ServiceEntry, *QueryMeta, error) {
+	r := h.c.newRequest("GET", "/v1/health/service/"+service)
+	r.setQueryOptions(q)
+	if tag != "" {
+		r.params.Set("tag", tag)
+	}
+	if passingOnly {
+		r.params.Set(HealthPassing, "1")
+	}
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out []*ServiceEntry
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
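For client-side discovery, Service with passingOnly=true is the usual entry point. A sketch that picks the first healthy instance (service name hypothetical; it assumes the AgentService Address/Port fields defined earlier in agent.go):

import (
	"fmt"

	consul "github.com/hashicorp/consul/api"
)

func healthyEndpoint(client *consul.Client) (string, error) {
	entries, _, err := client.Health().Service("provisioner", "", true, nil)
	if err != nil {
		return "", err
	}
	if len(entries) == 0 {
		return "", fmt.Errorf("no passing instances of provisioner")
	}
	e := entries[0]
	addr := e.Service.Address
	if addr == "" {
		addr = e.Node.Address // service registered without its own address
	}
	return fmt.Sprintf("%s:%d", addr, e.Service.Port), nil
}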
+// State is used to retrieve all the checks in a given state.
+// The wildcard "any" state can also be used for all checks.
+func (h *Health) State(state string, q *QueryOptions) (HealthChecks, *QueryMeta, error) {
+	switch state {
+	case HealthAny:
+	case HealthWarning:
+	case HealthCritical:
+	case HealthPassing:
+	default:
+		return nil, nil, fmt.Errorf("Unsupported state: %v", state)
+	}
+	r := h.c.newRequest("GET", "/v1/health/state/"+state)
+	r.setQueryOptions(q)
+	rtt, resp, err := requireOK(h.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	var out HealthChecks
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/kv.go b/provisioner/vendor/github.com/hashicorp/consul/api/kv.go
new file mode 100644
index 0000000..44e06bb
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/kv.go
@@ -0,0 +1,419 @@
+package api
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"net/http"
+	"strconv"
+	"strings"
+)
+
+// KVPair is used to represent a single K/V entry
+type KVPair struct {
+	// Key is the name of the key. It is also part of the URL path when accessed
+	// via the API.
+	Key string
+
+	// CreateIndex holds the index corresponding to the creation of this KVPair. This
+	// is a read-only field.
+	CreateIndex uint64
+
+	// ModifyIndex is used for the Check-And-Set operations and can also be fed
+	// back into the WaitIndex of the QueryOptions in order to perform blocking
+	// queries.
+	ModifyIndex uint64
+
+	// LockIndex holds the index corresponding to a lock on this key, if any. This
+	// is a read-only field.
+	LockIndex uint64
+
+	// Flags are any user-defined flags on the key. It is up to the implementer
+	// to check these values, since Consul does not treat them specially.
+	Flags uint64
+
+	// Value is the value for the key. This can be any value, but it will be
+	// base64 encoded upon transport.
+	Value []byte
+
+	// Session is a string representing the ID of the session. Any other
+	// interactions with this key over the same session must specify the same
+	// session ID.
+	Session string
+}
+
+// KVPairs is a list of KVPair objects
+type KVPairs []*KVPair
+
+// KVOp constants give possible operations available in a KVTxn.
+type KVOp string
+
+const (
+	KVSet          KVOp = "set"
+	KVDelete       KVOp = "delete"
+	KVDeleteCAS    KVOp = "delete-cas"
+	KVDeleteTree   KVOp = "delete-tree"
+	KVCAS          KVOp = "cas"
+	KVLock         KVOp = "lock"
+	KVUnlock       KVOp = "unlock"
+	KVGet          KVOp = "get"
+	KVGetTree      KVOp = "get-tree"
+	KVCheckSession KVOp = "check-session"
+	KVCheckIndex   KVOp = "check-index"
+)
+
+// KVTxnOp defines a single operation inside a transaction.
+type KVTxnOp struct {
+	Verb    KVOp
+	Key     string
+	Value   []byte
+	Flags   uint64
+	Index   uint64
+	Session string
+}
+
+// KVTxnOps defines a set of operations to be performed inside a single
+// transaction.
+type KVTxnOps []*KVTxnOp
+
+// KVTxnResponse has the outcome of a transaction.
+type KVTxnResponse struct {
+	Results []*KVPair
+	Errors  TxnErrors
+}
+
+// KV is used to manipulate the K/V API
+type KV struct {
+	c *Client
+}
+
+// KV returns a handle to the K/V APIs
+func (c *Client) KV() *KV {
+	return &KV{c}
+}
+
+// Get is used to look up a single key. The returned pointer
+// to the KVPair will be nil if the key does not exist.
+func (k *KV) Get(key string, q *QueryOptions) (*KVPair, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(key, nil, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// List is used to look up all keys under a prefix
+func (k *KV) List(prefix string, q *QueryOptions) (KVPairs, *QueryMeta, error) {
+	resp, qm, err := k.getInternal(prefix, map[string]string{"recurse": ""}, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []*KVPair
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
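List plus the QueryMeta.LastIndex it returns is the standard long-poll watch: feed the index back as WaitIndex and the next call blocks until something under the prefix changes. A sketch (prefix hypothetical):

import (
	"log"
	"time"

	consul "github.com/hashicorp/consul/api"
)

func watchPrefix(kv *consul.KV, stop <-chan struct{}) {
	var index uint64
	for {
		select {
		case <-stop:
			return
		default:
		}
		pairs, meta, err := kv.List("config/provisioner/", &consul.QueryOptions{
			WaitIndex: index,
			WaitTime:  30 * time.Second,
		})
		if err != nil {
			time.Sleep(time.Second) // back off on errors
			continue
		}
		if meta.LastIndex == index {
			continue // wait timed out with no change
		}
		index = meta.LastIndex
		log.Printf("prefix changed: %d key(s)", len(pairs))
	}
}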
+// Keys is used to list all the keys under a prefix. Optionally,
+// a separator can be used to limit the responses.
+func (k *KV) Keys(prefix, separator string, q *QueryOptions) ([]string, *QueryMeta, error) {
+	params := map[string]string{"keys": ""}
+	if separator != "" {
+		params["separator"] = separator
+	}
+	resp, qm, err := k.getInternal(prefix, params, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if resp == nil {
+		return nil, qm, nil
+	}
+	defer resp.Body.Close()
+
+	var entries []string
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+func (k *KV) getInternal(key string, params map[string]string, q *QueryOptions) (*http.Response, *QueryMeta, error) {
+	r := k.c.newRequest("GET", "/v1/kv/"+strings.TrimPrefix(key, "/"))
+	r.setQueryOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	rtt, resp, err := k.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == 404 {
+		resp.Body.Close()
+		return nil, qm, nil
+	} else if resp.StatusCode != 200 {
+		resp.Body.Close()
+		return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+	}
+	return resp, qm, nil
+}
+
+// Put is used to write a new value. Only the
+// Key, Flags and Value are respected.
+func (k *KV) Put(p *KVPair, q *WriteOptions) (*WriteMeta, error) {
+	params := make(map[string]string, 1)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	_, wm, err := k.put(p.Key, params, p.Value, q)
+	return wm, err
+}
+
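Round-tripping a value shows that the base64 transport encoding is invisible to callers; key and value are hypothetical:

import (
	"fmt"

	consul "github.com/hashicorp/consul/api"
)

func putGet(kv *consul.KV) error {
	p := &consul.KVPair{Key: "config/provisioner/interface", Value: []byte("eth0")}
	if _, err := kv.Put(p, nil); err != nil {
		return err
	}
	pair, _, err := kv.Get("config/provisioner/interface", nil)
	if err != nil {
		return err
	}
	if pair == nil {
		return fmt.Errorf("key not found")
	}
	fmt.Printf("%s = %s\n", pair.Key, pair.Value) // Value arrives decoded
	return nil
}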
+// CAS is used for a Check-And-Set operation. The Key,
+// ModifyIndex, Flags and Value are respected. Returns true
+// on success or false on failures.
+func (k *KV) CAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["cas"] = strconv.FormatUint(p.ModifyIndex, 10)
+	return k.put(p.Key, params, p.Value, q)
+}
+
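CAS reports a stale ModifyIndex as a false return rather than an error, so callers loop: re-read, re-apply, retry. A sketch of an atomic counter built that way (key hypothetical):

import (
	"strconv"

	consul "github.com/hashicorp/consul/api"
)

func increment(kv *consul.KV, key string) error {
	for {
		pair, _, err := kv.Get(key, nil)
		if err != nil {
			return err
		}
		n := 0
		var index uint64 // 0 means "create only if the key is absent"
		if pair != nil {
			n, _ = strconv.Atoi(string(pair.Value))
			index = pair.ModifyIndex
		}
		ok, _, err := kv.CAS(&consul.KVPair{
			Key:         key,
			Value:       []byte(strconv.Itoa(n + 1)),
			ModifyIndex: index,
		}, nil)
		if err != nil {
			return err
		}
		if ok {
			return nil
		}
		// Lost the race; re-read and try again.
	}
}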
+// Acquire is used for a lock acquisition operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
+func (k *KV) Acquire(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["acquire"] = p.Session
+	return k.put(p.Key, params, p.Value, q)
+}
+
+// Release is used for a lock release operation. The Key,
+// Flags, Value and Session are respected. Returns true
+// on success or false on failures.
+func (k *KV) Release(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := make(map[string]string, 2)
+	if p.Flags != 0 {
+		params["flags"] = strconv.FormatUint(p.Flags, 10)
+	}
+	params["release"] = p.Session
+	return k.put(p.Key, params, p.Value, q)
+}
+
+func (k *KV) put(key string, params map[string]string, body []byte, q *WriteOptions) (bool, *WriteMeta, error) {
+	if len(key) > 0 && key[0] == '/' {
+		return false, nil, fmt.Errorf("Invalid key. Key must not begin with a '/': %s", key)
+	}
+
+	r := k.c.newRequest("PUT", "/v1/kv/"+key)
+	r.setWriteOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	r.body = bytes.NewReader(body)
+	rtt, resp, err := requireOK(k.c.doRequest(r))
+	if err != nil {
+		return false, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &WriteMeta{}
+	qm.RequestTime = rtt
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+	return res, qm, nil
+}
+
+// Delete is used to delete a single key
+func (k *KV) Delete(key string, w *WriteOptions) (*WriteMeta, error) {
+	_, qm, err := k.deleteInternal(key, nil, w)
+	return qm, err
+}
+
+// DeleteCAS is used for a Delete Check-And-Set operation. The Key
+// and ModifyIndex are respected. Returns true on success or false on failures.
+func (k *KV) DeleteCAS(p *KVPair, q *WriteOptions) (bool, *WriteMeta, error) {
+	params := map[string]string{
+		"cas": strconv.FormatUint(p.ModifyIndex, 10),
+	}
+	return k.deleteInternal(p.Key, params, q)
+}
+
+// DeleteTree is used to delete all keys under a prefix
+func (k *KV) DeleteTree(prefix string, w *WriteOptions) (*WriteMeta, error) {
+	_, qm, err := k.deleteInternal(prefix, map[string]string{"recurse": ""}, w)
+	return qm, err
+}
+
+func (k *KV) deleteInternal(key string, params map[string]string, q *WriteOptions) (bool, *WriteMeta, error) {
+	r := k.c.newRequest("DELETE", "/v1/kv/"+strings.TrimPrefix(key, "/"))
+	r.setWriteOptions(q)
+	for param, val := range params {
+		r.params.Set(param, val)
+	}
+	rtt, resp, err := requireOK(k.c.doRequest(r))
+	if err != nil {
+		return false, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &WriteMeta{}
+	qm.RequestTime = rtt
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	res := strings.Contains(buf.String(), "true")
+	return res, qm, nil
+}
+
+// TxnOp is the internal format we send to Consul. It's not specific to KV,
+// though currently only KV operations are supported.
+type TxnOp struct {
+	KV *KVTxnOp
+}
+
+// TxnOps is a list of transaction operations.
+type TxnOps []*TxnOp
+
+// TxnResult is the internal format we receive from Consul.
+type TxnResult struct {
+	KV *KVPair
+}
+
+// TxnResults is a list of TxnResult objects.
+type TxnResults []*TxnResult
+
+// TxnError is used to return information about an operation in a transaction.
+type TxnError struct {
+	OpIndex int
+	What    string
+}
+
+// TxnErrors is a list of TxnError objects.
+type TxnErrors []*TxnError
+
+// TxnResponse is the internal format we receive from Consul.
+type TxnResponse struct {
+	Results TxnResults
+	Errors  TxnErrors
+}
+
+// Txn is used to apply multiple KV operations in a single, atomic transaction.
+//
+// Note that Go will perform the required base64 encoding on the values
+// automatically because the type is a byte slice. Transactions are defined as a
+// list of operations to perform, using the KVOp constants and KVTxnOp structure
+// to define operations. If any operation fails, none of the changes are applied
+// to the state store. Note that this hides the internal raw transaction interface
+// and munges the input and output types into KV-specific ones for ease of use.
+// If there are more non-KV operations in the future we may break out a new
+// transaction API client, but it will be easy to keep this KV-specific variant
+// supported.
+//
+// Even though this is generally a write operation, we take a QueryOptions input
+// and return a QueryMeta output. If the transaction contains only read ops, then
+// Consul will fast-path it to a different endpoint internally which supports
+// consistency controls, but not blocking. If there are write operations then
+// the request will always be routed through raft and any consistency settings
+// will be ignored.
+//
+// Here's an example:
+//
+// ops := KVTxnOps{
+//     &KVTxnOp{
+//         Verb:    KVLock,
+//         Key:     "test/lock",
+//         Session: "adf4238a-882b-9ddc-4a9d-5b6758e4159e",
+//         Value:   []byte("hello"),
+//     },
+//     &KVTxnOp{
+//         Verb:    KVGet,
+//         Key:     "another/key",
+//     },
+// }
+// ok, response, _, err := kv.Txn(&ops, nil)
+//
+// If there is a problem making the transaction request then an error will be
+// returned. Otherwise, the ok value will be true if the transaction succeeded
+// or false if it was rolled back. The response is a structured return value which
+// will have the outcome of the transaction. Its Results member will have entries
+// for each operation. Deleted keys will have a nil entry in the results, and to save
+// space, the Value of each key in the Results will be nil unless the operation
+// is a KVGet. If the transaction was rolled back, the Errors member will have
+// entries referencing the index of the operation that failed along with an error
+// message.
+func (k *KV) Txn(txn KVTxnOps, q *QueryOptions) (bool, *KVTxnResponse, *QueryMeta, error) {
+	r := k.c.newRequest("PUT", "/v1/txn")
+	r.setQueryOptions(q)
+
+	// Convert into the internal format since this is an all-KV txn.
+	ops := make(TxnOps, 0, len(txn))
+	for _, kvOp := range txn {
+		ops = append(ops, &TxnOp{KV: kvOp})
+	}
+	r.obj = ops
+	rtt, resp, err := k.c.doRequest(r)
+	if err != nil {
+		return false, nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+
+	if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusConflict {
+		var txnResp TxnResponse
+		if err := decodeBody(resp, &txnResp); err != nil {
+			return false, nil, nil, err
+		}
+
+		// Convert from the internal format.
+		kvResp := KVTxnResponse{
+			Errors: txnResp.Errors,
+		}
+		for _, result := range txnResp.Results {
+			kvResp.Results = append(kvResp.Results, result.KV)
+		}
+		return resp.StatusCode == http.StatusOK, &kvResp, qm, nil
+	}
+
+	var buf bytes.Buffer
+	if _, err := io.Copy(&buf, resp.Body); err != nil {
+		return false, nil, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	return false, nil, nil, fmt.Errorf("Failed request: %s", buf.String())
+}
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/lock.go b/provisioner/vendor/github.com/hashicorp/consul/api/lock.go
new file mode 100644
index 0000000..9f9845a
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/lock.go
@@ -0,0 +1,384 @@
+package api
+
+import (
+	"fmt"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultLockSessionName is the Session Name we assign if none is provided
+	DefaultLockSessionName = "Consul API Lock"
+
+	// DefaultLockSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Lock. This is used because we do not have any
+	// other check to depend upon.
+	DefaultLockSessionTTL = "15s"
+
+	// DefaultLockWaitTime is how long we block for at a time to check if lock
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Lock acquisition.
+	DefaultLockWaitTime = 15 * time.Second
+
+	// DefaultLockRetryTime is how long we wait after a failed lock acquisition
+	// before attempting to do the lock again. This is so that once a lock-delay
+	// is in effect, we do not hot loop retrying the acquisition.
+	DefaultLockRetryTime = 5 * time.Second
+
+	// DefaultMonitorRetryTime is how long we wait after a failed monitor check
+	// of a lock (500 response code). This allows the monitor to ride out brief
+	// periods of unavailability, subject to the MonitorRetries setting in the
+	// lock options which is by default set to 0, disabling this feature. This
+	// affects locks and semaphores.
+	DefaultMonitorRetryTime = 2 * time.Second
+
+	// LockFlagValue is a magic flag we set to indicate a key
+	// is being used for a lock. It is used to detect a potential
+	// conflict with a semaphore.
+	LockFlagValue = 0x2ddccbc058a50c18
+)
+
+var (
+	// ErrLockHeld is returned if we attempt to double lock
+	ErrLockHeld = fmt.Errorf("Lock already held")
+
+	// ErrLockNotHeld is returned if we attempt to unlock a lock
+	// that we do not hold.
+	ErrLockNotHeld = fmt.Errorf("Lock not held")
+
+	// ErrLockInUse is returned if we attempt to destroy a lock
+	// that is in use.
+	ErrLockInUse = fmt.Errorf("Lock in use")
+
+	// ErrLockConflict is returned if the flags on a key
+	// used for a lock do not match expectation
+	ErrLockConflict = fmt.Errorf("Existing key does not match lock use")
+)
+
+// Lock is used to implement client-side leader election. It follows the
+// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html.
+type Lock struct {
+	c    *Client
+	opts *LockOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// LockOptions is used to parameterize the Lock behavior.
+type LockOptions struct {
+	Key              string        // Must be set and have write permissions
+	Value            []byte        // Optional, value to associate with the lock
+	Session          string        // Optional, created if not specified
+	SessionOpts      *SessionEntry // Optional, options to use when creating a session
+	SessionName      string        // Optional, defaults to DefaultLockSessionName (ignored if SessionOpts is given)
+	SessionTTL       string        // Optional, defaults to DefaultLockSessionTTL (ignored if SessionOpts is given)
+	MonitorRetries   int           // Optional, defaults to 0 which means no retries
+	MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
+	LockWaitTime     time.Duration // Optional, defaults to DefaultLockWaitTime
+	LockTryOnce      bool          // Optional, defaults to false which means try forever
+}
+
+// LockKey returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockKey(key string) (*Lock, error) {
+	opts := &LockOptions{
+		Key: key,
+	}
+	return c.LockOpts(opts)
+}
+
+// LockOpts returns a handle to a lock struct which can be used
+// to acquire and release the mutex. The key used must have
+// write permissions.
+func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
+	if opts.Key == "" {
+		return nil, fmt.Errorf("missing key")
+	}
+	if opts.SessionName == "" {
+		opts.SessionName = DefaultLockSessionName
+	}
+	if opts.SessionTTL == "" {
+		opts.SessionTTL = DefaultLockSessionTTL
+	} else {
+		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+		}
+	}
+	if opts.MonitorRetryTime == 0 {
+		opts.MonitorRetryTime = DefaultMonitorRetryTime
+	}
+	if opts.LockWaitTime == 0 {
+		opts.LockWaitTime = DefaultLockWaitTime
+	}
+	l := &Lock{
+		c:    c,
+		opts: opts,
+	}
+	return l, nil
+}
+
+// Lock attempts to acquire the lock and blocks while doing so.
+// Providing a non-nil stopCh can be used to abort the lock attempt.
+// Returns a channel that is closed if our lock is lost or an error.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the lock is held until Unlock() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the lock being lost.
+func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
+	// Hold the lock as we try to acquire
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Check if we already hold the lock
+	if l.isHeld {
+		return nil, ErrLockHeld
+	}
+
+	// Check if we need to create a session first
+	l.lockSession = l.opts.Session
+	if l.lockSession == "" {
+		if s, err := l.createSession(); err != nil {
+			return nil, fmt.Errorf("failed to create session: %v", err)
+		} else {
+			l.sessionRenew = make(chan struct{})
+			l.lockSession = s
+			session := l.c.Session()
+			go session.RenewPeriodic(l.opts.SessionTTL, s, nil, l.sessionRenew)
+
+			// If we fail to acquire the lock, cleanup the session
+			defer func() {
+				if !l.isHeld {
+					close(l.sessionRenew)
+					l.sessionRenew = nil
+				}
+			}()
+		}
+	}
+
+	// Setup the query options
+	kv := l.c.KV()
+	qOpts := &QueryOptions{
+		WaitTime: l.opts.LockWaitTime,
+	}
+
+	start := time.Now()
+	attempts := 0
+WAIT:
+	// Check if we should quit
+	select {
+	case <-stopCh:
+		return nil, nil
+	default:
+	}
+
+	// Handle the one-shot mode.
+	if l.opts.LockTryOnce && attempts > 0 {
+		elapsed := time.Since(start)
+		if elapsed > qOpts.WaitTime {
+			return nil, nil
+		}
+
+		qOpts.WaitTime -= elapsed
+	}
+	attempts++
+
+	// Look for an existing lock, blocking until not taken
+	pair, meta, err := kv.Get(l.opts.Key, qOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read lock: %v", err)
+	}
+	if pair != nil && pair.Flags != LockFlagValue {
+		return nil, ErrLockConflict
+	}
+	locked := false
+	if pair != nil && pair.Session == l.lockSession {
+		goto HELD
+	}
+	if pair != nil && pair.Session != "" {
+		qOpts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+
+	// Try to acquire the lock
+	pair = l.lockEntry(l.lockSession)
+	locked, _, err = kv.Acquire(pair, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to acquire lock: %v", err)
+	}
+
+	// Handle the case of not getting the lock
+	if !locked {
+		// Determine why the lock failed
+		qOpts.WaitIndex = 0
+		pair, meta, err = kv.Get(l.opts.Key, qOpts)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read lock: %v", err)
+		}
+		if pair != nil && pair.Session != "" {
+			// If the session is not empty, a wait can safely happen
+			// using a long poll
+			qOpts.WaitIndex = meta.LastIndex
+			goto WAIT
+		} else {
+			// If the session is empty and the lock failed to acquire, then it means
+			// a lock-delay is in effect and a timed wait must be used
+			select {
+			case <-time.After(DefaultLockRetryTime):
+				goto WAIT
+			case <-stopCh:
+				return nil, nil
+			}
+		}
+	}
+
+HELD:
+	// Watch to ensure we maintain leadership
+	leaderCh := make(chan struct{})
+	go l.monitorLock(l.lockSession, leaderCh)
+
+	// Set that we own the lock
+	l.isHeld = true
+
+	// Locked! All done
+	return leaderCh, nil
+}
+
+// Unlock releases the lock. It is an error to call this
+// if the lock is not currently held.
+func (l *Lock) Unlock() error {
+	// Hold the lock as we try to release
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Ensure the lock is actually held
+	if !l.isHeld {
+		return ErrLockNotHeld
+	}
+
+	// Set that we no longer own the lock
+	l.isHeld = false
+
+	// Stop the session renew
+	if l.sessionRenew != nil {
+		defer func() {
+			close(l.sessionRenew)
+			l.sessionRenew = nil
+		}()
+	}
+
+	// Get the lock entry, and clear the lock session
+	lockEnt := l.lockEntry(l.lockSession)
+	l.lockSession = ""
+
+	// Release the lock explicitly
+	kv := l.c.KV()
+	_, _, err := kv.Release(lockEnt, nil)
+	if err != nil {
+		return fmt.Errorf("failed to release lock: %v", err)
+	}
+	return nil
+}
+
+// Destroy is used to cleanup the lock entry. It is not necessary
+// to invoke. It will fail if the lock is in use.
+func (l *Lock) Destroy() error {
+	// Hold the lock as we try to destroy
+	l.l.Lock()
+	defer l.l.Unlock()
+
+	// Check if we already hold the lock
+	if l.isHeld {
+		return ErrLockHeld
+	}
+
+	// Look for an existing lock
+	kv := l.c.KV()
+	pair, _, err := kv.Get(l.opts.Key, nil)
+	if err != nil {
+		return fmt.Errorf("failed to read lock: %v", err)
+	}
+
+	// Nothing to do if the lock does not exist
+	if pair == nil {
+		return nil
+	}
+
+	// Check for possible flag conflict
+	if pair.Flags != LockFlagValue {
+		return ErrLockConflict
+	}
+
+	// Check if it is in use
+	if pair.Session != "" {
+		return ErrLockInUse
+	}
+
+	// Attempt the delete
+	didRemove, _, err := kv.DeleteCAS(pair, nil)
+	if err != nil {
+		return fmt.Errorf("failed to remove lock: %v", err)
+	}
+	if !didRemove {
+		return ErrLockInUse
+	}
+	return nil
+}
+
+// createSession is used to create a new managed session
+func (l *Lock) createSession() (string, error) {
+	session := l.c.Session()
+	se := l.opts.SessionOpts
+	if se == nil {
+		se = &SessionEntry{
+			Name: l.opts.SessionName,
+			TTL:  l.opts.SessionTTL,
+		}
+	}
+	id, _, err := session.Create(se, nil)
+	if err != nil {
+		return "", err
+	}
+	return id, nil
+}
+
+// lockEntry returns a formatted KVPair for the lock
+func (l *Lock) lockEntry(session string) *KVPair {
+	return &KVPair{
+		Key:     l.opts.Key,
+		Value:   l.opts.Value,
+		Session: session,
+		Flags:   LockFlagValue,
+	}
+}
+
+// monitorLock is a long-running routine that monitors lock ownership.
+// It closes the stopCh if we lose our leadership.
+func (l *Lock) monitorLock(session string, stopCh chan struct{}) {
+	defer close(stopCh)
+	kv := l.c.KV()
+	opts := &QueryOptions{RequireConsistent: true}
+WAIT:
+	retries := l.opts.MonitorRetries
+RETRY:
+	pair, meta, err := kv.Get(l.opts.Key, opts)
+	if err != nil {
+		// If configured we can try to ride out a brief Consul unavailability
+		// by doing retries. Note that we have to attempt the retry in a non-
+		// blocking fashion so that we have a clean place to reset the retry
+		// counter if service is restored.
+		if retries > 0 && IsServerError(err) {
+			time.Sleep(l.opts.MonitorRetryTime)
+			retries--
+			opts.WaitIndex = 0
+			goto RETRY
+		}
+		return
+	}
+	if pair != nil && pair.Session == session {
+		opts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+}
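
For review context, a brief usage sketch of the lock API vendored above may help. It is illustrative only and not part of this patch: it assumes the package's NewClient/DefaultConfig constructors from api.go in this same vendored tree, a reachable local Consul agent, and a hypothetical key name.

package main

import (
	"log"

	consul "github.com/hashicorp/consul/api"
)

func main() {
	client, err := consul.NewClient(consul.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// LockKey builds a Lock with default options for the given key.
	lock, err := client.LockKey("service/example/leader") // hypothetical key
	if err != nil {
		log.Fatal(err)
	}

	// Lock blocks until acquired; lostCh is closed if leadership is lost.
	lostCh, err := lock.Lock(nil)
	if err != nil {
		log.Fatal(err)
	}
	if lostCh == nil {
		log.Fatal("lock attempt aborted")
	}

	// ... perform leader-only work here, watching lostCh ...
	<-lostCh

	// Per the comments on Lock, the session may be invalidated at any
	// time, so treat ErrLockNotHeld as benign on cleanup.
	if err := lock.Unlock(); err != nil && err != consul.ErrLockNotHeld {
		log.Println(err)
	}
}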
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/operator.go b/provisioner/vendor/github.com/hashicorp/consul/api/operator.go
new file mode 100644
index 0000000..a8d04a3
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/operator.go
@@ -0,0 +1,163 @@
+package api
+
+// Operator can be used to perform low-level operator tasks for Consul.
+type Operator struct {
+	c *Client
+}
+
+// Operator returns a handle to the operator endpoints.
+func (c *Client) Operator() *Operator {
+	return &Operator{c}
+}
+
+// RaftServer has information about a server in the Raft configuration.
+type RaftServer struct {
+	// ID is the unique ID for the server. These are currently the same
+	// as the address, but they will be changed to a real GUID in a future
+	// release of Consul.
+	ID string
+
+	// Node is the node name of the server, as known by Consul, or this
+	// will be set to "(unknown)" otherwise.
+	Node string
+
+	// Address is the IP:port of the server, used for Raft communications.
+	Address string
+
+	// Leader is true if this server is the current cluster leader.
+	Leader bool
+
+	// Voter is true if this server has a vote in the cluster. This might
+	// be false if the server is staging and still coming online, or if
+	// it's a non-voting server, which will be added in a future release of
+	// Consul.
+	Voter bool
+}
+
+// RaftConfiguration is returned when querying for the current Raft configuration.
+type RaftConfiguration struct {
+	// Servers has the list of servers in the Raft configuration.
+	Servers []*RaftServer
+
+	// Index has the Raft index of this configuration.
+	Index uint64
+}
+
+// keyringRequest is used for performing Keyring operations
+type keyringRequest struct {
+	Key string
+}
+
+// KeyringResponse is returned when listing the gossip encryption keys
+type KeyringResponse struct {
+	// Whether this response is for a WAN ring
+	WAN bool
+
+	// The datacenter name this request corresponds to
+	Datacenter string
+
+	// A map of the encryption keys to the number of nodes they're installed on
+	Keys map[string]int
+
+	// The total number of nodes in this ring
+	NumNodes int
+}
+
+// RaftGetConfiguration is used to query the current Raft peer set.
+func (op *Operator) RaftGetConfiguration(q *QueryOptions) (*RaftConfiguration, error) {
+	r := op.c.newRequest("GET", "/v1/operator/raft/configuration")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out RaftConfiguration
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return &out, nil
+}
+
+// RaftRemovePeerByAddress is used to kick a stale peer (one that is in the Raft
+// quorum but no longer known to Serf or the catalog) by address in the form of
+// "IP:port".
+func (op *Operator) RaftRemovePeerByAddress(address string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/raft/peer")
+	r.setWriteOptions(q)
+
+	// TODO (slackpad) Currently we made address a query parameter. Once
+	// IDs are in place this will be DELETE /v1/operator/raft/peer/<id>.
+	r.params.Set("address", string(address))
+
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+
+	resp.Body.Close()
+	return nil
+}
+
+// KeyringInstall is used to install a new gossip encryption key into the cluster
+func (op *Operator) KeyringInstall(key string, q *WriteOptions) error {
+	r := op.c.newRequest("POST", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// KeyringList is used to list the gossip keys installed in the cluster
+func (op *Operator) KeyringList(q *QueryOptions) ([]*KeyringResponse, error) {
+	r := op.c.newRequest("GET", "/v1/operator/keyring")
+	r.setQueryOptions(q)
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var out []*KeyringResponse
+	if err := decodeBody(resp, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// KeyringRemove is used to remove a gossip encryption key from the cluster
+func (op *Operator) KeyringRemove(key string, q *WriteOptions) error {
+	r := op.c.newRequest("DELETE", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
+
+// KeyringUse is used to change the active gossip encryption key
+func (op *Operator) KeyringUse(key string, q *WriteOptions) error {
+	r := op.c.newRequest("PUT", "/v1/operator/keyring")
+	r.setWriteOptions(q)
+	r.obj = keyringRequest{
+		Key: key,
+	}
+	_, resp, err := requireOK(op.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	resp.Body.Close()
+	return nil
+}
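
A minimal sketch of the operator endpoints above, reusing the client (and log import) from the lock sketch; nil options use the defaults.

// List the current Raft peer set.
cfg, err := client.Operator().RaftGetConfiguration(nil)
if err != nil {
	log.Fatal(err)
}
for _, s := range cfg.Servers {
	log.Printf("raft server %s at %s (leader=%v, voter=%v)", s.Node, s.Address, s.Leader, s.Voter)
}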
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/prepared_query.go b/provisioner/vendor/github.com/hashicorp/consul/api/prepared_query.go
new file mode 100644
index 0000000..876e2e3
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/prepared_query.go
@@ -0,0 +1,193 @@
+package api
+
+// QueryDatacenterOptions sets options about how we fail over if there are no
+// healthy nodes in the local datacenter.
+type QueryDatacenterOptions struct {
+	// NearestN is set to the number of remote datacenters to try, based on
+	// network coordinates.
+	NearestN int
+
+	// Datacenters is a fixed list of datacenters to try after NearestN. We
+	// never try a datacenter multiple times, so those are subtracted from
+	// this list before proceeding.
+	Datacenters []string
+}
+
+// QueryDNSOptions controls settings when query results are served over DNS.
+type QueryDNSOptions struct {
+	// TTL is the time to live for the served DNS results.
+	TTL string
+}
+
+// ServiceQuery is used to query for a set of healthy nodes offering a specific
+// service.
+type ServiceQuery struct {
+	// Service is the service to query.
+	Service string
+
+	// Near allows baking in the name of a node to automatically distance-
+	// sort from. The magic "_agent" value is supported, which sorts near
+	// the agent that initiated the request by default.
+	Near string
+
+	// Failover controls what we do if there are no healthy nodes in the
+	// local datacenter.
+	Failover QueryDatacenterOptions
+
+	// If OnlyPassing is true then we will only include nodes with passing
+	// health checks (critical AND warning checks will cause a node to be
+	// discarded)
+	OnlyPassing bool
+
+	// Tags are a set of required and/or disallowed tags. If a tag is in
+	// this list it must be present. If the tag is preceded with "!" then
+	// it is disallowed.
+	Tags []string
+}
+
+// QueryTemplate carries the arguments for creating a templated query.
+type QueryTemplate struct {
+	// Type specifies the type of the query template. Currently only
+	// "name_prefix_match" is supported. This field is required.
+	Type string
+
+	// Regexp allows specifying a regex pattern to match against the name
+	// of the query being executed.
+	Regexp string
+}
+
+// PreparedQueryDefinition defines a complete prepared query.
+type PreparedQueryDefinition struct {
+	// ID is the UUID-based ID for the query, always generated by Consul.
+	ID string
+
+	// Name is an optional friendly name for the query supplied by the
+	// user. NOTE - if this feature is used then it will reduce the security
+	// of any read ACL associated with this query/service since this name
+	// can be used to locate nodes without supplying any ACL.
+	Name string
+
+	// Session is an optional session to tie this query's lifetime to. If
+	// this is omitted then the query will not expire.
+	Session string
+
+	// Token is the ACL token used when the query was created, and it is
+	// used when a query is subsequently executed. This token, or a token
+	// with management privileges, must be used to change the query later.
+	Token string
+
+	// Service defines a service query (leaving things open for other types
+	// later).
+	Service ServiceQuery
+
+	// DNS has options that control how the results of this query are
+	// served over DNS.
+	DNS QueryDNSOptions
+
+	// Template is used to pass through the arguments for creating a
+	// prepared query with an attached template. If a template is given,
+	// interpolations are possible in other struct fields.
+	Template QueryTemplate
+}
+
+// PreparedQueryExecuteResponse has the results of executing a query.
+type PreparedQueryExecuteResponse struct {
+	// Service is the service that was queried.
+	Service string
+
+	// Nodes has the nodes that were output by the query.
+	Nodes []ServiceEntry
+
+	// DNS has the options for serving these results over DNS.
+	DNS QueryDNSOptions
+
+	// Datacenter is the datacenter that these results came from.
+	Datacenter string
+
+	// Failovers is a count of how many times we had to query a remote
+	// datacenter.
+	Failovers int
+}
+
+// PreparedQuery can be used to query the prepared query endpoints.
+type PreparedQuery struct {
+	c *Client
+}
+
+// PreparedQuery returns a handle to the prepared query endpoints.
+func (c *Client) PreparedQuery() *PreparedQuery {
+	return &PreparedQuery{c}
+}
+
+// Create makes a new prepared query. The ID of the new query is returned.
+func (c *PreparedQuery) Create(query *PreparedQueryDefinition, q *WriteOptions) (string, *WriteMeta, error) {
+	r := c.c.newRequest("POST", "/v1/query")
+	r.setWriteOptions(q)
+	r.obj = query
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return "", nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+
+	var out struct{ ID string }
+	if err := decodeBody(resp, &out); err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Update makes updates to an existing prepared query.
+func (c *PreparedQuery) Update(query *PreparedQueryDefinition, q *WriteOptions) (*WriteMeta, error) {
+	return c.c.write("/v1/query/"+query.ID, query, nil, q)
+}
+
+// List is used to fetch all the prepared queries (always requires a management
+// token).
+func (c *PreparedQuery) List(q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
+	var out []*PreparedQueryDefinition
+	qm, err := c.c.query("/v1/query", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Get is used to fetch a specific prepared query.
+func (c *PreparedQuery) Get(queryID string, q *QueryOptions) ([]*PreparedQueryDefinition, *QueryMeta, error) {
+	var out []*PreparedQueryDefinition
+	qm, err := c.c.query("/v1/query/"+queryID, &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
+
+// Delete is used to delete a specific prepared query.
+func (c *PreparedQuery) Delete(queryID string, q *WriteOptions) (*WriteMeta, error) {
+	r := c.c.newRequest("DELETE", "/v1/query/"+queryID)
+	r.setWriteOptions(q)
+	rtt, resp, err := requireOK(c.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{}
+	wm.RequestTime = rtt
+	return wm, nil
+}
+
+// Execute is used to execute a specific prepared query. You can execute using
+// a query ID or name.
+func (c *PreparedQuery) Execute(queryIDOrName string, q *QueryOptions) (*PreparedQueryExecuteResponse, *QueryMeta, error) {
+	var out *PreparedQueryExecuteResponse
+	qm, err := c.c.query("/v1/query/"+queryIDOrName+"/execute", &out, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return out, qm, nil
+}
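
A hedged sketch of the prepared-query round trip defined above, again reusing the client from the earlier sketch; the "web" service name is hypothetical.

// Define a query for healthy "web" instances, failing over to up to
// two of the nearest remote datacenters.
def := &consul.PreparedQueryDefinition{
	Name: "nearest-web",
	Service: consul.ServiceQuery{
		Service:     "web", // hypothetical service
		OnlyPassing: true,
		Failover:    consul.QueryDatacenterOptions{NearestN: 2},
	},
}
id, _, err := client.PreparedQuery().Create(def, nil)
if err != nil {
	log.Fatal(err)
}

// Execute by ID (a name would also work, per the comment on Execute).
res, _, err := client.PreparedQuery().Execute(id, nil)
if err != nil {
	log.Fatal(err)
}
log.Printf("%d nodes from datacenter %s (%d failovers)", len(res.Nodes), res.Datacenter, res.Failovers)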
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/raw.go b/provisioner/vendor/github.com/hashicorp/consul/api/raw.go
new file mode 100644
index 0000000..745a208
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/raw.go
@@ -0,0 +1,24 @@
+package api
+
+// Raw can be used to do raw queries against custom endpoints
+type Raw struct {
+	c *Client
+}
+
+// Raw returns a handle to query endpoints
+func (c *Client) Raw() *Raw {
+	return &Raw{c}
+}
+
+// Query is used to do a GET request against an endpoint
+// and deserialize the response into an interface using
+// standard Consul conventions.
+func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*QueryMeta, error) {
+	return raw.c.query(endpoint, out, q)
+}
+
+// Write is used to do a PUT request against an endpoint
+// and serialize/deserialized using the standard Consul conventions.
+func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
+	return raw.c.write(endpoint, in, out, q)
+}
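
A small sketch of Raw against a known endpoint, reusing the earlier client; the /v1/status/leader route (see status.go later in this patch) returns a bare JSON string.

// Hit an endpoint directly and decode with the standard conventions.
var leader string
if _, err := client.Raw().Query("/v1/status/leader", &leader, nil); err != nil {
	log.Fatal(err)
}
log.Printf("leader: %s", leader)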
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/semaphore.go b/provisioner/vendor/github.com/hashicorp/consul/api/semaphore.go
new file mode 100644
index 0000000..e6645ac
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/semaphore.go
@@ -0,0 +1,512 @@
+package api
+
+import (
+	"encoding/json"
+	"fmt"
+	"path"
+	"sync"
+	"time"
+)
+
+const (
+	// DefaultSemaphoreSessionName is the Session Name we assign if none is provided
+	DefaultSemaphoreSessionName = "Consul API Semaphore"
+
+	// DefaultSemaphoreSessionTTL is the default session TTL if no Session is provided
+	// when creating a new Semaphore. This is used because we do not have any
+	// other check to depend upon.
+	DefaultSemaphoreSessionTTL = "15s"
+
+	// DefaultSemaphoreWaitTime is how long we block for at a time to check if semaphore
+	// acquisition is possible. This affects the minimum time it takes to cancel
+	// a Semaphore acquisition.
+	DefaultSemaphoreWaitTime = 15 * time.Second
+
+	// DefaultSemaphoreKey is the key used within the prefix
+	// for coordination between all the contenders.
+	DefaultSemaphoreKey = ".lock"
+
+	// SemaphoreFlagValue is a magic flag we set to indicate a key
+	// is being used for a semaphore. It is used to detect a potential
+	// conflict with a lock.
+	SemaphoreFlagValue = 0xe0f69a2baa414de0
+)
+
+var (
+	// ErrSemaphoreHeld is returned if we attempt to double lock
+	ErrSemaphoreHeld = fmt.Errorf("Semaphore already held")
+
+	// ErrSemaphoreNotHeld is returned if we attempt to unlock a semaphore
+	// that we do not hold.
+	ErrSemaphoreNotHeld = fmt.Errorf("Semaphore not held")
+
+	// ErrSemaphoreInUse is returned if we attempt to destroy a semaphore
+	// that is in use.
+	ErrSemaphoreInUse = fmt.Errorf("Semaphore in use")
+
+	// ErrSemaphoreConflict is returned if the flags on a key
+	// used for a semaphore do not match expectation
+	ErrSemaphoreConflict = fmt.Errorf("Existing key does not match semaphore use")
+)
+
+// Semaphore is used to implement a distributed semaphore
+// using the Consul KV primitives.
+type Semaphore struct {
+	c    *Client
+	opts *SemaphoreOptions
+
+	isHeld       bool
+	sessionRenew chan struct{}
+	lockSession  string
+	l            sync.Mutex
+}
+
+// SemaphoreOptions is used to parameterize the Semaphore
+type SemaphoreOptions struct {
+	Prefix            string        // Must be set and have write permissions
+	Limit             int           // Must be set, and be positive
+	Value             []byte        // Optional, value to associate with the contender entry
+	Session           string        // Optional, created if not specified
+	SessionName       string        // Optional, defaults to DefaultSemaphoreSessionName
+	SessionTTL        string        // Optional, defaults to DefaultSemaphoreSessionTTL
+	MonitorRetries    int           // Optional, defaults to 0 which means no retries
+	MonitorRetryTime  time.Duration // Optional, defaults to DefaultMonitorRetryTime
+	SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
+	SemaphoreTryOnce  bool          // Optional, defaults to false which means try forever
+}
+
+// semaphoreLock is written under the DefaultSemaphoreKey and
+// is used to coordinate between all the contenders.
+type semaphoreLock struct {
+	// Limit is the integer limit of holders. This is used to
+	// verify that all the holders agree on the value.
+	Limit int
+
+	// Holders is a list of all the semaphore holders.
+	// It maps the session ID to true. It is used as a set effectively.
+	Holders map[string]bool
+}
+
+// SemaphorePrefix is used to create a Semaphore which will operate
+// at the given KV prefix and uses the given limit for the semaphore.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders.
+func (c *Client) SemaphorePrefix(prefix string, limit int) (*Semaphore, error) {
+	opts := &SemaphoreOptions{
+		Prefix: prefix,
+		Limit:  limit,
+	}
+	return c.SemaphoreOpts(opts)
+}
+
+// SemaphoreOpts is used to create a Semaphore with the given options.
+// The prefix must have write privileges, and the limit must be agreed
+// upon by all contenders. If a Session is not provided, one will be created.
+func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
+	if opts.Prefix == "" {
+		return nil, fmt.Errorf("missing prefix")
+	}
+	if opts.Limit <= 0 {
+		return nil, fmt.Errorf("semaphore limit must be positive")
+	}
+	if opts.SessionName == "" {
+		opts.SessionName = DefaultSemaphoreSessionName
+	}
+	if opts.SessionTTL == "" {
+		opts.SessionTTL = DefaultSemaphoreSessionTTL
+	} else {
+		if _, err := time.ParseDuration(opts.SessionTTL); err != nil {
+			return nil, fmt.Errorf("invalid SessionTTL: %v", err)
+		}
+	}
+	if opts.MonitorRetryTime == 0 {
+		opts.MonitorRetryTime = DefaultMonitorRetryTime
+	}
+	if opts.SemaphoreWaitTime == 0 {
+		opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
+	}
+	s := &Semaphore{
+		c:    c,
+		opts: opts,
+	}
+	return s, nil
+}
+
+// Acquire attempts to reserve a slot in the semaphore, blocking until
+// success, the attempt is interrupted via the stopCh, or an error is encountered.
+// Providing a non-nil stopCh can be used to abort the attempt.
+// On success, a channel is returned that represents our slot.
+// This channel could be closed at any time due to session invalidation,
+// communication errors, operator intervention, etc. It is NOT safe to
+// assume that the slot is held until Release() unless the Session is specifically
+// created without any associated health checks. By default Consul sessions
+// prefer liveness over safety and an application must be able to handle
+// the session being lost.
+func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
+	// Hold the lock as we try to acquire
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	// Check if we already hold the semaphore
+	if s.isHeld {
+		return nil, ErrSemaphoreHeld
+	}
+
+	// Check if we need to create a session first
+	s.lockSession = s.opts.Session
+	if s.lockSession == "" {
+		if sess, err := s.createSession(); err != nil {
+			return nil, fmt.Errorf("failed to create session: %v", err)
+		} else {
+			s.sessionRenew = make(chan struct{})
+			s.lockSession = sess
+			session := s.c.Session()
+			go session.RenewPeriodic(s.opts.SessionTTL, sess, nil, s.sessionRenew)
+
+			// If we fail to acquire the lock, cleanup the session
+			defer func() {
+				if !s.isHeld {
+					close(s.sessionRenew)
+					s.sessionRenew = nil
+				}
+			}()
+		}
+	}
+
+	// Create the contender entry
+	kv := s.c.KV()
+	made, _, err := kv.Acquire(s.contenderEntry(s.lockSession), nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to make contender entry: %v", err)
+	}
+	if !made {
+		return nil, fmt.Errorf("failed to make contender entry: not acquired")
+	}
+
+	// Setup the query options
+	qOpts := &QueryOptions{
+		WaitTime: s.opts.SemaphoreWaitTime,
+	}
+
+	start := time.Now()
+	attempts := 0
+WAIT:
+	// Check if we should quit
+	select {
+	case <-stopCh:
+		return nil, nil
+	default:
+	}
+
+	// Handle the one-shot mode.
+	if s.opts.SemaphoreTryOnce && attempts > 0 {
+		elapsed := time.Since(start)
+		if elapsed > qOpts.WaitTime {
+			return nil, nil
+		}
+
+		qOpts.WaitTime -= elapsed
+	}
+	attempts++
+
+	// Read the prefix
+	pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read prefix: %v", err)
+	}
+
+	// Decode the lock
+	lockPair := s.findLock(pairs)
+	if lockPair.Flags != SemaphoreFlagValue {
+		return nil, ErrSemaphoreConflict
+	}
+	lock, err := s.decodeLock(lockPair)
+	if err != nil {
+		return nil, err
+	}
+
+	// Verify we agree with the limit
+	if lock.Limit != s.opts.Limit {
+		return nil, fmt.Errorf("semaphore limit conflict (lock: %d, local: %d)",
+			lock.Limit, s.opts.Limit)
+	}
+
+	// Prune the dead holders
+	s.pruneDeadHolders(lock, pairs)
+
+	// Check if the lock is held
+	if len(lock.Holders) >= lock.Limit {
+		qOpts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+
+	// Create a new lock with us as a holder
+	lock.Holders[s.lockSession] = true
+	newLock, err := s.encodeLock(lock, lockPair.ModifyIndex)
+	if err != nil {
+		return nil, err
+	}
+
+	// Attempt the acquisition
+	didSet, _, err := kv.CAS(newLock, nil)
+	if err != nil {
+		return nil, fmt.Errorf("failed to update lock: %v", err)
+	}
+	if !didSet {
+		// Update failed, could have been a race with another contender,
+		// retry the operation
+		goto WAIT
+	}
+
+	// Watch to ensure we maintain ownership of the slot
+	lockCh := make(chan struct{})
+	go s.monitorLock(s.lockSession, lockCh)
+
+	// Set that we own the lock
+	s.isHeld = true
+
+	// Acquired! All done
+	return lockCh, nil
+}
+
+// Release is used to voluntarily give up our semaphore slot. It is
+// an error to call this if the semaphore has not been acquired.
+func (s *Semaphore) Release() error {
+	// Hold the lock as we try to release
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	// Ensure the lock is actually held
+	if !s.isHeld {
+		return ErrSemaphoreNotHeld
+	}
+
+	// Set that we no longer own the lock
+	s.isHeld = false
+
+	// Stop the session renew
+	if s.sessionRenew != nil {
+		defer func() {
+			close(s.sessionRenew)
+			s.sessionRenew = nil
+		}()
+	}
+
+	// Get and clear the lock session
+	lockSession := s.lockSession
+	s.lockSession = ""
+
+	// Remove ourselves as a lock holder
+	kv := s.c.KV()
+	key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+READ:
+	pair, _, err := kv.Get(key, nil)
+	if err != nil {
+		return err
+	}
+	if pair == nil {
+		pair = &KVPair{}
+	}
+	lock, err := s.decodeLock(pair)
+	if err != nil {
+		return err
+	}
+
+	// Create a new lock without us as a holder
+	if _, ok := lock.Holders[lockSession]; ok {
+		delete(lock.Holders, lockSession)
+		newLock, err := s.encodeLock(lock, pair.ModifyIndex)
+		if err != nil {
+			return err
+		}
+
+		// Swap the locks
+		didSet, _, err := kv.CAS(newLock, nil)
+		if err != nil {
+			return fmt.Errorf("failed to update lock: %v", err)
+		}
+		if !didSet {
+			goto READ
+		}
+	}
+
+	// Destroy the contender entry
+	contenderKey := path.Join(s.opts.Prefix, lockSession)
+	if _, err := kv.Delete(contenderKey, nil); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Destroy is used to cleanup the semaphore entry. It is not necessary
+// to invoke. It will fail if the semaphore is in use.
+func (s *Semaphore) Destroy() error {
+	// Hold the lock as we try to destroy
+	s.l.Lock()
+	defer s.l.Unlock()
+
+	// Check if we already hold the semaphore
+	if s.isHeld {
+		return ErrSemaphoreHeld
+	}
+
+	// List for the semaphore
+	kv := s.c.KV()
+	pairs, _, err := kv.List(s.opts.Prefix, nil)
+	if err != nil {
+		return fmt.Errorf("failed to read prefix: %v", err)
+	}
+
+	// Find the lock pair, bail if it doesn't exist
+	lockPair := s.findLock(pairs)
+	if lockPair.ModifyIndex == 0 {
+		return nil
+	}
+	if lockPair.Flags != SemaphoreFlagValue {
+		return ErrSemaphoreConflict
+	}
+
+	// Decode the lock
+	lock, err := s.decodeLock(lockPair)
+	if err != nil {
+		return err
+	}
+
+	// Prune the dead holders
+	s.pruneDeadHolders(lock, pairs)
+
+	// Check if there are any holders
+	if len(lock.Holders) > 0 {
+		return ErrSemaphoreInUse
+	}
+
+	// Attempt the delete
+	didRemove, _, err := kv.DeleteCAS(lockPair, nil)
+	if err != nil {
+		return fmt.Errorf("failed to remove semaphore: %v", err)
+	}
+	if !didRemove {
+		return ErrSemaphoreInUse
+	}
+	return nil
+}
+
+// createSession is used to create a new managed session
+func (s *Semaphore) createSession() (string, error) {
+	session := s.c.Session()
+	se := &SessionEntry{
+		Name:     s.opts.SessionName,
+		TTL:      s.opts.SessionTTL,
+		Behavior: SessionBehaviorDelete,
+	}
+	id, _, err := session.Create(se, nil)
+	if err != nil {
+		return "", err
+	}
+	return id, nil
+}
+
+// contenderEntry returns a formatted KVPair for the contender
+func (s *Semaphore) contenderEntry(session string) *KVPair {
+	return &KVPair{
+		Key:     path.Join(s.opts.Prefix, session),
+		Value:   s.opts.Value,
+		Session: session,
+		Flags:   SemaphoreFlagValue,
+	}
+}
+
+// findLock is used to find the KV Pair which is used for coordination
+func (s *Semaphore) findLock(pairs KVPairs) *KVPair {
+	key := path.Join(s.opts.Prefix, DefaultSemaphoreKey)
+	for _, pair := range pairs {
+		if pair.Key == key {
+			return pair
+		}
+	}
+	return &KVPair{Flags: SemaphoreFlagValue}
+}
+
+// decodeLock is used to decode a semaphoreLock from an
+// entry in Consul
+func (s *Semaphore) decodeLock(pair *KVPair) (*semaphoreLock, error) {
+	// Handle if there is no lock
+	if pair == nil || pair.Value == nil {
+		return &semaphoreLock{
+			Limit:   s.opts.Limit,
+			Holders: make(map[string]bool),
+		}, nil
+	}
+
+	l := &semaphoreLock{}
+	if err := json.Unmarshal(pair.Value, l); err != nil {
+		return nil, fmt.Errorf("lock decoding failed: %v", err)
+	}
+	return l, nil
+}
+
+// encodeLock is used to encode a semaphoreLock into a KVPair
+// that can be PUT
+func (s *Semaphore) encodeLock(l *semaphoreLock, oldIndex uint64) (*KVPair, error) {
+	enc, err := json.Marshal(l)
+	if err != nil {
+		return nil, fmt.Errorf("lock encoding failed: %v", err)
+	}
+	pair := &KVPair{
+		Key:         path.Join(s.opts.Prefix, DefaultSemaphoreKey),
+		Value:       enc,
+		Flags:       SemaphoreFlagValue,
+		ModifyIndex: oldIndex,
+	}
+	return pair, nil
+}
+
+// pruneDeadHolders is used to remove all the dead lock holders
+func (s *Semaphore) pruneDeadHolders(lock *semaphoreLock, pairs KVPairs) {
+	// Gather all the live holders
+	alive := make(map[string]struct{}, len(pairs))
+	for _, pair := range pairs {
+		if pair.Session != "" {
+			alive[pair.Session] = struct{}{}
+		}
+	}
+
+	// Remove any holders that are dead
+	for holder := range lock.Holders {
+		if _, ok := alive[holder]; !ok {
+			delete(lock.Holders, holder)
+		}
+	}
+}
+
+// monitorLock is a long-running routine that monitors semaphore ownership.
+// It closes the stopCh if we lose our slot.
+func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
+	defer close(stopCh)
+	kv := s.c.KV()
+	opts := &QueryOptions{RequireConsistent: true}
+WAIT:
+	retries := s.opts.MonitorRetries
+RETRY:
+	pairs, meta, err := kv.List(s.opts.Prefix, opts)
+	if err != nil {
+		// If configured we can try to ride out a brief Consul unavailability
+		// by doing retries. Note that we have to attempt the retry in a non-
+		// blocking fashion so that we have a clean place to reset the retry
+		// counter if service is restored.
+		if retries > 0 && IsServerError(err) {
+			time.Sleep(s.opts.MonitorRetryTime)
+			retries--
+			opts.WaitIndex = 0
+			goto RETRY
+		}
+		return
+	}
+	lockPair := s.findLock(pairs)
+	lock, err := s.decodeLock(lockPair)
+	if err != nil {
+		return
+	}
+	s.pruneDeadHolders(lock, pairs)
+	if _, ok := lock.Holders[session]; ok {
+		opts.WaitIndex = meta.LastIndex
+		goto WAIT
+	}
+}
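
A usage sketch for the semaphore above, mirroring the lock sketch; the prefix and limit are illustrative.

// Allow at most three concurrent holders under a shared prefix.
sema, err := client.SemaphorePrefix("service/example/workers", 3)
if err != nil {
	log.Fatal(err)
}

// Acquire blocks until a slot is free; slotLost is closed if the slot
// is lost (session invalidation, etc.).
slotLost, err := sema.Acquire(nil)
if err != nil {
	log.Fatal(err)
}

// ... do bounded-concurrency work here, watching slotLost ...
_ = slotLost

if err := sema.Release(); err != nil {
	log.Println(err)
}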
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/session.go b/provisioner/vendor/github.com/hashicorp/consul/api/session.go
new file mode 100644
index 0000000..36e99a3
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/session.go
@@ -0,0 +1,217 @@
+package api
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const (
+	// SessionBehaviorRelease is the default behavior and causes
+	// all associated locks to be released on session invalidation.
+	SessionBehaviorRelease = "release"
+
+	// SessionBehaviorDelete is new in Consul 0.5 and changes the
+	// behavior to delete all associated locks on session invalidation.
+	// It can be used in a way similar to Ephemeral Nodes in ZooKeeper.
+	SessionBehaviorDelete = "delete"
+)
+
+var ErrSessionExpired = errors.New("session expired")
+
+// SessionEntry represents a session in consul
+type SessionEntry struct {
+	CreateIndex uint64
+	ID          string
+	Name        string
+	Node        string
+	Checks      []string
+	LockDelay   time.Duration
+	Behavior    string
+	TTL         string
+}
+
+// Session can be used to query the Session endpoints
+type Session struct {
+	c *Client
+}
+
+// Session returns a handle to the session endpoints
+func (c *Client) Session() *Session {
+	return &Session{c}
+}
+
+// CreateNoChecks is like Create but is used specifically to create
+// a session with no associated health checks.
+func (s *Session) CreateNoChecks(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+	body := make(map[string]interface{})
+	body["Checks"] = []string{}
+	if se != nil {
+		if se.Name != "" {
+			body["Name"] = se.Name
+		}
+		if se.Node != "" {
+			body["Node"] = se.Node
+		}
+		if se.LockDelay != 0 {
+			body["LockDelay"] = durToMsec(se.LockDelay)
+		}
+		if se.Behavior != "" {
+			body["Behavior"] = se.Behavior
+		}
+		if se.TTL != "" {
+			body["TTL"] = se.TTL
+		}
+	}
+	return s.create(body, q)
+}
+
+// Create makes a new session. Providing a session entry can
+// customize the session. It can also be nil to use defaults.
+func (s *Session) Create(se *SessionEntry, q *WriteOptions) (string, *WriteMeta, error) {
+	var obj interface{}
+	if se != nil {
+		body := make(map[string]interface{})
+		obj = body
+		if se.Name != "" {
+			body["Name"] = se.Name
+		}
+		if se.Node != "" {
+			body["Node"] = se.Node
+		}
+		if se.LockDelay != 0 {
+			body["LockDelay"] = durToMsec(se.LockDelay)
+		}
+		if len(se.Checks) > 0 {
+			body["Checks"] = se.Checks
+		}
+		if se.Behavior != "" {
+			body["Behavior"] = se.Behavior
+		}
+		if se.TTL != "" {
+			body["TTL"] = se.TTL
+		}
+	}
+	return s.create(obj, q)
+}
+
+func (s *Session) create(obj interface{}, q *WriteOptions) (string, *WriteMeta, error) {
+	var out struct{ ID string }
+	wm, err := s.c.write("/v1/session/create", obj, &out, q)
+	if err != nil {
+		return "", nil, err
+	}
+	return out.ID, wm, nil
+}
+
+// Destroy invalidates a given session
+func (s *Session) Destroy(id string, q *WriteOptions) (*WriteMeta, error) {
+	wm, err := s.c.write("/v1/session/destroy/"+id, nil, nil, q)
+	if err != nil {
+		return nil, err
+	}
+	return wm, nil
+}
+
+// Renew renews the TTL on a given session
+func (s *Session) Renew(id string, q *WriteOptions) (*SessionEntry, *WriteMeta, error) {
+	r := s.c.newRequest("PUT", "/v1/session/renew/"+id)
+	r.setWriteOptions(q)
+	rtt, resp, err := s.c.doRequest(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	defer resp.Body.Close()
+
+	wm := &WriteMeta{RequestTime: rtt}
+
+	if resp.StatusCode == 404 {
+		return nil, wm, nil
+	} else if resp.StatusCode != 200 {
+		return nil, nil, fmt.Errorf("Unexpected response code: %d", resp.StatusCode)
+	}
+
+	var entries []*SessionEntry
+	if err := decodeBody(resp, &entries); err != nil {
+		return nil, nil, fmt.Errorf("Failed to read response: %v", err)
+	}
+	if len(entries) > 0 {
+		return entries[0], wm, nil
+	}
+	return nil, wm, nil
+}
+
+// RenewPeriodic is used to periodically invoke Session.Renew on a
+// session until a doneCh is closed. This is meant to be used in a long running
+// goroutine to ensure a session stays valid.
+func (s *Session) RenewPeriodic(initialTTL string, id string, q *WriteOptions, doneCh chan struct{}) error {
+	ttl, err := time.ParseDuration(initialTTL)
+	if err != nil {
+		return err
+	}
+
+	waitDur := ttl / 2
+	lastRenewTime := time.Now()
+	var lastErr error
+	for {
+		if time.Since(lastRenewTime) > ttl {
+			return lastErr
+		}
+		select {
+		case <-time.After(waitDur):
+			entry, _, err := s.Renew(id, q)
+			if err != nil {
+				waitDur = time.Second
+				lastErr = err
+				continue
+			}
+			if entry == nil {
+				return ErrSessionExpired
+			}
+
+			// Handle the server updating the TTL
+			ttl, _ = time.ParseDuration(entry.TTL)
+			waitDur = ttl / 2
+			lastRenewTime = time.Now()
+
+		case <-doneCh:
+			// Attempt a session destroy
+			s.Destroy(id, q)
+			return nil
+		}
+	}
+}
+
+// Info looks up a single session
+func (s *Session) Info(id string, q *QueryOptions) (*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/info/"+id, &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(entries) > 0 {
+		return entries[0], qm, nil
+	}
+	return nil, qm, nil
+}
+
+// Node gets sessions for a node
+func (s *Session) Node(node string, q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/node/"+node, &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
+
+// List gets all active sessions
+func (s *Session) List(q *QueryOptions) ([]*SessionEntry, *QueryMeta, error) {
+	var entries []*SessionEntry
+	qm, err := s.c.query("/v1/session/list", &entries, q)
+	if err != nil {
+		return nil, nil, err
+	}
+	return entries, qm, nil
+}
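
A sketch of the session lifecycle above (create, background renewal, teardown), reusing the earlier client; the session name is hypothetical.

// Create a TTL-only session whose locks are deleted on invalidation.
id, _, err := client.Session().Create(&consul.SessionEntry{
	Name:     "example-session", // hypothetical name
	TTL:      "15s",
	Behavior: consul.SessionBehaviorDelete,
}, nil)
if err != nil {
	log.Fatal(err)
}

// Keep it alive in the background; closing doneCh stops renewal and
// destroys the session, as documented on RenewPeriodic above.
doneCh := make(chan struct{})
go client.Session().RenewPeriodic("15s", id, nil, doneCh)

// ... tie locks or KV acquires to the session via id ...
close(doneCh)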
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/snapshot.go b/provisioner/vendor/github.com/hashicorp/consul/api/snapshot.go
new file mode 100644
index 0000000..e902377
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/snapshot.go
@@ -0,0 +1,47 @@
+package api
+
+import (
+	"io"
+)
+
+// Snapshot can be used to query the /v1/snapshot endpoint to take snapshots of
+// Consul's internal state and restore snapshots for disaster recovery.
+type Snapshot struct {
+	c *Client
+}
+
+// Snapshot returns a handle that exposes the snapshot endpoints.
+func (c *Client) Snapshot() *Snapshot {
+	return &Snapshot{c}
+}
+
+// Save requests a new snapshot and provides an io.ReadCloser with the snapshot
+// data to save. If this doesn't return an error, then it's the responsibility
+// of the caller to close it. Only a subset of the QueryOptions are supported:
+// Datacenter, AllowStale, and Token.
+func (s *Snapshot) Save(q *QueryOptions) (io.ReadCloser, *QueryMeta, error) {
+	r := s.c.newRequest("GET", "/v1/snapshot")
+	r.setQueryOptions(q)
+
+	rtt, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, nil, err
+	}
+
+	qm := &QueryMeta{}
+	parseQueryMeta(resp, qm)
+	qm.RequestTime = rtt
+	return resp.Body, qm, nil
+}
+
+// Restore streams in an existing snapshot and attempts to restore it.
+func (s *Snapshot) Restore(q *WriteOptions, in io.Reader) error {
+	r := s.c.newRequest("PUT", "/v1/snapshot")
+	r.body = in
+	r.setWriteOptions(q)
+	_, _, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return err
+	}
+	return nil
+}
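
A sketch of saving a snapshot to disk with the endpoints above; it additionally assumes the standard io and os imports, and the file path is illustrative.

// Stream a snapshot to disk; the caller must close the reader.
rc, _, err := client.Snapshot().Save(nil)
if err != nil {
	log.Fatal(err)
}
defer rc.Close()

f, err := os.Create("consul-backup.snap") // illustrative path
if err != nil {
	log.Fatal(err)
}
defer f.Close()

if _, err := io.Copy(f, rc); err != nil {
	log.Fatal(err)
}
// Restore later with: client.Snapshot().Restore(nil, reader)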
diff --git a/provisioner/vendor/github.com/hashicorp/consul/api/status.go b/provisioner/vendor/github.com/hashicorp/consul/api/status.go
new file mode 100644
index 0000000..74ef61a
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/consul/api/status.go
@@ -0,0 +1,43 @@
+package api
+
+// Status can be used to query the Status endpoints
+type Status struct {
+	c *Client
+}
+
+// Status returns a handle to the status endpoints
+func (c *Client) Status() *Status {
+	return &Status{c}
+}
+
+// Leader is used to query for a known leader
+func (s *Status) Leader() (string, error) {
+	r := s.c.newRequest("GET", "/v1/status/leader")
+	_, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return "", err
+	}
+	defer resp.Body.Close()
+
+	var leader string
+	if err := decodeBody(resp, &leader); err != nil {
+		return "", err
+	}
+	return leader, nil
+}
+
+// Peers is used to query for known raft peers
+func (s *Status) Peers() ([]string, error) {
+	r := s.c.newRequest("GET", "/v1/status/peers")
+	_, resp, err := requireOK(s.c.doRequest(r))
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	var peers []string
+	if err := decodeBody(resp, &peers); err != nil {
+		return nil, err
+	}
+	return peers, nil
+}
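
And a final sketch for the status endpoints, reusing the earlier client.

// Report the current leader and raft peers of the cluster.
leader, err := client.Status().Leader()
if err != nil {
	log.Fatal(err)
}
peers, err := client.Status().Peers()
if err != nil {
	log.Fatal(err)
}
log.Printf("leader %s among peers %v", leader, peers)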
diff --git a/provisioner/vendor/github.com/hashicorp/go-cleanhttp/LICENSE b/provisioner/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
new file mode 100644
index 0000000..e87a115
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/go-cleanhttp/LICENSE
@@ -0,0 +1,363 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. "Contributor"
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. "Contributor Version"
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor's Contribution.
+
+1.3. "Contribution"
+
+     means Covered Software of a particular Contributor.
+
+1.4. "Covered Software"
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. "Incompatible With Secondary Licenses"
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of
+        version 1.1 or earlier of the License, but not also under the terms of
+        a Secondary License.
+
+1.6. "Executable Form"
+
+     means any form of the work other than Source Code Form.
+
+1.7. "Larger Work"
+
+     means a work that combines Covered Software with other material, in a
+     separate file or files, that is not Covered Software.
+
+1.8. "License"
+
+     means this document.
+
+1.9. "Licensable"
+
+     means having the right to grant, to the maximum extent possible, whether
+     at the time of the initial grant or subsequently, any and all of the
+     rights conveyed by this License.
+
+1.10. "Modifications"
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to,
+        deletion from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. "Patent Claims" of a Contributor
+
+      means any patent claim(s), including without limitation, method,
+      process, and apparatus claims, in any patent Licensable by such
+      Contributor that would be infringed, but for the grant of the License,
+      by the making, using, selling, offering for sale, having made, import,
+      or transfer of either its Contributions or its Contributor Version.
+
+1.12. "Secondary License"
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. "Source Code Form"
+
+      means the form of the work preferred for making modifications.
+
+1.14. "You" (or "Your")
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, "You" includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, "control" means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or
+        as part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its
+        Contributions or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution
+     become effective for each Contribution on the date the Contributor first
+     distributes such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under
+     this License. No additional rights or licenses will be implied from the
+     distribution or licensing of Covered Software under this License.
+     Notwithstanding Section 2.1(b) above, no patent license is granted by a
+     Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party's
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of
+        its Contributions.
+
+     This License does not grant any rights in the trademarks, service marks,
+     or logos of any Contributor (except as may be necessary to comply with
+     the notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this
+     License (see Section 10.2) or under the terms of a Secondary License (if
+     permitted under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its
+     Contributions are its original creation(s) or it has sufficient rights to
+     grant the rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under
+     applicable copyright doctrines of fair use, fair dealing, or other
+     equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under
+     the terms of this License. You must inform recipients that the Source
+     Code Form of the Covered Software is governed by the terms of this
+     License, and how they can obtain a copy of this License. You may not
+     attempt to alter or restrict the recipients' rights in the Source Code
+     Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this
+        License, or sublicense it under different terms, provided that the
+        license for the Executable Form does not attempt to limit or alter the
+        recipients' rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for
+     the Covered Software. If the Larger Work is a combination of Covered
+     Software with a work governed by one or more Secondary Licenses, and the
+     Covered Software is not Incompatible With Secondary Licenses, this
+     License permits You to additionally distribute such Covered Software
+     under the terms of such Secondary License(s), so that the recipient of
+     the Larger Work may, at their option, further distribute the Covered
+     Software under the terms of either this License or such Secondary
+     License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices
+     (including copyright notices, patent notices, disclaimers of warranty, or
+     limitations of liability) contained within the Source Code Form of the
+     Covered Software, except that You may alter any license notices to the
+     extent required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on
+     behalf of any Contributor. You must make it absolutely clear that any
+     such warranty, support, indemnity, or liability obligation is offered by
+     You alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute,
+   judicial order, or regulation then You must: (a) comply with the terms of
+   this License to the maximum extent possible; and (b) describe the
+   limitations and the code they affect. Such description must be placed in a
+   text file included with all distributions of the Covered Software under
+   this License. Except to the extent prohibited by statute or regulation,
+   such description must be sufficiently detailed for a recipient of ordinary
+   skill to be able to understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing
+     basis, if such Contributor fails to notify You of the non-compliance by
+     some reasonable means prior to 60 days after You have come back into
+     compliance. Moreover, Your grants from a particular Contributor are
+     reinstated on an ongoing basis if such Contributor notifies You of the
+     non-compliance by some reasonable means, this is the first time You have
+     received notice of non-compliance with this License from such
+     Contributor, and You become compliant prior to 30 days after Your receipt
+     of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions,
+     counter-claims, and cross-claims) alleging that a Contributor Version
+     directly or indirectly infringes any patent, then the rights granted to
+     You by any and all Contributors for the Covered Software under Section
+     2.1 of this License shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an "as is" basis,
+   without warranty of any kind, either expressed, implied, or statutory,
+   including, without limitation, warranties that the Covered Software is free
+   of defects, merchantable, fit for a particular purpose or non-infringing.
+   The entire risk as to the quality and performance of the Covered Software
+   is with You. Should any Covered Software prove defective in any respect,
+   You (not any Contributor) assume the cost of any necessary servicing,
+   repair, or correction. This disclaimer of warranty constitutes an essential
+   part of this License. No use of any Covered Software is authorized under
+   this License except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from
+   such party's negligence to the extent applicable law prohibits such
+   limitation. Some jurisdictions do not allow the exclusion or limitation of
+   incidental or consequential damages, so this exclusion and limitation may
+   not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts
+   of a jurisdiction where the defendant maintains its principal place of
+   business and such litigation shall be governed by laws of that
+   jurisdiction, without reference to its conflict-of-law provisions. Nothing
+   in this Section shall prevent a party's ability to bring cross-claims or
+   counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject
+   matter hereof. If any provision of this License is held to be
+   unenforceable, such provision shall be reformed only to the extent
+   necessary to make it enforceable. Any law or regulation which provides that
+   the language of a contract shall be construed against the drafter shall not
+   be used to construe this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version
+      of the License under which You originally received the Covered Software,
+      or under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a
+      modified version of this License if you rename the license and remove
+      any references to the name of the license steward (except to note that
+      such modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary
+      Licenses
+
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file,
+then You may include the notice in a location (such as a LICENSE file in a
+relevant directory) where a recipient would be likely to look for such a
+notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - "Incompatible With Secondary Licenses" Notice
+
+      This Source Code Form is "Incompatible
+      With Secondary Licenses", as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/provisioner/vendor/github.com/hashicorp/go-cleanhttp/README.md b/provisioner/vendor/github.com/hashicorp/go-cleanhttp/README.md
new file mode 100644
index 0000000..036e531
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/go-cleanhttp/README.md
@@ -0,0 +1,30 @@
+# cleanhttp
+
+Functions for accessing "clean" Go http.Client values
+
+-------------
+
+The Go standard library contains a default `http.Client` called
+`http.DefaultClient`. It is a common idiom in Go code to start with
+`http.DefaultClient` and tweak it as necessary, and in fact, this is
+encouraged; from the `http` package documentation:
+
+> The Client's Transport typically has internal state (cached TCP connections),
+so Clients should be reused instead of created as needed. Clients are safe for
+concurrent use by multiple goroutines.
+
+Unfortunately, this is a shared value, and it is not uncommon for libraries to
+assume that they are free to modify it at will. With enough dependencies, it
+can be very easy to encounter strange problems and race conditions due to
+manipulation of this shared value across libraries and goroutines (clients are
+safe for concurrent use, but writing values to the client struct itself is not
+protected).
+
+Making things worse is the fact that a bare `http.Client` will use a default
+`http.Transport` called `http.DefaultTransport`, which is another global value
+that behaves the same way. So it is not simply enough to replace
+`http.DefaultClient` with `&http.Client{}`.
+
+This repository provides some simple functions to get a "clean" `http.Client`
+-- one that uses the same default values as the Go standard library, but
+returns a client that does not share any state with other clients.
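+
+A minimal usage sketch (the request URL below is illustrative):
+
+```Go
+package main
+
+import (
+    "fmt"
+    "log"
+
+    cleanhttp "github.com/hashicorp/go-cleanhttp"
+)
+
+func main() {
+    // A one-shot client: non-shared Transport, keepalives disabled.
+    client := cleanhttp.DefaultClient()
+    resp, err := client.Get("https://example.com/")
+    if err != nil {
+        log.Fatal(err)
+    }
+    defer resp.Body.Close()
+    fmt.Println(resp.Status)
+}
+```
+
+For repeated requests to the same host(s), `DefaultPooledClient()` returns a
+client whose Transport keeps idle connections pooled instead.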
diff --git a/provisioner/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go b/provisioner/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
new file mode 100644
index 0000000..f4596d8
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/go-cleanhttp/cleanhttp.go
@@ -0,0 +1,53 @@
+package cleanhttp
+
+import (
+	"net"
+	"net/http"
+	"time"
+)
+
+// DefaultTransport returns a new http.Transport with the same default values
+// as http.DefaultTransport, but with idle connections and keepalives disabled.
+func DefaultTransport() *http.Transport {
+	transport := DefaultPooledTransport()
+	transport.DisableKeepAlives = true
+	transport.MaxIdleConnsPerHost = -1
+	return transport
+}
+
+// DefaultPooledTransport returns a new http.Transport with similar default
+// values to http.DefaultTransport. Do not use this for transient transports as
+// it can leak file descriptors over time. Only use this for transports that
+// will be re-used for the same host(s).
+func DefaultPooledTransport() *http.Transport {
+	transport := &http.Transport{
+		Proxy: http.ProxyFromEnvironment,
+		Dial: (&net.Dialer{
+			Timeout:   30 * time.Second,
+			KeepAlive: 30 * time.Second,
+		}).Dial,
+		TLSHandshakeTimeout: 10 * time.Second,
+		DisableKeepAlives:   false,
+		MaxIdleConnsPerHost: 1,
+	}
+	return transport
+}
+
+// DefaultClient returns a new http.Client with similar default values to
+// http.Client, but with a non-shared Transport, idle connections disabled, and
+// keepalives disabled.
+func DefaultClient() *http.Client {
+	return &http.Client{
+		Transport: DefaultTransport(),
+	}
+}
+
+// DefaultPooledClient returns a new http.Client with similar default values to
+// http.Client, but with a shared (pooled) Transport that keeps idle
+// connections alive. Do not use this function for transient clients as it can
+// leak file descriptors over time. Only use this for clients that will be
+// re-used for the same host(s).
+func DefaultPooledClient() *http.Client {
+	return &http.Client{
+		Transport: DefaultPooledTransport(),
+	}
+}
diff --git a/provisioner/vendor/github.com/hashicorp/go-cleanhttp/doc.go b/provisioner/vendor/github.com/hashicorp/go-cleanhttp/doc.go
new file mode 100644
index 0000000..0584109
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/go-cleanhttp/doc.go
@@ -0,0 +1,20 @@
+// Package cleanhttp offers convenience utilities for acquiring "clean"
+// http.Transport and http.Client structs.
+//
+// Values set on http.DefaultClient and http.DefaultTransport affect all
+// callers. This can have detrimental effects, especially in TLS contexts,
+// where client or root certificates set to talk to multiple endpoints can end
+// up displacing each other, leading to hard-to-debug issues. This package
+// provides non-shared http.Client and http.Transport structs to ensure that
+// the configuration will not be overwritten by other parts of the application
+// or dependencies.
+//
+// The DefaultClient and DefaultTransport functions disable idle connections
+// and keepalives. Without ensuring that idle connections are closed before
+// garbage collection, short-term clients/transports can leak file descriptors,
+// eventually leading to "too many open files" errors. If you will be
+// connecting to the same hosts repeatedly from the same client, you can use
+// DefaultPooledClient to receive a client that has connection pooling
+// semantics similar to http.DefaultClient.
+//
+package cleanhttp
diff --git a/provisioner/vendor/github.com/hashicorp/serf/LICENSE b/provisioner/vendor/github.com/hashicorp/serf/LICENSE
new file mode 100644
index 0000000..c33dcc7
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/serf/LICENSE
@@ -0,0 +1,354 @@
+Mozilla Public License, version 2.0
+
+1. Definitions
+
+1.1. “Contributor”
+
+     means each individual or legal entity that creates, contributes to the
+     creation of, or owns Covered Software.
+
+1.2. “Contributor Version”
+
+     means the combination of the Contributions of others (if any) used by a
+     Contributor and that particular Contributor’s Contribution.
+
+1.3. “Contribution”
+
+     means Covered Software of a particular Contributor.
+
+1.4. “Covered Software”
+
+     means Source Code Form to which the initial Contributor has attached the
+     notice in Exhibit A, the Executable Form of such Source Code Form, and
+     Modifications of such Source Code Form, in each case including portions
+     thereof.
+
+1.5. “Incompatible With Secondary Licenses”
+     means
+
+     a. that the initial Contributor has attached the notice described in
+        Exhibit B to the Covered Software; or
+
+     b. that the Covered Software was made available under the terms of version
+        1.1 or earlier of the License, but not also under the terms of a
+        Secondary License.
+
+1.6. “Executable Form”
+
+     means any form of the work other than Source Code Form.
+
+1.7. “Larger Work”
+
+     means a work that combines Covered Software with other material, in a separate
+     file or files, that is not Covered Software.
+
+1.8. “License”
+
+     means this document.
+
+1.9. “Licensable”
+
+     means having the right to grant, to the maximum extent possible, whether at the
+     time of the initial grant or subsequently, any and all of the rights conveyed by
+     this License.
+
+1.10. “Modifications”
+
+     means any of the following:
+
+     a. any file in Source Code Form that results from an addition to, deletion
+        from, or modification of the contents of Covered Software; or
+
+     b. any new file in Source Code Form that contains any Covered Software.
+
+1.11. “Patent Claims” of a Contributor
+
+      means any patent claim(s), including without limitation, method, process,
+      and apparatus claims, in any patent Licensable by such Contributor that
+      would be infringed, but for the grant of the License, by the making,
+      using, selling, offering for sale, having made, import, or transfer of
+      either its Contributions or its Contributor Version.
+
+1.12. “Secondary License”
+
+      means either the GNU General Public License, Version 2.0, the GNU Lesser
+      General Public License, Version 2.1, the GNU Affero General Public
+      License, Version 3.0, or any later versions of those licenses.
+
+1.13. “Source Code Form”
+
+      means the form of the work preferred for making modifications.
+
+1.14. “You” (or “Your”)
+
+      means an individual or a legal entity exercising rights under this
+      License. For legal entities, “You” includes any entity that controls, is
+      controlled by, or is under common control with You. For purposes of this
+      definition, “control” means (a) the power, direct or indirect, to cause
+      the direction or management of such entity, whether by contract or
+      otherwise, or (b) ownership of more than fifty percent (50%) of the
+      outstanding shares or beneficial ownership of such entity.
+
+
+2. License Grants and Conditions
+
+2.1. Grants
+
+     Each Contributor hereby grants You a world-wide, royalty-free,
+     non-exclusive license:
+
+     a. under intellectual property rights (other than patent or trademark)
+        Licensable by such Contributor to use, reproduce, make available,
+        modify, display, perform, distribute, and otherwise exploit its
+        Contributions, either on an unmodified basis, with Modifications, or as
+        part of a Larger Work; and
+
+     b. under Patent Claims of such Contributor to make, use, sell, offer for
+        sale, have made, import, and otherwise transfer either its Contributions
+        or its Contributor Version.
+
+2.2. Effective Date
+
+     The licenses granted in Section 2.1 with respect to any Contribution become
+     effective for each Contribution on the date the Contributor first distributes
+     such Contribution.
+
+2.3. Limitations on Grant Scope
+
+     The licenses granted in this Section 2 are the only rights granted under this
+     License. No additional rights or licenses will be implied from the distribution
+     or licensing of Covered Software under this License. Notwithstanding Section
+     2.1(b) above, no patent license is granted by a Contributor:
+
+     a. for any code that a Contributor has removed from Covered Software; or
+
+     b. for infringements caused by: (i) Your and any other third party’s
+        modifications of Covered Software, or (ii) the combination of its
+        Contributions with other software (except as part of its Contributor
+        Version); or
+
+     c. under Patent Claims infringed by Covered Software in the absence of its
+        Contributions.
+
+     This License does not grant any rights in the trademarks, service marks, or
+     logos of any Contributor (except as may be necessary to comply with the
+     notice requirements in Section 3.4).
+
+2.4. Subsequent Licenses
+
+     No Contributor makes additional grants as a result of Your choice to
+     distribute the Covered Software under a subsequent version of this License
+     (see Section 10.2) or under the terms of a Secondary License (if permitted
+     under the terms of Section 3.3).
+
+2.5. Representation
+
+     Each Contributor represents that the Contributor believes its Contributions
+     are its original creation(s) or it has sufficient rights to grant the
+     rights to its Contributions conveyed by this License.
+
+2.6. Fair Use
+
+     This License is not intended to limit any rights You have under applicable
+     copyright doctrines of fair use, fair dealing, or other equivalents.
+
+2.7. Conditions
+
+     Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
+     Section 2.1.
+
+
+3. Responsibilities
+
+3.1. Distribution of Source Form
+
+     All distribution of Covered Software in Source Code Form, including any
+     Modifications that You create or to which You contribute, must be under the
+     terms of this License. You must inform recipients that the Source Code Form
+     of the Covered Software is governed by the terms of this License, and how
+     they can obtain a copy of this License. You may not attempt to alter or
+     restrict the recipients’ rights in the Source Code Form.
+
+3.2. Distribution of Executable Form
+
+     If You distribute Covered Software in Executable Form then:
+
+     a. such Covered Software must also be made available in Source Code Form,
+        as described in Section 3.1, and You must inform recipients of the
+        Executable Form how they can obtain a copy of such Source Code Form by
+        reasonable means in a timely manner, at a charge no more than the cost
+        of distribution to the recipient; and
+
+     b. You may distribute such Executable Form under the terms of this License,
+        or sublicense it under different terms, provided that the license for
+        the Executable Form does not attempt to limit or alter the recipients’
+        rights in the Source Code Form under this License.
+
+3.3. Distribution of a Larger Work
+
+     You may create and distribute a Larger Work under terms of Your choice,
+     provided that You also comply with the requirements of this License for the
+     Covered Software. If the Larger Work is a combination of Covered Software
+     with a work governed by one or more Secondary Licenses, and the Covered
+     Software is not Incompatible With Secondary Licenses, this License permits
+     You to additionally distribute such Covered Software under the terms of
+     such Secondary License(s), so that the recipient of the Larger Work may, at
+     their option, further distribute the Covered Software under the terms of
+     either this License or such Secondary License(s).
+
+3.4. Notices
+
+     You may not remove or alter the substance of any license notices (including
+     copyright notices, patent notices, disclaimers of warranty, or limitations
+     of liability) contained within the Source Code Form of the Covered
+     Software, except that You may alter any license notices to the extent
+     required to remedy known factual inaccuracies.
+
+3.5. Application of Additional Terms
+
+     You may choose to offer, and to charge a fee for, warranty, support,
+     indemnity or liability obligations to one or more recipients of Covered
+     Software. However, You may do so only on Your own behalf, and not on behalf
+     of any Contributor. You must make it absolutely clear that any such
+     warranty, support, indemnity, or liability obligation is offered by You
+     alone, and You hereby agree to indemnify every Contributor for any
+     liability incurred by such Contributor as a result of warranty, support,
+     indemnity or liability terms You offer. You may include additional
+     disclaimers of warranty and limitations of liability specific to any
+     jurisdiction.
+
+4. Inability to Comply Due to Statute or Regulation
+
+   If it is impossible for You to comply with any of the terms of this License
+   with respect to some or all of the Covered Software due to statute, judicial
+   order, or regulation then You must: (a) comply with the terms of this License
+   to the maximum extent possible; and (b) describe the limitations and the code
+   they affect. Such description must be placed in a text file included with all
+   distributions of the Covered Software under this License. Except to the
+   extent prohibited by statute or regulation, such description must be
+   sufficiently detailed for a recipient of ordinary skill to be able to
+   understand it.
+
+5. Termination
+
+5.1. The rights granted under this License will terminate automatically if You
+     fail to comply with any of its terms. However, if You become compliant,
+     then the rights granted under this License from a particular Contributor
+     are reinstated (a) provisionally, unless and until such Contributor
+     explicitly and finally terminates Your grants, and (b) on an ongoing basis,
+     if such Contributor fails to notify You of the non-compliance by some
+     reasonable means prior to 60 days after You have come back into compliance.
+     Moreover, Your grants from a particular Contributor are reinstated on an
+     ongoing basis if such Contributor notifies You of the non-compliance by
+     some reasonable means, this is the first time You have received notice of
+     non-compliance with this License from such Contributor, and You become
+     compliant prior to 30 days after Your receipt of the notice.
+
+5.2. If You initiate litigation against any entity by asserting a patent
+     infringement claim (excluding declaratory judgment actions, counter-claims,
+     and cross-claims) alleging that a Contributor Version directly or
+     indirectly infringes any patent, then the rights granted to You by any and
+     all Contributors for the Covered Software under Section 2.1 of this License
+     shall terminate.
+
+5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
+     license agreements (excluding distributors and resellers) which have been
+     validly granted by You or Your distributors under this License prior to
+     termination shall survive termination.
+
+6. Disclaimer of Warranty
+
+   Covered Software is provided under this License on an “as is” basis, without
+   warranty of any kind, either expressed, implied, or statutory, including,
+   without limitation, warranties that the Covered Software is free of defects,
+   merchantable, fit for a particular purpose or non-infringing. The entire
+   risk as to the quality and performance of the Covered Software is with You.
+   Should any Covered Software prove defective in any respect, You (not any
+   Contributor) assume the cost of any necessary servicing, repair, or
+   correction. This disclaimer of warranty constitutes an essential part of this
+   License. No use of any Covered Software is authorized under this License
+   except under this disclaimer.
+
+7. Limitation of Liability
+
+   Under no circumstances and under no legal theory, whether tort (including
+   negligence), contract, or otherwise, shall any Contributor, or anyone who
+   distributes Covered Software as permitted above, be liable to You for any
+   direct, indirect, special, incidental, or consequential damages of any
+   character including, without limitation, damages for lost profits, loss of
+   goodwill, work stoppage, computer failure or malfunction, or any and all
+   other commercial damages or losses, even if such party shall have been
+   informed of the possibility of such damages. This limitation of liability
+   shall not apply to liability for death or personal injury resulting from such
+   party’s negligence to the extent applicable law prohibits such limitation.
+   Some jurisdictions do not allow the exclusion or limitation of incidental or
+   consequential damages, so this exclusion and limitation may not apply to You.
+
+8. Litigation
+
+   Any litigation relating to this License may be brought only in the courts of
+   a jurisdiction where the defendant maintains its principal place of business
+   and such litigation shall be governed by laws of that jurisdiction, without
+   reference to its conflict-of-law provisions. Nothing in this Section shall
+   prevent a party’s ability to bring cross-claims or counter-claims.
+
+9. Miscellaneous
+
+   This License represents the complete agreement concerning the subject matter
+   hereof. If any provision of this License is held to be unenforceable, such
+   provision shall be reformed only to the extent necessary to make it
+   enforceable. Any law or regulation which provides that the language of a
+   contract shall be construed against the drafter shall not be used to construe
+   this License against a Contributor.
+
+
+10. Versions of the License
+
+10.1. New Versions
+
+      Mozilla Foundation is the license steward. Except as provided in Section
+      10.3, no one other than the license steward has the right to modify or
+      publish new versions of this License. Each version will be given a
+      distinguishing version number.
+
+10.2. Effect of New Versions
+
+      You may distribute the Covered Software under the terms of the version of
+      the License under which You originally received the Covered Software, or
+      under the terms of any subsequent version published by the license
+      steward.
+
+10.3. Modified Versions
+
+      If you create software not governed by this License, and you want to
+      create a new license for such software, you may create and use a modified
+      version of this License if you rename the license and remove any
+      references to the name of the license steward (except to note that such
+      modified license differs from this License).
+
+10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
+      If You choose to distribute Source Code Form that is Incompatible With
+      Secondary Licenses under the terms of this version of the License, the
+      notice described in Exhibit B of this License must be attached.
+
+Exhibit A - Source Code Form License Notice
+
+      This Source Code Form is subject to the
+      terms of the Mozilla Public License, v.
+      2.0. If a copy of the MPL was not
+      distributed with this file, You can
+      obtain one at
+      http://mozilla.org/MPL/2.0/.
+
+If it is not possible or desirable to put the notice in a particular file, then
+You may include the notice in a location (such as a LICENSE file in a relevant
+directory) where a recipient would be likely to look for such a notice.
+
+You may add additional accurate notices of copyright ownership.
+
+Exhibit B - “Incompatible With Secondary Licenses” Notice
+
+      This Source Code Form is “Incompatible
+      With Secondary Licenses”, as defined by
+      the Mozilla Public License, v. 2.0.
+
diff --git a/provisioner/vendor/github.com/hashicorp/serf/coordinate/client.go b/provisioner/vendor/github.com/hashicorp/serf/coordinate/client.go
new file mode 100644
index 0000000..613bfff
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/serf/coordinate/client.go
@@ -0,0 +1,180 @@
+package coordinate
+
+import (
+	"fmt"
+	"math"
+	"sort"
+	"sync"
+	"time"
+)
+
+// Client manages the estimated network coordinate for a given node, and adjusts
+// it as the node observes round trip times and estimated coordinates from other
+// nodes. The core algorithm is based on Vivaldi, see the documentation for Config
+// for more details.
+type Client struct {
+	// coord is the current estimate of the client's network coordinate.
+	coord *Coordinate
+
+	// origin is a coordinate sitting at the origin.
+	origin *Coordinate
+
+	// config contains the tuning parameters that govern the performance of
+	// the algorithm.
+	config *Config
+
+	// adjustmentIndex is the current index into the adjustmentSamples slice.
+	adjustmentIndex uint
+
+	// adjustment is used to store samples for the adjustment calculation.
+	adjustmentSamples []float64
+
+	// latencyFilterSamples is used to store the last several RTT samples,
+	// keyed by node name. We will use the config's LatencyFilterSamples
+	// value to determine how many samples we keep, per node.
+	latencyFilterSamples map[string][]float64
+
+	// mutex enables safe concurrent access to the client.
+	mutex sync.RWMutex
+}
+
+// NewClient creates a new Client and verifies the configuration is valid.
+func NewClient(config *Config) (*Client, error) {
+	if config.Dimensionality == 0 {
+		return nil, fmt.Errorf("dimensionality must be >0")
+	}
+
+	return &Client{
+		coord:                NewCoordinate(config),
+		origin:               NewCoordinate(config),
+		config:               config,
+		adjustmentIndex:      0,
+		adjustmentSamples:    make([]float64, config.AdjustmentWindowSize),
+		latencyFilterSamples: make(map[string][]float64),
+	}, nil
+}
+
+// GetCoordinate returns a copy of the coordinate for this client.
+func (c *Client) GetCoordinate() *Coordinate {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.coord.Clone()
+}
+
+// SetCoordinate forces the client's coordinate to a known state.
+func (c *Client) SetCoordinate(coord *Coordinate) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	c.coord = coord.Clone()
+}
+
+// ForgetNode removes any client state for the given node.
+func (c *Client) ForgetNode(node string) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	delete(c.latencyFilterSamples, node)
+}
+
+// latencyFilter applies a simple moving median filter with a new sample for
+// a node. This assumes that the mutex has been locked already.
+func (c *Client) latencyFilter(node string, rttSeconds float64) float64 {
+	samples, ok := c.latencyFilterSamples[node]
+	if !ok {
+		samples = make([]float64, 0, c.config.LatencyFilterSize)
+	}
+
+	// Add the new sample and trim the list, if needed.
+	samples = append(samples, rttSeconds)
+	if len(samples) > int(c.config.LatencyFilterSize) {
+		samples = samples[1:]
+	}
+	c.latencyFilterSamples[node] = samples
+
+	// Sort a copy of the samples and return the median.
+	sorted := make([]float64, len(samples))
+	copy(sorted, samples)
+	sort.Float64s(sorted)
+	return sorted[len(sorted)/2]
+}
+
+// updateVivaldi updates the Vivaldi portion of the client's coordinate. This
+// assumes that the mutex has been locked already.
+func (c *Client) updateVivaldi(other *Coordinate, rttSeconds float64) {
+	const zeroThreshold = 1.0e-6
+
+	dist := c.coord.DistanceTo(other).Seconds()
+	if rttSeconds < zeroThreshold {
+		rttSeconds = zeroThreshold
+	}
+	wrongness := math.Abs(dist-rttSeconds) / rttSeconds
+
+	totalError := c.coord.Error + other.Error
+	if totalError < zeroThreshold {
+		totalError = zeroThreshold
+	}
+	weight := c.coord.Error / totalError
+
+	c.coord.Error = c.config.VivaldiCE*weight*wrongness + c.coord.Error*(1.0-c.config.VivaldiCE*weight)
+	if c.coord.Error > c.config.VivaldiErrorMax {
+		c.coord.Error = c.config.VivaldiErrorMax
+	}
+
+	delta := c.config.VivaldiCC * weight
+	force := delta * (rttSeconds - dist)
+	c.coord = c.coord.ApplyForce(c.config, force, other)
+}
+
+// updateAdjustment updates the adjustment portion of the client's coordinate, if
+// the feature is enabled. This assumes that the mutex has been locked already.
+func (c *Client) updateAdjustment(other *Coordinate, rttSeconds float64) {
+	if c.config.AdjustmentWindowSize == 0 {
+		return
+	}
+
+	// Note that the existing adjustment factors don't figure in to this
+	// calculation so we use the raw distance here.
+	dist := c.coord.rawDistanceTo(other)
+	c.adjustmentSamples[c.adjustmentIndex] = rttSeconds - dist
+	c.adjustmentIndex = (c.adjustmentIndex + 1) % c.config.AdjustmentWindowSize
+
+	sum := 0.0
+	for _, sample := range c.adjustmentSamples {
+		sum += sample
+	}
+	c.coord.Adjustment = sum / (2.0 * float64(c.config.AdjustmentWindowSize))
+}
+
+// updateGravity applies a small amount of gravity to pull coordinates towards
+// the center of the coordinate system to combat drift. This assumes that the
+// mutex is locked already.
+func (c *Client) updateGravity() {
+	dist := c.origin.DistanceTo(c.coord).Seconds()
+	force := -1.0 * math.Pow(dist/c.config.GravityRho, 2.0)
+	c.coord = c.coord.ApplyForce(c.config, force, c.origin)
+}
+
+// Update takes other, a coordinate for another node, and rtt, a round trip
+// time observation for a ping to that node, and updates the estimated position of
+// the client's coordinate. Returns the updated coordinate.
+func (c *Client) Update(node string, other *Coordinate, rtt time.Duration) *Coordinate {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	rttSeconds := c.latencyFilter(node, rtt.Seconds())
+	c.updateVivaldi(other, rttSeconds)
+	c.updateAdjustment(other, rttSeconds)
+	c.updateGravity()
+	return c.coord.Clone()
+}
+
+// DistanceTo returns the estimated RTT from the client's coordinate to other, the
+// coordinate for another node.
+func (c *Client) DistanceTo(other *Coordinate) time.Duration {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	return c.coord.DistanceTo(other)
+}
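+
+// exampleUsage is a minimal sketch of the intended call pattern; the node
+// name and RTT below are illustrative values, not part of the library.
+func exampleUsage() {
+	client, err := NewClient(DefaultConfig())
+	if err != nil {
+		panic(err)
+	}
+	other := NewCoordinate(DefaultConfig())
+	client.Update("node_a", other, 100*time.Millisecond)
+	fmt.Println(client.DistanceTo(other))
+}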
diff --git a/provisioner/vendor/github.com/hashicorp/serf/coordinate/config.go b/provisioner/vendor/github.com/hashicorp/serf/coordinate/config.go
new file mode 100644
index 0000000..b85a8ab
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/serf/coordinate/config.go
@@ -0,0 +1,70 @@
+package coordinate
+
+// Config is used to set the parameters of the Vivaldi-based coordinate mapping
+// algorithm.
+//
+// The following references are called out at various points in the documentation
+// here:
+//
+// [1] Dabek, Frank, et al. "Vivaldi: A decentralized network coordinate system."
+//     ACM SIGCOMM Computer Communication Review. Vol. 34. No. 4. ACM, 2004.
+// [2] Ledlie, Jonathan, Paul Gardner, and Margo I. Seltzer. "Network Coordinates
+//     in the Wild." NSDI. Vol. 7. 2007.
+// [3] Lee, Sanghwan, et al. "On suitability of Euclidean embedding for
+//     host-based network coordinate systems." Networking, IEEE/ACM Transactions
+//     on 18.1 (2010): 27-40.
+type Config struct {
+	// The dimensionality of the coordinate system. As discussed in [2], more
+	// dimensions improve the accuracy of the estimates up to a point. Per [2]
+	// we chose 8 dimensions plus a non-Euclidean height.
+	Dimensionality uint
+
+	// VivaldiErrorMax is the default error value when a node hasn't yet made
+	// any observations. It also serves as an upper limit on the error value in
+	// case observations cause the error value to increase without bound.
+	VivaldiErrorMax float64
+
+	// VivaldiCE is a tuning factor that controls the maximum impact an
+	// observation can have on a node's confidence. See [1] for more details.
+	VivaldiCE float64
+
+	// VivaldiCC is a tuning factor that controls the maximum impact an
+	// observation can have on a node's coordinate. See [1] for more details.
+	VivaldiCC float64
+
+	// AdjustmentWindowSize is a tuning factor that determines how many samples
+	// we retain to calculate the adjustment factor as discussed in [3]. Setting
+	// this to zero disables this feature.
+	AdjustmentWindowSize uint
+
+	// HeightMin is the minimum value of the height parameter. Since this
+	// always must be positive, it will introduce a small amount of error, so
+	// the chosen value should be relatively small compared to "normal"
+	// coordinates.
+	HeightMin float64
+
+	// LatencyFilterSize is the maximum number of samples that are retained
+	// per node, in order to compute a median. The intent is to ride out blips
+	// but still keep the delay low, since our time to probe any given node is
+	// pretty infrequent. See [2] for more details.
+	LatencyFilterSize uint
+
+	// GravityRho is a tuning factor that sets how strong the pull of gravity
+	// is when re-centering coordinates. See [2] for more details.
+	GravityRho float64
+}
+
+// DefaultConfig returns a Config that has some default values suitable for
+// basic testing of the algorithm, but not tuned to any particular type of cluster.
+func DefaultConfig() *Config {
+	return &Config{
+		Dimensionality:       8,
+		VivaldiErrorMax:      1.5,
+		VivaldiCE:            0.25,
+		VivaldiCC:            0.25,
+		AdjustmentWindowSize: 20,
+		HeightMin:            10.0e-6,
+		LatencyFilterSize:    3,
+		GravityRho:           150.0,
+	}
+}
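+
+// A sketch of tuning a single parameter while keeping the remaining defaults
+// (the dimensionality chosen here is illustrative):
+//
+//	config := DefaultConfig()
+//	config.Dimensionality = 4
+//	client, err := NewClient(config)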
diff --git a/provisioner/vendor/github.com/hashicorp/serf/coordinate/coordinate.go b/provisioner/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
new file mode 100644
index 0000000..c9194e0
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/serf/coordinate/coordinate.go
@@ -0,0 +1,183 @@
+package coordinate
+
+import (
+	"math"
+	"math/rand"
+	"time"
+)
+
+// Coordinate is a specialized structure for holding network coordinates for the
+// Vivaldi-based coordinate mapping algorithm. All of the fields should be public
+// to enable this to be serialized. All values in here are in units of seconds.
+type Coordinate struct {
+	// Vec is the Euclidean portion of the coordinate. This is used along
+	// with the other fields to provide an overall distance estimate. The
+	// units here are seconds.
+	Vec []float64
+
+	// Error reflects the confidence in the given coordinate and is updated
+	// dynamically by the Vivaldi Client. This is dimensionless.
+	Error float64
+
+	// Adjustment is a distance offset computed based on a calculation over
+	// observations from all other nodes over a fixed window and is updated
+	// dynamically by the Vivaldi Client. The units here are seconds.
+	Adjustment float64
+
+	// Height is a distance offset that accounts for non-Euclidean effects
+	// which model the access links from nodes to the core Internet. The access
+	// links are usually set by bandwidth and congestion, and the core links
+	// usually follow distance based on geography.
+	Height float64
+}
+
+const (
+	// secondsToNanoseconds is used to convert float seconds to nanoseconds.
+	secondsToNanoseconds = 1.0e9
+
+	// zeroThreshold is used to decide if two coordinates are on top of each
+	// other.
+	zeroThreshold = 1.0e-6
+)
+
+// DimensionalityConflictError is the panic value used when you try to perform
+// operations with incompatible dimensions.
+type DimensionalityConflictError struct{}
+
+// Error implements the error interface.
+func (e DimensionalityConflictError) Error() string {
+	return "coordinate dimensionality does not match"
+}
+
+// NewCoordinate creates a new coordinate at the origin, using the given config
+// to supply key initial values.
+func NewCoordinate(config *Config) *Coordinate {
+	return &Coordinate{
+		Vec:        make([]float64, config.Dimensionality),
+		Error:      config.VivaldiErrorMax,
+		Adjustment: 0.0,
+		Height:     config.HeightMin,
+	}
+}
+
+// Clone creates an independent copy of this coordinate.
+func (c *Coordinate) Clone() *Coordinate {
+	vec := make([]float64, len(c.Vec))
+	copy(vec, c.Vec)
+	return &Coordinate{
+		Vec:        vec,
+		Error:      c.Error,
+		Adjustment: c.Adjustment,
+		Height:     c.Height,
+	}
+}
+
+// IsCompatibleWith checks to see if the two coordinates are compatible
+// dimensionally. If this returns true then you are guaranteed to not get
+// any runtime errors operating on them.
+func (c *Coordinate) IsCompatibleWith(other *Coordinate) bool {
+	return len(c.Vec) == len(other.Vec)
+}
+
+// ApplyForce returns the result of applying the force from the direction of the
+// other coordinate.
+func (c *Coordinate) ApplyForce(config *Config, force float64, other *Coordinate) *Coordinate {
+	if !c.IsCompatibleWith(other) {
+		panic(DimensionalityConflictError{})
+	}
+
+	ret := c.Clone()
+	unit, mag := unitVectorAt(c.Vec, other.Vec)
+	ret.Vec = add(ret.Vec, mul(unit, force))
+	if mag > zeroThreshold {
+		ret.Height = (ret.Height+other.Height)*force/mag + ret.Height
+		ret.Height = math.Max(ret.Height, config.HeightMin)
+	}
+	return ret
+}
+
+// DistanceTo returns the distance between this coordinate and the other
+// coordinate, including adjustments.
+func (c *Coordinate) DistanceTo(other *Coordinate) time.Duration {
+	if !c.IsCompatibleWith(other) {
+		panic(DimensionalityConflictError{})
+	}
+
+	dist := c.rawDistanceTo(other)
+	adjustedDist := dist + c.Adjustment + other.Adjustment
+	if adjustedDist > 0.0 {
+		dist = adjustedDist
+	}
+	return time.Duration(dist * secondsToNanoseconds)
+}
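+
+// Concretely: with d = |c.Vec - other.Vec| + c.Height + other.Height, the
+// estimate is d + c.Adjustment + other.Adjustment when that sum is positive,
+// and plain d otherwise.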
+
+// rawDistanceTo returns the Vivaldi distance between this coordinate and the
+// other coordinate in seconds, not including adjustments. This assumes the
+// dimensions have already been checked to be compatible.
+func (c *Coordinate) rawDistanceTo(other *Coordinate) float64 {
+	return magnitude(diff(c.Vec, other.Vec)) + c.Height + other.Height
+}
+
+// add returns the sum of vec1 and vec2. This assumes the dimensions have
+// already been checked to be compatible.
+func add(vec1 []float64, vec2 []float64) []float64 {
+	ret := make([]float64, len(vec1))
+	for i := range ret {
+		ret[i] = vec1[i] + vec2[i]
+	}
+	return ret
+}
+
+// diff returns the difference between the vec1 and vec2. This assumes the
+// dimensions have already been checked to be compatible.
+func diff(vec1 []float64, vec2 []float64) []float64 {
+	ret := make([]float64, len(vec1))
+	for i := range ret {
+		ret[i] = vec1[i] - vec2[i]
+	}
+	return ret
+}
+
+// mul returns vec multiplied by a scalar factor.
+func mul(vec []float64, factor float64) []float64 {
+	ret := make([]float64, len(vec))
+	for i := range vec {
+		ret[i] = vec[i] * factor
+	}
+	return ret
+}
+
+// magnitude computes the magnitude of the vec.
+func magnitude(vec []float64) float64 {
+	sum := 0.0
+	for i := range vec {
+		sum += vec[i] * vec[i]
+	}
+	return math.Sqrt(sum)
+}
+
+// unitVectorAt returns a unit vector pointing at vec1 from vec2. If the two
+// positions are the same then a random unit vector is returned. We also return
+// the distance between the points for use in the later height calculation.
+func unitVectorAt(vec1 []float64, vec2 []float64) ([]float64, float64) {
+	ret := diff(vec1, vec2)
+
+	// If the coordinates aren't on top of each other we can normalize.
+	if mag := magnitude(ret); mag > zeroThreshold {
+		return mul(ret, 1.0/mag), mag
+	}
+
+	// Otherwise, just return a random unit vector.
+	for i := range ret {
+		ret[i] = rand.Float64() - 0.5
+	}
+	if mag := magnitude(ret); mag > zeroThreshold {
+		return mul(ret, 1.0/mag), 0.0
+	}
+
+	// And finally just give up and make a unit vector along the first
+	// dimension. This should be exceedingly rare.
+	ret = make([]float64, len(ret))
+	ret[0] = 1.0
+	return ret, 0.0
+}
diff --git a/provisioner/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/provisioner/vendor/github.com/hashicorp/serf/coordinate/phantom.go
new file mode 100644
index 0000000..6fb033c
--- /dev/null
+++ b/provisioner/vendor/github.com/hashicorp/serf/coordinate/phantom.go
@@ -0,0 +1,187 @@
+package coordinate
+
+import (
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
+// GenerateClients returns a slice with nodes number of clients, all with the
+// given config.
+func GenerateClients(nodes int, config *Config) ([]*Client, error) {
+	clients := make([]*Client, nodes)
+	for i := range clients {
+		client, err := NewClient(config)
+		if err != nil {
+			return nil, err
+		}
+
+		clients[i] = client
+	}
+	return clients, nil
+}
+
+// GenerateLine returns a truth matrix as if all the nodes are in a straight
+// line with the given spacing between them.
+func GenerateLine(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := time.Duration(j-i) * spacing
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateGrid returns a truth matrix as if all the nodes are in a two dimensional
+// grid with the given spacing between them.
+func GenerateGrid(nodes int, spacing time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	n := int(math.Sqrt(float64(nodes)))
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			x1, y1 := float64(i%n), float64(i/n)
+			x2, y2 := float64(j%n), float64(j/n)
+			dx, dy := x2-x1, y2-y1
+			dist := math.Sqrt(dx*dx + dy*dy)
+			rtt := time.Duration(dist * float64(spacing))
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateSplit returns a truth matrix as if half the nodes are close together in
+// one location and half the nodes are close together in another. The lan factor
+// is used to separate the nodes locally and the wan factor represents the split
+// between the two sides.
+func GenerateSplit(nodes int, lan time.Duration, wan time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	split := nodes / 2
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rtt := lan
+			if (i <= split && j > split) || (i > split && j <= split) {
+				rtt += wan
+			}
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateCircle returns a truth matrix for a set of nodes, evenly distributed
+// around a circle with the given radius. The first node is at the "center" of the
+// circle because it's equidistant from all the other nodes, but we place it at
+// double the radius, so it should show up above all the other nodes in height.
+func GenerateCircle(nodes int, radius time.Duration) [][]time.Duration {
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			var rtt time.Duration
+			if i == 0 {
+				rtt = 2 * radius
+			} else {
+				t1 := 2.0 * math.Pi * float64(i) / float64(nodes)
+				x1, y1 := math.Cos(t1), math.Sin(t1)
+				t2 := 2.0 * math.Pi * float64(j) / float64(nodes)
+				x2, y2 := math.Cos(t2), math.Sin(t2)
+				dx, dy := x2-x1, y2-y1
+				dist := math.Sqrt(dx*dx + dy*dy)
+				rtt = time.Duration(dist * float64(radius))
+			}
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// GenerateRandom returns a truth matrix for a set of nodes with normally
+// distributed delays, with the given mean and deviation. The RNG is re-seeded
+// so you always get the same matrix for a given size.
+func GenerateRandom(nodes int, mean time.Duration, deviation time.Duration) [][]time.Duration {
+	rand.Seed(1)
+
+	truth := make([][]time.Duration, nodes)
+	for i := range truth {
+		truth[i] = make([]time.Duration, nodes)
+	}
+
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			rttSeconds := rand.NormFloat64()*deviation.Seconds() + mean.Seconds()
+			rtt := time.Duration(rttSeconds * secondsToNanoseconds)
+			truth[i][j], truth[j][i] = rtt, rtt
+		}
+	}
+	return truth
+}
+
+// Simulate runs the given number of cycles using the given list of clients and
+// truth matrix. On each cycle, each client will pick a random node and observe
+// the truth RTT, updating its coordinate estimate. The RNG is re-seeded for
+// each simulation run to get deterministic results (for this algorithm and the
+// underlying algorithm which will use random numbers for position vectors when
+// starting out with everything at the origin).
+func Simulate(clients []*Client, truth [][]time.Duration, cycles int) {
+	rand.Seed(1)
+
+	nodes := len(clients)
+	for cycle := 0; cycle < cycles; cycle++ {
+		for i := range clients {
+			if j := rand.Intn(nodes); j != i {
+				c := clients[j].GetCoordinate()
+				rtt := truth[i][j]
+				node := fmt.Sprintf("node_%d", j)
+				clients[i].Update(node, c, rtt)
+			}
+		}
+	}
+}
+
+// Stats is returned from the Evaluate function with a summary of the algorithm
+// performance.
+type Stats struct {
+	ErrorMax float64
+	ErrorAvg float64
+}
+
+// Evaluate uses the coordinates of the given clients to calculate estimated
+// distances and compares them with the given truth matrix, returning summary
+// stats.
+func Evaluate(clients []*Client, truth [][]time.Duration) (stats Stats) {
+	nodes := len(clients)
+	count := 0
+	for i := 0; i < nodes; i++ {
+		for j := i + 1; j < nodes; j++ {
+			est := clients[i].DistanceTo(clients[j].GetCoordinate()).Seconds()
+			actual := truth[i][j].Seconds()
+			relErr := math.Abs(est-actual) / actual
+			stats.ErrorMax = math.Max(stats.ErrorMax, relErr)
+			stats.ErrorAvg += relErr
+			count++
+		}
+	}
+
+	stats.ErrorAvg /= float64(count)
+	fmt.Printf("Error avg=%9.6f max=%9.6f\n", stats.ErrorAvg, stats.ErrorMax)
+	return
+}
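+
+// exampleSimulation is a minimal sketch tying the generators above together;
+// the node count, spacing, and cycle count are illustrative values.
+func exampleSimulation() {
+	clients, err := GenerateClients(10, DefaultConfig())
+	if err != nil {
+		panic(err)
+	}
+	truth := GenerateLine(10, 10*time.Millisecond)
+	Simulate(clients, truth, 100)
+	Evaluate(clients, truth)
+}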
diff --git a/provisioner/vendor/github.com/kelseyhightower/envconfig/LICENSE b/provisioner/vendor/github.com/kelseyhightower/envconfig/LICENSE
new file mode 100644
index 0000000..4bfa7a8
--- /dev/null
+++ b/provisioner/vendor/github.com/kelseyhightower/envconfig/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Kelsey Hightower
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/provisioner/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS b/provisioner/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
new file mode 100644
index 0000000..6527a9f
--- /dev/null
+++ b/provisioner/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
@@ -0,0 +1,2 @@
+Kelsey Hightower kelsey.hightower@gmail.com github.com/kelseyhightower
+Travis Parker    travis.parker@gmail.com    github.com/teepark
diff --git a/provisioner/vendor/github.com/kelseyhightower/envconfig/README.md b/provisioner/vendor/github.com/kelseyhightower/envconfig/README.md
new file mode 100644
index 0000000..09de74b
--- /dev/null
+++ b/provisioner/vendor/github.com/kelseyhightower/envconfig/README.md
@@ -0,0 +1,175 @@
+# envconfig
+
+[![Build Status](https://travis-ci.org/kelseyhightower/envconfig.png)](https://travis-ci.org/kelseyhightower/envconfig)
+
+```Go
+import "github.com/kelseyhightower/envconfig"
+```
+
+## Documentation
+
+See [godoc](http://godoc.org/github.com/kelseyhightower/envconfig)
+
+## Usage
+
+Set some environment variables:
+
+```Bash
+export MYAPP_DEBUG=false
+export MYAPP_PORT=8080
+export MYAPP_USER=Kelsey
+export MYAPP_RATE="0.5"
+export MYAPP_TIMEOUT="3m"
+export MYAPP_USERS="rob,ken,robert"
+```
+
+Write some code:
+
+```Go
+package main
+
+import (
+    "fmt"
+    "log"
+    "time"
+
+    "github.com/kelseyhightower/envconfig"
+)
+
+type Specification struct {
+    Debug   bool
+    Port    int
+    User    string
+    Users   []string
+    Rate    float32
+    Timeout time.Duration
+}
+
+func main() {
+    var s Specification
+    err := envconfig.Process("myapp", &s)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+    format := "Debug: %v\nPort: %d\nUser: %s\nRate: %f\nTimeout: %s\n"
+    _, err = fmt.Printf(format, s.Debug, s.Port, s.User, s.Rate, s.Timeout)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+
+    fmt.Println("Users:")
+    for _, u := range s.Users {
+        fmt.Printf("  %s\n", u)
+    }
+}
+```
+
+Results:
+
+```Bash
+Debug: false
+Port: 8080
+User: Kelsey
+Rate: 0.500000
+Timeout: 3m0s
+Users:
+  rob
+  ken
+  robert
+```
+
+## Struct Tag Support
+
+Envconfig supports the use of struct tags to specify alternate, default, and required
+environment variables.
+
+For example, consider the following struct:
+
+```Go
+type Specification struct {
+    ManualOverride1 string `envconfig:"manual_override_1"`
+    DefaultVar      string `default:"foobar"`
+    RequiredVar     string `required:"true"`
+    IgnoredVar      string `ignored:"true"`
+    AutoSplitVar    string `split_words:"true"`
+}
+```
+
+Envconfig has automatic support for CamelCased struct elements when the
+`split_words:"true"` tag is supplied. Without this tag, `AutoSplitVar` above
+would look for an environment variable called `MYAPP_AUTOSPLITVAR`. With the
+setting applied it will look for `MYAPP_AUTO_SPLIT_VAR`. Note that numbers
+will get globbed into the previous word. If the setting does not do the
+right thing, you may use a manual override.
+
+Envconfig will process the value for `ManualOverride1` by populating it with the
+value for `MYAPP_MANUAL_OVERRIDE_1`. Without this struct tag, it would have
+instead looked up `MYAPP_MANUALOVERRIDE1`. With the `split_words:"true"` tag
+it would have looked up `MYAPP_MANUAL_OVERRIDE1`.
+
+```Bash
+export MYAPP_MANUAL_OVERRIDE_1="this will be the value"
+
+# export MYAPP_MANUALOVERRIDE1="and this will not"
+```
+
+If envconfig can't find an environment variable value for `MYAPP_DEFAULTVAR`,
+it will populate it with "foobar" as a default value.
+
+If envconfig can't find an environment variable value for `MYAPP_REQUIREDVAR`,
+it will return an error when asked to process the struct.
+
+If envconfig can't find an environment variable in the form `PREFIX_MYVAR`, and there
+is a struct tag defined, it will try to populate your variable with an environment
+variable that directly matches the envconfig tag in your struct definition:
+
+```shell
+export SERVICE_HOST=127.0.0.1
+export MYAPP_DEBUG=true
+```
+```Go
+type Specification struct {
+    ServiceHost string `envconfig:"SERVICE_HOST"`
+    Debug       bool
+}
+```
+
+Envconfig won't process a field with the "ignored" tag set to "true", even if a corresponding
+environment variable is set.
+
+## Supported Struct Field Types
+
+envconfig supports these struct field types:
+
+  * string
+  * int8, int16, int32, int64
+  * bool
+  * float32, float64
+  * [encoding.TextUnmarshaler](https://golang.org/pkg/encoding/#TextUnmarshaler)
+
+Embedded structs using these fields are also supported.
+
+## Custom Decoders
+
+Any field whose type (or pointer-to-type) implements `envconfig.Decoder` can
+control its own deserialization:
+
+```Bash
+export DNS_SERVER=8.8.8.8
+```
+
+```Go
+type IPDecoder net.IP
+
+func (ipd *IPDecoder) Decode(value string) error {
+    *ipd = IPDecoder(net.ParseIP(value))
+    return nil
+}
+
+type DNSConfig struct {
+    Address IPDecoder `envconfig:"DNS_SERVER"`
+}
+```
+
+Also, envconfig will use a `Set(string) error` method like from the
+[flag.Value](https://godoc.org/flag#Value) interface if implemented.
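+
+A minimal sketch of that route, assuming the `strings` package is imported
+(the `CommaList` type and `TAGS` variable here are illustrative):
+
+```Go
+type CommaList []string
+
+func (c *CommaList) Set(value string) error {
+    *c = CommaList(strings.Split(value, ","))
+    return nil
+}
+
+type Config struct {
+    Tags CommaList `envconfig:"TAGS"`
+}
+```
+
+When processing the struct, envconfig calls `Set` with the raw environment
+string, so the type controls its own parsing.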
diff --git a/provisioner/vendor/github.com/kelseyhightower/envconfig/doc.go b/provisioner/vendor/github.com/kelseyhightower/envconfig/doc.go
new file mode 100644
index 0000000..f28561c
--- /dev/null
+++ b/provisioner/vendor/github.com/kelseyhightower/envconfig/doc.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+// Package envconfig implements decoding of environment variables based on a user
+// defined specification. A typical use is using environment variables for
+// configuration settings.
+package envconfig
diff --git a/provisioner/vendor/github.com/kelseyhightower/envconfig/env_os.go b/provisioner/vendor/github.com/kelseyhightower/envconfig/env_os.go
new file mode 100644
index 0000000..a6a014a
--- /dev/null
+++ b/provisioner/vendor/github.com/kelseyhightower/envconfig/env_os.go
@@ -0,0 +1,7 @@
+// +build appengine
+
+package envconfig
+
+import "os"
+
+var lookupEnv = os.LookupEnv
diff --git a/provisioner/vendor/github.com/kelseyhightower/envconfig/env_syscall.go b/provisioner/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
new file mode 100644
index 0000000..9d98085
--- /dev/null
+++ b/provisioner/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
@@ -0,0 +1,7 @@
+// +build !appengine
+
+package envconfig
+
+import "syscall"
+
+var lookupEnv = syscall.Getenv
diff --git a/provisioner/vendor/github.com/kelseyhightower/envconfig/envconfig.go b/provisioner/vendor/github.com/kelseyhightower/envconfig/envconfig.go
new file mode 100644
index 0000000..3ad5e7d
--- /dev/null
+++ b/provisioner/vendor/github.com/kelseyhightower/envconfig/envconfig.go
@@ -0,0 +1,298 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// ErrInvalidSpecification indicates that a specification is of the wrong type.
+var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
+
+// A ParseError occurs when an environment variable cannot be converted to
+// the type required by a struct field during assignment.
+type ParseError struct {
+	KeyName   string
+	FieldName string
+	TypeName  string
+	Value     string
+	Err       error
+}
+
+// Decoder has the same semantics as Setter, but takes higher precedence.
+// It is provided for historical compatibility.
+type Decoder interface {
+	Decode(value string) error
+}
+
+// Setter is implemented by types that can self-deserialize values.
+// Any type that implements flag.Value also implements Setter.
+type Setter interface {
+	Set(value string) error
+}
+
+func (e *ParseError) Error() string {
+	return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s. details: %[5]s", e.KeyName, e.FieldName, e.Value, e.TypeName, e.Err)
+}
+
+// varInfo maintains information about the configuration variable
+type varInfo struct {
+	Name  string
+	Alt   string
+	Key   string
+	Field reflect.Value
+	Tags  reflect.StructTag
+}
+
+// gatherInfo gathers information about the specified struct
+func gatherInfo(prefix string, spec interface{}) ([]varInfo, error) {
+	expr := regexp.MustCompile("([^A-Z]+|[A-Z][^A-Z]+|[A-Z]+)")
+	s := reflect.ValueOf(spec)
+
+	if s.Kind() != reflect.Ptr {
+		return nil, ErrInvalidSpecification
+	}
+	s = s.Elem()
+	if s.Kind() != reflect.Struct {
+		return nil, ErrInvalidSpecification
+	}
+	typeOfSpec := s.Type()
+
+	// over-allocate an info array; we will extend it if needed later
+	infos := make([]varInfo, 0, s.NumField())
+	for i := 0; i < s.NumField(); i++ {
+		f := s.Field(i)
+		ftype := typeOfSpec.Field(i)
+		if !f.CanSet() || ftype.Tag.Get("ignored") == "true" {
+			continue
+		}
+
+		for f.Kind() == reflect.Ptr {
+			if f.IsNil() {
+				if f.Type().Elem().Kind() != reflect.Struct {
+					// nil pointer to a non-struct: leave it alone
+					break
+				}
+				// nil pointer to struct: create a zero instance
+				f.Set(reflect.New(f.Type().Elem()))
+			}
+			f = f.Elem()
+		}
+
+		// Capture information about the config variable
+		info := varInfo{
+			Name:  ftype.Name,
+			Field: f,
+			Tags:  ftype.Tag,
+			Alt:   strings.ToUpper(ftype.Tag.Get("envconfig")),
+		}
+
+		// Default to the field name as the env var name (will be upcased)
+		info.Key = info.Name
+
+		// Best effort to un-pick camel casing as separate words
+		if ftype.Tag.Get("split_words") == "true" {
+			words := expr.FindAllStringSubmatch(ftype.Name, -1)
+			if len(words) > 0 {
+				var name []string
+				for _, words := range words {
+					name = append(name, words[0])
+				}
+
+				info.Key = strings.Join(name, "_")
+			}
+		}
+		if info.Alt != "" {
+			info.Key = info.Alt
+		}
+		if prefix != "" {
+			info.Key = fmt.Sprintf("%s_%s", prefix, info.Key)
+		}
+		info.Key = strings.ToUpper(info.Key)
+		infos = append(infos, info)
+
+		if f.Kind() == reflect.Struct {
+			// honor Decode if present
+			if decoderFrom(f) == nil && setterFrom(f) == nil && textUnmarshaler(f) == nil {
+				innerPrefix := prefix
+				if !ftype.Anonymous {
+					innerPrefix = info.Key
+				}
+
+				embeddedPtr := f.Addr().Interface()
+				embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr)
+				if err != nil {
+					return nil, err
+				}
+				infos = append(infos[:len(infos)-1], embeddedInfos...)
+
+				continue
+			}
+		}
+	}
+	return infos, nil
+}
+
+// Process populates the specified struct based on environment variables
+func Process(prefix string, spec interface{}) error {
+	infos, err := gatherInfo(prefix, spec)
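+	// If gatherInfo failed, infos is nil, the loop below is skipped, and err is returned at the end.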
+
+	for _, info := range infos {
+
+		// `os.Getenv` cannot differentiate between an explicitly set empty value
+		// and an unset value, so the two-value `lookupEnv` is used instead. Build
+		// tags select `os.LookupEnv` on App Engine (where syscall is restricted)
+		// and `syscall.Getenv` everywhere else; both report whether the key was set.
+		value, ok := lookupEnv(info.Key)
+		if !ok && info.Alt != "" {
+			value, ok = lookupEnv(info.Alt)
+		}
+
+		def := info.Tags.Get("default")
+		if def != "" && !ok {
+			value = def
+		}
+
+		req := info.Tags.Get("required")
+		if !ok && def == "" {
+			if req == "true" {
+				return fmt.Errorf("required key %s missing value", info.Key)
+			}
+			continue
+		}
+
+		err := processField(value, info.Field)
+		if err != nil {
+			return &ParseError{
+				KeyName:   info.Key,
+				FieldName: info.Name,
+				TypeName:  info.Field.Type().String(),
+				Value:     value,
+				Err:       err,
+			}
+		}
+	}
+
+	return err
+}
+
+// MustProcess is the same as Process but panics if an error occurs
+func MustProcess(prefix string, spec interface{}) {
+	if err := Process(prefix, spec); err != nil {
+		panic(err)
+	}
+}
+
+func processField(value string, field reflect.Value) error {
+	typ := field.Type()
+
+	decoder := decoderFrom(field)
+	if decoder != nil {
+		return decoder.Decode(value)
+	}
+	// look for Set method if Decode not defined
+	setter := setterFrom(field)
+	if setter != nil {
+		return setter.Set(value)
+	}
+
+	if t := textUnmarshaler(field); t != nil {
+		return t.UnmarshalText([]byte(value))
+	}
+
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		if field.IsNil() {
+			field.Set(reflect.New(typ))
+		}
+		field = field.Elem()
+	}
+
+	switch typ.Kind() {
+	case reflect.String:
+		field.SetString(value)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		var (
+			val int64
+			err error
+		)
+		if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" {
+			var d time.Duration
+			d, err = time.ParseDuration(value)
+			val = int64(d)
+		} else {
+			val, err = strconv.ParseInt(value, 0, typ.Bits())
+		}
+		if err != nil {
+			return err
+		}
+
+		field.SetInt(val)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		val, err := strconv.ParseUint(value, 0, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetUint(val)
+	case reflect.Bool:
+		val, err := strconv.ParseBool(value)
+		if err != nil {
+			return err
+		}
+		field.SetBool(val)
+	case reflect.Float32, reflect.Float64:
+		val, err := strconv.ParseFloat(value, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetFloat(val)
+	case reflect.Slice:
+		vals := strings.Split(value, ",")
+		sl := reflect.MakeSlice(typ, len(vals), len(vals))
+		for i, val := range vals {
+			err := processField(val, sl.Index(i))
+			if err != nil {
+				return err
+			}
+		}
+		field.Set(sl)
+	}
+
+	return nil
+}
+
+func interfaceFrom(field reflect.Value, fn func(interface{}, *bool)) {
+	// it may be impossible for a struct field to fail this check
+	if !field.CanInterface() {
+		return
+	}
+	var ok bool
+	fn(field.Interface(), &ok)
+	if !ok && field.CanAddr() {
+		fn(field.Addr().Interface(), &ok)
+	}
+}
+
+func decoderFrom(field reflect.Value) (d Decoder) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { d, *ok = v.(Decoder) })
+	return d
+}
+
+func setterFrom(field reflect.Value) (s Setter) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { s, *ok = v.(Setter) })
+	return s
+}
+
+func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) })
+	return t
+}
diff --git a/provisioner/vendor/github.com/kelseyhightower/envconfig/usage.go b/provisioner/vendor/github.com/kelseyhightower/envconfig/usage.go
new file mode 100644
index 0000000..4870237
--- /dev/null
+++ b/provisioner/vendor/github.com/kelseyhightower/envconfig/usage.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2016 Kelsey Hightower and others. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"text/template"
+)
+
+const (
+	// DefaultListFormat constant to use to display usage in a list format
+	DefaultListFormat = `This application is configured via the environment. The following environment
+variables can be used:
+{{range .}}
+{{usage_key .}}
+  [description] {{usage_description .}}
+  [type]        {{usage_type .}}
+  [default]     {{usage_default .}}
+  [required]    {{usage_required .}}{{end}}
+`
+	// DefaultTableFormat constant to use to display usage in a tabular format
+	DefaultTableFormat = `This application is configured via the environment. The following environment
+variables can be used:
+
+KEY	TYPE	DEFAULT	REQUIRED	DESCRIPTION
+{{range .}}{{usage_key .}}	{{usage_type .}}	{{usage_default .}}	{{usage_required .}}	{{usage_description .}}
+{{end}}`
+)
+
+var (
+	decoderType     = reflect.TypeOf((*Decoder)(nil)).Elem()
+	setterType      = reflect.TypeOf((*Setter)(nil)).Elem()
+	unmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+func implementsInterface(t reflect.Type) bool {
+	return t.Implements(decoderType) ||
+		reflect.PtrTo(t).Implements(decoderType) ||
+		t.Implements(setterType) ||
+		reflect.PtrTo(t).Implements(setterType) ||
+		t.Implements(unmarshalerType) ||
+		reflect.PtrTo(t).Implements(unmarshalerType)
+}
+
+// toTypeDescription converts Go types into a human readable description
+func toTypeDescription(t reflect.Type) string {
+	switch t.Kind() {
+	case reflect.Array, reflect.Slice:
+		return fmt.Sprintf("Comma-separated list of %s", toTypeDescription(t.Elem()))
+	case reflect.Ptr:
+		return toTypeDescription(t.Elem())
+	case reflect.Struct:
+		if implementsInterface(t) && t.Name() != "" {
+			return t.Name()
+		}
+		return ""
+	case reflect.String:
+		name := t.Name()
+		if name != "" && name != "string" {
+			return name
+		}
+		return "String"
+	case reflect.Bool:
+		name := t.Name()
+		if name != "" && name != "bool" {
+			return name
+		}
+		return "True or False"
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "int") {
+			return name
+		}
+		return "Integer"
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "uint") {
+			return name
+		}
+		return "Unsigned Integer"
+	case reflect.Float32, reflect.Float64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "float") {
+			return name
+		}
+		return "Float"
+	}
+	return fmt.Sprintf("%+v", t)
+}
+
+// Usage writes usage information to stdout using the default header and table format
+func Usage(prefix string, spec interface{}) error {
+	// The default is to output the usage information as a table
+	// Create tabwriter instance to support table output
+	tabs := tabwriter.NewWriter(os.Stdout, 1, 0, 4, ' ', 0)
+
+	err := Usagef(prefix, spec, tabs, DefaultTableFormat)
+	tabs.Flush()
+	return err
+}
+
+// Usagef writes usage information to the specified io.Writer using the specified template specification
+func Usagef(prefix string, spec interface{}, out io.Writer, format string) error {
+
+	// Specify the default usage template functions
+	functions := template.FuncMap{
+		"usage_key":         func(v varInfo) string { return v.Key },
+		"usage_description": func(v varInfo) string { return v.Tags.Get("desc") },
+		"usage_type":        func(v varInfo) string { return toTypeDescription(v.Field.Type()) },
+		"usage_default":     func(v varInfo) string { return v.Tags.Get("default") },
+		"usage_required": func(v varInfo) (string, error) {
+			req := v.Tags.Get("required")
+			if req != "" {
+				reqB, err := strconv.ParseBool(req)
+				if err != nil {
+					return "", err
+				}
+				if reqB {
+					req = "true"
+				}
+			}
+			return req, nil
+		},
+	}
+
+	tmpl, err := template.New("envconfig").Funcs(functions).Parse(format)
+	if err != nil {
+		return err
+	}
+
+	return Usaget(prefix, spec, out, tmpl)
+}
+
+// Usaget writes usage information to the specified io.Writer using the specified template
+func Usaget(prefix string, spec interface{}, out io.Writer, tmpl *template.Template) error {
+	// gather first
+	infos, err := gatherInfo(prefix, spec)
+	if err != nil {
+		return err
+	}
+
+	return tmpl.Execute(out, infos)
+}
diff --git a/provisioner/vendor/vendor.json b/provisioner/vendor/vendor.json
new file mode 100644
index 0000000..1ae77af
--- /dev/null
+++ b/provisioner/vendor/vendor.json
@@ -0,0 +1,45 @@
+{
+	"comment": "",
+	"ignore": "test",
+	"package": [
+		{
+			"checksumSHA1": "dGXnnR7ZhsrZNnEqFimk6q7YCqs=",
+			"path": "github.com/Sirupsen/logrus",
+			"revision": "61e43dc76f7ee59a82bdf3d71033dc12bea4c77d",
+			"revisionTime": "2017-01-13T01:19:11Z"
+		},
+		{
+			"checksumSHA1": "F5dR3/i70EhSIMZfeIV+H8/PtvM=",
+			"path": "github.com/gorilla/mux",
+			"revision": "392c28fe23e1c45ddba891b0320b3b5df220beea",
+			"revisionTime": "2017-01-18T13:43:44Z"
+		},
+		{
+			"checksumSHA1": "byAKfVXPyAUJEHfQbMqrK5aJ8+M=",
+			"path": "github.com/hashicorp/consul/api",
+			"revision": "07b91d9d20b62969e23181e7ae135935c88dab43",
+			"revisionTime": "2017-01-19T20:28:30Z"
+		},
+		{
+			"checksumSHA1": "Uzyon2091lmwacNsl1hCytjhHtg=",
+			"origin": "github.com/hashicorp/consul/vendor/github.com/hashicorp/go-cleanhttp",
+			"path": "github.com/hashicorp/go-cleanhttp",
+			"revision": "07b91d9d20b62969e23181e7ae135935c88dab43",
+			"revisionTime": "2017-01-19T20:28:30Z"
+		},
+		{
+			"checksumSHA1": "E3Xcanc9ouQwL+CZGOUyA/+giLg=",
+			"origin": "github.com/hashicorp/consul/vendor/github.com/hashicorp/serf/coordinate",
+			"path": "github.com/hashicorp/serf/coordinate",
+			"revision": "07b91d9d20b62969e23181e7ae135935c88dab43",
+			"revisionTime": "2017-01-19T20:28:30Z"
+		},
+		{
+			"checksumSHA1": "pNria08/hqW7nnWb0erRBMdJU3M=",
+			"path": "github.com/kelseyhightower/envconfig",
+			"revision": "4069f29f08928c54bcb1fdf632b31b1bb54b4fdb",
+			"revisionTime": "2017-01-13T19:16:37Z"
+		}
+	],
+	"rootPath": "gerrit.opencord.org/maas/cord-provisioner"
+}
diff --git a/roles/head-node/files/do-ansible b/roles/head-node/files/do-ansible
index 53c2b1c..0cd6809 100755
--- a/roles/head-node/files/do-ansible
+++ b/roles/head-node/files/do-ansible
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/ash
 
 ID=$1
 HOSTNAME=$2
diff --git a/roles/head-node/files/do-switch b/roles/head-node/files/do-switch
index 27c6819..c370ccb 100755
--- a/roles/head-node/files/do-switch
+++ b/roles/head-node/files/do-switch
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/ash
 
 ID=$1
 HOSTNAME=$2
diff --git a/switchq/Dockerfile b/switchq/Dockerfile
index e80a590..993fa41 100644
--- a/switchq/Dockerfile
+++ b/switchq/Dockerfile
@@ -1,4 +1,4 @@
-## Copyright 2016 Open Networking Laboratory
+## Copyright 2017 Open Networking Laboratory
 ##
 ## Licensed under the Apache License, Version 2.0 (the "License");
 ## you may not use this file except in compliance with the License.
@@ -11,22 +11,14 @@
 ## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 ## See the License for the specific language governing permissions and
 ## limitations under the License.
-FROM golang:1.6-alpine
+FROM golang:1.7-alpine
 MAINTAINER Open Networking Laboratory <info@onlab.us>
 
-RUN apk --update add openssh-client git
-
-RUN mkdir -p /switchq
-COPY vendors.json /switchq/vendors.json
-
-RUN go get github.com/tools/godep
-ADD . /go/src/gerrit.opencord.com/maas/switchq
-
-WORKDIR /go/src/gerrit.opencord.com/maas/switchq
-RUN /go/bin/godep restore || true
-
+RUN mkdir /switchq /service
 WORKDIR /go
-RUN go install gerrit.opencord.com/maas/switchq
+ADD . /go/src/gerrit.opencord.com/maas/switchq
+COPY vendors.json /switchq/vendors.json
+RUN go build -o /service/entry-point gerrit.opencord.com/maas/switchq
 
 LABEL org.label-schema.name="switchq" \
       org.label-schema.description="Provides fabric switch discovery and provisioning" \
@@ -34,4 +26,5 @@
       org.label-schema.vendor="Open Networking Laboratory" \
       org.label-schema.schema-version="1.0"
 
-ENTRYPOINT ["/go/bin/switchq"]
+WORKDIR /service
+ENTRYPOINT ["/service/entry-point"]
diff --git a/switchq/Dockerfile.release b/switchq/Dockerfile.release
new file mode 100644
index 0000000..77ec075
--- /dev/null
+++ b/switchq/Dockerfile.release
@@ -0,0 +1,28 @@
+## Copyright 2017 Open Networking Laboratory
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+## http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+FROM alpine:3.5
+MAINTAINER Open Networking Laboratory <info@onlab.us>
+
+RUN mkdir -p /switchq /service
+COPY vendors.json /switchq/vendors.json
+ADD entry-point /service/entry-point
+
+LABEL org.label-schema.name="switchq" \
+      org.label-schema.description="Provides fabric switch discovery and provisioning" \
+      org.label-schema.vcs-url="https://gerrit.opencord.org/maas" \
+      org.label-schema.vendor="Open Networking Laboratory" \
+      org.label-schema.schema-version="1.0"
+
+WORKDIR /service
+ENTRYPOINT ["/service/entry-point"]
diff --git a/switchq/Godeps/Godeps.json b/switchq/Godeps/Godeps.json
deleted file mode 100644
index a178cff..0000000
--- a/switchq/Godeps/Godeps.json
+++ /dev/null
@@ -1,35 +0,0 @@
-{
-	"ImportPath": "gerrit.opencord.org/maas/swtichq",
-	"GoVersion": "go1.6",
-	"GodepVersion": "v72",
-	"Deps": [
-		{
-			"ImportPath": "github.com/juju/gomaasapi",
-			"Rev": "e173bc8d8d3304ff11b0ded5f6d4eea0cb560a40"
-		},
-		{
-			"ImportPath": "gopkg.in/mgo.v2/bson",
-			"Comment": "r2015.12.06-2-g03c9f3e",
-			"Rev": "03c9f3ee4c14c8e51ee521a6a7d0425658dd6f64"
-		},
-		{
-			"ImportPath": "github.com/gorilla/context",
-			"Comment": "v1.1-4-gaed02d1",
-			"Rev": "aed02d124ae4a0e94fea4541c8effd05bf0c8296"
-		},
-		{
-			"ImportPath": "github.com/gorilla/mux",
-			"Comment": "v1.1-13-g9fa818a",
-			"Rev": "9fa818a44c2bf1396a17f9d5a3c0f6dd39d2ff8e"
-		},
-		{
-			"ImportPath": "github.com/kelseyhightower/envconfig",
-			"Comment": "1.1.0-17-g91921eb",
-			"Rev": "91921eb4cf999321cdbeebdba5a03555800d493b"
-		},
-		{
-			"ImportPath": "github.com/Sirupsen/logrus",
-			"Rev": "f3cfb454f4c209e6668c95216c4744b8fddb2356"
-		}
-	]
-}
diff --git a/switchq/Makefile b/switchq/Makefile
new file mode 100644
index 0000000..a28b665
--- /dev/null
+++ b/switchq/Makefile
@@ -0,0 +1,38 @@
+IMAGE_NAME=cord-maas-switchq
+BINARY=entry-point
+BUILD_TAG=build
+PACKAGE_TAG=candidate
+
+.PHONY: help
+help:
+	@echo "build     - create the binary"
+	@echo "package   - package the binary into a docker container"
+	@echo "clean     - remove temporary files and build artifacts"
+	@echo "help      - this message"
+
+BUILD_DATE=$(shell date -u +%Y-%m-%dT%TZ)
+VCS_REF=$(shell git log --pretty=format:%H -n 1)
+VCS_REF_DATE=$(shell git log --pretty=format:%cd --date=format:%FT%T%z -n 1)
+BRANCHES=$(shell repo --color=never --no-pager branches 2>/dev/null | wc -l)
+STATUS=$(shell repo --color=never --no-pager status . | tail -n +2 | wc -l)
+MODIFIED=$(shell test $(BRANCHES) -eq 0 && test $(STATUS) -eq 0 || echo "[modified]")
+BRANCH=$(shell repo --color=never --no-pager info -l -o | grep 'Manifest branch:' | awk '{print $$NF}')
+VERSION=$(BRANCH)$(MODIFIED)
+
+.PHONY: build
+build:
+	docker build -t $(IMAGE_NAME):$(BUILD_TAG) .
+
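+# package: copy the compiled binary out of the build image and re-wrap it in
+# the minimal runtime image defined by Dockerfile.release.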
+.PHONY: package
+package:
+	$(eval BUILD_ID := $(shell docker create $(IMAGE_NAME):$(BUILD_TAG)))
+	$(eval BINDIR := $(shell mktemp -d))
+	docker cp $(BUILD_ID):/service/$(BINARY) $(BINDIR)/$(BINARY)
+	cp Dockerfile.release vendors.json $(BINDIR)
+	docker build -f $(BINDIR)/Dockerfile.release -t $(IMAGE_NAME):$(PACKAGE_TAG) --label org.label-schema.build-date=$(BUILD_DATE) --label org.label-schema.vcs-ref=$(VCS_REF) --label org.label-schema.vcs-ref-date=$(VCS_REF_DATE) --label org.label-schema.version=$(VERSION) $(BINDIR)
+	docker rm -f $(BUILD_ID)
+	rm -r $(BINDIR)
+
+.PHONY: clean
+clean:
+	@echo ""
diff --git a/switchq/switchq.go b/switchq/switchq.go
index e946ff1..1941236 100644
--- a/switchq/switchq.go
+++ b/switchq/switchq.go
@@ -16,6 +16,7 @@
 import (
 	"bytes"
 	"encoding/json"
+	"flag"
 	"fmt"
 	"github.com/Sirupsen/logrus"
 	"github.com/gorilla/mux"
@@ -23,28 +24,31 @@
 	"github.com/kelseyhightower/envconfig"
 	"io/ioutil"
 	"net/http"
+	"os"
 	"regexp"
 	"sync"
 	"time"
 )
 
+const appName = "SWITCHQ"
+
 type Config struct {
-	VendorsURL      string `default:"file:///switchq/vendors.json" envconfig:"VENDORS_URL"`
-	AddressURL      string `default:"file:///switchq/dhcp_harvest.inc" envconfig:"ADDRESS_URL"`
-	PollInterval    string `default:"1m" envconfig:"POLL_INTERVAL"`
-	ProvisionTTL    string `default:"1h" envconfig:"PROVISION_TTL"`
-	ProvisionURL    string `default:"" envconfig:"PROVISION_URL"`
-	RoleSelectorURL string `default:"" envconfig:"ROLE_SELECTOR_URL"`
-	DefaultRole     string `default:"fabric-switch" envconfig:"DEFAULT_ROLE"`
-	Script          string `default:"do-ansible"`
-	LogLevel        string `default:"warning" envconfig:"LOG_LEVEL"`
-	LogFormat       string `default:"text" envconfig:"LOG_FORMAT"`
-	Listen          string `default:""`
-	Port            int    `default:"4244"`
-	MaasURL         string `default:"http://localhost/MAAS" envconfig:"MAAS_URL"`
-	MaasKey         string `default:"" envconfig:"MAAS_API_KEY"`
-	ShowApiKey      bool   `default:"false" envconfig:"MAAS_SHOW_API_KEY"`
-	ApiKeyFile      string `default:"/secrets/maas_api_key" envconfig:"MAAS_API_KEY_FILE"`
+	VendorsURL      string `default:"file:///switchq/vendors.json" envconfig:"VENDORS_URL" desc:"URL that specifies supported vendor OUI information"`
+	AddressURL      string `default:"file:///switchq/dhcp_harvest.inc" envconfig:"ADDRESS_URL" desc:"URL of service or file from which to query IP information"`
+	PollInterval    string `default:"1m" envconfig:"POLL_INTERVAL" desc:"how often IP information should be queried and processed"`
+	ProvisionTTL    string `default:"1h" envconfig:"PROVISION_TTL" desc:"duration to wait for a provisioning request before considering it failed"`
+	ProvisionURL    string `default:"" envconfig:"PROVISION_URL" desc:"URL of provisioning service"`
+	RoleSelectorURL string `default:"" envconfig:"ROLE_SELECTOR_URL" desc:"URL of service to query for switch role"`
+	DefaultRole     string `default:"fabric-switch" envconfig:"DEFAULT_ROLE" desc:"default switch role"`
+	Script          string `default:"do-ansible" desc:"script to run for provisioning"`
+	LogLevel        string `default:"warning" envconfig:"LOG_LEVEL" desc:"detail level for logging"`
+	LogFormat       string `default:"text" envconfig:"LOG_FORMAT" desc:"output format for logging, text or json"`
+	Listen          string `default:"" desc:"IP on which to listen for requests"`
+	Port            int    `default:"4244" desc:"port on which to listen for requests"`
+	MaasURL         string `default:"http://localhost/MAAS" envconfig:"MAAS_URL" desc:"connection string for MAAS"`
+	MaasKey         string `default:"" envconfig:"MAAS_API_KEY" desc:"API key for MAAS"`
+	ShowApiKey      bool   `default:"false" envconfig:"MAAS_SHOW_API_KEY" desc:"display API key in log"`
+	ApiKeyFile      string `default:"/secrets/maas_api_key" envconfig:"MAAS_API_KEY_FILE" desc:"file from which to read API key"`
 
 	vendors       Vendors
 	addressSource AddressSource
@@ -280,12 +284,24 @@
 }
 
 var log = logrus.New()
+var appFlags = flag.NewFlagSet("", flag.ContinueOnError)
 
 func main() {
 
 	var err error
 	context := &AppContext{}
-	err = envconfig.Process("SWITCHQ", &context.config)
+
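+	// Wire -h/--help output to envconfig's generated usage text for the Config struct.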
+	appFlags.Usage = func() {
+		envconfig.Usage(appName, &(context.config))
+	}
+	if err := appFlags.Parse(os.Args[1:]); err != nil {
+		if err != flag.ErrHelp {
+			os.Exit(1)
+		} else {
+			return
+		}
+	}
+	err = envconfig.Process(appName, &context.config)
 	if err != nil {
 		log.Fatalf("Unable to parse configuration options : %s", err)
 	}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/switchq/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..f2c2bc2
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,66 @@
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/LICENSE b/switchq/vendor/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/README.md b/switchq/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000..206c746
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,433 @@
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
+
+**Seeing weird case-sensitive problems?** See [this
+issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
+This change has been reverted. I apologize for causing this. I greatly
+underestimated the impact this would have. Logrus strives for stability and
+backwards compatibility and failed to provide that.
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+  }).Info("A walrus appears")
+}
+```
+
+Note that it's completely api-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+  "os"
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // Log as JSON instead of the default ASCII formatter.
+  log.SetFormatter(&log.JSONFormatter{})
+
+  // Output to stdout instead of the default stderr, could also be a file.
+  log.SetOutput(os.Stdout)
+
+  // Only log the warning severity or above.
+  log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 122,
+  }).Warn("The group's number increased tremendously!")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 100,
+  }).Fatal("The ice breaks!")
+
+  // A common pattern is to re-use fields between logging statements by re-using
+  // the logrus.Entry returned from WithFields()
+  contextLogger := log.WithFields(log.Fields{
+    "common": "this is a common field",
+    "other": "I also should be logged always",
+  })
+
+  contextLogger.Info("I'll be logged with common and other field")
+  contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+  // The API for setting attributes is a little different than the package level
+  // exported logger. See Godoc.
+  log.Out = os.Stderr
+
+  log.WithFields(logrus.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus using any of the `printf`-family functions should be
+seen as a hint you should add a field, however, you can still use the
+`printf`-family functions with Logrus.
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
+Note: the Syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+| Hook  | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
+
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`, then it will only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `WithFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
+
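+For example, a `log.WithField("animal", "walrus").Info("A walrus appears")` call
+produces an entry carrying all three (text formatter shown; the timestamp is
+illustrative):
+
+```text
+time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus
+```
+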
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force non-colored output even when there is a TTY, set
+    the `DisableColors` field to `true`.
+* `logrus.JSONFormatter`. Logs fields as JSON.
+
+Third party logging formatters:
+
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+
+// ... then, for example in init():
+log.SetFormatter(new(MyJSONFormatter))
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+    // create a stdlib log.Logger that writes to
+    // logrus.Logger.
+    ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus that manages loggers: you can initialize a logger's level, hooks, and formatter from a config file, generating differently configured loggers per environment.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs! It simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+logger, hook := NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to gracefully shut down. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default the Logger is protected by a mutex for concurrent writes; this mutex is held when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+
+Situations when locking is not needed include:
+
+* You have no hooks registered, or hooks calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing)
+
+     (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
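+
+A minimal sketch of opting out of the lock (assuming no hooks are registered
+and `Out` is already safe for concurrent writes):
+
+```go
+logger := logrus.New()
+logger.Out = os.Stderr // assumed safe for concurrent writes in this setup
+logger.SetNoLock()
+```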
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/alt_exit.go b/switchq/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..b4c9e84
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://bitbucket.org/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to gracefully shut down. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/doc.go b/switchq/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 0000000..dddd5f8
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+  package main
+
+  import (
+    log "github.com/Sirupsen/logrus"
+  )
+
+  func main() {
+    log.WithFields(log.Fields{
+      "animal": "walrus",
+      "number": 1,
+      "size":   10,
+    }).Info("A walrus appears")
+  }
+
+Output:
+  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/entry.go b/switchq/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000..4edbe7a
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,275 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"sync"
+	"time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+	Logger *Logger
+
+	// Contains all the fields set by the user.
+	Data Fields
+
+	// Time at which the log entry was created
+	Time time.Time
+
+	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+	Level Level
+
+	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
+	Message string
+
+	// When the formatter is called in entry.log(), a Buffer may be set on the entry
+	Buffer *bytes.Buffer
+}
+
+func NewEntry(logger *Logger) *Entry {
+	return &Entry{
+		Logger: logger,
+		// Default is three fields, give a little extra room
+		Data: make(Fields, 5),
+	}
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		return "", err
+	}
+	str := string(serialized)
+	return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+	return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+	data := make(Fields, len(entry.Data)+len(fields))
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	for k, v := range fields {
+		data[k] = v
+	}
+	return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+	var buffer *bytes.Buffer
+	entry.Time = time.Now()
+	entry.Level = level
+	entry.Message = msg
+
+	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+		entry.Logger.mu.Unlock()
+	}
+	buffer = bufferPool.Get().(*bytes.Buffer)
+	buffer.Reset()
+	defer bufferPool.Put(buffer)
+	entry.Buffer = buffer
+	serialized, err := entry.Logger.Formatter.Format(&entry)
+	entry.Buffer = nil
+	if err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+		entry.Logger.mu.Unlock()
+	} else {
+		entry.Logger.mu.Lock()
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+		entry.Logger.mu.Unlock()
+	}
+
+	// To avoid Entry#log() returning a value that only would make sense for
+	// panic() to use in Entry#Panic(), we avoid the allocation by checking
+	// directly here.
+	if level <= PanicLevel {
+		panic(&entry)
+	}
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.log(DebugLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+	entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.log(InfoLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.log(WarnLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+	entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.log(ErrorLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.log(FatalLevel, fmt.Sprint(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.log(PanicLevel, fmt.Sprint(args...))
+	}
+	panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.Debug(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.Info(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+	entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.Warn(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+	entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.Error(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.Fatal(fmt.Sprintf(format, args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.Panic(fmt.Sprintf(format, args...))
+	}
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.Debug(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.Info(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+	entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.Warn(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+	entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.Error(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.Fatal(entry.sprintlnn(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.Panic(entry.sprintlnn(args...))
+	}
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+	msg := fmt.Sprintln(args...)
+	return msg[:len(msg)-1]
+}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/exported.go b/switchq/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000..9a0120a
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+	"io"
+)
+
+var (
+	// std is the name of the standard logger in stdlib `log`
+	std = New()
+)
+
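+// StandardLogger returns the package-level logger used by the exported
+// helpers below. A sketch of typical package-level use:
+//
+//	SetLevel(DebugLevel)
+//	WithField("id", 42).Info("ready")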
+func StandardLogger() *Logger {
+	return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+	return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+	return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+	return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+	std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+	std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+	std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+	std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+	std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+	std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+	std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+	std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+	std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+	std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+	std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+	std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+	std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+	std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+	std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+	std.Fatalln(args...)
+}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/formatter.go b/switchq/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..b5fbe93
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error, etc.
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping them. If this code weren't here, then
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// would silently drop the user-provided level. Instead, with this code it
+// is logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's
+// here to avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+	if t, ok := data["time"]; ok {
+		data["fields.time"] = t
+	}
+
+	if m, ok := data["msg"]; ok {
+		data["fields.msg"] = m
+	}
+
+	if l, ok := data["level"]; ok {
+		data["fields.level"] = l
+	}
+}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/hooks.go b/switchq/vendor/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A Hook is fired when logging at one of the levels returned from `Levels()`
+// on your implementation of the interface. Note that hooks are not fired in a
+// goroutine or a channel with workers; you should handle such functionality
+// yourself if your call is non-blocking and you don't wish for the logging
+// calls for the levels returned from `Levels()` to block.
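+//
+// An illustrative implementation sketch (sendAlert is a hypothetical
+// function in the caller's code, not part of logrus):
+//
+//	type alertHook struct{}
+//
+//	func (alertHook) Levels() []Level {
+//		return []Level{ErrorLevel, FatalLevel, PanicLevel}
+//	}
+//
+//	func (alertHook) Fire(entry *Entry) error {
+//		sendAlert(entry.Message) // hypothetical alerting call
+//		return nil
+//	}
+//
+// Register it with `logger.Hooks.Add(alertHook{})` or the package-level AddHook.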
+type Hook interface {
+	Levels() []Level
+	Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+	for _, level := range hook.Levels() {
+		hooks[level] = append(hooks[level], hook)
+	}
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+	for _, hook := range hooks[level] {
+		if err := hook.Fire(entry); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/json_formatter.go b/switchq/vendor/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..266554e
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,74 @@
+package logrus
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+type fieldKey string
+type FieldMap map[fieldKey]string
+
+const (
+	FieldKeyMsg   = "msg"
+	FieldKeyLevel = "level"
+	FieldKeyTime  = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// FieldMap allows users to customize the names of keys for various fields.
+	// As an example:
+	// formatter := &JSONFormatter{
+	//   	FieldMap: FieldMap{
+	// 		 FieldKeyTime: "@timestamp",
+	// 		 FieldKeyLevel: "@level",
+	// 		 FieldKeyMsg: "@message",
+	//    },
+	// }
+	FieldMap FieldMap
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields, len(entry.Data)+3)
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/Sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+	prefixFieldClashes(data)
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+
+	serialized, err := json.Marshal(data)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+	}
+	return append(serialized, '\n'), nil
+}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/logger.go b/switchq/vendor/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000..b769f3d
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,308 @@
+package logrus
+
+import (
+	"io"
+	"os"
+	"sync"
+)
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it as the default, which is `os.Stderr`. You can also set
+	// this to something more adventurous, such as logging to Kafka.
+	Out io.Writer
+	// Hooks for the logger instance. These allow firing events based on logging
+	// levels and log entries. For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before being logged to Out. The
+	// included formatters are `TextFormatter` and `JSONFormatter`, of which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but not when writing to a file. You can easily implement
+	// your own formatter that satisfies the `Formatter` interface; see the
+	// `README` or the included formatters for examples.
+	Formatter Formatter
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged. `logrus.Debug` is useful when debugging.
+	Level Level
+	// Used to sync writing to the log. Locking is enabled by default.
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+}
+
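+// MutexWrap is a mutex whose locking can be disabled entirely; see Disable
+// and Logger.SetNoLock for when that is safe.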
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//    var log = &Logger{
+//      Out: os.Stderr,
+//      Formatter: new(JSONFormatter),
+//      Hooks: make(LevelHooks),
+//      Level: logrus.DebugLevel,
+//    }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:       os.Stderr,
+		Formatter: new(TextFormatter),
+		Hooks:     make(LevelHooks),
+		Level:     InfoLevel,
+	}
+}
+
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry. Note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
+}
+
+// Add an error as a single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infof(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalf(format, args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debug(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Info(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Info(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Error(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatal(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panic(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infoln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalln(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+// When a file is opened in append mode, it is safe to write to it
+// concurrently (for messages up to 4k on Linux). In those cases the user
+// can choose to disable the lock.
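+//
+// An illustrative sketch (assumption: Out is a file opened in append mode):
+//
+//	f, _ := os.OpenFile("app.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
+//	logger.Out = f
+//	logger.SetNoLock()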
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/logrus.go b/switchq/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..e596691
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+	switch level {
+	case DebugLevel:
+		return "debug"
+	case InfoLevel:
+		return "info"
+	case WarnLevel:
+		return "warning"
+	case ErrorLevel:
+		return "error"
+	case FatalLevel:
+		return "fatal"
+	case PanicLevel:
+		return "panic"
+	}
+
+	return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
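+// A sketch of typical use (e.g. parsing a level from configuration):
+//
+//	lvl, err := ParseLevel("warn") // lvl == WarnLevel on success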
+func ParseLevel(lvl string) (Level, error) {
+	switch strings.ToLower(lvl) {
+	case "panic":
+		return PanicLevel, nil
+	case "fatal":
+		return FatalLevel, nil
+	case "error":
+		return ErrorLevel, nil
+	case "warn", "warning":
+		return WarnLevel, nil
+	case "info":
+		return InfoLevel, nil
+	case "debug":
+		return DebugLevel, nil
+	}
+
+	var l Level
+	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+	PanicLevel,
+	FatalLevel,
+	ErrorLevel,
+	WarnLevel,
+	InfoLevel,
+	DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level on
+// your instance of logger, obtained with `logrus.New()`.
+const (
+	// PanicLevel level, highest level of severity. Logs and then calls panic with the
+	// message passed to Debug, Info, ...
+	PanicLevel Level = iota
+	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+	// logging level is set to Panic.
+	FatalLevel
+	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take; that way
+// it'll accept both a stdlib logger and a logrus logger. There's no standard
+// interface, so this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/switchq/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
new file mode 100644
index 0000000..1960169
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package logrus
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	return true
+}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/switchq/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000..5f6be4d
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/switchq/vendor/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000..308160c
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/switchq/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..329038f
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,22 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var termios Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/switchq/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 0000000..a3c6f6e
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris,!appengine
+
+package logrus
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+	_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+	return err == nil
+}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/switchq/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..3727e8a
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,!appengine
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/text_formatter.go b/switchq/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..076de5d
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,166 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"sort"
+	"strings"
+	"time"
+)
+
+const (
+	nocolor = 0
+	red     = 31
+	green   = 32
+	yellow  = 33
+	blue    = 34
+	gray    = 37
+)
+
+var (
+	baseTimestamp time.Time
+	isTerminal    bool
+)
+
+func init() {
+	baseTimestamp = time.Now()
+	isTerminal = IsTerminal()
+}
+
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached, instead of just
+	// the time elapsed since the beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+	DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	var b *bytes.Buffer
+	keys := make([]string, 0, len(entry.Data))
+	for k := range entry.Data {
+		keys = append(keys, k)
+	}
+
+	if !f.DisableSorting {
+		sort.Strings(keys)
+	}
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	prefixFieldClashes(entry.Data)
+
+	isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+	isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+	if isColored {
+		f.printColored(b, entry, keys, timestampFormat)
+	} else {
+		if !f.DisableTimestamp {
+			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+		}
+		f.appendKeyValue(b, "level", entry.Level.String())
+		if entry.Message != "" {
+			f.appendKeyValue(b, "msg", entry.Message)
+		}
+		for _, key := range keys {
+			f.appendKeyValue(b, key, entry.Data[key])
+		}
+	}
+
+	b.WriteByte('\n')
+	return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+	var levelColor int
+	switch entry.Level {
+	case DebugLevel:
+		levelColor = gray
+	case WarnLevel:
+		levelColor = yellow
+	case ErrorLevel, FatalLevel, PanicLevel:
+		levelColor = red
+	default:
+		levelColor = blue
+	}
+
+	levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+	if f.DisableTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+	} else if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+	} else {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+	}
+	for _, k := range keys {
+		v := entry.Data[k]
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+		f.appendValue(b, v)
+	}
+}
+
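+// needsQuoting reports whether text contains any character outside the set
+// of letters, digits, '-' and '.', and therefore must be quoted in output.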
+func needsQuoting(text string) bool {
+	for _, ch := range text {
+		if !((ch >= 'a' && ch <= 'z') ||
+			(ch >= 'A' && ch <= 'Z') ||
+			(ch >= '0' && ch <= '9') ||
+			ch == '-' || ch == '.') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+	b.WriteString(key)
+	b.WriteByte('=')
+	f.appendValue(b, value)
+	b.WriteByte(' ')
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+	switch value := value.(type) {
+	case string:
+		if !needsQuoting(value) {
+			b.WriteString(value)
+		} else {
+			fmt.Fprintf(b, "%q", value)
+		}
+	case error:
+		errmsg := value.Error()
+		if !needsQuoting(errmsg) {
+			b.WriteString(errmsg)
+		} else {
+			fmt.Fprintf(b, "%q", errmsg)
+		}
+	default:
+		fmt.Fprint(b, value)
+	}
+}
diff --git a/switchq/vendor/github.com/Sirupsen/logrus/writer.go b/switchq/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000..f74d2aa
--- /dev/null
+++ b/switchq/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,53 @@
+package logrus
+
+import (
+	"bufio"
+	"io"
+	"runtime"
+)
+
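+// Writer returns an io.PipeWriter; anything written to it is scanned
+// line-by-line and logged at InfoLevel. An illustrative use (an assumption,
+// not something logrus requires) is bridging the standard library logger:
+//
+//	log.SetOutput(logger.Writer())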
+func (logger *Logger) Writer() *io.PipeWriter {
+	return logger.WriterLevel(InfoLevel)
+}
+
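+// WriterLevel is like Writer but logs each scanned line at the given level.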
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+	reader, writer := io.Pipe()
+
+	var printFunc func(args ...interface{})
+	switch level {
+	case DebugLevel:
+		printFunc = logger.Debug
+	case InfoLevel:
+		printFunc = logger.Info
+	case WarnLevel:
+		printFunc = logger.Warn
+	case ErrorLevel:
+		printFunc = logger.Error
+	case FatalLevel:
+		printFunc = logger.Fatal
+	case PanicLevel:
+		printFunc = logger.Panic
+	default:
+		printFunc = logger.Print
+	}
+
+	go logger.writerScanner(reader, printFunc)
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		printFunc(scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		logger.Errorf("Error while reading from Writer: %s", err)
+	}
+	reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
diff --git a/config-generator/vendor/github.com/gorilla/context/LICENSE b/switchq/vendor/github.com/gorilla/mux/LICENSE
similarity index 100%
copy from config-generator/vendor/github.com/gorilla/context/LICENSE
copy to switchq/vendor/github.com/gorilla/mux/LICENSE
diff --git a/switchq/vendor/github.com/gorilla/mux/README.md b/switchq/vendor/github.com/gorilla/mux/README.md
new file mode 100644
index 0000000..94d396c
--- /dev/null
+++ b/switchq/vendor/github.com/gorilla/mux/README.md
@@ -0,0 +1,339 @@
+gorilla/mux
+===
+[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
+
+![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
+
+http://www.gorillatoolkit.org/pkg/mux
+
+Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
+their respective handler.
+
+The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
+
+* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
+* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
+* URL hosts and paths can have variables with an optional regular expression.
+* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
+* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
+
+---
+
+* [Install](#install)
+* [Examples](#examples)
+* [Matching Routes](#matching-routes)
+* [Listing Routes](#listing-routes)
+* [Static Files](#static-files)
+* [Registered URLs](#registered-urls)
+* [Full Example](#full-example)
+
+---
+
+## Install
+
+With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
+
+```sh
+go get -u github.com/gorilla/mux
+```
+
+## Examples
+
+Let's start registering a couple of URL paths and handlers:
+
+```go
+func main() {
+	r := mux.NewRouter()
+	r.HandleFunc("/", HomeHandler)
+	r.HandleFunc("/products", ProductsHandler)
+	r.HandleFunc("/articles", ArticlesHandler)
+	http.Handle("/", r)
+}
+```
+
+Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
+
+Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/products/{key}", ProductHandler)
+r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The names are used to create a map of route variables which can be retrieved by calling `mux.Vars()`:
+
+```go
+func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
+	vars := mux.Vars(r)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprintf(w, "Category: %v\n", vars["category"])
+}
+```
+
+And this is all you need to know about the basic usage. More advanced options are explained below.
+
+### Matching Routes
+
+Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
+
+```go
+r := mux.NewRouter()
+// Only matches if domain is "www.example.com".
+r.Host("www.example.com")
+// Matches a dynamic subdomain.
+r.Host("{subdomain:[a-z]+}.domain.com")
+```
+
+There are several other matchers that can be added. To match path prefixes:
+
+```go
+r.PathPrefix("/products/")
+```
+
+...or HTTP methods:
+
+```go
+r.Methods("GET", "POST")
+```
+
+...or URL schemes:
+
+```go
+r.Schemes("https")
+```
+
+...or header values:
+
+```go
+r.Headers("X-Requested-With", "XMLHttpRequest")
+```
+
+...or query values:
+
+```go
+r.Queries("key", "value")
+```
+
+...or to use a custom matcher function:
+
+```go
+r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+	return r.ProtoMajor == 0
+})
+```
+
+...and finally, it is possible to combine several matchers in a single route:
+
+```go
+r.HandleFunc("/products", ProductsHandler).
+  Host("www.example.com").
+  Methods("GET").
+  Schemes("http")
+```
+
+Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
+
+```go
+r := mux.NewRouter()
+s := r.Host("www.example.com").Subrouter()
+```
+
+Then register routes in the subrouter:
+
+```go
+s.HandleFunc("/products/", ProductsHandler)
+s.HandleFunc("/products/{key}", ProductHandler)
+s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place, and then parts of the app can register their paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as the base for their paths:
+
+```go
+r := mux.NewRouter()
+s := r.PathPrefix("/products").Subrouter()
+// "/products/"
+s.HandleFunc("/", ProductsHandler)
+// "/products/{key}/"
+s.HandleFunc("/{key}/", ProductHandler)
+// "/products/{key}/details"
+s.HandleFunc("/{key}/details", ProductDetailsHandler)
+```
+
+### Listing Routes
+
+Routes on a mux can be listed using the Router.Walk method, which is useful for generating documentation:
+
+```go
+package main
+
+import (
+    "fmt"
+    "net/http"
+
+    "github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+    return
+}
+
+func main() {
+    r := mux.NewRouter()
+    r.HandleFunc("/", handler)
+    r.HandleFunc("/products", handler)
+    r.HandleFunc("/articles", handler)
+    r.HandleFunc("/articles/{id}", handler)
+    r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+        t, err := route.GetPathTemplate()
+        if err != nil {
+            return err
+        }
+        fmt.Println(t)
+        return nil
+    })
+    http.Handle("/", r)
+}
+```
+
+### Static Files
+
+Note that the path provided to `PathPrefix()` represents a "wildcard": calling
+`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+```go
+func main() {
+	var dir string
+
+	flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+	flag.Parse()
+	r := mux.NewRouter()
+
+	// This will serve files under http://localhost:8000/static/<filename>
+	r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+	srv := &http.Server{
+		Handler:      r,
+		Addr:         "127.0.0.1:8000",
+		// Good practice: enforce timeouts for servers you create!
+		WriteTimeout: 15 * time.Second,
+		ReadTimeout:  15 * time.Second,
+	}
+
+	log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Registered URLs
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name by calling `Name()` on a route. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+  Name("article")
+```
+
+To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
+
+```go
+url, err := r.Get("article").URL("category", "technology", "id", "42")
+```
+
+...and the result will be a `url.URL` with the following path:
+
+```
+"/articles/technology/42"
+```
+
+This also works for host variables:
+
+```go
+r := mux.NewRouter()
+r.Host("{subdomain}.domain.com").
+  Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// url.String() will be "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42")
+```
+
+All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+```go
+r.HeadersRegexp("Content-Type", "application/(text|json)")
+```
+
+...and the route will match requests with a Content-Type of either `application/json` or `application/text`.
+
+There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
+
+```go
+// "http://news.domain.com/"
+host, err := r.Get("article").URLHost("subdomain", "news")
+
+// "/articles/technology/42"
+path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+```
+
+And if you use subrouters, host and path defined separately can be built as well:
+
+```go
+r := mux.NewRouter()
+s := r.Host("{subdomain}.domain.com").Subrouter()
+s.Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// "http://news.domain.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42")
+```
+
+## Full Example
+
+Here's a complete, runnable example of a small `mux` based server:
+
+```go
+package main
+
+import (
+	"net/http"
+	"log"
+	"github.com/gorilla/mux"
+)
+
+func YourHandler(w http.ResponseWriter, r *http.Request) {
+	w.Write([]byte("Gorilla!\n"))
+}
+
+func main() {
+	r := mux.NewRouter()
+	// Routes consist of a path and a handler function.
+	r.HandleFunc("/", YourHandler)
+
+	// Bind to a port and pass our router in
+	log.Fatal(http.ListenAndServe(":8000", r))
+}
+```
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/switchq/vendor/github.com/gorilla/mux/context_gorilla.go b/switchq/vendor/github.com/gorilla/mux/context_gorilla.go
new file mode 100644
index 0000000..d7adaa8
--- /dev/null
+++ b/switchq/vendor/github.com/gorilla/mux/context_gorilla.go
@@ -0,0 +1,26 @@
+// +build !go1.7
+
+package mux
+
+import (
+	"net/http"
+
+	"github.com/gorilla/context"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+	return context.Get(r, key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+	if val == nil {
+		return r
+	}
+
+	context.Set(r, key, val)
+	return r
+}
+
+func contextClear(r *http.Request) {
+	context.Clear(r)
+}
diff --git a/switchq/vendor/github.com/gorilla/mux/context_native.go b/switchq/vendor/github.com/gorilla/mux/context_native.go
new file mode 100644
index 0000000..209cbea
--- /dev/null
+++ b/switchq/vendor/github.com/gorilla/mux/context_native.go
@@ -0,0 +1,24 @@
+// +build go1.7
+
+package mux
+
+import (
+	"context"
+	"net/http"
+)
+
+func contextGet(r *http.Request, key interface{}) interface{} {
+	return r.Context().Value(key)
+}
+
+func contextSet(r *http.Request, key, val interface{}) *http.Request {
+	if val == nil {
+		return r
+	}
+
+	return r.WithContext(context.WithValue(r.Context(), key, val))
+}
+
+func contextClear(r *http.Request) {
+	return
+}
diff --git a/switchq/vendor/github.com/gorilla/mux/doc.go b/switchq/vendor/github.com/gorilla/mux/doc.go
new file mode 100644
index 0000000..00daf4a
--- /dev/null
+++ b/switchq/vendor/github.com/gorilla/mux/doc.go
@@ -0,0 +1,240 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+	* Requests can be matched based on URL host, path, path prefix, schemes,
+	  header and query values, HTTP methods or using custom matchers.
+	* URL hosts and paths can have variables with an optional regular
+	  expression.
+	* Registered URLs can be built, or "reversed", which helps maintaining
+	  references to resources.
+	* Routes can be used as subrouters: nested routes are only tested if the
+	  parent route matches. This is useful to define groups of routes that
+	  share common conditions like a host, a path prefix or other repeated
+	  attributes. As a bonus, this optimizes request matching.
+	* It implements the http.Handler interface so it is compatible with the
+	  standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+	func main() {
+		r := mux.NewRouter()
+		r.HandleFunc("/", HomeHandler)
+		r.HandleFunc("/products", ProductsHandler)
+		r.HandleFunc("/articles", ArticlesHandler)
+		http.Handle("/", r)
+	}
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/products/{key}", ProductHandler)
+	r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
+
+	r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
+
+The names are used to create a map of route variables which can be retrieved
+by calling mux.Vars():
+
+	vars := mux.Vars(request)
+	category := vars["category"]
+
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+	r := mux.NewRouter()
+	// Only matches if domain is "www.example.com".
+	r.Host("www.example.com")
+	// Matches a dynamic subdomain.
+	r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+	r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+	r.Methods("GET", "POST")
+
+...or URL schemes:
+
+	r.Schemes("https")
+
+...or header values:
+
+	r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+	r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+	r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+		return r.ProtoMajor == 0
+	})
+
+...and finally, it is possible to combine several matchers in a single route:
+
+	r.HandleFunc("/products", ProductsHandler).
+	  Host("www.example.com").
+	  Methods("GET").
+	  Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place, and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as the base for their paths:
+
+	r := mux.NewRouter()
+	s := r.PathPrefix("/products").Subrouter()
+	// "/products/"
+	s.HandleFunc("/", ProductsHandler)
+	// "/products/{key}/"
+	s.HandleFunc("/{key}/", ProductHandler)
+	// "/products/{key}/details"
+	s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+	func main() {
+		var dir string
+
+		flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+		flag.Parse()
+		r := mux.NewRouter()
+
+		// This will serve files under http://localhost:8000/static/<filename>
+		r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+		srv := &http.Server{
+			Handler:      r,
+			Addr:         "127.0.0.1:8000",
+			// Good practice: enforce timeouts for servers you create!
+			WriteTimeout: 15 * time.Second,
+			ReadTimeout:  15 * time.Second,
+		}
+
+		log.Fatal(srv.ListenAndServe())
+	}
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name by calling Name() on a route. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+	  Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+	url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+	"/articles/technology/42"
+
+This also works for host variables:
+
+	r := mux.NewRouter()
+	r.Host("{subdomain}.domain.com").
+	  Path("/articles/{category}/{id:[0-9]+}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// url.String() will be "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+	r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of either
+`application/json` or `application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+	// "http://news.domain.com/"
+	host, err := r.Get("article").URLHost("subdomain", "news")
+
+	// "/articles/technology/42"
+	path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+	r := mux.NewRouter()
+	s := r.Host("{subdomain}.domain.com").Subrouter()
+	s.Path("/articles/{category}/{id:[0-9]+}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42")
+*/
+package mux
diff --git a/switchq/vendor/github.com/gorilla/mux/mux.go b/switchq/vendor/github.com/gorilla/mux/mux.go
new file mode 100644
index 0000000..d66ec38
--- /dev/null
+++ b/switchq/vendor/github.com/gorilla/mux/mux.go
@@ -0,0 +1,542 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"path"
+	"regexp"
+	"strings"
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+	return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+//     var router = mux.NewRouter()
+//
+//     func main() {
+//         http.Handle("/", router)
+//     }
+//
+// Or, for Google App Engine, register it in an init() function:
+//
+//     func init() {
+//         http.Handle("/", router)
+//     }
+//
+// This will send all incoming requests to the router.
+type Router struct {
+	// Configurable Handler to be used when no route matches.
+	NotFoundHandler http.Handler
+	// Parent route, if this is a subrouter.
+	parent parentRoute
+	// Routes to be matched, in order.
+	routes []*Route
+	// Routes by name for URL building.
+	namedRoutes map[string]*Route
+	// See Router.StrictSlash(). This defines the flag for new routes.
+	strictSlash bool
+	// See Router.SkipClean(). This defines the flag for new routes.
+	skipClean bool
+	// If true, do not clear the request context after handling the request.
+	// This has no effect when go1.7+ is used, since the context is stored
+	// on the request itself.
+	KeepContext bool
+	// See Router.UseEncodedPath(). This defines a flag for all routes.
+	useEncodedPath bool
+}
+
+// Match matches registered routes against the request.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+	for _, route := range r.routes {
+		if route.Match(req, match) {
+			return true
+		}
+	}
+
+	// Closest match for a router (includes sub-routers)
+	if r.NotFoundHandler != nil {
+		match.Handler = r.NotFoundHandler
+		return true
+	}
+	return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved by calling
+// mux.Vars(request).
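+//
+// For example, inside a handler (an illustrative sketch):
+//
+//	vars := mux.Vars(req)
+//	id := vars["id"]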
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	if !r.skipClean {
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = getPath(req)
+		}
+		// Clean path to canonical form and redirect.
+		if p := cleanPath(path); p != path {
+
+			// Added 3 lines (Philip Schlump) - It was dropping the query string and fragment
+			// from the URL. This matches the fix in go 1.2 rc 4 for the same problem. Go issue:
+			// http://code.google.com/p/go/issues/detail?id=5252
+			url := *req.URL
+			url.Path = p
+			p = url.String()
+
+			w.Header().Set("Location", p)
+			w.WriteHeader(http.StatusMovedPermanently)
+			return
+		}
+	}
+	var match RouteMatch
+	var handler http.Handler
+	if r.Match(req, &match) {
+		handler = match.Handler
+		req = setVars(req, match.Vars)
+		req = setCurrentRoute(req, match.Route)
+	}
+	if handler == nil {
+		handler = http.NotFoundHandler()
+	}
+	if !r.KeepContext {
+		defer contextClear(req)
+	}
+	handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+	return r.getNamedRoutes()[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
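+//
+// A minimal illustrative sketch (assumed usage, not part of the original
+// documentation):
+//
+//     r := mux.NewRouter().StrictSlash(true)
+//     r.HandleFunc("/products/", ProductsHandler)
+//     // a request for "/products" now redirects to "/products/"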
+func (r *Router) StrictSlash(value bool) *Router {
+	r.strictSlash = value
+	return r
+}
+
+// SkipClean defines the path cleaning behaviour for new routes. The initial
+// value is false. Users should be careful about which routes are not cleaned
+//
+// When true, if the route path is "/path//to", it will remain with the double
+// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
+//
+// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
+// become /fetch/http/xkcd.com/534
+func (r *Router) SkipClean(value bool) *Router {
+	r.skipClean = value
+	return r
+}
+
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+// This behavior has the drawback of needing to match routes against
+// r.RequestURI instead of r.URL.Path. Any modifications (such as http.StripPrefix)
+// to r.URL.Path will not affect routing when this flag is on and thus may
+// induce unintended behavior.
+//
+// If not called, the router will match the unencoded path to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to".
+func (r *Router) UseEncodedPath() *Router {
+	r.useEncodedPath = true
+	return r
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Router) getNamedRoutes() map[string]*Route {
+	if r.namedRoutes == nil {
+		if r.parent != nil {
+			r.namedRoutes = r.parent.getNamedRoutes()
+		} else {
+			r.namedRoutes = make(map[string]*Route)
+		}
+	}
+	return r.namedRoutes
+}
+
+// getRegexpGroup returns regexp definitions from the parent route, if any.
+func (r *Router) getRegexpGroup() *routeRegexpGroup {
+	if r.parent != nil {
+		return r.parent.getRegexpGroup()
+	}
+	return nil
+}
+
+func (r *Router) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	return m
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+	route := &Route{parent: r, strictSlash: r.strictSlash, skipClean: r.skipClean, useEncodedPath: r.useEncodedPath}
+	r.routes = append(r.routes, route)
+	return route
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+	return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+	*http.Request)) *Route {
+	return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+	return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+	return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+	return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+	return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+	return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+	return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+	return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+	return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+	return r.NewRoute().BuildVarsFunc(f)
+}
+
+// Walk walks the router and all its sub-routers, calling walkFn for each route
+// in the tree. The routes are walked in the order they were added. Sub-routers
+// are explored depth-first.
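+//
+// An illustrative sketch (assumed usage) that prints every registered path
+// template:
+//
+//     r.Walk(func(route *Route, router *Router, ancestors []*Route) error {
+//         if tpl, err := route.GetPathTemplate(); err == nil {
+//             fmt.Println(tpl)
+//         }
+//         return nil
+//     })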
+func (r *Router) Walk(walkFn WalkFunc) error {
+	return r.walk(walkFn, []*Route{})
+}
+
+// SkipRouter is used as a return value from WalkFuncs to indicate that the
+// router that walk is about to descend down to should be skipped.
+var SkipRouter = errors.New("skip this router")
+
+// WalkFunc is the type of the function called for each route visited by Walk.
+// At every invocation, it is given the current route, and the current router,
+// and a list of ancestor routes that lead to the current route.
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
+
+func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
+	for _, t := range r.routes {
+		if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" {
+			continue
+		}
+
+		err := walkFn(t, r, ancestors)
+		if err == SkipRouter {
+			continue
+		}
+		if err != nil {
+			return err
+		}
+		for _, sr := range t.matchers {
+			if h, ok := sr.(*Router); ok {
+				err := h.walk(walkFn, ancestors)
+				if err != nil {
+					return err
+				}
+			}
+		}
+		if h, ok := t.handler.(*Router); ok {
+			ancestors = append(ancestors, t)
+			err := h.walk(walkFn, ancestors)
+			if err != nil {
+				return err
+			}
+			ancestors = ancestors[:len(ancestors)-1]
+		}
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
+// RouteMatch stores information about a matched route.
+type RouteMatch struct {
+	Route   *Route
+	Handler http.Handler
+	Vars    map[string]string
+}
+
+type contextKey int
+
+const (
+	varsKey contextKey = iota
+	routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
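+// For example, in a handler registered for "/products/{key}" (illustrative):
+//
+//     vars := mux.Vars(r)
+//     key := vars["key"]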
+func Vars(r *http.Request) map[string]string {
+	if rv := contextGet(r, varsKey); rv != nil {
+		return rv.(map[string]string)
+	}
+	return nil
+}
+
+// CurrentRoute returns the matched route for the current request, if any.
+// This only works when called inside the handler of the matched route
+// because the matched route is stored in the request context which is cleared
+// after the handler returns, unless the KeepContext option is set on the
+// Router.
+func CurrentRoute(r *http.Request) *Route {
+	if rv := contextGet(r, routeKey); rv != nil {
+		return rv.(*Route)
+	}
+	return nil
+}
+
+func setVars(r *http.Request, val interface{}) *http.Request {
+	return contextSet(r, varsKey, val)
+}
+
+func setCurrentRoute(r *http.Request, val interface{}) *http.Request {
+	return contextSet(r, routeKey, val)
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// getPath returns the escaped path if possible, doing what
+// URL.EscapedPath() (added in go1.5) does.
+func getPath(req *http.Request) string {
+	if req.RequestURI != "" {
+		// Extract the path from RequestURI (which is escaped, unlike URL.Path)
+		// as detailed in https://golang.org/pkg/net/url/#URL;
+		// this is a server-side workaround for Go < 1.5.
+		// http://localhost/path/here?v=1 -> /path/here
+		path := req.RequestURI
+		path = strings.TrimPrefix(path, req.URL.Scheme+`://`)
+		path = strings.TrimPrefix(path, req.URL.Host)
+		if i := strings.LastIndex(path, "?"); i > -1 {
+			path = path[:i]
+		}
+		if i := strings.LastIndex(path, "#"); i > -1 {
+			path = path[:i]
+		}
+		return path
+	}
+	return req.URL.Path
+}
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+// Borrowed from the net/http package.
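+// For example (illustrative): cleanPath("/a/b/../c/") returns "/a/c/".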
+func cleanPath(p string) string {
+	if p == "" {
+		return "/"
+	}
+	if p[0] != '/' {
+		p = "/" + p
+	}
+	np := path.Clean(p)
+	// path.Clean removes trailing slash except for root;
+	// put the trailing slash back if necessary.
+	if p[len(p)-1] == '/' && np != "/" {
+		np += "/"
+	}
+
+	return np
+}
+
+// uniqueVars returns an error if two slices contain duplicated strings.
+func uniqueVars(s1, s2 []string) error {
+	for _, v1 := range s1 {
+		for _, v2 := range s2 {
+			if v1 == v2 {
+				return fmt.Errorf("mux: duplicated route variable %q", v2)
+			}
+		}
+	}
+	return nil
+}
+
+// checkPairs returns the count of strings passed in, and an error if
+// the count is not an even number.
+func checkPairs(pairs ...string) (int, error) {
+	length := len(pairs)
+	if length%2 != 0 {
+		return length, fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+	}
+	return length, nil
+}
+
+// mapFromPairsToString converts variadic string parameters to a
+// string to string map.
+func mapFromPairsToString(pairs ...string) (map[string]string, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]string, length/2)
+	for i := 0; i < length; i += 2 {
+		m[pairs[i]] = pairs[i+1]
+	}
+	return m, nil
+}
+
+// mapFromPairsToRegex converts variadic string parameters to a
+// string to regex map.
+func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]*regexp.Regexp, length/2)
+	for i := 0; i < length; i += 2 {
+		regex, err := regexp.Compile(pairs[i+1])
+		if err != nil {
+			return nil, err
+		}
+		m[pairs[i]] = regex
+	}
+	return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+	for _, v := range arr {
+		if v == value {
+			return true
+		}
+	}
+	return false
+}
+
+// matchMapWithString returns true if the given key/value pairs exist in a given map.
+func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != "" {
+			// If value was defined as an empty string we only check that the
+			// key exists. Otherwise we also check for equality.
+			valueExists := false
+			for _, value := range values {
+				if v == value {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against
+// the given regex
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != nil {
+			// If value was defined as an empty string we only check that the
+			// key exists. Otherwise we also check for equality.
+			valueExists := false
+			for _, value := range values {
+				if v.MatchString(value) {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
diff --git a/switchq/vendor/github.com/gorilla/mux/regexp.go b/switchq/vendor/github.com/gorilla/mux/regexp.go
new file mode 100644
index 0000000..0189ad3
--- /dev/null
+++ b/switchq/vendor/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,323 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
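+//
+// For example (illustrative), the path template "/articles/{id:[0-9]+}"
+// compiles to the regexp ^/articles/(?P<v0>[0-9]+)$ and to the reverse
+// template "/articles/%s".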
+func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash, useEncodedPath bool) (*routeRegexp, error) {
+	// Check if it is well-formed.
+	idxs, errBraces := braceIndices(tpl)
+	if errBraces != nil {
+		return nil, errBraces
+	}
+	// Backup the original.
+	template := tpl
+	// Now let's parse it.
+	defaultPattern := "[^/]+"
+	if matchQuery {
+		defaultPattern = "[^?&]*"
+	} else if matchHost {
+		defaultPattern = "[^.]+"
+		matchPrefix = false
+	}
+	// Only honor strict slash when matching a full path (not a prefix, host or query).
+	if matchPrefix || matchHost || matchQuery {
+		strictSlash = false
+	}
+	// Set a flag for strictSlash.
+	endSlash := false
+	if strictSlash && strings.HasSuffix(tpl, "/") {
+		tpl = tpl[:len(tpl)-1]
+		endSlash = true
+	}
+	varsN := make([]string, len(idxs)/2)
+	varsR := make([]*regexp.Regexp, len(idxs)/2)
+	pattern := bytes.NewBufferString("")
+	pattern.WriteByte('^')
+	reverse := bytes.NewBufferString("")
+	var end int
+	var err error
+	for i := 0; i < len(idxs); i += 2 {
+		// Set all values we are interested in.
+		raw := tpl[end:idxs[i]]
+		end = idxs[i+1]
+		parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+		name := parts[0]
+		patt := defaultPattern
+		if len(parts) == 2 {
+			patt = parts[1]
+		}
+		// Name or pattern can't be empty.
+		if name == "" || patt == "" {
+			return nil, fmt.Errorf("mux: missing name or pattern in %q",
+				tpl[idxs[i]:end])
+		}
+		// Build the regexp pattern.
+		fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
+
+		// Build the reverse template.
+		fmt.Fprintf(reverse, "%s%%s", raw)
+
+		// Append variable name and compiled pattern.
+		varsN[i/2] = name
+		varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Add the remaining.
+	raw := tpl[end:]
+	pattern.WriteString(regexp.QuoteMeta(raw))
+	if strictSlash {
+		pattern.WriteString("[/]?")
+	}
+	if matchQuery {
+		// Add the default pattern if the query value is empty
+		if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
+			pattern.WriteString(defaultPattern)
+		}
+	}
+	if !matchPrefix {
+		pattern.WriteByte('$')
+	}
+	reverse.WriteString(raw)
+	if endSlash {
+		reverse.WriteByte('/')
+	}
+	// Compile full regexp.
+	reg, errCompile := regexp.Compile(pattern.String())
+	if errCompile != nil {
+		return nil, errCompile
+	}
+
+	// Check for capturing groups which used to work in older versions
+	if reg.NumSubexp() != len(idxs)/2 {
+		panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
+			"Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
+	}
+
+	// Done!
+	return &routeRegexp{
+		template:       template,
+		matchHost:      matchHost,
+		matchQuery:     matchQuery,
+		strictSlash:    strictSlash,
+		useEncodedPath: useEncodedPath,
+		regexp:         reg,
+		reverse:        reverse.String(),
+		varsN:          varsN,
+		varsR:          varsR,
+	}, nil
+}
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+	// The unmodified template.
+	template string
+	// True for host match, false for path or query string match.
+	matchHost bool
+	// True for query string match, false for path and host match.
+	matchQuery bool
+	// The strictSlash value defined on the route, but disabled if PathPrefix was used.
+	strictSlash bool
+	// Determines whether to use encoded path from getPath function or unencoded
+	// req.URL.Path for path matching.
+	useEncodedPath bool
+	// Expanded regexp.
+	regexp *regexp.Regexp
+	// Reverse template.
+	reverse string
+	// Variable names.
+	varsN []string
+	// Variable regexps (validators).
+	varsR []*regexp.Regexp
+}
+
+// Match matches the regexp against the URL host or path.
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
+	if !r.matchHost {
+		if r.matchQuery {
+			return r.matchQueryString(req)
+		}
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = getPath(req)
+		}
+		return r.regexp.MatchString(path)
+	}
+
+	return r.regexp.MatchString(getHost(req))
+}
+
+// url builds a URL part using the given values.
+func (r *routeRegexp) url(values map[string]string) (string, error) {
+	urlValues := make([]interface{}, len(r.varsN))
+	for k, v := range r.varsN {
+		value, ok := values[v]
+		if !ok {
+			return "", fmt.Errorf("mux: missing route variable %q", v)
+		}
+		urlValues[k] = value
+	}
+	rv := fmt.Sprintf(r.reverse, urlValues...)
+	if !r.regexp.MatchString(rv) {
+		// The URL is checked against the full regexp, instead of checking
+		// individual variables. This is faster but to provide a good error
+		// message, we check individual regexps if the URL doesn't match.
+		for k, v := range r.varsN {
+			if !r.varsR[k].MatchString(values[v]) {
+				return "", fmt.Errorf(
+					"mux: variable %q doesn't match, expected %q", values[v],
+					r.varsR[k].String())
+			}
+		}
+	}
+	return rv, nil
+}
+
+// getURLQuery returns a single query parameter from a request URL.
+// For a URL with foo=bar&baz=ding, we return only the relevant key
+// value pair for the routeRegexp.
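+// For example (illustrative), given the template "id={id:[0-9]+}" and a
+// request URL containing ?foo=bar&id=42, it returns "id=42".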
+func (r *routeRegexp) getURLQuery(req *http.Request) string {
+	if !r.matchQuery {
+		return ""
+	}
+	templateKey := strings.SplitN(r.template, "=", 2)[0]
+	for key, vals := range req.URL.Query() {
+		if key == templateKey && len(vals) > 0 {
+			return key + "=" + vals[0]
+		}
+	}
+	return ""
+}
+
+func (r *routeRegexp) matchQueryString(req *http.Request) bool {
+	return r.regexp.MatchString(r.getURLQuery(req))
+}
+
+// braceIndices returns the first level curly brace indices from a string.
+// It returns an error in case of unbalanced braces.
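+// For example (illustrative), braceIndices("/{key}") returns [1 6],
+// delimiting the "{key}" segment.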
+func braceIndices(s string) ([]int, error) {
+	var level, idx int
+	var idxs []int
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '{':
+			if level++; level == 1 {
+				idx = i
+			}
+		case '}':
+			if level--; level == 0 {
+				idxs = append(idxs, idx, i+1)
+			} else if level < 0 {
+				return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+			}
+		}
+	}
+	if level != 0 {
+		return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+	}
+	return idxs, nil
+}
+
+// varGroupName builds a capturing group name for the indexed variable.
+func varGroupName(idx int) string {
+	return "v" + strconv.Itoa(idx)
+}
+
+// ----------------------------------------------------------------------------
+// routeRegexpGroup
+// ----------------------------------------------------------------------------
+
+// routeRegexpGroup groups the route matchers that carry variables.
+type routeRegexpGroup struct {
+	host    *routeRegexp
+	path    *routeRegexp
+	queries []*routeRegexp
+}
+
+// setMatch extracts the variables from the URL once a route matches.
+func (v *routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+	// Store host variables.
+	if v.host != nil {
+		host := getHost(req)
+		matches := v.host.regexp.FindStringSubmatchIndex(host)
+		if len(matches) > 0 {
+			extractVars(host, matches, v.host.varsN, m.Vars)
+		}
+	}
+	path := req.URL.Path
+	if r.useEncodedPath {
+		path = getPath(req)
+	}
+	// Store path variables.
+	if v.path != nil {
+		matches := v.path.regexp.FindStringSubmatchIndex(path)
+		if len(matches) > 0 {
+			extractVars(path, matches, v.path.varsN, m.Vars)
+			// Check if we should redirect.
+			if v.path.strictSlash {
+				p1 := strings.HasSuffix(path, "/")
+				p2 := strings.HasSuffix(v.path.template, "/")
+				if p1 != p2 {
+					u, _ := url.Parse(req.URL.String())
+					if p1 {
+						u.Path = u.Path[:len(u.Path)-1]
+					} else {
+						u.Path += "/"
+					}
+					m.Handler = http.RedirectHandler(u.String(), 301)
+				}
+			}
+		}
+	}
+	// Store query string variables.
+	for _, q := range v.queries {
+		queryURL := q.getURLQuery(req)
+		matches := q.regexp.FindStringSubmatchIndex(queryURL)
+		if len(matches) > 0 {
+			extractVars(queryURL, matches, q.varsN, m.Vars)
+		}
+	}
+}
+
+// getHost tries its best to return the request host.
+func getHost(r *http.Request) string {
+	if r.URL.IsAbs() {
+		return r.URL.Host
+	}
+	host := r.Host
+	// Slice off any port information.
+	if i := strings.Index(host, ":"); i != -1 {
+		host = host[:i]
+	}
+	return host
+
+}
+
+func extractVars(input string, matches []int, names []string, output map[string]string) {
+	for i, name := range names {
+		output[name] = input[matches[2*i+2]:matches[2*i+3]]
+	}
+}
diff --git a/switchq/vendor/github.com/gorilla/mux/route.go b/switchq/vendor/github.com/gorilla/mux/route.go
new file mode 100644
index 0000000..9221915
--- /dev/null
+++ b/switchq/vendor/github.com/gorilla/mux/route.go
@@ -0,0 +1,636 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+)
+
+// Route stores information to match a request and build URLs.
+type Route struct {
+	// Parent where the route was registered (a Router).
+	parent parentRoute
+	// Request handler for the route.
+	handler http.Handler
+	// List of matchers.
+	matchers []matcher
+	// Manager for the variables from host and path.
+	regexp *routeRegexpGroup
+	// If true, when the path pattern is "/path/", accessing "/path" will
+	// redirect to the former and vice versa.
+	strictSlash bool
+	// If true, when the path pattern is "/path//to", accessing "/path//to"
+	// will not redirect.
+	skipClean bool
+	// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
+	useEncodedPath bool
+	// If true, this route never matches: it is only used to build URLs.
+	buildOnly bool
+	// The name used to build URLs.
+	name string
+	// Error resulted from building a route.
+	err error
+
+	buildVarsFunc BuildVarsFunc
+}
+
+func (r *Route) SkipClean() bool {
+	return r.skipClean
+}
+
+// Match matches the route against the request.
+func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
+	if r.buildOnly || r.err != nil {
+		return false
+	}
+	// Match everything.
+	for _, m := range r.matchers {
+		if matched := m.Match(req, match); !matched {
+			return false
+		}
+	}
+	// Yay, we have a match. Let's collect some info about it.
+	if match.Route == nil {
+		match.Route = r
+	}
+	if match.Handler == nil {
+		match.Handler = r.handler
+	}
+	if match.Vars == nil {
+		match.Vars = make(map[string]string)
+	}
+	// Set variables.
+	if r.regexp != nil {
+		r.regexp.setMatch(req, match, r)
+	}
+	return true
+}
+
+// ----------------------------------------------------------------------------
+// Route attributes
+// ----------------------------------------------------------------------------
+
+// GetError returns an error resulted from building the route, if any.
+func (r *Route) GetError() error {
+	return r.err
+}
+
+// BuildOnly sets the route to never match: it is only used to build URLs.
+func (r *Route) BuildOnly() *Route {
+	r.buildOnly = true
+	return r
+}
+
+// Handler --------------------------------------------------------------------
+
+// Handler sets a handler for the route.
+func (r *Route) Handler(handler http.Handler) *Route {
+	if r.err == nil {
+		r.handler = handler
+	}
+	return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+	return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+	return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// If the name was registered already it will be overwritten.
+func (r *Route) Name(name string) *Route {
+	if r.name != "" {
+		r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+			r.name, name)
+	}
+	if r.err == nil {
+		r.name = name
+		r.getNamedRoutes()[name] = r
+	}
+	return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+	return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+	Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+func (r *Route) addMatcher(m matcher) *Route {
+	if r.err == nil {
+		r.matchers = append(r.matchers, m)
+	}
+	return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery bool) error {
+	if r.err != nil {
+		return r.err
+	}
+	r.regexp = r.getRegexpGroup()
+	if !matchHost && !matchQuery {
+		if len(tpl) > 0 && tpl[0] != '/' {
+			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+		}
+		if r.regexp.path != nil {
+			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+		}
+	}
+	rr, err := newRouteRegexp(tpl, matchHost, matchPrefix, matchQuery, r.strictSlash, r.useEncodedPath)
+	if err != nil {
+		return err
+	}
+	for _, q := range r.regexp.queries {
+		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+			return err
+		}
+	}
+	if matchHost {
+		if r.regexp.path != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+				return err
+			}
+		}
+		r.regexp.host = rr
+	} else {
+		if r.regexp.host != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+				return err
+			}
+		}
+		if matchQuery {
+			r.regexp.queries = append(r.regexp.queries, rr)
+		} else {
+			r.regexp.path = rr
+		}
+	}
+	r.addMatcher(rr)
+	return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+//     r := mux.NewRouter()
+//     r.Headers("Content-Type", "application/json",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]string
+		headers, r.err = mapFromPairsToString(pairs...)
+		return r.addMatcher(headerMatcher(headers))
+	}
+	return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for the header
+type headerRegexMatcher map[string]*regexp.Regexp
+
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
+// support. For example:
+//
+//     r := mux.NewRouter()
+//     r.HeadersRegexp("Content-Type", "application/(text|json)",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match the
+// corresponding regular expressions.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]*regexp.Regexp
+		headers, r.err = mapFromPairsToRegex(pairs...)
+		return r.addMatcher(headerRegexMatcher(headers))
+	}
+	return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Host("www.example.com")
+//     r.Host("{subdomain}.domain.com")
+//     r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, true, false, false)
+	return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+// Match returns the match for a given request.
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+	return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+	return r.addMatcher(f)
+}
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+	for k, v := range methods {
+		methods[k] = strings.ToUpper(v)
+	}
+	return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Path("/products/").Handler(ProductsHandler)
+//     r.Path("/products/{key}").Handler(ProductsHandler)
+//     r.Path("/articles/{category}/{id:[0-9]+}").
+//       Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved
+// calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, false, false)
+	return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, false, true, false)
+	return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined queries
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next "&" or "?".
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+	length := len(pairs)
+	if length%2 != 0 {
+		r.err = fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+		return nil
+	}
+	for i := 0; i < length; i += 2 {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], false, false, true); r.err != nil {
+			return r
+		}
+	}
+
+	return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.URL.Scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+func (r *Route) Schemes(schemes ...string) *Route {
+	for k, v := range schemes {
+		schemes[k] = strings.ToLower(v)
+	}
+	return r.addMatcher(schemeMatcher(schemes))
+}
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+	r.buildVarsFunc = f
+	return r
+}
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+//     r := mux.NewRouter()
+//     s := r.Host("www.example.com").Subrouter()
+//     s.HandleFunc("/products/", ProductsHandler)
+//     s.HandleFunc("/products/{key}", ProductHandler)
+//     s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+	router := &Router{parent: r, strictSlash: r.strictSlash}
+	r.addMatcher(router)
+	return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Name("article")
+//
+// ...a URL for it can be built using:
+//
+//     url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return an url.URL with the following path:
+//
+//     "/articles/technology/42"
+//
+// This also works for host variables:
+//
+//     r := mux.NewRouter()
+//     r.Host("{subdomain}.domain.com").
+//       HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Name("article")
+//
+//     // url.String() will be "http://news.domain.com/articles/technology/42"
+//     url, err := r.Get("article").URL("subdomain", "news",
+//                                      "category", "technology",
+//                                      "id", "42")
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil {
+		return nil, errors.New("mux: route doesn't have a host or path")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	var scheme, host, path string
+	if r.regexp.host != nil {
+		// Set a default scheme.
+		scheme = "http"
+		if host, err = r.regexp.host.url(values); err != nil {
+			return nil, err
+		}
+	}
+	if r.regexp.path != nil {
+		if path, err = r.regexp.path.url(values); err != nil {
+			return nil, err
+		}
+	}
+	return &url.URL{
+		Scheme: scheme,
+		Host:   host,
+		Path:   path,
+	}, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil || r.regexp.host == nil {
+		return nil, errors.New("mux: route doesn't have a host")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	host, err := r.regexp.host.url(values)
+	if err != nil {
+		return nil, err
+	}
+	return &url.URL{
+		Scheme: "http",
+		Host:   host,
+	}, nil
+}
+
+// URLPath builds the path part of the URL for a route. See Route.URL().
+//
+// The route must have a path defined.
+func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp == nil || r.regexp.path == nil {
+		return nil, errors.New("mux: route doesn't have a path")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	path, err := r.regexp.path.url(values)
+	if err != nil {
+		return nil, err
+	}
+	return &url.URL{
+		Path: path,
+	}, nil
+}
+
+// GetPathTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp == nil || r.regexp.path == nil {
+		return "", errors.New("mux: route doesn't have a path")
+	}
+	return r.regexp.path.template, nil
+}
+
+// GetHostTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a host.
+func (r *Route) GetHostTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp == nil || r.regexp.host == nil {
+		return "", errors.New("mux: route doesn't have a host")
+	}
+	return r.regexp.host.template, nil
+}
+
+// prepareVars converts the route variable pairs into a map. If the route has a
+// BuildVarsFunc, it is invoked.
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
+	m, err := mapFromPairsToString(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	return r.buildVars(m), nil
+}
+
+func (r *Route) buildVars(m map[string]string) map[string]string {
+	if r.parent != nil {
+		m = r.parent.buildVars(m)
+	}
+	if r.buildVarsFunc != nil {
+		m = r.buildVarsFunc(m)
+	}
+	return m
+}
+
+// ----------------------------------------------------------------------------
+// parentRoute
+// ----------------------------------------------------------------------------
+
+// parentRoute allows routes to know about parent host and path definitions.
+type parentRoute interface {
+	getNamedRoutes() map[string]*Route
+	getRegexpGroup() *routeRegexpGroup
+	buildVars(map[string]string) map[string]string
+}
+
+// getNamedRoutes returns the map where named routes are registered.
+func (r *Route) getNamedRoutes() map[string]*Route {
+	if r.parent == nil {
+		// During tests router is not always set.
+		r.parent = NewRouter()
+	}
+	return r.parent.getNamedRoutes()
+}
+
+// getRegexpGroup returns regexp definitions from this route.
+func (r *Route) getRegexpGroup() *routeRegexpGroup {
+	if r.regexp == nil {
+		if r.parent == nil {
+			// During tests router is not always set.
+			r.parent = NewRouter()
+		}
+		regexp := r.parent.getRegexpGroup()
+		if regexp == nil {
+			r.regexp = new(routeRegexpGroup)
+		} else {
+			// Copy.
+			r.regexp = &routeRegexpGroup{
+				host:    regexp.host,
+				path:    regexp.path,
+				queries: regexp.queries,
+			}
+		}
+	}
+	return r.regexp
+}
diff --git a/switchq/vendor/github.com/juju/ansiterm/LICENSE b/switchq/vendor/github.com/juju/ansiterm/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/switchq/vendor/github.com/juju/ansiterm/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/switchq/vendor/github.com/juju/ansiterm/Makefile b/switchq/vendor/github.com/juju/ansiterm/Makefile
new file mode 100644
index 0000000..212fdcb
--- /dev/null
+++ b/switchq/vendor/github.com/juju/ansiterm/Makefile
@@ -0,0 +1,14 @@
+# Copyright 2016 Canonical Ltd.
+# Licensed under the LGPLv3, see LICENCE file for details.
+
+default: check
+
+check:
+	go test
+
+docs:
+	godoc2md github.com/juju/ansiterm > README.md
+	sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/ansiterm?status.svg)](https://godoc.org/github.com/juju/ansiterm)|' README.md 
+
+
+.PHONY: default check docs
diff --git a/switchq/vendor/github.com/juju/ansiterm/README.md b/switchq/vendor/github.com/juju/ansiterm/README.md
new file mode 100644
index 0000000..5674387
--- /dev/null
+++ b/switchq/vendor/github.com/juju/ansiterm/README.md
@@ -0,0 +1,323 @@
+
+# ansiterm
+    import "github.com/juju/ansiterm"
+
+Package ansiterm provides a Writer that writes out the ANSI escape
+codes for color and styles.
+
+## type Color
+``` go
+type Color int
+```
+Color represents one of the standard 16 ANSI colors.
+
+
+
+``` go
+const (
+    Default Color
+    Black
+    Red
+    Green
+    Yellow
+    Blue
+    Magenta
+    Cyan
+    Gray
+    DarkGray
+    BrightRed
+    BrightGreen
+    BrightYellow
+    BrightBlue
+    BrightMagenta
+    BrightCyan
+    White
+)
+```
+
+### func (Color) String
+``` go
+func (c Color) String() string
+```
+String returns the name of the color.
+
+
+
+## type Context
+``` go
+type Context struct {
+    Foreground Color
+    Background Color
+    Styles     []Style
+}
+```
+Context provides a way to specify both foreground and background colors
+along with other styles and write text to a Writer with those colors and
+styles.
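+
+A minimal usage sketch (assumed; w is an *ansiterm.Writer, and this example
+is not part of the generated documentation):
+
+``` go
+ctx := ansiterm.Foreground(ansiterm.Red)
+ctx.Fprintf(w, "failed: %v\n", err)
+```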
+
+### func Background
+``` go
+func Background(color Color) *Context
+```
+Background is a convenience function that creates a Context with the
+specified color as the background color.
+
+
+### func Foreground
+``` go
+func Foreground(color Color) *Context
+```
+Foreground is a convenience function that creates a Context with the
+specified color as the foreground color.
+
+
+### func Styles
+``` go
+func Styles(styles ...Style) *Context
+```
+Styles is a convenience function that creates a Context with the
+specified styles set.
+
+### func (\*Context) Fprint
+``` go
+func (c *Context) Fprint(w sgrWriter, args ...interface{})
+```
+Fprint will set the sgr values of the writer to the specified foreground,
+background and styles, then formats using the default formats for its
+operands and writes to w. Spaces are added between operands when neither is
+a string. It returns the number of bytes written and any write error
+encountered.
+
+
+
+### func (\*Context) Fprintf
+``` go
+func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{})
+```
+Fprintf will set the sgr values of the writer to the specified
+foreground, background and styles, then write the formatted string,
+then reset the writer.
+
+
+
+### func (\*Context) SetBackground
+``` go
+func (c *Context) SetBackground(color Color) *Context
+```
+SetBackground sets the background to the specified color.
+
+
+
+### func (\*Context) SetForeground
+``` go
+func (c *Context) SetForeground(color Color) *Context
+```
+SetForeground sets the foreground to the specified color.
+
+
+
+### func (\*Context) SetStyle
+``` go
+func (c *Context) SetStyle(styles ...Style) *Context
+```
+SetStyle replaces the styles with the new values.
+
+
+
+## type Style
+``` go
+type Style int
+```
+
+
+``` go
+const (
+    Bold Style
+    Faint
+    Italic
+    Underline
+    Blink
+    Reverse
+    Strikethrough
+    Conceal
+)
+```
+
+### func (Style) String
+``` go
+func (s Style) String() string
+```
+
+
+## type TabWriter
+``` go
+type TabWriter struct {
+    Writer
+    // contains filtered or unexported fields
+}
+```
+TabWriter is a filter that inserts padding around tab-delimited
+columns in its input to align them in the output.
+
+It also allows setting of colors and styles over and above the standard
+tabwriter package.
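+
+A short usage sketch (assumed usage, not from the generated docs):
+
+``` go
+w := ansiterm.NewTabWriter(os.Stdout, 0, 1, 2, ' ', 0)
+fmt.Fprintln(w, "NAME\tSTATUS")
+fmt.Fprintln(w, "node-1\tready")
+w.Flush()
+```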
+
+### func NewTabWriter
+``` go
+func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter
+```
+NewTabWriter returns a writer that is able to set colors and styles.
+The ansi escape codes are stripped for width calculations.
+
+### func (\*TabWriter) Flush
+``` go
+func (t *TabWriter) Flush() error
+```
+Flush should be called after the last call to Write to ensure
+that any data buffered in the Writer is written to output. Any
+incomplete escape sequence at the end is considered
+complete for formatting purposes.
+
+
+
+### func (\*TabWriter) Init
+``` go
+func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter
+```
+A Writer must be initialized with a call to Init. The first parameter (output)
+specifies the filter output. The remaining parameters control the formatting:
+
+
+	minwidth	minimal cell width including any padding
+	tabwidth	width of tab characters (equivalent number of spaces)
+	padding		padding added to a cell before computing its width
+	padchar		ASCII char used for padding
+			if padchar == '\t', the Writer will assume that the
+			width of a '\t' in the formatted output is tabwidth,
+			and cells are left-aligned independent of align_left
+			(for correct-looking results, tabwidth must correspond
+			to the tab width in the viewer displaying the result)
+	flags		formatting control
+
+
+
+## type Writer
+``` go
+type Writer struct {
+    io.Writer
+    // contains filtered or unexported fields
+}
+```
+Writer allows colors and styles to be specified. If the io.Writer
+is not a terminal capable of color, all attempts to set colors or
+styles are no-ops.
+
+
+
+
+
+
+
+
+
+### func NewWriter
+``` go
+func NewWriter(w io.Writer) *Writer
+```
+NewWriter returns a Writer that allows the caller to specify colors and
+styles. If the io.Writer is not a terminal capable of color, all attempts
+to set colors or styles are no-ops.
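+
+A short sketch of driving the `Writer` directly (the warning text is
+illustrative):
+
+
+	w := ansiterm.NewWriter(os.Stdout)
+	w.SetForeground(ansiterm.Yellow)
+	fmt.Fprint(w, "warning: disk nearly full")
+	w.Reset() // back to the default colors and styles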
+
+
+
+
+### func (\*Writer) ClearStyle
+``` go
+func (w *Writer) ClearStyle(s Style)
+```
+ClearStyle clears the text style.
+
+
+
+### func (\*Writer) Reset
+``` go
+func (w *Writer) Reset()
+```
+Reset restores the default foreground and background colors with no styles.
+
+
+
+### func (\*Writer) SetBackground
+``` go
+func (w *Writer) SetBackground(c Color)
+```
+SetBackground sets the background color.
+
+
+
+### func (\*Writer) SetForeground
+``` go
+func (w *Writer) SetForeground(c Color)
+```
+SetForeground sets the foreground color.
+
+
+
+### func (\*Writer) SetStyle
+``` go
+func (w *Writer) SetStyle(s Style)
+```
+SetStyle sets the text style.
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
diff --git a/switchq/vendor/github.com/juju/ansiterm/attribute.go b/switchq/vendor/github.com/juju/ansiterm/attribute.go
new file mode 100644
index 0000000..f2daa48
--- /dev/null
+++ b/switchq/vendor/github.com/juju/ansiterm/attribute.go
@@ -0,0 +1,50 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+type attribute int
+
+const (
+	unknownAttribute attribute = -1
+	reset            attribute = 0
+)
+
+// sgr returns the escape sequence for the Select Graphic Rendition
+// for the attribute.
+func (a attribute) sgr() string {
+	if a < 0 {
+		return ""
+	}
+	return fmt.Sprintf("\x1b[%dm", a)
+}
+
+type attributes []attribute
+
+func (a attributes) Len() int           { return len(a) }
+func (a attributes) Less(i, j int) bool { return a[i] < a[j] }
+func (a attributes) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+// sgr returns the combined escape sequence for the Select Graphic Rendition
+// for the sequence of attributes.
+func (a attributes) sgr() string {
+	switch len(a) {
+	case 0:
+		return ""
+	case 1:
+		return a[0].sgr()
+	default:
+		sort.Sort(a)
+		var values []string
+		for _, attr := range a {
+			values = append(values, fmt.Sprint(attr))
+		}
+		return fmt.Sprintf("\x1b[%sm", strings.Join(values, ";"))
+	}
+}
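+
+// As an illustration (not part of the upstream source): for the attribute
+// values 1 (bold) and 31 (red foreground), attributes{31, 1}.sgr() sorts the
+// values and returns "\x1b[1;31m", a single combined SGR escape sequence.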
diff --git a/switchq/vendor/github.com/juju/ansiterm/color.go b/switchq/vendor/github.com/juju/ansiterm/color.go
new file mode 100644
index 0000000..0a97de3
--- /dev/null
+++ b/switchq/vendor/github.com/juju/ansiterm/color.go
@@ -0,0 +1,119 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+const (
+	_ Color = iota
+	Default
+	Black
+	Red
+	Green
+	Yellow
+	Blue
+	Magenta
+	Cyan
+	Gray
+	DarkGray
+	BrightRed
+	BrightGreen
+	BrightYellow
+	BrightBlue
+	BrightMagenta
+	BrightCyan
+	White
+)
+
+// Color represents one of the standard 16 ANSI colors.
+type Color int
+
+// String returns the name of the color.
+func (c Color) String() string {
+	switch c {
+	case Default:
+		return "default"
+	case Black:
+		return "black"
+	case Red:
+		return "red"
+	case Green:
+		return "green"
+	case Yellow:
+		return "yellow"
+	case Blue:
+		return "blue"
+	case Magenta:
+		return "magenta"
+	case Cyan:
+		return "cyan"
+	case Gray:
+		return "gray"
+	case DarkGray:
+		return "darkgray"
+	case BrightRed:
+		return "brightred"
+	case BrightGreen:
+		return "brightgreen"
+	case BrightYellow:
+		return "brightyellow"
+	case BrightBlue:
+		return "brightblue"
+	case BrightMagenta:
+		return "brightmagenta"
+	case BrightCyan:
+		return "brightcyan"
+	case White:
+		return "white"
+	default:
+		return ""
+	}
+}
+
+func (c Color) foreground() attribute {
+	switch c {
+	case Default:
+		return 39
+	case Black:
+		return 30
+	case Red:
+		return 31
+	case Green:
+		return 32
+	case Yellow:
+		return 33
+	case Blue:
+		return 34
+	case Magenta:
+		return 35
+	case Cyan:
+		return 36
+	case Gray:
+		return 37
+	case DarkGray:
+		return 90
+	case BrightRed:
+		return 91
+	case BrightGreen:
+		return 92
+	case BrightYellow:
+		return 93
+	case BrightBlue:
+		return 94
+	case BrightMagenta:
+		return 95
+	case BrightCyan:
+		return 96
+	case White:
+		return 97
+	default:
+		return unknownAttribute
+	}
+}
+
+func (c Color) background() attribute {
+	value := c.foreground()
+	if value != unknownAttribute {
+		return value + 10
+	}
+	return value
+}
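+
+// As an illustration (not part of the upstream source): Red.foreground() is
+// attribute 31, so Red.background() is attribute 41, matching the standard
+// ANSI SGR numbering where background codes are offset by 10.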
diff --git a/switchq/vendor/github.com/juju/ansiterm/context.go b/switchq/vendor/github.com/juju/ansiterm/context.go
new file mode 100644
index 0000000..e61a867
--- /dev/null
+++ b/switchq/vendor/github.com/juju/ansiterm/context.go
@@ -0,0 +1,95 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"fmt"
+	"io"
+)
+
+// Context provides a way to specify both foreground and background colors
+// along with other styles and write text to a Writer with those colors and
+// styles.
+type Context struct {
+	Foreground Color
+	Background Color
+	Styles     []Style
+}
+
+// Foreground is a convenience function that creates a Context with the
+// specified color as the foreground color.
+func Foreground(color Color) *Context {
+	return &Context{Foreground: color}
+}
+
+// Background is a convenience function that creates a Context with the
+// specified color as the background color.
+func Background(color Color) *Context {
+	return &Context{Background: color}
+}
+
+// Styles is a convenience function that creates a Context with the
+// specified styles set.
+func Styles(styles ...Style) *Context {
+	return &Context{Styles: styles}
+}
+
+// SetForeground sets the foreground to the specified color.
+func (c *Context) SetForeground(color Color) *Context {
+	c.Foreground = color
+	return c
+}
+
+// SetBackground sets the background to the specified color.
+func (c *Context) SetBackground(color Color) *Context {
+	c.Background = color
+	return c
+}
+
+// SetStyle replaces the styles with the new values.
+func (c *Context) SetStyle(styles ...Style) *Context {
+	c.Styles = styles
+	return c
+}
+
+type sgrWriter interface {
+	io.Writer
+	writeSGR(value sgr)
+}
+
+// Fprintf will set the sgr values of the writer to the specified
+// foreground, background and styles, then write the formatted string,
+// then reset the writer.
+func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{}) {
+	w.writeSGR(c)
+	fmt.Fprintf(w, format, args...)
+	w.writeSGR(reset)
+}
+
+// Fprint will set the sgr values of the writer to the specified foreground,
+// background and styles, then formats using the default formats for its
+// operands and writes to w. Spaces are added between operands when neither is
+// a string. The writer is then reset.
+func (c *Context) Fprint(w sgrWriter, args ...interface{}) {
+	w.writeSGR(c)
+	fmt.Fprint(w, args...)
+	w.writeSGR(reset)
+}
+
+func (c *Context) sgr() string {
+	var values attributes
+	if foreground := c.Foreground.foreground(); foreground != unknownAttribute {
+		values = append(values, foreground)
+	}
+	if background := c.Background.background(); background != unknownAttribute {
+		values = append(values, background)
+	}
+	for _, style := range c.Styles {
+		if value := style.enable(); value != unknownAttribute {
+			values = append(values, value)
+		}
+	}
+	return values.sgr()
+}
diff --git a/switchq/vendor/github.com/juju/ansiterm/doc.go b/switchq/vendor/github.com/juju/ansiterm/doc.go
new file mode 100644
index 0000000..7827007
--- /dev/null
+++ b/switchq/vendor/github.com/juju/ansiterm/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// Package ansiterm provides a Writer that writes out the ANSI escape
+// codes for color and styles.
+package ansiterm
diff --git a/switchq/vendor/github.com/juju/ansiterm/style.go b/switchq/vendor/github.com/juju/ansiterm/style.go
new file mode 100644
index 0000000..0be42da
--- /dev/null
+++ b/switchq/vendor/github.com/juju/ansiterm/style.go
@@ -0,0 +1,72 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+const (
+	_ Style = iota
+	Bold
+	Faint
+	Italic
+	Underline
+	Blink
+	Reverse
+	Strikethrough
+	Conceal
+)
+
+type Style int
+
+func (s Style) String() string {
+	switch s {
+	case Bold:
+		return "bold"
+	case Faint:
+		return "faint"
+	case Italic:
+		return "italic"
+	case Underline:
+		return "underline"
+	case Blink:
+		return "blink"
+	case Reverse:
+		return "reverse"
+	case Strikethrough:
+		return "strikethrough"
+	case Conceal:
+		return "conceal"
+	default:
+		return ""
+	}
+}
+
+func (s Style) enable() attribute {
+	switch s {
+	case Bold:
+		return 1
+	case Faint:
+		return 2
+	case Italic:
+		return 3
+	case Underline:
+		return 4
+	case Blink:
+		return 5
+	case Reverse:
+		return 7
+	case Conceal:
+		return 8
+	case Strikethrough:
+		return 9
+	default:
+		return unknownAttribute
+	}
+}
+
+func (s Style) disable() attribute {
+	value := s.enable()
+	if value != unknownAttribute {
+		return value + 20
+	}
+	return value
+}
diff --git a/switchq/vendor/github.com/juju/ansiterm/tabwriter.go b/switchq/vendor/github.com/juju/ansiterm/tabwriter.go
new file mode 100644
index 0000000..1ff6faa
--- /dev/null
+++ b/switchq/vendor/github.com/juju/ansiterm/tabwriter.go
@@ -0,0 +1,64 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"io"
+
+	"github.com/juju/ansiterm/tabwriter"
+)
+
+// NewTabWriter returns a writer that is able to set colors and styles.
+// The ansi escape codes are stripped for width calculations.
+func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter {
+	return new(TabWriter).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
+
+// TabWriter is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// It also allows setting of colors and styles over and above the standard
+// tabwriter package.
+type TabWriter struct {
+	Writer
+	tw tabwriter.Writer
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is considered
+// complete for formatting purposes.
+//
+func (t *TabWriter) Flush() error {
+	return t.tw.Flush()
+}
+
+// SetColumnAlignRight will mark a particular column as align right.
+// This is reset on the next flush.
+func (t *TabWriter) SetColumnAlignRight(column int) {
+	t.tw.SetColumnAlignRight(column)
+}
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+//	minwidth	minimal cell width including any padding
+//	tabwidth	width of tab characters (equivalent number of spaces)
+//	padding		padding added to a cell before computing its width
+//	padchar		ASCII char used for padding
+//			if padchar == '\t', the Writer will assume that the
+//			width of a '\t' in the formatted output is tabwidth,
+//			and cells are left-aligned independent of align_left
+//			(for correct-looking results, tabwidth must correspond
+//			to the tab width in the viewer displaying the result)
+//	flags		formatting control
+//
+func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter {
+	writer, colorCapable := colorEnabledWriter(output)
+	t.Writer = Writer{
+		Writer:  t.tw.Init(writer, minwidth, tabwidth, padding, padchar, flags),
+		noColor: !colorCapable,
+	}
+	return t
+}
diff --git a/switchq/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go b/switchq/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go
new file mode 100644
index 0000000..98949d0
--- /dev/null
+++ b/switchq/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go
@@ -0,0 +1,587 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is mostly a copy of the Go standard library text/tabwriter, with
+// the additional stripping of ANSI control characters for width calculations.
+
+// Package tabwriter implements a write filter (tabwriter.Writer) that
+// translates tabbed columns in input into properly aligned text.
+//
+// The package is using the Elastic Tabstops algorithm described at
+// http://nickgravgaard.com/elastictabstops/index.html.
+//
+package tabwriter
+
+import (
+	"bytes"
+	"io"
+	"unicode/utf8"
+
+	"github.com/lunixbochs/vtclean"
+)
+
+// ----------------------------------------------------------------------------
+// Filter implementation
+
+// A cell represents a segment of text terminated by tabs or line breaks.
+// The text itself is stored in a separate buffer; cell only describes the
+// segment's size in bytes, its width in runes, and whether it's an htab
+// ('\t') terminated cell.
+//
+type cell struct {
+	size  int  // cell size in bytes
+	width int  // cell width in runes
+	htab  bool // true if the cell is terminated by an htab ('\t')
+}
+
+// A Writer is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// The Writer treats incoming bytes as UTF-8 encoded text consisting
+// of cells terminated by (horizontal or vertical) tabs or line
+// breaks (newline or formfeed characters). Cells in adjacent lines
+// constitute a column. The Writer inserts padding as needed to
+// make all cells in a column have the same width, effectively
+// aligning the columns. It assumes that all characters have the
+// same width except for tabs for which a tabwidth must be specified.
+// Note that cells are tab-terminated, not tab-separated: trailing
+// non-tab text at the end of a line does not form a column cell.
+//
+// The Writer assumes that all Unicode code points have the same width;
+// this may not be true in some fonts.
+//
+// If DiscardEmptyColumns is set, empty columns that are terminated
+// entirely by vertical (or "soft") tabs are discarded. Columns
+// terminated by horizontal (or "hard") tabs are not affected by
+// this flag.
+//
+// If a Writer is configured to filter HTML, HTML tags and entities
+// are passed through. The widths of tags and entities are
+// assumed to be zero (tags) and one (entities) for formatting purposes.
+//
+// A segment of text may be escaped by bracketing it with Escape
+// characters. The tabwriter passes escaped text segments through
+// unchanged. In particular, it does not interpret any tabs or line
+// breaks within the segment. If the StripEscape flag is set, the
+// Escape characters are stripped from the output; otherwise they
+// are passed through as well. For the purpose of formatting, the
+// width of the escaped text is always computed excluding the Escape
+// characters.
+//
+// The formfeed character ('\f') acts like a newline but it also
+// terminates all columns in the current line (effectively calling
+// Flush). Cells in the next line start new columns. Unless found
+// inside an HTML tag or inside an escaped text segment, formfeed
+// characters appear as newlines in the output.
+//
+// The Writer must buffer input internally, because proper spacing
+// of one line may depend on the cells in future lines. Clients must
+// call Flush when done calling Write.
+//
+type Writer struct {
+	// configuration
+	output   io.Writer
+	minwidth int
+	tabwidth int
+	padding  int
+	padbytes [8]byte
+	flags    uint
+
+	// current state
+	buf       bytes.Buffer // collected text excluding tabs or line breaks
+	pos       int          // buffer position up to which cell.width of incomplete cell has been computed
+	cell      cell         // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
+	endChar   byte         // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
+	lines     [][]cell     // list of lines; each line is a list of cells
+	widths    []int        // list of column widths in runes - re-used during formatting
+	alignment map[int]uint // column alignment
+}
+
+func (b *Writer) addLine() { b.lines = append(b.lines, []cell{}) }
+
+// Reset the current state.
+func (b *Writer) reset() {
+	b.buf.Reset()
+	b.pos = 0
+	b.cell = cell{}
+	b.endChar = 0
+	b.lines = b.lines[0:0]
+	b.widths = b.widths[0:0]
+	b.alignment = make(map[int]uint)
+	b.addLine()
+}
+
+// Internal representation (current state):
+//
+// - all text written is appended to buf; tabs and line breaks are stripped away
+// - at any given time there is a (possibly empty) incomplete cell at the end
+//   (the cell starts after a tab or line break)
+// - cell.size is the number of bytes belonging to the cell so far
+// - cell.width is text width in runes of that cell from the start of the cell to
+//   position pos; html tags and entities are excluded from this width if html
+//   filtering is enabled
+// - the sizes and widths of processed text are kept in the lines list
+//   which contains a list of cells for each line
+// - the widths list is a temporary list with current widths used during
+//   formatting; it is kept in Writer because it's re-used
+//
+//                    |<---------- size ---------->|
+//                    |                            |
+//                    |<- width ->|<- ignored ->|  |
+//                    |           |             |  |
+// [---processed---tab------------<tag>...</tag>...]
+// ^                  ^                         ^
+// |                  |                         |
+// buf                start of incomplete cell  pos
+
+// Formatting can be controlled with these flags.
+const (
+	// Ignore html tags and treat entities (starting with '&'
+	// and ending in ';') as single characters (width = 1).
+	FilterHTML uint = 1 << iota
+
+	// Strip Escape characters bracketing escaped text segments
+	// instead of passing them through unchanged with the text.
+	StripEscape
+
+	// Force right-alignment of cell content.
+	// Default is left-alignment.
+	AlignRight
+
+	// Handle empty columns as if they were not present in
+	// the input in the first place.
+	DiscardEmptyColumns
+
+	// Always use tabs for indentation columns (i.e., padding of
+	// leading empty cells on the left) independent of padchar.
+	TabIndent
+
+	// Print a vertical bar ('|') between columns (after formatting).
+	// Discarded columns appear as zero-width columns ("||").
+	Debug
+)
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+//	minwidth	minimal cell width including any padding
+//	tabwidth	width of tab characters (equivalent number of spaces)
+//	padding		padding added to a cell before computing its width
+//	padchar		ASCII char used for padding
+//			if padchar == '\t', the Writer will assume that the
+//			width of a '\t' in the formatted output is tabwidth,
+//			and cells are left-aligned independent of align_left
+//			(for correct-looking results, tabwidth must correspond
+//			to the tab width in the viewer displaying the result)
+//	flags		formatting control
+//
+func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+	if minwidth < 0 || tabwidth < 0 || padding < 0 {
+		panic("negative minwidth, tabwidth, or padding")
+	}
+	b.output = output
+	b.minwidth = minwidth
+	b.tabwidth = tabwidth
+	b.padding = padding
+	for i := range b.padbytes {
+		b.padbytes[i] = padchar
+	}
+	if padchar == '\t' {
+		// tab padding enforces left-alignment
+		flags &^= AlignRight
+	}
+	b.flags = flags
+
+	b.reset()
+
+	return b
+}
+
+// debugging support (keep code around)
+func (b *Writer) dump() {
+	pos := 0
+	for i, line := range b.lines {
+		print("(", i, ") ")
+		for _, c := range line {
+			print("[", string(b.buf.Bytes()[pos:pos+c.size]), "]")
+			pos += c.size
+		}
+		print("\n")
+	}
+	print("\n")
+}
+
+// local error wrapper so we can distinguish errors we want to return
+// as errors from genuine panics (which we don't want to return as errors)
+type osError struct {
+	err error
+}
+
+func (b *Writer) write0(buf []byte) {
+	n, err := b.output.Write(buf)
+	if n != len(buf) && err == nil {
+		err = io.ErrShortWrite
+	}
+	if err != nil {
+		panic(osError{err})
+	}
+}
+
+func (b *Writer) writeN(src []byte, n int) {
+	for n > len(src) {
+		b.write0(src)
+		n -= len(src)
+	}
+	b.write0(src[0:n])
+}
+
+var (
+	newline = []byte{'\n'}
+	tabs    = []byte("\t\t\t\t\t\t\t\t")
+)
+
+func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
+	if b.padbytes[0] == '\t' || useTabs {
+		// padding is done with tabs
+		if b.tabwidth == 0 {
+			return // tabs have no width - can't do any padding
+		}
+		// make cellw the smallest multiple of b.tabwidth
+		cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
+		n := cellw - textw // amount of padding
+		if n < 0 {
+			panic("internal error")
+		}
+		b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
+		return
+	}
+
+	// padding is done with non-tab characters
+	b.writeN(b.padbytes[0:], cellw-textw)
+}
+
+var vbar = []byte{'|'}
+
+func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
+	pos = pos0
+	for i := line0; i < line1; i++ {
+		line := b.lines[i]
+
+		// if TabIndent is set, use tabs to pad leading empty cells
+		useTabs := b.flags&TabIndent != 0
+
+		for j, c := range line {
+			if j > 0 && b.flags&Debug != 0 {
+				// indicate column break
+				b.write0(vbar)
+			}
+
+			if c.size == 0 {
+				// empty cell
+				if j < len(b.widths) {
+					b.writePadding(c.width, b.widths[j], useTabs)
+				}
+			} else {
+				// non-empty cell
+				useTabs = false
+				alignColumnRight := b.alignment[j] == AlignRight
+				if (b.flags&AlignRight == 0) && !alignColumnRight { // align left
+					b.write0(b.buf.Bytes()[pos : pos+c.size])
+					pos += c.size
+					if j < len(b.widths) {
+						b.writePadding(c.width, b.widths[j], false)
+					}
+				} else if alignColumnRight && j < len(b.widths) {
+					// just this column
+					internalSize := b.widths[j] - b.padding
+					if j < len(b.widths) {
+						b.writePadding(c.width, internalSize, false)
+					}
+					b.write0(b.buf.Bytes()[pos : pos+c.size])
+					if b.padding > 0 {
+						b.writePadding(0, b.padding, false)
+					}
+					pos += c.size
+				} else { // align right
+					if j < len(b.widths) {
+						b.writePadding(c.width, b.widths[j], false)
+					}
+					b.write0(b.buf.Bytes()[pos : pos+c.size])
+					pos += c.size
+				}
+			}
+		}
+
+		if i+1 == len(b.lines) {
+			// last buffered line - we don't have a newline, so just write
+			// any outstanding buffered data
+			b.write0(b.buf.Bytes()[pos : pos+b.cell.size])
+			pos += b.cell.size
+		} else {
+			// not the last line - write newline
+			b.write0(newline)
+		}
+	}
+	return
+}
+
+// Format the text between line0 and line1 (excluding line1); pos
+// is the buffer position corresponding to the beginning of line0.
+// Returns the buffer position corresponding to the beginning of
+// line1.
+//
+func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
+	pos = pos0
+	column := len(b.widths)
+	for this := line0; this < line1; this++ {
+		line := b.lines[this]
+
+		if column < len(line)-1 {
+			// cell exists in this column => this line
+			// has more cells than the previous line
+			// (the last cell per line is ignored because cells are
+			// tab-terminated; the last cell per line describes the
+			// text before the newline/formfeed and does not belong
+			// to a column)
+
+			// print unprinted lines until beginning of block
+			pos = b.writeLines(pos, line0, this)
+			line0 = this
+
+			// column block begin
+			width := b.minwidth // minimal column width
+			discardable := true // true if all cells in this column are empty and "soft"
+			for ; this < line1; this++ {
+				line = b.lines[this]
+				if column < len(line)-1 {
+					// cell exists in this column
+					c := line[column]
+					// update width
+					if w := c.width + b.padding; w > width {
+						width = w
+					}
+					// update discardable
+					if c.width > 0 || c.htab {
+						discardable = false
+					}
+				} else {
+					break
+				}
+			}
+			// column block end
+
+			// discard empty columns if necessary
+			if discardable && b.flags&DiscardEmptyColumns != 0 {
+				width = 0
+			}
+
+			// format and print all columns to the right of this column
+			// (we know the widths of this column and all columns to the left)
+			b.widths = append(b.widths, width) // push width
+			pos = b.format(pos, line0, this)
+			b.widths = b.widths[0 : len(b.widths)-1] // pop width
+			line0 = this
+		}
+	}
+
+	// print unprinted lines until end
+	return b.writeLines(pos, line0, line1)
+}
+
+// Append text to current cell.
+func (b *Writer) append(text []byte) {
+	b.buf.Write(text)
+	b.cell.size += len(text)
+}
+
+// Update the cell width.
+func (b *Writer) updateWidth() {
+	// ---- Changes here -----
+	newChars := b.buf.Bytes()[b.pos:b.buf.Len()]
+	cleaned := vtclean.Clean(string(newChars), false) // false to strip colors
+	b.cell.width += utf8.RuneCount([]byte(cleaned))
+	// --- end of changes ----
+	b.pos = b.buf.Len()
+}
+
+// To escape a text segment, bracket it with Escape characters.
+// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
+// does not terminate a cell and constitutes a single character of
+// width one for formatting purposes.
+//
+// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
+//
+const Escape = '\xff'
+
+// Start escaped mode.
+func (b *Writer) startEscape(ch byte) {
+	switch ch {
+	case Escape:
+		b.endChar = Escape
+	case '<':
+		b.endChar = '>'
+	case '&':
+		b.endChar = ';'
+	}
+}
+
+// Terminate escaped mode. If the escaped text was an HTML tag, its width
+// is assumed to be zero for formatting purposes; if it was an HTML entity,
+// its width is assumed to be one. In all other cases, the width is the
+// unicode width of the text.
+//
+func (b *Writer) endEscape() {
+	switch b.endChar {
+	case Escape:
+		b.updateWidth()
+		if b.flags&StripEscape == 0 {
+			b.cell.width -= 2 // don't count the Escape chars
+		}
+	case '>': // tag of zero width
+	case ';':
+		b.cell.width++ // entity, count as one rune
+	}
+	b.pos = b.buf.Len()
+	b.endChar = 0
+}
+
+// Terminate the current cell by adding it to the list of cells of the
+// current line. Returns the number of cells in that line.
+//
+func (b *Writer) terminateCell(htab bool) int {
+	b.cell.htab = htab
+	line := &b.lines[len(b.lines)-1]
+	*line = append(*line, b.cell)
+	b.cell = cell{}
+	return len(*line)
+}
+
+func handlePanic(err *error, op string) {
+	if e := recover(); e != nil {
+		if nerr, ok := e.(osError); ok {
+			*err = nerr.err
+			return
+		}
+		panic("tabwriter: panic during " + op)
+	}
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is considered
+// complete for formatting purposes.
+//
+func (b *Writer) Flush() (err error) {
+	defer b.reset() // even in the presence of errors
+	defer handlePanic(&err, "Flush")
+
+	// add current cell if not empty
+	if b.cell.size > 0 {
+		if b.endChar != 0 {
+			// inside escape - terminate it even if incomplete
+			b.endEscape()
+		}
+		b.terminateCell(false)
+	}
+
+	// format contents of buffer
+	b.format(0, 0, len(b.lines))
+
+	return
+}
+
+var hbar = []byte("---\n")
+
+// SetColumnAlignRight will mark a particular column as align right.
+// This is reset on the next flush.
+func (b *Writer) SetColumnAlignRight(column int) {
+	b.alignment[column] = AlignRight
+}
+
+// Write writes buf to the writer b.
+// The only errors returned are ones encountered
+// while writing to the underlying output stream.
+//
+func (b *Writer) Write(buf []byte) (n int, err error) {
+	defer handlePanic(&err, "Write")
+
+	// split text into cells
+	n = 0
+	for i, ch := range buf {
+		if b.endChar == 0 {
+			// outside escape
+			switch ch {
+			case '\t', '\v', '\n', '\f':
+				// end of cell
+				b.append(buf[n:i])
+				b.updateWidth()
+				n = i + 1 // ch consumed
+				ncells := b.terminateCell(ch == '\t')
+				if ch == '\n' || ch == '\f' {
+					// terminate line
+					b.addLine()
+					if ch == '\f' || ncells == 1 {
+						// A '\f' always forces a flush. Otherwise, if the previous
+						// line has only one cell, that cell has no impact on the
+						// formatting of the following lines (the last cell per
+						// line is ignored by format()), so we can flush the
+						// Writer contents.
+						if err = b.Flush(); err != nil {
+							return
+						}
+						if ch == '\f' && b.flags&Debug != 0 {
+							// indicate section break
+							b.write0(hbar)
+						}
+					}
+				}
+
+			case Escape:
+				// start of escaped sequence
+				b.append(buf[n:i])
+				b.updateWidth()
+				n = i
+				if b.flags&StripEscape != 0 {
+					n++ // strip Escape
+				}
+				b.startEscape(Escape)
+
+			case '<', '&':
+				// possibly an html tag/entity
+				if b.flags&FilterHTML != 0 {
+					// begin of tag/entity
+					b.append(buf[n:i])
+					b.updateWidth()
+					n = i
+					b.startEscape(ch)
+				}
+			}
+
+		} else {
+			// inside escape
+			if ch == b.endChar {
+				// end of tag/entity
+				j := i + 1
+				if ch == Escape && b.flags&StripEscape != 0 {
+					j = i // strip Escape
+				}
+				b.append(buf[n:j])
+				n = i + 1 // ch consumed
+				b.endEscape()
+			}
+		}
+	}
+
+	// append leftover text
+	b.append(buf[n:])
+	n = len(buf)
+	return
+}
+
+// NewWriter allocates and initializes a new tabwriter.Writer.
+// The parameters are the same as for the Init function.
+//
+func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+	return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
diff --git a/switchq/vendor/github.com/juju/ansiterm/terminal.go b/switchq/vendor/github.com/juju/ansiterm/terminal.go
new file mode 100644
index 0000000..96fd11c
--- /dev/null
+++ b/switchq/vendor/github.com/juju/ansiterm/terminal.go
@@ -0,0 +1,32 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"io"
+	"os"
+
+	"github.com/mattn/go-colorable"
+	"github.com/mattn/go-isatty"
+)
+
+// colorEnabledWriter returns a writer that can handle the ansi color codes
+// and true if the writer passed in is a terminal capable of color. If the
+// TERM environment variable is set to "dumb", the terminal is not considered
+// color capable.
+func colorEnabledWriter(w io.Writer) (io.Writer, bool) {
+	f, ok := w.(*os.File)
+	if !ok {
+		return w, false
+	}
+	// Check the TERM environment variable specifically
+	// to check for "dumb" terminals.
+	if os.Getenv("TERM") == "dumb" {
+		return w, false
+	}
+	if !isatty.IsTerminal(f.Fd()) {
+		return w, false
+	}
+	return colorable.NewColorable(f), true
+}
diff --git a/switchq/vendor/github.com/juju/ansiterm/writer.go b/switchq/vendor/github.com/juju/ansiterm/writer.go
new file mode 100644
index 0000000..32437bb
--- /dev/null
+++ b/switchq/vendor/github.com/juju/ansiterm/writer.go
@@ -0,0 +1,74 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"fmt"
+	"io"
+)
+
+// Writer allows colors and styles to be specified. If the io.Writer
+// is not a terminal capable of color, all attempts to set colors or
+// styles are no-ops.
+type Writer struct {
+	io.Writer
+
+	noColor bool
+}
+
+// NewWriter returns a Writer that allows the caller to specify colors and
+// styles. If the io.Writer is not a terminal capable of color, all attempts
+// to set colors or styles are no-ops.
+func NewWriter(w io.Writer) *Writer {
+	writer, colorCapable := colorEnabledWriter(w)
+	return &Writer{
+		Writer:  writer,
+		noColor: !colorCapable,
+	}
+}
+
+// SetColorCapable forces the writer to either write the ANSI escape codes
+// if capable is true, or to not write them if capable is false.
+func (w *Writer) SetColorCapable(capable bool) {
+	w.noColor = !capable
+}
+
+// SetForeground sets the foreground color.
+func (w *Writer) SetForeground(c Color) {
+	w.writeSGR(c.foreground())
+}
+
+// SetBackground sets the background color.
+func (w *Writer) SetBackground(c Color) {
+	w.writeSGR(c.background())
+}
+
+// SetStyle sets the text style.
+func (w *Writer) SetStyle(s Style) {
+	w.writeSGR(s.enable())
+}
+
+// ClearStyle clears the text style.
+func (w *Writer) ClearStyle(s Style) {
+	w.writeSGR(s.disable())
+}
+
+// Reset restores the default foreground and background colors with no styles.
+func (w *Writer) Reset() {
+	w.writeSGR(reset)
+}
+
+type sgr interface {
+	// sgr returns the combined escape sequence for the Select Graphic Rendition.
+	sgr() string
+}
+
+// writeSGR takes the appropriate integer SGR parameters
+// and writes out the ANSI escape code.
+func (w *Writer) writeSGR(value sgr) {
+	if w.noColor {
+		return
+	}
+	fmt.Fprint(w, value.sgr())
+}
diff --git a/switchq/vendor/github.com/juju/errors/LICENSE b/switchq/vendor/github.com/juju/errors/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/switchq/vendor/github.com/juju/errors/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/switchq/vendor/github.com/juju/errors/Makefile b/switchq/vendor/github.com/juju/errors/Makefile
new file mode 100644
index 0000000..ab7c2e6
--- /dev/null
+++ b/switchq/vendor/github.com/juju/errors/Makefile
@@ -0,0 +1,11 @@
+default: check
+
+check:
+	go test && go test -compiler gccgo
+
+docs:
+	godoc2md github.com/juju/errors > README.md
+	sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/errors?status.svg)](https://godoc.org/github.com/juju/errors)|' README.md 
+
+
+.PHONY: default check docs
diff --git a/switchq/vendor/github.com/juju/errors/README.md b/switchq/vendor/github.com/juju/errors/README.md
new file mode 100644
index 0000000..ee24891
--- /dev/null
+++ b/switchq/vendor/github.com/juju/errors/README.md
@@ -0,0 +1,536 @@
+
+# errors
+    import "github.com/juju/errors"
+
+[![GoDoc](https://godoc.org/github.com/juju/errors?status.svg)](https://godoc.org/github.com/juju/errors)
+
+The juju/errors package provides an easy way to annotate errors without
+losing the original error context.
+
+The exported `New` and `Errorf` functions are designed to replace the
+`errors.New` and `fmt.Errorf` functions respectively. The same underlying
+error is there, but the package also records the location at which the error
+was created.
+
+A primary use case for this library is to add extra context any time an
+error is returned from a function.
+
+
+	    if err := SomeFunc(); err != nil {
+		    return err
+		}
+
+This instead becomes:
+
+
+	    if err := SomeFunc(); err != nil {
+		    return errors.Trace(err)
+		}
+
+which just records the file and line number of the Trace call, or
+
+
+	    if err := SomeFunc(); err != nil {
+		    return errors.Annotate(err, "more context")
+		}
+
+which also adds an annotation to the error.
+
+When you want to check to see if an error is of a particular type, a helper
+function is normally exported by the package that returned the error, like the
+`os` package does.  The underlying cause of the error is available using the
+`Cause` function.
+
+
+	os.IsNotExist(errors.Cause(err))
+
+The result of the `Error()` call on an annotated error is the annotations joined
+with colons, then the result of the `Error()` method for the underlying error
+that was the cause.
+
+
+	err := errors.Errorf("original")
+	err = errors.Annotatef(err, "context")
+	err = errors.Annotatef(err, "more context")
+	err.Error() -> "more context: context: original"
+
+Obviously recording the file, line and function is not very useful if you
+cannot get them back out again.
+
+
+	errors.ErrorStack(err)
+
+will return something like:
+
+
+	first error
+	github.com/juju/errors/annotation_test.go:193:
+	github.com/juju/errors/annotation_test.go:194: annotation
+	github.com/juju/errors/annotation_test.go:195:
+	github.com/juju/errors/annotation_test.go:196: more context
+	github.com/juju/errors/annotation_test.go:197:
+
+The first error was generated by an external system, so there was no location
+associated. The second, fourth, and last lines were generated with Trace calls,
+and the other two through Annotate.
+
+Sometimes when responding to an error you want to return a more specific error
+for the situation.
+
+
+	    if err := FindField(field); err != nil {
+		    return errors.Wrap(err, errors.NotFoundf(field))
+		}
+
+This returns an error where the complete error stack is still available, and
+`errors.Cause()` will return the `NotFound` error.
+
+
+
+
+
+
+## func AlreadyExistsf
+``` go
+func AlreadyExistsf(format string, args ...interface{}) error
+```
+AlreadyExistsf returns an error which satisfies IsAlreadyExists().
+
+
+## func Annotate
+``` go
+func Annotate(other error, message string) error
+```
+Annotate is used to add extra context to an existing error. The location of
+the Annotate call is recorded with the annotations. The file, line and
+function are also recorded.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    return errors.Annotate(err, "failed to frombulate")
+	}
+
+
+## func Annotatef
+``` go
+func Annotatef(other error, format string, args ...interface{}) error
+```
+Annotatef is used to add extra context to an existing error. The location of
+the Annotate call is recorded with the annotations. The file, line and
+function are also recorded.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    return errors.Annotatef(err, "failed to frombulate the %s", arg)
+	}
+
+
+## func Cause
+``` go
+func Cause(err error) error
+```
+Cause returns the cause of the given error.  This will be either the
+original error, or the result of a Wrap or Mask call.
+
+Cause is the usual way to diagnose errors that may have been wrapped by
+the other errors functions.
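+
+For example (a sketch; `readConfig` is a made-up caller):
+
+
+	if err := readConfig(path); err != nil {
+	    // The annotation does not hide the original error from the caller.
+	    if os.IsNotExist(errors.Cause(err)) {
+	        // handle the missing-file case
+	    }
+	}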
+
+
+## func DeferredAnnotatef
+``` go
+func DeferredAnnotatef(err *error, format string, args ...interface{})
+```
+DeferredAnnotatef annotates the given error (when it is not nil) with the given
+format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
+does nothing. This method is used in a defer statement in order to annotate any
+resulting error with the same message.
+
+For example:
+
+
+	defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
+
+
+## func Details
+``` go
+func Details(err error) string
+```
+Details returns information about the stack of errors wrapped by err, in
+the format:
+
+
+	[{filename:99: error one} {otherfile:55: cause of error one}]
+
+This is a terse alternative to ErrorStack as it returns a single line.
+
+
+## func ErrorStack
+``` go
+func ErrorStack(err error) string
+```
+ErrorStack returns a string representation of the annotated error. If the
+error passed as the parameter is not an annotated error, the result is
+simply the result of the Error() method on that error.
+
+If the error is an annotated error, a multi-line string is returned where
+each line represents one entry in the annotation stack. The full filename
+from the call stack is used in the output.
+
+
+	first error
+	github.com/juju/errors/annotation_test.go:193:
+	github.com/juju/errors/annotation_test.go:194: annotation
+	github.com/juju/errors/annotation_test.go:195:
+	github.com/juju/errors/annotation_test.go:196: more context
+	github.com/juju/errors/annotation_test.go:197:
+
+
+## func Errorf
+``` go
+func Errorf(format string, args ...interface{}) error
+```
+Errorf creates a new annotated error and records the location at which the
+error is created.  This should be a drop-in replacement for fmt.Errorf.
+
+For example:
+
+
+	return errors.Errorf("validation failed: %s", message)
+
+
+## func IsAlreadyExists
+``` go
+func IsAlreadyExists(err error) bool
+```
+IsAlreadyExists reports whether the error was created with
+AlreadyExistsf() or NewAlreadyExists().
+
+
+## func IsNotFound
+``` go
+func IsNotFound(err error) bool
+```
+IsNotFound reports whether err was created with NotFoundf() or
+NewNotFound().
+
+
+## func IsNotImplemented
+``` go
+func IsNotImplemented(err error) bool
+```
+IsNotImplemented reports whether err was created with
+NotImplementedf() or NewNotImplemented().
+
+
+## func IsNotSupported
+``` go
+func IsNotSupported(err error) bool
+```
+IsNotSupported reports whether the error was created with
+NotSupportedf() or NewNotSupported().
+
+
+## func IsNotValid
+``` go
+func IsNotValid(err error) bool
+```
+IsNotValid reports whether the error was created with NotValidf() or
+NewNotValid().
+
+
+## func IsUnauthorized
+``` go
+func IsUnauthorized(err error) bool
+```
+IsUnauthorized reports whether err was created with Unauthorizedf() or
+NewUnauthorized().
+
+
+## func Mask
+``` go
+func Mask(other error) error
+```
+Mask hides the underlying error type, and records the location of the masking.
+
+
+## func Maskf
+``` go
+func Maskf(other error, format string, args ...interface{}) error
+```
+Maskf masks the given error with the given format string and arguments (like
+fmt.Sprintf), returning a new error that maintains the error stack, but
+hides the underlying error type.  The error string still contains the full
+annotations. If you want to hide the annotations, call Wrap.
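+
+For example (a sketch, with an invented lookup function):
+
+
+	if err := lookupUser(name); err != nil {
+	    // The caller keeps the full message but cannot rely on the
+	    // underlying error type.
+	    return errors.Maskf(err, "cannot look up user %q", name)
+	}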
+
+
+## func New
+``` go
+func New(message string) error
+```
+New is a drop-in replacement for the standard library errors package that
+records the location at which the error is created.
+
+For example:
+
+
+	return errors.New("validation failed")
+
+
+## func NewAlreadyExists
+``` go
+func NewAlreadyExists(err error, msg string) error
+```
+NewAlreadyExists returns an error which wraps err and satisfies
+IsAlreadyExists().
+
+
+## func NewNotFound
+``` go
+func NewNotFound(err error, msg string) error
+```
+NewNotFound returns an error which wraps err and satisfies
+IsNotFound().
+
+
+## func NewNotImplemented
+``` go
+func NewNotImplemented(err error, msg string) error
+```
+NewNotImplemented returns an error which wraps err and satisfies
+IsNotImplemented().
+
+
+## func NewNotSupported
+``` go
+func NewNotSupported(err error, msg string) error
+```
+NewNotSupported returns an error which wraps err and satisfies
+IsNotSupported().
+
+
+## func NewNotValid
+``` go
+func NewNotValid(err error, msg string) error
+```
+NewNotValid returns an error which wraps err and satisfies IsNotValid().
+
+
+## func NewUnauthorized
+``` go
+func NewUnauthorized(err error, msg string) error
+```
+NewUnauthorized returns an error which wraps err and satisfies
+IsUnauthorized().
+
+
+## func NotFoundf
+``` go
+func NotFoundf(format string, args ...interface{}) error
+```
+NotFoundf returns an error which satisfies IsNotFound().
+
+
+## func NotImplementedf
+``` go
+func NotImplementedf(format string, args ...interface{}) error
+```
+NotImplementedf returns an error which satisfies IsNotImplemented().
+
+
+## func NotSupportedf
+``` go
+func NotSupportedf(format string, args ...interface{}) error
+```
+NotSupportedf returns an error which satisfies IsNotSupported().
+
+
+## func NotValidf
+``` go
+func NotValidf(format string, args ...interface{}) error
+```
+NotValidf returns an error which satisfies IsNotValid().
+
+
+## func Trace
+``` go
+func Trace(other error) error
+```
+Trace adds the location of the Trace call to the stack.  The Cause of the
+resulting error is the same as the error parameter.  If the other error is
+nil, the result will be nil.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    return errors.Trace(err)
+	}
+
+
+## func Unauthorizedf
+``` go
+func Unauthorizedf(format string, args ...interface{}) error
+```
+Unauthorizedf returns an error which satisfies IsUnauthorized().
+
+
+## func Wrap
+``` go
+func Wrap(other, newDescriptive error) error
+```
+Wrap changes the Cause of the error. The location of the Wrap call is also
+stored in the error stack.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    newErr := &packageError{"more context", private_value}
+	    return errors.Wrap(err, newErr)
+	}
+
+
+## func Wrapf
+``` go
+func Wrapf(other, newDescriptive error, format string, args ...interface{}) error
+```
+Wrapf changes the Cause of the error, and adds an annotation. The location
+of the Wrap call is also stored in the error stack.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
+	}
+
+
+
+## type Err
+``` go
+type Err struct {
+    // contains filtered or unexported fields
+}
+```
+Err holds a description of an error along with information about
+where the error was created.
+
+It may be embedded in custom error types to add extra information that
+this errors package can understand.
+
+
+
+
+
+
+
+
+
+### func NewErr
+``` go
+func NewErr(format string, args ...interface{}) Err
+```
+NewErr is used to return an Err for the purpose of embedding in other
+structures.  The location is not specified, and needs to be set with a call
+to SetLocation.
+
+For example:
+
+
+	type FooError struct {
+	    errors.Err
+	    code int
+	}
+	
+	func NewFooError(code int) error {
+	    err := &FooError{errors.NewErr("foo"), code}
+	    err.SetLocation(1)
+	    return err
+	}
+
+
+
+
+### func (\*Err) Cause
+``` go
+func (e *Err) Cause() error
+```
+The Cause of an error is the most recent error in the error stack that
+meets one of these criteria: the original error that was raised; the new
+error that was passed into the Wrap function; the most recently masked
+error; or nil if the error itself is considered the Cause.  Normally this
+method is not invoked directly, but instead through the stand-alone Cause
+function.
+
+
+
+### func (\*Err) Error
+``` go
+func (e *Err) Error() string
+```
+Error implements error.Error.
+
+
+
+### func (\*Err) Location
+``` go
+func (e *Err) Location() (filename string, line int)
+```
+Location is the file and line of where the error was most recently
+created or annotated.
+
+
+
+### func (\*Err) Message
+``` go
+func (e *Err) Message() string
+```
+Message returns the message stored with the most recent location. This is
+the empty string if the most recent call was Trace, or the message stored
+with Annotate or Mask.
+
+
+
+### func (\*Err) SetLocation
+``` go
+func (e *Err) SetLocation(callDepth int)
+```
+SetLocation records the source location of the error at callDepth stack
+frames above the call.
+
+
+
+### func (\*Err) StackTrace
+``` go
+func (e *Err) StackTrace() []string
+```
+StackTrace returns one string for each location recorded in the stack of
+errors. The first value is the originating error, with a line for each
+other annotation or tracing of the error.
+
+
+
+### func (\*Err) Underlying
+``` go
+func (e *Err) Underlying() error
+```
+Underlying returns the previous error in the error stack, if any. A client
+should not ever really call this method.  It is used to build the error
+stack and should not be introspected by client calls.  Or more
+specifically, clients should not depend on anything but the `Cause` of an
+error.
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
diff --git a/switchq/vendor/github.com/juju/errors/doc.go b/switchq/vendor/github.com/juju/errors/doc.go
new file mode 100644
index 0000000..35b119a
--- /dev/null
+++ b/switchq/vendor/github.com/juju/errors/doc.go
@@ -0,0 +1,81 @@
+// Copyright 2013, 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+/*
+[godoc-link-here]
+
+The juju/errors package provides an easy way to annotate errors without losing
+the original error context.
+
+The exported `New` and `Errorf` functions are designed to replace the
+`errors.New` and `fmt.Errorf` functions respectively. The same underlying
+error is there, but the package also records the location at which the error
+was created.
+
+A primary use case for this library is to add extra context any time an
+error is returned from a function.
+
+	if err := SomeFunc(); err != nil {
+		return err
+	}
+
+This instead becomes:
+
+	if err := SomeFunc(); err != nil {
+		return errors.Trace(err)
+	}
+
+which just records the file and line number of the Trace call, or
+
+	if err := SomeFunc(); err != nil {
+		return errors.Annotate(err, "more context")
+	}
+
+which also adds an annotation to the error.
+
+When you want to check to see if an error is of a particular type, a helper
+function is normally exported by the package that returned the error, like the
+`os` package does.  The underlying cause of the error is available using the
+`Cause` function.
+
+	os.IsNotExist(errors.Cause(err))
+
+The result of the `Error()` call on an annotated error is the annotations joined
+with colons, then the result of the `Error()` method for the underlying error
+that was the cause.
+
+	err := errors.Errorf("original")
+	err = errors.Annotatef(err, "context")
+	err = errors.Annotatef(err, "more context")
+	err.Error() -> "more context: context: original"
+
+Obviously recording the file, line and function is not very useful if you
+cannot get them back out again.
+
+	errors.ErrorStack(err)
+
+will return something like:
+
+	first error
+	github.com/juju/errors/annotation_test.go:193:
+	github.com/juju/errors/annotation_test.go:194: annotation
+	github.com/juju/errors/annotation_test.go:195:
+	github.com/juju/errors/annotation_test.go:196: more context
+	github.com/juju/errors/annotation_test.go:197:
+
+The first error was generated by an external system, so there was no location
+associated. The second, fourth, and last lines were generated with Trace calls,
+and the other two through Annotate.
+
+Sometimes when responding to an error you want to return a more specific error
+for the situation.
+
+	if err := FindField(field); err != nil {
+		return errors.Wrap(err, errors.NotFoundf(field))
+	}
+
+This returns an error where the complete error stack is still available, and
+`errors.Cause()` will return the `NotFound` error.
+
+*/
+package errors
diff --git a/switchq/vendor/github.com/juju/errors/error.go b/switchq/vendor/github.com/juju/errors/error.go
new file mode 100644
index 0000000..8c51c45
--- /dev/null
+++ b/switchq/vendor/github.com/juju/errors/error.go
@@ -0,0 +1,145 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+)
+
+// Err holds a description of an error along with information about
+// where the error was created.
+//
+// It may be embedded in custom error types to add extra information that
+// this errors package can understand.
+type Err struct {
+	// message holds an annotation of the error.
+	message string
+
+	// cause holds the cause of the error as returned
+	// by the Cause method.
+	cause error
+
+	// previous holds the previous error in the error stack, if any.
+	previous error
+
+	// file and line hold the source code location where the error was
+	// created.
+	file string
+	line int
+}
+
+// NewErr is used to return an Err for the purpose of embedding in other
+// structures.  The location is not specified, and needs to be set with a call
+// to SetLocation.
+//
+// For example:
+//     type FooError struct {
+//         errors.Err
+//         code int
+//     }
+//
+//     func NewFooError(code int) error {
+//         err := &FooError{errors.NewErr("foo"), code}
+//         err.SetLocation(1)
+//         return err
+//     }
+func NewErr(format string, args ...interface{}) Err {
+	return Err{
+		message: fmt.Sprintf(format, args...),
+	}
+}
+
+// NewErrWithCause is used to return an Err whose cause is the given error, for the purpose of embedding in other
+// structures. The location is not specified, and needs to be set with a call
+// to SetLocation.
+//
+// For example:
+//     type FooError struct {
+//         errors.Err
+//         code int
+//     }
+//
+//     func (e *FooError) Annotate(format string, args ...interface{}) error {
+//         err := &FooError{errors.NewErrWithCause(e.Err, format, args...), e.code}
+//         err.SetLocation(1)
+//         return err
+//     }
+func NewErrWithCause(other error, format string, args ...interface{}) Err {
+	return Err{
+		message:  fmt.Sprintf(format, args...),
+		cause:    Cause(other),
+		previous: other,
+	}
+}
+
+// Location is the file and line of where the error was most recently
+// created or annotated.
+func (e *Err) Location() (filename string, line int) {
+	return e.file, e.line
+}
+
+// Underlying returns the previous error in the error stack, if any. A client
+// should not ever really call this method.  It is used to build the error
+// stack and should not be introspected by client calls.  Or more
+// specifically, clients should not depend on anything but the `Cause` of an
+// error.
+func (e *Err) Underlying() error {
+	return e.previous
+}
+
+// The Cause of an error is the most recent error in the error stack that
+// meets one of these criteria: the original error that was raised; the new
+// error that was passed into the Wrap function; the most recently masked
+// error; or nil if the error itself is considered the Cause.  Normally this
+// method is not invoked directly, but instead through the stand-alone Cause
+// function.
+func (e *Err) Cause() error {
+	return e.cause
+}
+
+// Message returns the message stored with the most recent location. This is
+// the empty string if the most recent call was Trace, or the message stored
+// with Annotate or Mask.
+func (e *Err) Message() string {
+	return e.message
+}
+
+// Error implements error.Error.
+func (e *Err) Error() string {
+	// We want to walk up the stack of errors showing the annotations
+	// as long as the cause is the same.
+	err := e.previous
+	if !sameError(Cause(err), e.cause) && e.cause != nil {
+		err = e.cause
+	}
+	switch {
+	case err == nil:
+		return e.message
+	case e.message == "":
+		return err.Error()
+	}
+	return fmt.Sprintf("%s: %v", e.message, err)
+}
+
+// SetLocation records the source location of the error at callDepth stack
+// frames above the call.
+func (e *Err) SetLocation(callDepth int) {
+	_, file, line, _ := runtime.Caller(callDepth + 1)
+	e.file = trimGoPath(file)
+	e.line = line
+}
+
+// StackTrace returns one string for each location recorded in the stack of
+// errors. The first value is the originating error, with a line for each
+// other annotation or tracing of the error.
+func (e *Err) StackTrace() []string {
+	return errorStack(e)
+}
+
+// Ideally we'd have a way to check identity, but deep equals will do.
+func sameError(e1, e2 error) bool {
+	return reflect.DeepEqual(e1, e2)
+}
diff --git a/switchq/vendor/github.com/juju/errors/errortypes.go b/switchq/vendor/github.com/juju/errors/errortypes.go
new file mode 100644
index 0000000..10b3b19
--- /dev/null
+++ b/switchq/vendor/github.com/juju/errors/errortypes.go
@@ -0,0 +1,284 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+	"fmt"
+)
+
+// wrap is a helper to construct an Err wrapping the given error.
+func wrap(err error, format, suffix string, args ...interface{}) Err {
+	newErr := Err{
+		message:  fmt.Sprintf(format+suffix, args...),
+		previous: err,
+	}
+	newErr.SetLocation(2)
+	return newErr
+}
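+
+// Each concrete error type below follows this same pattern. A hypothetical
+// additional type would look like:
+//
+//	type timeout struct {
+//		Err
+//	}
+//
+//	func Timeoutf(format string, args ...interface{}) error {
+//		return &timeout{wrap(nil, format, " timeout", args...)}
+//	}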
+
+// notFound represents an error when something has not been found.
+type notFound struct {
+	Err
+}
+
+// NotFoundf returns an error which satisfies IsNotFound().
+func NotFoundf(format string, args ...interface{}) error {
+	return &notFound{wrap(nil, format, " not found", args...)}
+}
+
+// NewNotFound returns an error which wraps err that satisfies
+// IsNotFound().
+func NewNotFound(err error, msg string) error {
+	return &notFound{wrap(err, msg, "")}
+}
+
+// IsNotFound reports whether err was created with NotFoundf() or
+// NewNotFound().
+func IsNotFound(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notFound)
+	return ok
+}
+
+// userNotFound represents an error when a nonexistent user is looked up.
+type userNotFound struct {
+	Err
+}
+
+// UserNotFoundf returns an error which satisfies IsUserNotFound().
+func UserNotFoundf(format string, args ...interface{}) error {
+	return &userNotFound{wrap(nil, format, " user not found", args...)}
+}
+
+// NewUserNotFound returns an error which wraps err and satisfies
+// IsUserNotFound().
+func NewUserNotFound(err error, msg string) error {
+	return &userNotFound{wrap(err, msg, "")}
+}
+
+// IsUserNotFound reports whether err was created with UserNotFoundf() or
+// NewUserNotFound().
+func IsUserNotFound(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*userNotFound)
+	return ok
+}
+
+// unauthorized represents an error when an operation is unauthorized.
+type unauthorized struct {
+	Err
+}
+
+// Unauthorizedf returns an error which satisfies IsUnauthorized().
+func Unauthorizedf(format string, args ...interface{}) error {
+	return &unauthorized{wrap(nil, format, "", args...)}
+}
+
+// NewUnauthorized returns an error which wraps err and satisfies
+// IsUnauthorized().
+func NewUnauthorized(err error, msg string) error {
+	return &unauthorized{wrap(err, msg, "")}
+}
+
+// IsUnauthorized reports whether err was created with Unauthorizedf() or
+// NewUnauthorized().
+func IsUnauthorized(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*unauthorized)
+	return ok
+}
+
+// notImplemented represents an error when something is not
+// implemented.
+type notImplemented struct {
+	Err
+}
+
+// NotImplementedf returns an error which satisfies IsNotImplemented().
+func NotImplementedf(format string, args ...interface{}) error {
+	return &notImplemented{wrap(nil, format, " not implemented", args...)}
+}
+
+// NewNotImplemented returns an error which wraps err and satisfies
+// IsNotImplemented().
+func NewNotImplemented(err error, msg string) error {
+	return &notImplemented{wrap(err, msg, "")}
+}
+
+// IsNotImplemented reports whether err was created with
+// NotImplementedf() or NewNotImplemented().
+func IsNotImplemented(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notImplemented)
+	return ok
+}
+
+// alreadyExists represents an error when something already exists.
+type alreadyExists struct {
+	Err
+}
+
+// AlreadyExistsf returns an error which satisfies IsAlreadyExists().
+func AlreadyExistsf(format string, args ...interface{}) error {
+	return &alreadyExists{wrap(nil, format, " already exists", args...)}
+}
+
+// NewAlreadyExists returns an error which wraps err and satisfies
+// IsAlreadyExists().
+func NewAlreadyExists(err error, msg string) error {
+	return &alreadyExists{wrap(err, msg, "")}
+}
+
+// IsAlreadyExists reports whether the error was created with
+// AlreadyExistsf() or NewAlreadyExists().
+func IsAlreadyExists(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*alreadyExists)
+	return ok
+}
+
+// notSupported represents an error when something is not supported.
+type notSupported struct {
+	Err
+}
+
+// NotSupportedf returns an error which satisfies IsNotSupported().
+func NotSupportedf(format string, args ...interface{}) error {
+	return &notSupported{wrap(nil, format, " not supported", args...)}
+}
+
+// NewNotSupported returns an error which wraps err and satisfies
+// IsNotSupported().
+func NewNotSupported(err error, msg string) error {
+	return &notSupported{wrap(err, msg, "")}
+}
+
+// IsNotSupported reports whether the error was created with
+// NotSupportedf() or NewNotSupported().
+func IsNotSupported(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notSupported)
+	return ok
+}
+
+// notValid represents an error when something is not valid.
+type notValid struct {
+	Err
+}
+
+// NotValidf returns an error which satisfies IsNotValid().
+func NotValidf(format string, args ...interface{}) error {
+	return &notValid{wrap(nil, format, " not valid", args...)}
+}
+
+// NewNotValid returns an error which wraps err and satisfies IsNotValid().
+func NewNotValid(err error, msg string) error {
+	return &notValid{wrap(err, msg, "")}
+}
+
+// IsNotValid reports whether the error was created with NotValidf() or
+// NewNotValid().
+func IsNotValid(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notValid)
+	return ok
+}
+
+// notProvisioned represents an error when something is not yet provisioned.
+type notProvisioned struct {
+	Err
+}
+
+// NotProvisionedf returns an error which satisfies IsNotProvisioned().
+func NotProvisionedf(format string, args ...interface{}) error {
+	return &notProvisioned{wrap(nil, format, " not provisioned", args...)}
+}
+
+// NewNotProvisioned returns an error which wraps err that satisfies
+// IsNotProvisioned().
+func NewNotProvisioned(err error, msg string) error {
+	return &notProvisioned{wrap(err, msg, "")}
+}
+
+// IsNotProvisioned reports whether err was created with NotProvisionedf() or
+// NewNotProvisioned().
+func IsNotProvisioned(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notProvisioned)
+	return ok
+}
+
+// notAssigned represents an error when something is not yet assigned to
+// something else.
+type notAssigned struct {
+	Err
+}
+
+// NotAssignedf returns an error which satisfies IsNotAssigned().
+func NotAssignedf(format string, args ...interface{}) error {
+	return &notAssigned{wrap(nil, format, " not assigned", args...)}
+}
+
+// NewNotAssigned returns an error which wraps err that satisfies
+// IsNotAssigned().
+func NewNotAssigned(err error, msg string) error {
+	return &notAssigned{wrap(err, msg, "")}
+}
+
+// IsNotAssigned reports whether err was created with NotAssignedf() or
+// NewNotAssigned().
+func IsNotAssigned(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notAssigned)
+	return ok
+}
+
+// badRequest represents an error when a request has bad parameters.
+type badRequest struct {
+	Err
+}
+
+// BadRequestf returns an error which satisfies IsBadRequest().
+func BadRequestf(format string, args ...interface{}) error {
+	return &badRequest{wrap(nil, format, "", args...)}
+}
+
+// NewBadRequest returns an error which wraps err that satisfies
+// IsBadRequest().
+func NewBadRequest(err error, msg string) error {
+	return &badRequest{wrap(err, msg, "")}
+}
+
+// IsBadRequest reports whether err was created with BadRequestf() or
+// NewBadRequest().
+func IsBadRequest(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*badRequest)
+	return ok
+}
+
+// methodNotAllowed represents an error when an HTTP request
+// is made with an inappropriate method.
+type methodNotAllowed struct {
+	Err
+}
+
+// MethodNotAllowedf returns an error which satisfies IsMethodNotAllowed().
+func MethodNotAllowedf(format string, args ...interface{}) error {
+	return &methodNotAllowed{wrap(nil, format, "", args...)}
+}
+
+// NewMethodNotAllowed returns an error which wraps err that satisfies
+// IsMethodNotAllowed().
+func NewMethodNotAllowed(err error, msg string) error {
+	return &methodNotAllowed{wrap(err, msg, "")}
+}
+
+// IsMethodNotAllowed reports whether err was created with MethodNotAllowedf() or
+// NewMethodNotAllowed().
+func IsMethodNotAllowed(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*methodNotAllowed)
+	return ok
+}
diff --git a/switchq/vendor/github.com/juju/errors/functions.go b/switchq/vendor/github.com/juju/errors/functions.go
new file mode 100644
index 0000000..994208d
--- /dev/null
+++ b/switchq/vendor/github.com/juju/errors/functions.go
@@ -0,0 +1,330 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+	"fmt"
+	"strings"
+)
+
+// New is a drop-in replacement for the standard library errors.New function,
+// recording the location at which the error is created.
+//
+// For example:
+//    return errors.New("validation failed")
+//
+func New(message string) error {
+	err := &Err{message: message}
+	err.SetLocation(1)
+	return err
+}
+
+// Errorf creates a new annotated error and records the location at which the
+// error is created.  This should be a drop-in replacement for fmt.Errorf.
+//
+// For example:
+//    return errors.Errorf("validation failed: %s", message)
+//
+func Errorf(format string, args ...interface{}) error {
+	err := &Err{message: fmt.Sprintf(format, args...)}
+	err.SetLocation(1)
+	return err
+}
+
+// Trace adds the location of the Trace call to the stack.  The Cause of the
+// resulting error is the same as the error parameter.  If the other error is
+// nil, the result will be nil.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       return errors.Trace(err)
+//   }
+//
+func Trace(other error) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{previous: other, cause: Cause(other)}
+	err.SetLocation(1)
+	return err
+}
+
+// Annotate is used to add extra context to an existing error. The location of
+// the Annotate call is recorded with the annotations. The file, line and
+// function are also recorded.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       return errors.Annotate(err, "failed to frombulate")
+//   }
+//
+func Annotate(other error, message string) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{
+		previous: other,
+		cause:    Cause(other),
+		message:  message,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Annotatef is used to add extra context to an existing error. The location of
+// the Annotate call is recorded with the annotations. The file, line and
+// function are also recorded.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       return errors.Annotatef(err, "failed to frombulate the %s", arg)
+//   }
+//
+func Annotatef(other error, format string, args ...interface{}) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{
+		previous: other,
+		cause:    Cause(other),
+		message:  fmt.Sprintf(format, args...),
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// DeferredAnnotatef annotates the given error (when it is not nil) with the given
+// format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
+// does nothing. This method is used in a defer statement in order to annotate any
+// resulting error with the same message.
+//
+// For example:
+//
+//    defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
+//
+func DeferredAnnotatef(err *error, format string, args ...interface{}) {
+	if *err == nil {
+		return
+	}
+	newErr := &Err{
+		message:  fmt.Sprintf(format, args...),
+		cause:    Cause(*err),
+		previous: *err,
+	}
+	newErr.SetLocation(1)
+	*err = newErr
+}
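+
+// A caller typically uses a named return value so that the deferred annotation
+// can rewrite it on the way out (a sketch; Config, ReadConfig and path are
+// illustrative):
+//
+//	func LoadConfig(path string) (cfg Config, err error) {
+//		defer DeferredAnnotatef(&err, "cannot load config %q", path)
+//		return ReadConfig(path)
+//	}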
+
+// Wrap changes the Cause of the error. The location of the Wrap call is also
+// stored in the error stack.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       newErr := &packageError{"more context", private_value}
+//       return errors.Wrap(err, newErr)
+//   }
+//
+func Wrap(other, newDescriptive error) error {
+	err := &Err{
+		previous: other,
+		cause:    newDescriptive,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Wrapf changes the Cause of the error, and adds an annotation. The location
+// of the Wrapf call is also stored in the error stack.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
+//   }
+//
+func Wrapf(other, newDescriptive error, format string, args ...interface{}) error {
+	err := &Err{
+		message:  fmt.Sprintf(format, args...),
+		previous: other,
+		cause:    newDescriptive,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Maskf masks the given error with the given format string and arguments (like
+// fmt.Sprintf), returning a new error that maintains the error stack, but
+// hides the underlying error type.  The error string still contains the full
+// annotations. If you want to hide the annotations, call Wrap.
+func Maskf(other error, format string, args ...interface{}) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{
+		message:  fmt.Sprintf(format, args...),
+		previous: other,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Mask hides the underlying error type, and records the location of the masking.
+func Mask(other error) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{
+		previous: other,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Cause returns the cause of the given error.  This will be either the
+// original error, or the result of a Wrap or Mask call.
+//
+// Cause is the usual way to diagnose errors that may have been wrapped by
+// the other errors functions.
+func Cause(err error) error {
+	var diag error
+	if err, ok := err.(causer); ok {
+		diag = err.Cause()
+	}
+	if diag != nil {
+		return diag
+	}
+	return err
+}
+
+type causer interface {
+	Cause() error
+}
+
+type wrapper interface {
+	// Message returns the top level error message,
+	// not including the message from the Previous
+	// error.
+	Message() string
+
+	// Underlying returns the Previous error, or nil
+	// if there is none.
+	Underlying() error
+}
+
+type locationer interface {
+	Location() (string, int)
+}
+
+var (
+	_ wrapper    = (*Err)(nil)
+	_ locationer = (*Err)(nil)
+	_ causer     = (*Err)(nil)
+)
+
+// Details returns information about the stack of errors wrapped by err, in
+// the format:
+//
+// 	[{filename:99: error one} {otherfile:55: cause of error one}]
+//
+// This is a terse alternative to ErrorStack as it returns a single line.
+func Details(err error) string {
+	if err == nil {
+		return "[]"
+	}
+	var s []byte
+	s = append(s, '[')
+	for {
+		s = append(s, '{')
+		if err, ok := err.(locationer); ok {
+			file, line := err.Location()
+			if file != "" {
+				s = append(s, fmt.Sprintf("%s:%d", file, line)...)
+				s = append(s, ": "...)
+			}
+		}
+		if cerr, ok := err.(wrapper); ok {
+			s = append(s, cerr.Message()...)
+			err = cerr.Underlying()
+		} else {
+			s = append(s, err.Error()...)
+			err = nil
+		}
+		s = append(s, '}')
+		if err == nil {
+			break
+		}
+		s = append(s, ' ')
+	}
+	s = append(s, ']')
+	return string(s)
+}
+
+// ErrorStack returns a string representation of the annotated error. If the
+// error passed as the parameter is not an annotated error, the result is
+// simply the result of the Error() method on that error.
+//
+// If the error is an annotated error, a multi-line string is returned where
+// each line represents one entry in the annotation stack. The full filename
+// from the call stack is used in the output.
+//
+//     first error
+//     github.com/juju/errors/annotation_test.go:193:
+//     github.com/juju/errors/annotation_test.go:194: annotation
+//     github.com/juju/errors/annotation_test.go:195:
+//     github.com/juju/errors/annotation_test.go:196: more context
+//     github.com/juju/errors/annotation_test.go:197:
+func ErrorStack(err error) string {
+	return strings.Join(errorStack(err), "\n")
+}
+
+func errorStack(err error) []string {
+	if err == nil {
+		return nil
+	}
+
+	// We want the first error first
+	var lines []string
+	for {
+		var buff []byte
+		if err, ok := err.(locationer); ok {
+			file, line := err.Location()
+			// Strip off the leading GOPATH/src path elements.
+			file = trimGoPath(file)
+			if file != "" {
+				buff = append(buff, fmt.Sprintf("%s:%d", file, line)...)
+				buff = append(buff, ": "...)
+			}
+		}
+		if cerr, ok := err.(wrapper); ok {
+			message := cerr.Message()
+			buff = append(buff, message...)
+			// If there is a cause for this error, and it is different to the cause
+			// of the underlying error, then output the error string in the stack trace.
+			var cause error
+			if err1, ok := err.(causer); ok {
+				cause = err1.Cause()
+			}
+			err = cerr.Underlying()
+			if cause != nil && !sameError(Cause(err), cause) {
+				if message != "" {
+					buff = append(buff, ": "...)
+				}
+				buff = append(buff, cause.Error()...)
+			}
+		} else {
+			buff = append(buff, err.Error()...)
+			err = nil
+		}
+		lines = append(lines, string(buff))
+		if err == nil {
+			break
+		}
+	}
+	// reverse the lines to get the original error, which was at the end of
+	// the list, back to the start.
+	var result []string
+	for i := len(lines); i > 0; i-- {
+		result = append(result, lines[i-1])
+	}
+	return result
+}
diff --git a/switchq/vendor/github.com/juju/errors/path.go b/switchq/vendor/github.com/juju/errors/path.go
new file mode 100644
index 0000000..a7b726a
--- /dev/null
+++ b/switchq/vendor/github.com/juju/errors/path.go
@@ -0,0 +1,38 @@
+// Copyright 2013, 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+	"runtime"
+	"strings"
+)
+
+// prefixSize is used internally to trim the user specific path from the
+// front of the returned filenames from the runtime call stack.
+var prefixSize int
+
+// goPath is the deduced path based on the location of this file as compiled.
+var goPath string
+
+func init() {
+	_, file, _, ok := runtime.Caller(0)
+	if file == "?" {
+		return
+	}
+	if ok {
+		// We know that the end of the file should be:
+		// github.com/juju/errors/path.go
+		size := len(file)
+		suffix := len("github.com/juju/errors/path.go")
+		goPath = file[:size-suffix]
+		prefixSize = len(goPath)
+	}
+}
+
+func trimGoPath(filename string) string {
+	if strings.HasPrefix(filename, goPath) {
+		return filename[prefixSize:]
+	}
+	return filename
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/LICENSE b/switchq/vendor/github.com/juju/gomaasapi/LICENSE
new file mode 100644
index 0000000..d5836af
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2012-2016 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/switchq/vendor/github.com/juju/gomaasapi/Makefile b/switchq/vendor/github.com/juju/gomaasapi/Makefile
new file mode 100644
index 0000000..ea6cbb5
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/Makefile
@@ -0,0 +1,26 @@
+# Build the examples and run the tests.
+check: examples
+	go test ./...
+
+example_source := $(wildcard example/*.go)
+example_binaries := $(patsubst %.go,%,$(example_source))
+
+# Clean up binaries.
+clean:
+	$(RM) $(example_binaries)
+
+# Reformat the source files to match our layout standards.
+format:
+	gofmt -w .
+
+# Invoke gofmt's "simplify" option to streamline the source code.
+simplify:
+	gofmt -w -s .
+
+# Build the examples (we have no tests for them).
+examples: $(example_binaries)
+
+%: %.go
+	go build -o $@ $<
+
+.PHONY: check clean format examples simplify
diff --git a/switchq/vendor/github.com/juju/gomaasapi/README.rst b/switchq/vendor/github.com/juju/gomaasapi/README.rst
new file mode 100644
index 0000000..c153cd3
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/README.rst
@@ -0,0 +1,12 @@
+.. -*- mode: rst -*-
+
+******************************
+MAAS API client library for Go
+******************************
+
+This library serves as a minimal client for communicating with the MAAS web
+API in Go programs.
+
+For more information see the `project homepage`_.
+
+.. _project homepage: https://github.com/juju/gomaasapi
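+
+A minimal usage sketch (the server URL and API key are placeholders)::
+
+    client, err := gomaasapi.NewAuthenticatedClient(
+        "http://maas.example.com/MAAS/", "<consumer>:<key>:<secret>", "2.0")
+    if err != nil {
+        // handle the error
+    }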
diff --git a/switchq/vendor/github.com/juju/gomaasapi/blockdevice.go b/switchq/vendor/github.com/juju/gomaasapi/blockdevice.go
new file mode 100644
index 0000000..ad04f9d
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/blockdevice.go
@@ -0,0 +1,176 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type blockdevice struct {
+	resourceURI string
+
+	id      int
+	name    string
+	model   string
+	path    string
+	usedFor string
+	tags    []string
+
+	blockSize uint64
+	usedSize  uint64
+	size      uint64
+
+	partitions []*partition
+}
+
+// ID implements BlockDevice.
+func (b *blockdevice) ID() int {
+	return b.id
+}
+
+// Name implements BlockDevice.
+func (b *blockdevice) Name() string {
+	return b.name
+}
+
+// Model implements BlockDevice.
+func (b *blockdevice) Model() string {
+	return b.model
+}
+
+// Path implements BlockDevice.
+func (b *blockdevice) Path() string {
+	return b.path
+}
+
+// UsedFor implements BlockDevice.
+func (b *blockdevice) UsedFor() string {
+	return b.usedFor
+}
+
+// Tags implements BlockDevice.
+func (b *blockdevice) Tags() []string {
+	return b.tags
+}
+
+// BlockSize implements BlockDevice.
+func (b *blockdevice) BlockSize() uint64 {
+	return b.blockSize
+}
+
+// UsedSize implements BlockDevice.
+func (b *blockdevice) UsedSize() uint64 {
+	return b.usedSize
+}
+
+// Size implements BlockDevice.
+func (b *blockdevice) Size() uint64 {
+	return b.size
+}
+
+// Partitions implements BlockDevice.
+func (b *blockdevice) Partitions() []Partition {
+	result := make([]Partition, len(b.partitions))
+	for i, v := range b.partitions {
+		result[i] = v
+	}
+	return result
+}
+
+func readBlockDevices(controllerVersion version.Number, source interface{}) ([]*blockdevice, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "blockdevice base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range blockdeviceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no blockdevice read func for version %s", controllerVersion)
+	}
+	readFunc := blockdeviceDeserializationFuncs[deserialisationVersion]
+	return readBlockDeviceList(valid, readFunc)
+}
+
+// readBlockDeviceList expects the values of the sourceList to be string maps.
+func readBlockDeviceList(sourceList []interface{}, readFunc blockdeviceDeserializationFunc) ([]*blockdevice, error) {
+	result := make([]*blockdevice, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for blockdevice %d, %T", i, value)
+		}
+		blockdevice, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "blockdevice %d", i)
+		}
+		result = append(result, blockdevice)
+	}
+	return result, nil
+}
+
+type blockdeviceDeserializationFunc func(map[string]interface{}) (*blockdevice, error)
+
+var blockdeviceDeserializationFuncs = map[version.Number]blockdeviceDeserializationFunc{
+	twoDotOh: blockdevice_2_0,
+}
+
+func blockdevice_2_0(source map[string]interface{}) (*blockdevice, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"id":       schema.ForceInt(),
+		"name":     schema.String(),
+		"model":    schema.OneOf(schema.Nil(""), schema.String()),
+		"path":     schema.String(),
+		"used_for": schema.String(),
+		"tags":     schema.List(schema.String()),
+
+		"block_size": schema.ForceUint(),
+		"used_size":  schema.ForceUint(),
+		"size":       schema.ForceUint(),
+
+		"partitions": schema.List(schema.StringMap(schema.Any())),
+	}
+	checker := schema.FieldMap(fields, nil)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "blockdevice 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	partitions, err := readPartitionList(valid["partitions"].([]interface{}), partition_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	model, _ := valid["model"].(string)
+	result := &blockdevice{
+		resourceURI: valid["resource_uri"].(string),
+
+		id:      valid["id"].(int),
+		name:    valid["name"].(string),
+		model:   model,
+		path:    valid["path"].(string),
+		usedFor: valid["used_for"].(string),
+		tags:    convertToStringSlice(valid["tags"]),
+
+		blockSize: valid["block_size"].(uint64),
+		usedSize:  valid["used_size"].(uint64),
+		size:      valid["size"].(uint64),
+
+		partitions: partitions,
+	}
+	return result, nil
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/bootresource.go b/switchq/vendor/github.com/juju/gomaasapi/bootresource.go
new file mode 100644
index 0000000..619a2a9
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/bootresource.go
@@ -0,0 +1,136 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/utils/set"
+	"github.com/juju/version"
+)
+
+type bootResource struct {
+	// Add the controller in when we need to do things with the bootResource.
+	// controller Controller
+
+	resourceURI string
+
+	id           int
+	name         string
+	type_        string
+	architecture string
+	subArches    string
+	kernelFlavor string
+}
+
+// ID implements BootResource.
+func (b *bootResource) ID() int {
+	return b.id
+}
+
+// Name implements BootResource.
+func (b *bootResource) Name() string {
+	return b.name
+}
+
+// Type implements BootResource.
+func (b *bootResource) Type() string {
+	return b.type_
+}
+
+// Architecture implements BootResource.
+func (b *bootResource) Architecture() string {
+	return b.architecture
+}
+
+// SubArchitectures implements BootResource.
+func (b *bootResource) SubArchitectures() set.Strings {
+	return set.NewStrings(strings.Split(b.subArches, ",")...)
+}
+
+// KernelFlavor implements BootResource.
+func (b *bootResource) KernelFlavor() string {
+	return b.kernelFlavor
+}
+
+func readBootResources(controllerVersion version.Number, source interface{}) ([]*bootResource, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "boot resource base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range bootResourceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no boot resource read func for version %s", controllerVersion)
+	}
+	readFunc := bootResourceDeserializationFuncs[deserialisationVersion]
+	return readBootResourceList(valid, readFunc)
+}
+
+// readBootResourceList expects the values of the sourceList to be string maps.
+func readBootResourceList(sourceList []interface{}, readFunc bootResourceDeserializationFunc) ([]*bootResource, error) {
+	result := make([]*bootResource, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for boot resource %d, %T", i, value)
+		}
+		bootResource, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "boot resource %d", i)
+		}
+		result = append(result, bootResource)
+	}
+	return result, nil
+}
+
+type bootResourceDeserializationFunc func(map[string]interface{}) (*bootResource, error)
+
+var bootResourceDeserializationFuncs = map[version.Number]bootResourceDeserializationFunc{
+	twoDotOh: bootResource_2_0,
+}
+
+func bootResource_2_0(source map[string]interface{}) (*bootResource, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+		"id":           schema.ForceInt(),
+		"name":         schema.String(),
+		"type":         schema.String(),
+		"architecture": schema.String(),
+		"subarches":    schema.String(),
+		"kflavor":      schema.String(),
+	}
+	defaults := schema.Defaults{
+		"subarches": "",
+		"kflavor":   "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "boot resource 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	result := &bootResource{
+		resourceURI:  valid["resource_uri"].(string),
+		id:           valid["id"].(int),
+		name:         valid["name"].(string),
+		type_:        valid["type"].(string),
+		architecture: valid["architecture"].(string),
+		subArches:    valid["subarches"].(string),
+		kernelFlavor: valid["kflavor"].(string),
+	}
+	return result, nil
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/client.go b/switchq/vendor/github.com/juju/gomaasapi/client.go
new file mode 100644
index 0000000..ef887e6
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/client.go
@@ -0,0 +1,314 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/juju/errors"
+)
+
+const (
+	// Number of retries performed when the server returns a 503
+	// response with a 'Retry-after' header.  A request will be issued
+	// at most NumberOfRetries + 1 times.
+	NumberOfRetries = 4
+
+	RetryAfterHeaderName = "Retry-After"
+)
+
+// Client represents a way of communicating with a MAAS API instance.
+// It is stateless, so it can have concurrent requests in progress.
+type Client struct {
+	APIURL *url.URL
+	Signer OAuthSigner
+}
+
+// ServerError is an http error (or at least, a non-2xx result) received from
+// the server.  It contains the numerical HTTP status code as well as an error
+// string and the response's headers.
+type ServerError struct {
+	error
+	StatusCode  int
+	Header      http.Header
+	BodyMessage string
+}
+
+// GetServerError returns the ServerError from the cause of the error, if it is
+// a ServerError, along with a bool indicating whether it was one.
+func GetServerError(err error) (ServerError, bool) {
+	svrErr, ok := errors.Cause(err).(ServerError)
+	return svrErr, ok
+}
+
+// readAndClose reads and closes the given ReadCloser.
+//
+// Trying to read from a nil stream simply returns nil, with no error.
+func readAndClose(stream io.ReadCloser) ([]byte, error) {
+	if stream == nil {
+		return nil, nil
+	}
+	defer stream.Close()
+	return ioutil.ReadAll(stream)
+}
+
+// dispatchRequest sends a request to the server, and interprets the response.
+// Client-side errors will return an empty response and a non-nil error.  For
+// server-side errors however (i.e. responses with a non 2XX status code), the
+// returned error will be ServerError and the returned body will reflect the
+// server's response.  If the server returns a 503 response with a 'Retry-after'
+// header, the request will be transparently retried.
+func (client Client) dispatchRequest(request *http.Request) ([]byte, error) {
+	// First, store the request's body in a []byte to be able to restore it
+	// after each request.
+	bodyContent, err := readAndClose(request.Body)
+	if err != nil {
+		return nil, err
+	}
+	for retry := 0; retry < NumberOfRetries; retry++ {
+		// Restore body before issuing request.
+		newBody := ioutil.NopCloser(bytes.NewReader(bodyContent))
+		request.Body = newBody
+		body, err := client.dispatchSingleRequest(request)
+		// If this is a 503 response with a non-void "Retry-After" header: wait
+		// as instructed and retry the request.
+		if err != nil {
+			serverError, ok := errors.Cause(err).(ServerError)
+			if ok && serverError.StatusCode == http.StatusServiceUnavailable {
+				retry_time_int, errConv := strconv.Atoi(serverError.Header.Get(RetryAfterHeaderName))
+				if errConv == nil {
+				time.Sleep(time.Duration(retry_time_int) * time.Second)
+					continue
+				}
+			}
+		}
+		return body, err
+	}
+	// Restore body before issuing request.
+	newBody := ioutil.NopCloser(bytes.NewReader(bodyContent))
+	request.Body = newBody
+	return client.dispatchSingleRequest(request)
+}
+
+func (client Client) dispatchSingleRequest(request *http.Request) ([]byte, error) {
+	client.Signer.OAuthSign(request)
+	httpClient := http.Client{}
+	// See https://code.google.com/p/go/issues/detail?id=4677
+	// We need to force the connection to close each time so that we don't
+	// hit the above Go bug.
+	request.Close = true
+	response, err := httpClient.Do(request)
+	if err != nil {
+		return nil, err
+	}
+	body, err := readAndClose(response.Body)
+	if err != nil {
+		return nil, err
+	}
+	if response.StatusCode < 200 || response.StatusCode > 299 {
+		err := errors.Errorf("ServerError: %v (%s)", response.Status, body)
+		return body, errors.Trace(ServerError{error: err, StatusCode: response.StatusCode, Header: response.Header, BodyMessage: string(body)})
+	}
+	return body, nil
+}
+
+// GetURL returns the URL to a given resource on the API, based on its URI.
+// The resource URI may be absolute or relative; either way the result is a
+// full absolute URL including the network part.
+func (client Client) GetURL(uri *url.URL) *url.URL {
+	return client.APIURL.ResolveReference(uri)
+}
+
+// Get performs an HTTP "GET" to the API.  This may be either an API method
+// invocation (if you pass its name in "operation") or plain resource
+// retrieval (if you leave "operation" blank).
+func (client Client) Get(uri *url.URL, operation string, parameters url.Values) ([]byte, error) {
+	if parameters == nil {
+		parameters = make(url.Values)
+	}
+	opParameter := parameters.Get("op")
+	if opParameter != "" {
+		msg := errors.Errorf("reserved parameter 'op' passed (with value '%s')", opParameter)
+		return nil, msg
+	}
+	if operation != "" {
+		parameters.Set("op", operation)
+	}
+	queryUrl := client.GetURL(uri)
+	queryUrl.RawQuery = parameters.Encode()
+	request, err := http.NewRequest("GET", queryUrl.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	return client.dispatchRequest(request)
+}
+
+// writeMultiPartFiles writes the given files as parts of a multipart message
+// using the given writer.
+func writeMultiPartFiles(writer *multipart.Writer, files map[string][]byte) error {
+	for fileName, fileContent := range files {
+
+		fw, err := writer.CreateFormFile(fileName, fileName)
+		if err != nil {
+			return err
+		}
+		io.Copy(fw, bytes.NewBuffer(fileContent))
+	}
+	return nil
+}
+
+// writeMultiPartParams writes the given parameters as parts of a multipart
+// message using the given writer.
+func writeMultiPartParams(writer *multipart.Writer, parameters url.Values) error {
+	for key, values := range parameters {
+		for _, value := range values {
+			fw, err := writer.CreateFormField(key)
+			if err != nil {
+				return err
+			}
+			buffer := bytes.NewBufferString(value)
+			io.Copy(fw, buffer)
+		}
+	}
+	return nil
+}
+
+// nonIdempotentRequestFiles implements the common functionality of PUT and
+// POST requests (but not GET or DELETE requests) when uploading files is
+// needed.
+func (client Client) nonIdempotentRequestFiles(method string, uri *url.URL, parameters url.Values, files map[string][]byte) ([]byte, error) {
+	buf := new(bytes.Buffer)
+	writer := multipart.NewWriter(buf)
+	err := writeMultiPartFiles(writer, files)
+	if err != nil {
+		return nil, err
+	}
+	err = writeMultiPartParams(writer, parameters)
+	if err != nil {
+		return nil, err
+	}
+	writer.Close()
+	url := client.GetURL(uri)
+	request, err := http.NewRequest(method, url.String(), buf)
+	if err != nil {
+		return nil, err
+	}
+	request.Header.Set("Content-Type", writer.FormDataContentType())
+	return client.dispatchRequest(request)
+}
+
+// nonIdempotentRequest implements the common functionality of PUT and POST
+// requests (but not GET or DELETE requests).
+func (client Client) nonIdempotentRequest(method string, uri *url.URL, parameters url.Values) ([]byte, error) {
+	url := client.GetURL(uri)
+	request, err := http.NewRequest(method, url.String(), strings.NewReader(string(parameters.Encode())))
+	if err != nil {
+		return nil, err
+	}
+	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	return client.dispatchRequest(request)
+}
+
+// Post performs an HTTP "POST" to the API.  This may be either an API method
+// invocation (if you pass its name in "operation") or plain resource
+// retrieval (if you leave "operation" blank).
+func (client Client) Post(uri *url.URL, operation string, parameters url.Values, files map[string][]byte) ([]byte, error) {
+	queryParams := url.Values{"op": {operation}}
+	uri.RawQuery = queryParams.Encode()
+	if files != nil {
+		return client.nonIdempotentRequestFiles("POST", uri, parameters, files)
+	}
+	return client.nonIdempotentRequest("POST", uri, parameters)
+}
+
+// Put updates an object on the API, using an HTTP "PUT" request.
+func (client Client) Put(uri *url.URL, parameters url.Values) ([]byte, error) {
+	return client.nonIdempotentRequest("PUT", uri, parameters)
+}
+
+// Delete deletes an object on the API, using an HTTP "DELETE" request.
+func (client Client) Delete(uri *url.URL) error {
+	url := client.GetURL(uri)
+	request, err := http.NewRequest("DELETE", url.String(), strings.NewReader(""))
+	if err != nil {
+		return err
+	}
+	_, err = client.dispatchRequest(request)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Anonymous "signature method" implementation.
+type anonSigner struct{}
+
+func (signer anonSigner) OAuthSign(request *http.Request) error {
+	return nil
+}
+
+// anonSigner implements the OAuthSigner interface.
+var _ OAuthSigner = anonSigner{}
+
+func composeAPIURL(BaseURL string, apiVersion string) (*url.URL, error) {
+	baseurl := EnsureTrailingSlash(BaseURL)
+	apiurl := fmt.Sprintf("%sapi/%s/", baseurl, apiVersion)
+	return url.Parse(apiurl)
+}
+
+// NewAnonymousClient creates a client that issues anonymous requests.
+// BaseURL should refer to the root of the MAAS server path, e.g.
+// http://my.maas.server.example.com/MAAS/
+// apiVersion should contain the version of the MAAS API that you want to use.
+func NewAnonymousClient(BaseURL string, apiVersion string) (*Client, error) {
+	parsedBaseURL, err := composeAPIURL(BaseURL, apiVersion)
+	if err != nil {
+		return nil, err
+	}
+	return &Client{Signer: &anonSigner{}, APIURL: parsedBaseURL}, nil
+}
+
+// NewAuthenticatedClient parses the given MAAS API key into the individual
+// OAuth tokens and creates an Client that will use these tokens to sign the
+// requests it issues.
+// BaseURL should refer to the root of the MAAS server path, e.g.
+// http://my.maas.server.example.com/MAAS/
+// apiVersion should contain the version of the MAAS API that you want to use.
+func NewAuthenticatedClient(BaseURL string, apiKey string, apiVersion string) (*Client, error) {
+	elements := strings.Split(apiKey, ":")
+	if len(elements) != 3 {
+		errString := fmt.Sprintf("invalid API key %q; expected \"<consumer secret>:<token key>:<token secret>\"", apiKey)
+		return nil, errors.NewNotValid(nil, errString)
+	}
+	token := &OAuthToken{
+		ConsumerKey: elements[0],
+		// The consumer secret is the empty string in MAAS' authentication.
+		ConsumerSecret: "",
+		TokenKey:       elements[1],
+		TokenSecret:    elements[2],
+	}
+	signer, err := NewPlainTestOAuthSigner(token, "MAAS API")
+	if err != nil {
+		return nil, err
+	}
+	parsedBaseURL, err := composeAPIURL(BaseURL, apiVersion)
+	if err != nil {
+		return nil, err
+	}
+	return &Client{Signer: signer, APIURL: parsedBaseURL}, nil
+}
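+
+// A minimal usage sketch (the server URL and key below are placeholders,
+// not values from this repository; Get is the companion helper defined
+// earlier in this file):
+//
+//	client, err := NewAuthenticatedClient(
+//		"http://my.maas.server.example.com/MAAS/",
+//		"<consumer secret>:<token key>:<token secret>",
+//		"2.0")
+//	if err != nil {
+//		// e.g. a NotValid error for a malformed key
+//	}
+//	body, err := client.Get(&url.URL{Path: "machines/"}, "", nil)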
diff --git a/switchq/vendor/github.com/juju/gomaasapi/controller.go b/switchq/vendor/github.com/juju/gomaasapi/controller.go
new file mode 100644
index 0000000..3c729c2
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/controller.go
@@ -0,0 +1,890 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"path"
+	"strings"
+	"sync/atomic"
+
+	"github.com/juju/errors"
+	"github.com/juju/loggo"
+	"github.com/juju/schema"
+	"github.com/juju/utils/set"
+	"github.com/juju/version"
+)
+
+var (
+	logger = loggo.GetLogger("maas")
+
+	// The supported versions should be ordered from most desirable version to
+	// least as they will be tried in order.
+	supportedAPIVersions = []string{"2.0"}
+
+	// Each of the api versions that change the request or response structure
+	// for any given call should have a value defined for easy definition of
+	// the deserialization functions.
+	twoDotOh = version.Number{Major: 2, Minor: 0}
+
+	// Current request number. Informational only for logging.
+	requestNumber int64
+)
+
+// ControllerArgs is an argument struct for passing the required parameters
+// to the NewController method.
+type ControllerArgs struct {
+	BaseURL string
+	APIKey  string
+}
+
+// NewController creates an authenticated client to the MAAS API, and checks
+// the capabilities of the server.
+//
+// If the APIKey is not valid, a NotValid error is returned.
+// If the credentials are incorrect, a PermissionError is returned.
+func NewController(args ControllerArgs) (Controller, error) {
+	// For now we don't need to test multiple versions. It is expected that at
+	// some time in the future, we will try the most up to date version and then
+	// work our way backwards.
+	for _, apiVersion := range supportedAPIVersions {
+		major, minor, err := version.ParseMajorMinor(apiVersion)
+		// We should not get an error here. See the test.
+		if err != nil {
+			return nil, errors.Errorf("bad version defined in supported versions: %q", apiVersion)
+		}
+		client, err := NewAuthenticatedClient(args.BaseURL, args.APIKey, apiVersion)
+		if err != nil {
+			// If the credentials aren't valid, return now.
+			if errors.IsNotValid(err) {
+				return nil, errors.Trace(err)
+			}
+			// Any other error attempting to create the authenticated
+			// client is unexpected; return now.
+			return nil, NewUnexpectedError(err)
+		}
+		controllerVersion := version.Number{
+			Major: major,
+			Minor: minor,
+		}
+		controller := &controller{client: client}
+		// The controllerVersion returned from the function will include any patch version.
+		controller.capabilities, controller.apiVersion, err = controller.readAPIVersion(controllerVersion)
+		if err != nil {
+			logger.Debugf("read version failed: %#v", err)
+			continue
+		}
+
+		if err := controller.checkCreds(); err != nil {
+			return nil, errors.Trace(err)
+		}
+		return controller, nil
+	}
+
+	return nil, NewUnsupportedVersionError("controller at %s does not support any of %s", args.BaseURL, supportedAPIVersions)
+}
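+
+// A typical entry point looks like the following sketch (the URL and key
+// are placeholders):
+//
+//	controller, err := NewController(ControllerArgs{
+//		BaseURL: "http://my.maas.server.example.com/MAAS/",
+//		APIKey:  "<consumer secret>:<token key>:<token secret>",
+//	})
+//	if IsUnsupportedVersionError(err) {
+//		// the server speaks none of supportedAPIVersions
+//	}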
+
+type controller struct {
+	client       *Client
+	apiVersion   version.Number
+	capabilities set.Strings
+}
+
+// Capabilities implements Controller.
+func (c *controller) Capabilities() set.Strings {
+	return c.capabilities
+}
+
+// BootResources implements Controller.
+func (c *controller) BootResources() ([]BootResource, error) {
+	source, err := c.get("boot-resources")
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	resources, err := readBootResources(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []BootResource
+	for _, r := range resources {
+		result = append(result, r)
+	}
+	return result, nil
+}
+
+// Fabrics implements Controller.
+func (c *controller) Fabrics() ([]Fabric, error) {
+	source, err := c.get("fabrics")
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	fabrics, err := readFabrics(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Fabric
+	for _, f := range fabrics {
+		result = append(result, f)
+	}
+	return result, nil
+}
+
+// Spaces implements Controller.
+func (c *controller) Spaces() ([]Space, error) {
+	source, err := c.get("spaces")
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	spaces, err := readSpaces(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Space
+	for _, space := range spaces {
+		result = append(result, space)
+	}
+	return result, nil
+}
+
+// Zones implements Controller.
+func (c *controller) Zones() ([]Zone, error) {
+	source, err := c.get("zones")
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	zones, err := readZones(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Zone
+	for _, z := range zones {
+		result = append(result, z)
+	}
+	return result, nil
+}
+
+// DevicesArgs is an argument struct for selecting Devices.
+// Only devices that match the specified criteria are returned.
+type DevicesArgs struct {
+	Hostname     []string
+	MACAddresses []string
+	SystemIDs    []string
+	Domain       string
+	Zone         string
+	AgentName    string
+}
+
+// Devices implements Controller.
+func (c *controller) Devices(args DevicesArgs) ([]Device, error) {
+	params := NewURLParams()
+	params.MaybeAddMany("hostname", args.Hostname)
+	params.MaybeAddMany("mac_address", args.MACAddresses)
+	params.MaybeAddMany("id", args.SystemIDs)
+	params.MaybeAdd("domain", args.Domain)
+	params.MaybeAdd("zone", args.Zone)
+	params.MaybeAdd("agent_name", args.AgentName)
+	source, err := c.getQuery("devices", params.Values)
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	devices, err := readDevices(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Device
+	for _, d := range devices {
+		d.controller = c
+		result = append(result, d)
+	}
+	return result, nil
+}
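+
+// For example, a sketch selecting devices in one zone (the zone name is
+// hypothetical):
+//
+//	devices, err := controller.Devices(DevicesArgs{Zone: "default"})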
+
+// CreateDeviceArgs is an argument struct for passing information into CreateDevice.
+type CreateDeviceArgs struct {
+	Hostname     string
+	MACAddresses []string
+	Domain       string
+	Parent       string
+}
+
+// CreateDevice implements Controller.
+func (c *controller) CreateDevice(args CreateDeviceArgs) (Device, error) {
+	// There must be at least one mac address.
+	if len(args.MACAddresses) == 0 {
+		return nil, NewBadRequestError("at least one MAC address must be specified")
+	}
+	params := NewURLParams()
+	params.MaybeAdd("hostname", args.Hostname)
+	params.MaybeAdd("domain", args.Domain)
+	params.MaybeAddMany("mac_addresses", args.MACAddresses)
+	params.MaybeAdd("parent", args.Parent)
+	result, err := c.post("devices", "", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusBadRequest {
+				return nil, errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			}
+		}
+		// Translate http errors.
+		return nil, NewUnexpectedError(err)
+	}
+
+	device, err := readDevice(c.apiVersion, result)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	device.controller = c
+	return device, nil
+}
+
+// MachinesArgs is an argument struct for selecting Machines.
+// Only machines that match the specified criteria are returned.
+type MachinesArgs struct {
+	Hostnames    []string
+	MACAddresses []string
+	SystemIDs    []string
+	Domain       string
+	Zone         string
+	AgentName    string
+	OwnerData    map[string]string
+}
+
+// Machines implements Controller.
+func (c *controller) Machines(args MachinesArgs) ([]Machine, error) {
+	params := NewURLParams()
+	params.MaybeAddMany("hostname", args.Hostnames)
+	params.MaybeAddMany("mac_address", args.MACAddresses)
+	params.MaybeAddMany("id", args.SystemIDs)
+	params.MaybeAdd("domain", args.Domain)
+	params.MaybeAdd("zone", args.Zone)
+	params.MaybeAdd("agent_name", args.AgentName)
+	// At the moment the MAAS API doesn't support filtering by owner
+	// data so we do that ourselves below.
+	source, err := c.getQuery("machines", params.Values)
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	machines, err := readMachines(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Machine
+	for _, m := range machines {
+		m.controller = c
+		if ownerDataMatches(m.ownerData, args.OwnerData) {
+			result = append(result, m)
+		}
+	}
+	return result, nil
+}
+
+func ownerDataMatches(ownerData, filter map[string]string) bool {
+	for key, value := range filter {
+		if ownerData[key] != value {
+			return false
+		}
+	}
+	return true
+}
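+
+// Because the MAAS API cannot filter on owner data, Machines applies
+// ownerDataMatches client side, e.g. (the key and value are hypothetical):
+//
+//	machines, err := controller.Machines(MachinesArgs{
+//		OwnerData: map[string]string{"claimed-by": "cord"},
+//	})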
+
+// StorageSpec represents one element of the storage constraints that must
+// be satisfied to allocate a machine.
+type StorageSpec struct {
+	// Label is optional and an arbitrary string. Labels need to be unique
+	// across the StorageSpec elements specified in the AllocateMachineArgs.
+	Label string
+	// Size is required and gives the minimum size in GB.
+	Size int
+	// Zero or more tags associated with the disks.
+	Tags []string
+}
+
+// Validate ensures that there is a positive size and that there are no empty
+// tag values.
+func (s *StorageSpec) Validate() error {
+	if s.Size <= 0 {
+		return errors.NotValidf("Size value %d", s.Size)
+	}
+	for _, v := range s.Tags {
+		if v == "" {
+			return errors.NotValidf("empty tag")
+		}
+	}
+	return nil
+}
+
+// String returns the string representation of the storage spec.
+func (s *StorageSpec) String() string {
+	label := s.Label
+	if label != "" {
+		label += ":"
+	}
+	tags := strings.Join(s.Tags, ",")
+	if tags != "" {
+		tags = "(" + tags + ")"
+	}
+	return fmt.Sprintf("%s%d%s", label, s.Size, tags)
+}
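+
+// For example (the values are hypothetical):
+//
+//	spec := StorageSpec{Label: "root", Size: 50, Tags: []string{"ssd"}}
+//	spec.String() // "root:50(ssd)"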
+
+// InterfaceSpec represents one element of the network-related constraints.
+type InterfaceSpec struct {
+	// Label is required and an arbitrary string. Labels need to be unique
+	// across the InterfaceSpec elements specified in the AllocateMachineArgs.
+	// The label is returned in the ConstraintMatches response from
+	// AllocateMachine.
+	Label string
+	Space string
+
+	// NOTE: there are other interface spec values that we are not exposing at
+	// this stage that can be added on an as needed basis. Other possible values are:
+	//     'fabric_class', 'not_fabric_class',
+	//     'subnet_cidr', 'not_subnet_cidr',
+	//     'vid', 'not_vid',
+	//     'fabric', 'not_fabric',
+	//     'subnet', 'not_subnet',
+	//     'mode'
+}
+
+// Validate ensures that a Label is specified and that a Space value is set.
+func (a *InterfaceSpec) Validate() error {
+	if a.Label == "" {
+		return errors.NotValidf("missing Label")
+	}
+	// Perhaps at some stage in the future there will be other possible specs
+	// supported (like vid, subnet, etc), but until then, just space to check.
+	if a.Space == "" {
+		return errors.NotValidf("empty Space constraint")
+	}
+	return nil
+}
+
+// String returns the interface spec as MAAS requires it.
+func (a *InterfaceSpec) String() string {
+	return fmt.Sprintf("%s:space=%s", a.Label, a.Space)
+}
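+
+// For example (the values are hypothetical):
+//
+//	spec := InterfaceSpec{Label: "eth0", Space: "storage"}
+//	spec.String() // "eth0:space=storage"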
+
+// AllocateMachineArgs is an argument struct for passing args into Machine.Allocate.
+type AllocateMachineArgs struct {
+	Hostname     string
+	Architecture string
+	MinCPUCount  int
+	// MinMemory represented in MB.
+	MinMemory int
+	Tags      []string
+	NotTags   []string
+	Zone      string
+	NotInZone []string
+	// Storage represents the required disks on the Machine. If any are specified
+	// the first value is used for the root disk.
+	Storage []StorageSpec
+	// Interfaces represents a number of required interfaces on the machine.
+	// Each InterfaceSpec relates to an individual network interface.
+	Interfaces []InterfaceSpec
+	// NotSpace is a machine level constraint, and applies to the entire machine
+	// rather than specific interfaces.
+	NotSpace  []string
+	AgentName string
+	Comment   string
+	DryRun    bool
+}
+
+// Validate makes sure that any labels specified in Storage or Interfaces
+// are unique, and that the required specifications are valid.
+func (a *AllocateMachineArgs) Validate() error {
+	storageLabels := set.NewStrings()
+	for _, spec := range a.Storage {
+		if err := spec.Validate(); err != nil {
+			return errors.Annotate(err, "Storage")
+		}
+		if spec.Label != "" {
+			if storageLabels.Contains(spec.Label) {
+				return errors.NotValidf("reusing storage label %q", spec.Label)
+			}
+			storageLabels.Add(spec.Label)
+		}
+	}
+	interfaceLabels := set.NewStrings()
+	for _, spec := range a.Interfaces {
+		if err := spec.Validate(); err != nil {
+			return errors.Annotate(err, "Interfaces")
+		}
+		if interfaceLabels.Contains(spec.Label) {
+			return errors.NotValidf("reusing interface label %q", spec.Label)
+		}
+		interfaceLabels.Add(spec.Label)
+	}
+	for _, v := range a.NotSpace {
+		if v == "" {
+			return errors.NotValidf("empty NotSpace constraint")
+		}
+	}
+	return nil
+}
+
+func (a *AllocateMachineArgs) storage() string {
+	var values []string
+	for _, spec := range a.Storage {
+		values = append(values, spec.String())
+	}
+	return strings.Join(values, ",")
+}
+
+func (a *AllocateMachineArgs) interfaces() string {
+	var values []string
+	for _, spec := range a.Interfaces {
+		values = append(values, spec.String())
+	}
+	return strings.Join(values, ";")
+}
+
+func (a *AllocateMachineArgs) notSubnets() []string {
+	var values []string
+	for _, v := range a.NotSpace {
+		values = append(values, "space:"+v)
+	}
+	return values
+}
+
+// ConstraintMatches provides a way for the caller of AllocateMachine to
+// determine how the allocated machine matched the storage and interfaces
+// constraints specified. The labels that were used in the constraints are
+// the keys in the maps.
+type ConstraintMatches struct {
+	// Interface is a mapping of the constraint label specified to the Interfaces
+	// that match that constraint.
+	Interfaces map[string][]Interface
+
+	// Storage is a mapping of the constraint label specified to the BlockDevices
+	// that match that constraint.
+	Storage map[string][]BlockDevice
+}
+
+// AllocateMachine implements Controller.
+//
+// Returns an error that satisfies IsNoMatchError if the requested
+// constraints cannot be met.
+func (c *controller) AllocateMachine(args AllocateMachineArgs) (Machine, ConstraintMatches, error) {
+	var matches ConstraintMatches
+	params := NewURLParams()
+	params.MaybeAdd("name", args.Hostname)
+	params.MaybeAdd("arch", args.Architecture)
+	params.MaybeAddInt("cpu_count", args.MinCPUCount)
+	params.MaybeAddInt("mem", args.MinMemory)
+	params.MaybeAddMany("tags", args.Tags)
+	params.MaybeAddMany("not_tags", args.NotTags)
+	params.MaybeAdd("storage", args.storage())
+	params.MaybeAdd("interfaces", args.interfaces())
+	params.MaybeAddMany("not_subnets", args.notSubnets())
+	params.MaybeAdd("zone", args.Zone)
+	params.MaybeAddMany("not_in_zone", args.NotInZone)
+	params.MaybeAdd("agent_name", args.AgentName)
+	params.MaybeAdd("comment", args.Comment)
+	params.MaybeAddBool("dry_run", args.DryRun)
+	result, err := c.post("machines", "allocate", params.Values)
+	if err != nil {
+		// A 409 Status code is "No Matching Machines"
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusConflict {
+				return nil, matches, errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			}
+		}
+		// Translate http errors.
+		return nil, matches, NewUnexpectedError(err)
+	}
+
+	machine, err := readMachine(c.apiVersion, result)
+	if err != nil {
+		return nil, matches, errors.Trace(err)
+	}
+	machine.controller = c
+
+	// Parse the constraint matches.
+	matches, err = parseAllocateConstraintsResponse(result, machine)
+	if err != nil {
+		return nil, matches, errors.Trace(err)
+	}
+
+	return machine, matches, nil
+}
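+
+// A sketch of a constrained allocation (all constraint values are
+// hypothetical):
+//
+//	machine, matches, err := controller.AllocateMachine(AllocateMachineArgs{
+//		Tags:    []string{"virtual"},
+//		Storage: []StorageSpec{{Label: "root", Size: 50}},
+//	})
+//	if IsNoMatchError(err) {
+//		// no machine satisfies the constraints
+//	}
+//	rootDisks := matches.Storage["root"] // devices matching the "root" label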
+
+// ReleaseMachinesArgs is an argument struct for passing the machine system IDs
+// and an optional comment into the ReleaseMachines method.
+type ReleaseMachinesArgs struct {
+	SystemIDs []string
+	Comment   string
+}
+
+// ReleaseMachines implements Controller.
+//
+// Release multiple machines at once. Returns
+//  - BadRequestError if any of the machines cannot be found
+//  - PermissionError if the user does not have permission to release any of the machines
+//  - CannotCompleteError if any of the machines could not be released due to their current state
+func (c *controller) ReleaseMachines(args ReleaseMachinesArgs) error {
+	params := NewURLParams()
+	params.MaybeAddMany("machines", args.SystemIDs)
+	params.MaybeAdd("comment", args.Comment)
+	_, err := c.post("machines", "release", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusBadRequest:
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			case http.StatusConflict:
+				return errors.Wrap(err, NewCannotCompleteError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	return nil
+}
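+
+// For example (the system ID is hypothetical):
+//
+//	err := controller.ReleaseMachines(ReleaseMachinesArgs{
+//		SystemIDs: []string{"4y3h7n"},
+//		Comment:   "returning to pool",
+//	})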
+
+// Files implements Controller.
+func (c *controller) Files(prefix string) ([]File, error) {
+	params := NewURLParams()
+	params.MaybeAdd("prefix", prefix)
+	source, err := c.getQuery("files", params.Values)
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	files, err := readFiles(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []File
+	for _, f := range files {
+		f.controller = c
+		result = append(result, f)
+	}
+	return result, nil
+}
+
+// GetFile implements Controller.
+func (c *controller) GetFile(filename string) (File, error) {
+	if filename == "" {
+		return nil, errors.NotValidf("missing filename")
+	}
+	source, err := c.get("files/" + filename)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusNotFound {
+				return nil, errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			}
+		}
+		return nil, NewUnexpectedError(err)
+	}
+	file, err := readFile(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	file.controller = c
+	return file, nil
+}
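+
+// For example (the filename is hypothetical):
+//
+//	f, err := controller.GetFile("deploy-config.yaml")
+//	if IsNoMatchError(err) {
+//		// the file does not exist on the server
+//	}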
+
+// AddFileArgs is an argument struct for passing information into AddFile.
+// One of Content or (Reader, Length) must be specified.
+type AddFileArgs struct {
+	Filename string
+	Content  []byte
+	Reader   io.Reader
+	Length   int64
+}
+
+// Validate checks to make sure the filename has no slashes, and that one of
+// Content or (Reader, Length) is specified.
+func (a *AddFileArgs) Validate() error {
+	dir, _ := path.Split(a.Filename)
+	if dir != "" {
+		return errors.NotValidf("paths in Filename %q", a.Filename)
+	}
+	if a.Filename == "" {
+		return errors.NotValidf("missing Filename")
+	}
+	if a.Content == nil {
+		if a.Reader == nil {
+			return errors.NotValidf("missing Content or Reader")
+		}
+		if a.Length == 0 {
+			return errors.NotValidf("missing Length")
+		}
+	} else {
+		if a.Reader != nil {
+			return errors.NotValidf("specifying Content and Reader")
+		}
+		if a.Length != 0 {
+			return errors.NotValidf("specifying Length and Content")
+		}
+	}
+	return nil
+}
+
+// AddFile implements Controller.
+func (c *controller) AddFile(args AddFileArgs) error {
+	if err := args.Validate(); err != nil {
+		return errors.Trace(err)
+	}
+	fileContent := args.Content
+	if fileContent == nil {
+		content, err := ioutil.ReadAll(io.LimitReader(args.Reader, args.Length))
+		if err != nil {
+			return errors.Annotatef(err, "cannot read file content")
+		}
+		fileContent = content
+	}
+	params := url.Values{"filename": {args.Filename}}
+	_, err := c.postFile("files", "", params, fileContent)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusBadRequest {
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
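+
+// Either form works, e.g. (the names and content are hypothetical):
+//
+//	err := controller.AddFile(AddFileArgs{Filename: "notes.txt", Content: []byte("hello")})
+//	// or, streaming from a reader:
+//	err = controller.AddFile(AddFileArgs{Filename: "notes.txt", Reader: r, Length: 5})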
+
+func (c *controller) checkCreds() error {
+	if _, err := c.getOp("users", "whoami"); err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusUnauthorized {
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
+
+func (c *controller) put(path string, params url.Values) (interface{}, error) {
+	path = EnsureTrailingSlash(path)
+	requestID := nextRequestID()
+	logger.Tracef("request %x: PUT %s%s, params: %s", requestID, c.client.APIURL, path, params.Encode())
+	bytes, err := c.client.Put(&url.URL{Path: path}, params)
+	if err != nil {
+		logger.Tracef("response %x: error: %q", requestID, err.Error())
+		logger.Tracef("error detail: %#v", err)
+		return nil, errors.Trace(err)
+	}
+	logger.Tracef("response %x: %s", requestID, string(bytes))
+
+	var parsed interface{}
+	err = json.Unmarshal(bytes, &parsed)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return parsed, nil
+}
+
+func (c *controller) post(path, op string, params url.Values) (interface{}, error) {
+	bytes, err := c._postRaw(path, op, params, nil)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	var parsed interface{}
+	err = json.Unmarshal(bytes, &parsed)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return parsed, nil
+}
+
+func (c *controller) postFile(path, op string, params url.Values, fileContent []byte) (interface{}, error) {
+	// Only one file is ever sent at a time.
+	files := map[string][]byte{"file": fileContent}
+	return c._postRaw(path, op, params, files)
+}
+
+func (c *controller) _postRaw(path, op string, params url.Values, files map[string][]byte) ([]byte, error) {
+	path = EnsureTrailingSlash(path)
+	requestID := nextRequestID()
+	if logger.IsTraceEnabled() {
+		opArg := ""
+		if op != "" {
+			opArg = "?op=" + op
+		}
+		logger.Tracef("request %x: POST %s%s%s, params=%s", requestID, c.client.APIURL, path, opArg, params.Encode())
+	}
+	bytes, err := c.client.Post(&url.URL{Path: path}, op, params, files)
+	if err != nil {
+		logger.Tracef("response %x: error: %q", requestID, err.Error())
+		logger.Tracef("error detail: %#v", err)
+		return nil, errors.Trace(err)
+	}
+	logger.Tracef("response %x: %s", requestID, string(bytes))
+	return bytes, nil
+}
+
+func (c *controller) delete(path string) error {
+	path = EnsureTrailingSlash(path)
+	requestID := nextRequestID()
+	logger.Tracef("request %x: DELETE %s%s", requestID, c.client.APIURL, path)
+	err := c.client.Delete(&url.URL{Path: path})
+	if err != nil {
+		logger.Tracef("response %x: error: %q", requestID, err.Error())
+		logger.Tracef("error detail: %#v", err)
+		return errors.Trace(err)
+	}
+	logger.Tracef("response %x: complete", requestID)
+	return nil
+}
+
+func (c *controller) getQuery(path string, params url.Values) (interface{}, error) {
+	return c._get(path, "", params)
+}
+
+func (c *controller) get(path string) (interface{}, error) {
+	return c._get(path, "", nil)
+}
+
+func (c *controller) getOp(path, op string) (interface{}, error) {
+	return c._get(path, op, nil)
+}
+
+func (c *controller) _get(path, op string, params url.Values) (interface{}, error) {
+	bytes, err := c._getRaw(path, op, params)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var parsed interface{}
+	err = json.Unmarshal(bytes, &parsed)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return parsed, nil
+}
+
+func (c *controller) _getRaw(path, op string, params url.Values) ([]byte, error) {
+	path = EnsureTrailingSlash(path)
+	requestID := nextRequestID()
+	if logger.IsTraceEnabled() {
+		var query string
+		if params != nil {
+			query = "?" + params.Encode()
+		}
+		logger.Tracef("request %x: GET %s%s%s", requestID, c.client.APIURL, path, query)
+	}
+	bytes, err := c.client.Get(&url.URL{Path: path}, op, params)
+	if err != nil {
+		logger.Tracef("response %x: error: %q", requestID, err.Error())
+		logger.Tracef("error detail: %#v", err)
+		return nil, errors.Trace(err)
+	}
+	logger.Tracef("response %x: %s", requestID, string(bytes))
+	return bytes, nil
+}
+
+func nextRequestID() int64 {
+	return atomic.AddInt64(&requestNumber, 1)
+}
+
+func (c *controller) readAPIVersion(apiVersion version.Number) (set.Strings, version.Number, error) {
+	parsed, err := c.get("version")
+	if err != nil {
+		return nil, apiVersion, errors.Trace(err)
+	}
+
+	// As we come to care about other fields, add them here.
+	fields := schema.Fields{
+		"capabilities": schema.List(schema.String()),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(parsed, nil)
+	if err != nil {
+		return nil, apiVersion, WrapWithDeserializationError(err, "version response")
+	}
+	// For now, we don't append any subversion, but as it becomes used, we
+	// should parse and check.
+
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+	capabilities := set.NewStrings()
+	capabilityValues := valid["capabilities"].([]interface{})
+	for _, value := range capabilityValues {
+		capabilities.Add(value.(string))
+	}
+
+	return capabilities, apiVersion, nil
+}
+
+func parseAllocateConstraintsResponse(source interface{}, machine *machine) (ConstraintMatches, error) {
+	var empty ConstraintMatches
+	matchFields := schema.Fields{
+		"storage":    schema.StringMap(schema.List(schema.ForceInt())),
+		"interfaces": schema.StringMap(schema.List(schema.ForceInt())),
+	}
+	matchDefaults := schema.Defaults{
+		"storage":    schema.Omit,
+		"interfaces": schema.Omit,
+	}
+	fields := schema.Fields{
+		"constraints_by_type": schema.FieldMap(matchFields, matchDefaults),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return empty, WrapWithDeserializationError(err, "allocation constraints response schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	constraintsMap := valid["constraints_by_type"].(map[string]interface{})
+	result := ConstraintMatches{
+		Interfaces: make(map[string][]Interface),
+		Storage:    make(map[string][]BlockDevice),
+	}
+
+	if interfaceMatches, found := constraintsMap["interfaces"]; found {
+		matches := convertConstraintMatches(interfaceMatches)
+		for label, ids := range matches {
+			interfaces := make([]Interface, len(ids))
+			for index, id := range ids {
+				iface := machine.Interface(id)
+				if iface == nil {
+					return empty, NewDeserializationError("constraint match interface %q: %d does not match an interface for the machine", label, id)
+				}
+				interfaces[index] = iface
+			}
+			result.Interfaces[label] = interfaces
+		}
+	}
+
+	if storageMatches, found := constraintsMap["storage"]; found {
+		matches := convertConstraintMatches(storageMatches)
+		for label, ids := range matches {
+			blockDevices := make([]BlockDevice, len(ids))
+			for index, id := range ids {
+				blockDevice := machine.PhysicalBlockDevice(id)
+				if blockDevice == nil {
+					return empty, NewDeserializationError("constraint match storage %q: %d does not match a physical block device for the machine", label, id)
+				}
+				blockDevices[index] = blockDevice
+			}
+			result.Storage[label] = blockDevices
+		}
+	}
+	return result, nil
+}
+
+func convertConstraintMatches(source interface{}) map[string][]int {
+	// These casts are all safe because of the schema check.
+	result := make(map[string][]int)
+	matchMap := source.(map[string]interface{})
+	for label, values := range matchMap {
+		items := values.([]interface{})
+		result[label] = make([]int, len(items))
+		for index, value := range items {
+			result[label][index] = value.(int)
+		}
+	}
+	return result
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/dependencies.tsv b/switchq/vendor/github.com/juju/gomaasapi/dependencies.tsv
new file mode 100644
index 0000000..4cd966f
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/dependencies.tsv
@@ -0,0 +1,11 @@
+github.com/juju/errors	git	1b5e39b83d1835fa480e0c2ddefb040ee82d58b3	2015-09-16T12:56:42Z
+github.com/juju/loggo	git	8477fc936adf0e382d680310047ca27e128a309a	2015-05-27T03:58:39Z
+github.com/juju/names	git	8a0aa0963bbacdc790914892e9ff942e94d6f795	2016-03-30T15:05:33Z
+github.com/juju/schema	git	075de04f9b7d7580d60a1e12a0b3f50bb18e6998	2016-04-20T04:42:03Z
+github.com/juju/testing	git	162fafccebf20a4207ab93d63b986c230e3f4d2e	2016-04-04T09:43:17Z
+github.com/juju/utils	git	eb6cb958762135bb61aed1e0951f657c674d427f	2016-04-11T02:40:59Z
+github.com/juju/version	git	ef897ad7f130870348ce306f61332f5335355063	2015-11-27T20:34:00Z
+golang.org/x/crypto	git	aedad9a179ec1ea11b7064c57cbc6dc30d7724ec	2015-08-30T18:06:42Z
+gopkg.in/check.v1	git	4f90aeace3a26ad7021961c297b22c42160c7b25	2016-01-05T16:49:36Z
+gopkg.in/mgo.v2	git	4d04138ffef2791c479c0c8bbffc30b34081b8d9	2015-10-26T16:34:53Z
+gopkg.in/yaml.v2	git	a83829b6f1293c91addabc89d0571c246397bbf4	2016-03-01T20:40:22Z
diff --git a/switchq/vendor/github.com/juju/gomaasapi/device.go b/switchq/vendor/github.com/juju/gomaasapi/device.go
new file mode 100644
index 0000000..7c9bc70
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/device.go
@@ -0,0 +1,293 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type device struct {
+	controller *controller
+
+	resourceURI string
+
+	systemID string
+	hostname string
+	fqdn     string
+
+	parent string
+	owner  string
+
+	ipAddresses  []string
+	interfaceSet []*interface_
+	zone         *zone
+}
+
+// SystemID implements Device.
+func (d *device) SystemID() string {
+	return d.systemID
+}
+
+// Hostname implements Device.
+func (d *device) Hostname() string {
+	return d.hostname
+}
+
+// FQDN implements Device.
+func (d *device) FQDN() string {
+	return d.fqdn
+}
+
+// Parent implements Device.
+func (d *device) Parent() string {
+	return d.parent
+}
+
+// Owner implements Device.
+func (d *device) Owner() string {
+	return d.owner
+}
+
+// IPAddresses implements Device.
+func (d *device) IPAddresses() []string {
+	return d.ipAddresses
+}
+
+// Zone implements Device.
+func (d *device) Zone() Zone {
+	if d.zone == nil {
+		return nil
+	}
+	return d.zone
+}
+
+// InterfaceSet implements Device.
+func (d *device) InterfaceSet() []Interface {
+	result := make([]Interface, len(d.interfaceSet))
+	for i, v := range d.interfaceSet {
+		v.controller = d.controller
+		result[i] = v
+	}
+	return result
+}
+
+// CreateInterfaceArgs is an argument struct for passing parameters to
+// the Machine.CreateInterface method.
+type CreateInterfaceArgs struct {
+	// Name of the interface (required).
+	Name string
+	// MACAddress is the MAC address of the interface (required).
+	MACAddress string
+	// VLAN is the untagged VLAN the interface is connected to (required).
+	VLAN VLAN
+	// Tags to attach to the interface (optional).
+	Tags []string
+	// MTU - Maximum transmission unit. (optional)
+	MTU int
+	// AcceptRA - Accept router advertisements. (IPv6 only)
+	AcceptRA bool
+	// Autoconf - Perform stateless autoconfiguration. (IPv6 only)
+	Autoconf bool
+}
+
+// Validate checks the required fields are set for the arg structure.
+func (a *CreateInterfaceArgs) Validate() error {
+	if a.Name == "" {
+		return errors.NotValidf("missing Name")
+	}
+	if a.MACAddress == "" {
+		return errors.NotValidf("missing MACAddress")
+	}
+	if a.VLAN == nil {
+		return errors.NotValidf("missing VLAN")
+	}
+	return nil
+}
+
+// interfacesURI is the URI used to add interfaces for this device. The
+// operations are on the nodes endpoint, not devices.
+func (d *device) interfacesURI() string {
+	return strings.Replace(d.resourceURI, "devices", "nodes", 1) + "interfaces/"
+}
+
+// CreateInterface implements Device.
+func (d *device) CreateInterface(args CreateInterfaceArgs) (Interface, error) {
+	if err := args.Validate(); err != nil {
+		return nil, errors.Trace(err)
+	}
+	params := NewURLParams()
+	params.Values.Add("name", args.Name)
+	params.Values.Add("mac_address", args.MACAddress)
+	params.Values.Add("vlan", fmt.Sprint(args.VLAN.ID()))
+	params.MaybeAdd("tags", strings.Join(args.Tags, ","))
+	params.MaybeAddInt("mtu", args.MTU)
+	params.MaybeAddBool("accept_ra", args.AcceptRA)
+	params.MaybeAddBool("autoconf", args.Autoconf)
+	result, err := d.controller.post(d.interfacesURI(), "create_physical", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound, http.StatusConflict:
+				return nil, errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return nil, errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			case http.StatusServiceUnavailable:
+				return nil, errors.Wrap(err, NewCannotCompleteError(svrErr.BodyMessage))
+			}
+		}
+		return nil, NewUnexpectedError(err)
+	}
+
+	iface, err := readInterface(d.controller.apiVersion, result)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	iface.controller = d.controller
+
+	// TODO: add to the interfaces for the device when the interfaces are returned.
+	// lp:bug 1567213.
+	return iface, nil
+}
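+
+// A sketch of creating an interface (the VLAN would typically come from the
+// fabric's VLANs; the other values are hypothetical):
+//
+//	iface, err := dev.CreateInterface(CreateInterfaceArgs{
+//		Name:       "eth1",
+//		MACAddress: "00:16:3e:aa:bb:cc",
+//		VLAN:       someVLAN,
+//	})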
+
+// Delete implements Device.
+func (d *device) Delete() error {
+	err := d.controller.delete(d.resourceURI)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
+
+func readDevice(controllerVersion version.Number, source interface{}) (*device, error) {
+	readFunc, err := getDeviceDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.StringMap(schema.Any())
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "device base schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	return readFunc(valid)
+}
+
+func readDevices(controllerVersion version.Number, source interface{}) ([]*device, error) {
+	readFunc, err := getDeviceDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "device base schema check failed")
+	}
+	valid := coerced.([]interface{})
+	return readDeviceList(valid, readFunc)
+}
+
+func getDeviceDeserializationFunc(controllerVersion version.Number) (deviceDeserializationFunc, error) {
+	var deserialisationVersion version.Number
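+	// Select the newest deserialization version that does not exceed the
+	// controller's version.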
+	for v := range deviceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no device read func for version %s", controllerVersion)
+	}
+	return deviceDeserializationFuncs[deserialisationVersion], nil
+}
+
+// readDeviceList expects the values of the sourceList to be string maps.
+func readDeviceList(sourceList []interface{}, readFunc deviceDeserializationFunc) ([]*device, error) {
+	result := make([]*device, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for device %d, %T", i, value)
+		}
+		device, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "device %d", i)
+		}
+		result = append(result, device)
+	}
+	return result, nil
+}
+
+type deviceDeserializationFunc func(map[string]interface{}) (*device, error)
+
+var deviceDeserializationFuncs = map[version.Number]deviceDeserializationFunc{
+	twoDotOh: device_2_0,
+}
+
+func device_2_0(source map[string]interface{}) (*device, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"system_id": schema.String(),
+		"hostname":  schema.String(),
+		"fqdn":      schema.String(),
+		"parent":    schema.OneOf(schema.Nil(""), schema.String()),
+		"owner":     schema.OneOf(schema.Nil(""), schema.String()),
+
+		"ip_addresses":  schema.List(schema.String()),
+		"interface_set": schema.List(schema.StringMap(schema.Any())),
+		"zone":          schema.StringMap(schema.Any()),
+	}
+	defaults := schema.Defaults{
+		"owner":  "",
+		"parent": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "device 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	interfaceSet, err := readInterfaceList(valid["interface_set"].([]interface{}), interface_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	zone, err := zone_2_0(valid["zone"].(map[string]interface{}))
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	owner, _ := valid["owner"].(string)
+	parent, _ := valid["parent"].(string)
+	result := &device{
+		resourceURI: valid["resource_uri"].(string),
+
+		systemID: valid["system_id"].(string),
+		hostname: valid["hostname"].(string),
+		fqdn:     valid["fqdn"].(string),
+		parent:   parent,
+		owner:    owner,
+
+		ipAddresses:  convertToStringSlice(valid["ip_addresses"]),
+		interfaceSet: interfaceSet,
+		zone:         zone,
+	}
+	return result, nil
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/enum.go b/switchq/vendor/github.com/juju/gomaasapi/enum.go
new file mode 100644
index 0000000..a516d6b
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/enum.go
@@ -0,0 +1,57 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+const (
+	// NodeStatus* values represent the vocabulary of a Node's possible statuses.
+
+	// The node has been created and has a system ID assigned to it.
+	NodeStatusDeclared = "0"
+
+	// Testing and other commissioning steps are taking place.
+	NodeStatusCommissioning = "1"
+
+	// Smoke or burn-in testing has found a problem.
+	NodeStatusFailedTests = "2"
+
+	// The node can’t be contacted.
+	NodeStatusMissing = "3"
+
+	// The node is in the general pool ready to be deployed.
+	NodeStatusReady = "4"
+
+	// The node is ready for named deployment.
+	NodeStatusReserved = "5"
+
+	// The node is powering a service from a charm or is ready for use with a fresh Ubuntu install.
+	NodeStatusDeployed = "6"
+
+	// The node has been removed from service manually until an admin overrides the retirement.
+	NodeStatusRetired = "7"
+
+	// The node is broken: a step in the node lifecycle failed. More details
+	// can be found in the node's event log.
+	NodeStatusBroken = "8"
+
+	// The node is being installed.
+	NodeStatusDeploying = "9"
+
+	// The node has been allocated to a user and is ready for deployment.
+	NodeStatusAllocated = "10"
+
+	// The deployment of the node failed.
+	NodeStatusFailedDeployment = "11"
+
+	// The node is powering down after a release request.
+	NodeStatusReleasing = "12"
+
+	// The releasing of the node failed.
+	NodeStatusFailedReleasing = "13"
+
+	// The node is erasing its disks.
+	NodeStatusDiskErasing = "14"
+
+	// The node failed to erase its disks.
+	NodeStatusFailedDiskErasing = "15"
+)
diff --git a/switchq/vendor/github.com/juju/gomaasapi/errors.go b/switchq/vendor/github.com/juju/gomaasapi/errors.go
new file mode 100644
index 0000000..8931d56
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/errors.go
@@ -0,0 +1,161 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+
+	"github.com/juju/errors"
+)
+
+// NoMatchError is returned when the requested action cannot be performed
+// because no entities are available that match the request.
+type NoMatchError struct {
+	errors.Err
+}
+
+// NewNoMatchError constructs a new NoMatchError and sets the location.
+func NewNoMatchError(message string) error {
+	err := &NoMatchError{Err: errors.NewErr(message)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsNoMatchError returns true if err is a NoMatchError.
+func IsNoMatchError(err error) bool {
+	_, ok := errors.Cause(err).(*NoMatchError)
+	return ok
+}
+
+// UnexpectedError is an error for a condition that hasn't been specifically identified.
+type UnexpectedError struct {
+	errors.Err
+}
+
+// NewUnexpectedError constructs a new UnexpectedError and sets the location.
+func NewUnexpectedError(err error) error {
+	uerr := &UnexpectedError{Err: errors.NewErr("unexpected: %v", err)}
+	uerr.SetLocation(1)
+	return errors.Wrap(err, uerr)
+}
+
+// IsUnexpectedError returns true if err is an UnexpectedError.
+func IsUnexpectedError(err error) bool {
+	_, ok := errors.Cause(err).(*UnexpectedError)
+	return ok
+}
+
+// UnsupportedVersionError refers to calls made to an unsupported api version.
+type UnsupportedVersionError struct {
+	errors.Err
+}
+
+// NewUnsupportedVersionError constructs a new UnsupportedVersionError and sets the location.
+func NewUnsupportedVersionError(format string, args ...interface{}) error {
+	err := &UnsupportedVersionError{Err: errors.NewErr(format, args...)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsUnsupportedVersionError returns true if err is an UnsupportedVersionError.
+func IsUnsupportedVersionError(err error) bool {
+	_, ok := errors.Cause(err).(*UnsupportedVersionError)
+	return ok
+}
+
+// DeserializationError types are returned when the returned JSON data from
+// the controller doesn't match the code's expectations.
+type DeserializationError struct {
+	errors.Err
+}
+
+// NewDeserializationError constructs a new DeserializationError and sets the location.
+func NewDeserializationError(format string, args ...interface{}) error {
+	err := &DeserializationError{Err: errors.NewErr(format, args...)}
+	err.SetLocation(1)
+	return err
+}
+
+// WrapWithDeserializationError constructs a new DeserializationError with the
+// specified message, and sets the location and returns a new error with the
+// full error stack set including the error passed in.
+func WrapWithDeserializationError(err error, format string, args ...interface{}) error {
+	message := fmt.Sprintf(format, args...)
+	// We want the deserialization error message to include the error text of the
+	// previous error, but wrap it in the new type.
+	derr := &DeserializationError{Err: errors.NewErr(message + ": " + err.Error())}
+	derr.SetLocation(1)
+	wrapped := errors.Wrap(err, derr)
+	// We want the location of the wrapped error to be the caller of this function,
+	// not the line above.
+	if errType, ok := wrapped.(*errors.Err); ok {
+		// We know it is because that is what Wrap returns.
+		errType.SetLocation(1)
+	}
+	return wrapped
+}
+
+// IsDeserializationError returns true if err is a DeserializationError.
+func IsDeserializationError(err error) bool {
+	_, ok := errors.Cause(err).(*DeserializationError)
+	return ok
+}
+
+// BadRequestError is returned when the requested action cannot be performed
+// due to bad or incorrect parameters passed to the server.
+type BadRequestError struct {
+	errors.Err
+}
+
+// NewBadRequestError constructs a new BadRequestError and sets the location.
+func NewBadRequestError(message string) error {
+	err := &BadRequestError{Err: errors.NewErr(message)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsBadRequestError returns true if err is a BadRequestError.
+func IsBadRequestError(err error) bool {
+	_, ok := errors.Cause(err).(*BadRequestError)
+	return ok
+}
+
+// PermissionError is returned when the user does not have permission to do the
+// requested action.
+type PermissionError struct {
+	errors.Err
+}
+
+// NewPermissionError constructs a new PermissionError and sets the location.
+func NewPermissionError(message string) error {
+	err := &PermissionError{Err: errors.NewErr(message)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsPermissionError returns true if err is a PermissionError.
+func IsPermissionError(err error) bool {
+	_, ok := errors.Cause(err).(*PermissionError)
+	return ok
+}
+
+// CannotCompleteError is returned when the requested action is unable to
+// complete for some server side reason.
+type CannotCompleteError struct {
+	errors.Err
+}
+
+// NewCannotCompleteError constructs a new CannotCompleteError and sets the location.
+func NewCannotCompleteError(message string) error {
+	err := &CannotCompleteError{Err: errors.NewErr(message)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsCannotCompleteError returns true if err is a CannotCompleteError.
+func IsCannotCompleteError(err error) bool {
+	_, ok := errors.Cause(err).(*CannotCompleteError)
+	return ok
+}
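+
+// Callers are expected to test errors with the Is* predicates rather than
+// type-asserting directly, e.g. (sketch):
+//
+//	if err := controller.ReleaseMachines(args); IsCannotCompleteError(err) {
+//		// a machine was in a state that prevented release
+//	}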
diff --git a/switchq/vendor/github.com/juju/gomaasapi/fabric.go b/switchq/vendor/github.com/juju/gomaasapi/fabric.go
new file mode 100644
index 0000000..e38a61a
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/fabric.go
@@ -0,0 +1,128 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type fabric struct {
+	// Add the controller in when we need to do things with the fabric.
+	// controller Controller
+
+	resourceURI string
+
+	id        int
+	name      string
+	classType string
+
+	vlans []*vlan
+}
+
+// ID implements Fabric.
+func (f *fabric) ID() int {
+	return f.id
+}
+
+// Name implements Fabric.
+func (f *fabric) Name() string {
+	return f.name
+}
+
+// ClassType implements Fabric.
+func (f *fabric) ClassType() string {
+	return f.classType
+}
+
+// VLANs implements Fabric.
+func (f *fabric) VLANs() []VLAN {
+	var result []VLAN
+	for _, v := range f.vlans {
+		result = append(result, v)
+	}
+	return result
+}
+
+func readFabrics(controllerVersion version.Number, source interface{}) ([]*fabric, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "fabric base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range fabricDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no fabric read func for version %s", controllerVersion)
+	}
+	readFunc := fabricDeserializationFuncs[deserialisationVersion]
+	return readFabricList(valid, readFunc)
+}
+
+// readFabricList expects the values of the sourceList to be string maps.
+func readFabricList(sourceList []interface{}, readFunc fabricDeserializationFunc) ([]*fabric, error) {
+	result := make([]*fabric, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for fabric %d, %T", i, value)
+		}
+		fabric, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "fabric %d", i)
+		}
+		result = append(result, fabric)
+	}
+	return result, nil
+}
+
+type fabricDeserializationFunc func(map[string]interface{}) (*fabric, error)
+
+var fabricDeserializationFuncs = map[version.Number]fabricDeserializationFunc{
+	twoDotOh: fabric_2_0,
+}
+
+func fabric_2_0(source map[string]interface{}) (*fabric, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+		"id":           schema.ForceInt(),
+		"name":         schema.String(),
+		"class_type":   schema.OneOf(schema.Nil(""), schema.String()),
+		"vlans":        schema.List(schema.StringMap(schema.Any())),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "fabric 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	vlans, err := readVLANList(valid["vlans"].([]interface{}), vlan_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	// Since the class_type is optional, we use the two part cast assignment. If
+	// the cast fails, then we get the default value we care about, which is the
+	// empty string.
+	classType, _ := valid["class_type"].(string)
+
+	result := &fabric{
+		resourceURI: valid["resource_uri"].(string),
+		id:          valid["id"].(int),
+		name:        valid["name"].(string),
+		classType:   classType,
+		vlans:       vlans,
+	}
+	return result, nil
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/file.go b/switchq/vendor/github.com/juju/gomaasapi/file.go
new file mode 100644
index 0000000..63fb854
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/file.go
@@ -0,0 +1,181 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/base64"
+	"net/http"
+	"net/url"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type file struct {
+	controller *controller
+
+	resourceURI  string
+	filename     string
+	anonymousURI *url.URL
+	content      string
+}
+
+// Filename implements File.
+func (f *file) Filename() string {
+	return f.filename
+}
+
+// AnonymousURL implements File.
+func (f *file) AnonymousURL() string {
+	url := f.controller.client.GetURL(f.anonymousURI)
+	return url.String()
+}
+
+// Delete implements File.
+func (f *file) Delete() error {
+	err := f.controller.delete(f.resourceURI)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
+
+// ReadAll implements File.
+func (f *file) ReadAll() ([]byte, error) {
+	if f.content == "" {
+		return f.readFromServer()
+	}
+	bytes, err := base64.StdEncoding.DecodeString(f.content)
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	return bytes, nil
+}
+
+func (f *file) readFromServer() ([]byte, error) {
+	// The content wasn't included in the file details, so fetch it from the server.
+	args := make(url.Values)
+	args.Add("filename", f.filename)
+	bytes, err := f.controller._getRaw("files", "get", args)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return nil, errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return nil, errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return nil, NewUnexpectedError(err)
+	}
+	return bytes, nil
+}
+
+func readFiles(controllerVersion version.Number, source interface{}) ([]*file, error) {
+	readFunc, err := getFileDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "file base schema check failed")
+	}
+	valid := coerced.([]interface{})
+	return readFileList(valid, readFunc)
+}
+
+func readFile(controllerVersion version.Number, source interface{}) (*file, error) {
+	readFunc, err := getFileDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.StringMap(schema.Any())
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "file base schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	return readFunc(valid)
+}
+
+func getFileDeserializationFunc(controllerVersion version.Number) (fileDeserializationFunc, error) {
+	var deserialisationVersion version.Number
+	for v := range fileDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no file read func for version %s", controllerVersion)
+	}
+	return fileDeserializationFuncs[deserialisationVersion], nil
+}
+
+// readFileList expects the values of the sourceList to be string maps.
+func readFileList(sourceList []interface{}, readFunc fileDeserializationFunc) ([]*file, error) {
+	result := make([]*file, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for file %d, %T", i, value)
+		}
+		file, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "file %d", i)
+		}
+		result = append(result, file)
+	}
+	return result, nil
+}
+
+type fileDeserializationFunc func(map[string]interface{}) (*file, error)
+
+var fileDeserializationFuncs = map[version.Number]fileDeserializationFunc{
+	twoDotOh: file_2_0,
+}
+
+func file_2_0(source map[string]interface{}) (*file, error) {
+	fields := schema.Fields{
+		"resource_uri":      schema.String(),
+		"filename":          schema.String(),
+		"anon_resource_uri": schema.String(),
+		"content":           schema.String(),
+	}
+	defaults := schema.Defaults{
+		"content": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "file 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	anonURI, err := url.ParseRequestURI(valid["anon_resource_uri"].(string))
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+
+	result := &file{
+		resourceURI:  valid["resource_uri"].(string),
+		filename:     valid["filename"].(string),
+		anonymousURI: anonURI,
+		content:      valid["content"].(string),
+	}
+	return result, nil
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/filesystem.go b/switchq/vendor/github.com/juju/gomaasapi/filesystem.go
new file mode 100644
index 0000000..4514e52
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/filesystem.go
@@ -0,0 +1,69 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import "github.com/juju/schema"
+
+type filesystem struct {
+	fstype     string
+	mountPoint string
+	label      string
+	uuid       string
+	// no idea what the mount_options are as a value type, so ignoring for now.
+}
+
+// Type implements FileSystem.
+func (f *filesystem) Type() string {
+	return f.fstype
+}
+
+// MountPoint implements FileSystem.
+func (f *filesystem) MountPoint() string {
+	return f.mountPoint
+}
+
+// Label implements FileSystem.
+func (f *filesystem) Label() string {
+	return f.label
+}
+
+// UUID implements FileSystem.
+func (f *filesystem) UUID() string {
+	return f.uuid
+}
+
+// There is no controller-based parsing of filesystems yet, as it isn't
+// needed. Currently the filesystem reading is only called by the Partition
+// parsing.
+
+func filesystem2_0(source map[string]interface{}) (*filesystem, error) {
+	fields := schema.Fields{
+		"fstype":      schema.String(),
+		"mount_point": schema.OneOf(schema.Nil(""), schema.String()),
+		"label":       schema.OneOf(schema.Nil(""), schema.String()),
+		"uuid":        schema.String(),
+		// TODO: mount_options when we know the type (note it can be
+		// nil).
+	}
+	defaults := schema.Defaults{
+		"mount_point": "",
+		"label":       "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "filesystem 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+	mountPoint, _ := valid["mount_point"].(string)
+	label, _ := valid["label"].(string)
+	result := &filesystem{
+		fstype:     valid["fstype"].(string),
+		mountPoint: mountPoint,
+		label:      label,
+		uuid:       valid["uuid"].(string),
+	}
+	return result, nil
+}
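+
+// exampleFilesystem20Nils is an illustrative sketch, not part of the upstream
+// gomaasapi API: an unmounted filesystem reports "mount_point" and "label" as
+// JSON null, and the schema.OneOf/Defaults combination above turns both into
+// empty strings on the resulting struct.
+func exampleFilesystem20Nils() (*filesystem, error) {
+	return filesystem2_0(map[string]interface{}{
+		"fstype":      "ext4",
+		"mount_point": nil,
+		"label":       nil,
+		"uuid":        "f33e0e07-0b74-4c2c-8c90-3b4b9d0a0e26",
+	})
+}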
diff --git a/switchq/vendor/github.com/juju/gomaasapi/gomaasapi.go b/switchq/vendor/github.com/juju/gomaasapi/gomaasapi.go
new file mode 100644
index 0000000..f457e29
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/gomaasapi.go
@@ -0,0 +1,4 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
diff --git a/switchq/vendor/github.com/juju/gomaasapi/interface.go b/switchq/vendor/github.com/juju/gomaasapi/interface.go
new file mode 100644
index 0000000..f30a9a8
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/interface.go
@@ -0,0 +1,440 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+// "interface" is a reserved word in Go, so the type name carries a trailing underscore.
+type interface_ struct {
+	controller *controller
+
+	resourceURI string
+
+	id      int
+	name    string
+	type_   string
+	enabled bool
+	tags    []string
+
+	vlan  *vlan
+	links []*link
+
+	macAddress   string
+	effectiveMTU int
+
+	parents  []string
+	children []string
+}
+
+func (i *interface_) updateFrom(other *interface_) {
+	i.resourceURI = other.resourceURI
+	i.id = other.id
+	i.name = other.name
+	i.type_ = other.type_
+	i.enabled = other.enabled
+	i.tags = other.tags
+	i.vlan = other.vlan
+	i.links = other.links
+	i.macAddress = other.macAddress
+	i.effectiveMTU = other.effectiveMTU
+	i.parents = other.parents
+	i.children = other.children
+}
+
+// ID implements Interface.
+func (i *interface_) ID() int {
+	return i.id
+}
+
+// Name implements Interface.
+func (i *interface_) Name() string {
+	return i.name
+}
+
+// Parents implements Interface.
+func (i *interface_) Parents() []string {
+	return i.parents
+}
+
+// Children implements Interface.
+func (i *interface_) Children() []string {
+	return i.children
+}
+
+// Type implements Interface.
+func (i *interface_) Type() string {
+	return i.type_
+}
+
+// Enabled implements Interface.
+func (i *interface_) Enabled() bool {
+	return i.enabled
+}
+
+// Tags implements Interface.
+func (i *interface_) Tags() []string {
+	return i.tags
+}
+
+// VLAN implements Interface.
+func (i *interface_) VLAN() VLAN {
+	if i.vlan == nil {
+		return nil
+	}
+	return i.vlan
+}
+
+// Links implements Interface.
+func (i *interface_) Links() []Link {
+	result := make([]Link, len(i.links))
+	for j, link := range i.links {
+		result[j] = link
+	}
+	return result
+}
+
+// MACAddress implements Interface.
+func (i *interface_) MACAddress() string {
+	return i.macAddress
+}
+
+// EffectiveMTU implements Interface.
+func (i *interface_) EffectiveMTU() int {
+	return i.effectiveMTU
+}
+
+// UpdateInterfaceArgs is an argument struct for calling Interface.Update.
+type UpdateInterfaceArgs struct {
+	Name       string
+	MACAddress string
+	VLAN       VLAN
+}
+
+func (a *UpdateInterfaceArgs) vlanID() int {
+	if a.VLAN == nil {
+		return 0
+	}
+	return a.VLAN.ID()
+}
+
+// Update implements Interface.
+func (i *interface_) Update(args UpdateInterfaceArgs) error {
+	var empty UpdateInterfaceArgs
+	if args == empty {
+		return nil
+	}
+	params := NewURLParams()
+	params.MaybeAdd("name", args.Name)
+	params.MaybeAdd("mac_address", args.MACAddress)
+	params.MaybeAddInt("vlan", args.vlanID())
+	source, err := i.controller.put(i.resourceURI, params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	response, err := readInterface(i.controller.apiVersion, source)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	i.updateFrom(response)
+	return nil
+}
+
+// Delete implements Interface.
+func (i *interface_) Delete() error {
+	err := i.controller.delete(i.resourceURI)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
+
+// InterfaceLinkMode is the type of the various link mode constants used for
+// LinkSubnetArgs.
+type InterfaceLinkMode string
+
+const (
+	// LinkModeDHCP - Bring the interface up with DHCP on the given subnet. Only
+	// one subnet can be set to DHCP. If the subnet is managed this interface
+	// will pull from the dynamic IP range.
+	LinkModeDHCP InterfaceLinkMode = "DHCP"
+
+	// LinkModeStatic - Bring the interface up with a STATIC IP address on the
+	// given subnet. Any number of STATIC links can exist on an interface.
+	LinkModeStatic InterfaceLinkMode = "STATIC"
+
+	// LinkModeLinkUp - Bring the interface up only on the given subnet. No IP
+	// address will be assigned to this interface. The interface cannot have any
+	// current DHCP or STATIC links.
+	LinkModeLinkUp InterfaceLinkMode = "LINK_UP"
+)
+
+// LinkSubnetArgs is an argument struct for passing parameters to
+// the Interface.LinkSubnet method.
+type LinkSubnetArgs struct {
+	// Mode is used to describe how the address is provided for the Link.
+	// Required field.
+	Mode InterfaceLinkMode
+	// Subnet is the subnet to link to. Required field.
+	Subnet Subnet
+	// IPAddress is only valid when the Mode is set to LinkModeStatic. If
+	// not specified with a Mode of LinkModeStatic, an IP address from the
+	// subnet will be auto selected.
+	IPAddress string
+	// DefaultGateway will set the gateway IP address for the Subnet as the
+	// default gateway for the machine or device the interface belongs to.
+	// Option can only be used with mode LinkModeStatic.
+	DefaultGateway bool
+}
+
+// Validate ensures that the Mode and Subnet are set, and that the other options
+// are consistent with the Mode.
+func (a *LinkSubnetArgs) Validate() error {
+	switch a.Mode {
+	case LinkModeDHCP, LinkModeLinkUp, LinkModeStatic:
+	case "":
+		return errors.NotValidf("missing Mode")
+	default:
+		return errors.NotValidf("unknown Mode value (%q)", a.Mode)
+	}
+	if a.Subnet == nil {
+		return errors.NotValidf("missing Subnet")
+	}
+	if a.IPAddress != "" && a.Mode != LinkModeStatic {
+		return errors.NotValidf("setting IP Address when Mode is not LinkModeStatic")
+	}
+	if a.DefaultGateway && a.Mode != LinkModeStatic {
+		return errors.NotValidf("specifying DefaultGateway for Mode %q", a.Mode)
+	}
+	return nil
+}
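+
+// exampleLinkSubnetValidation is an illustrative sketch, not part of the
+// upstream gomaasapi API: IPAddress (like DefaultGateway) is only valid with
+// LinkModeStatic, so this Validate call returns a NotValid error.
+func exampleLinkSubnetValidation(subnet Subnet) error {
+	args := LinkSubnetArgs{
+		Mode:      LinkModeDHCP,
+		Subnet:    subnet,
+		IPAddress: "192.168.1.10", // rejected: static addressing only
+	}
+	return args.Validate() // errors.IsNotValid(err) reports true
+}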
+
+// LinkSubnet implements Interface.
+func (i *interface_) LinkSubnet(args LinkSubnetArgs) error {
+	if err := args.Validate(); err != nil {
+		return errors.Trace(err)
+	}
+	params := NewURLParams()
+	params.Values.Add("mode", string(args.Mode))
+	params.Values.Add("subnet", fmt.Sprint(args.Subnet.ID()))
+	params.MaybeAdd("ip_address", args.IPAddress)
+	params.MaybeAddBool("default_gateway", args.DefaultGateway)
+	source, err := i.controller.post(i.resourceURI, "link_subnet", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound, http.StatusBadRequest:
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			case http.StatusServiceUnavailable:
+				return errors.Wrap(err, NewCannotCompleteError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	response, err := readInterface(i.controller.apiVersion, source)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	i.updateFrom(response)
+	return nil
+}
+
+func (i *interface_) linkForSubnet(subnet Subnet) *link {
+	for _, link := range i.links {
+		if s := link.Subnet(); s != nil && s.ID() == subnet.ID() {
+			return link
+		}
+	}
+	return nil
+}
+
+// UnlinkSubnet implements Interface.
+func (i *interface_) UnlinkSubnet(subnet Subnet) error {
+	if subnet == nil {
+		return errors.NotValidf("missing Subnet")
+	}
+	link := i.linkForSubnet(subnet)
+	if link == nil {
+		return errors.NotValidf("unlinked Subnet")
+	}
+	params := NewURLParams()
+	params.Values.Add("id", fmt.Sprint(link.ID()))
+	source, err := i.controller.post(i.resourceURI, "unlink_subnet", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound, http.StatusBadRequest:
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	response, err := readInterface(i.controller.apiVersion, source)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	i.updateFrom(response)
+
+	return nil
+}
+
+func readInterface(controllerVersion version.Number, source interface{}) (*interface_, error) {
+	readFunc, err := getInterfaceDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.StringMap(schema.Any())
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "interface base schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	return readFunc(valid)
+}
+
+func readInterfaces(controllerVersion version.Number, source interface{}) ([]*interface_, error) {
+	readFunc, err := getInterfaceDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "interface base schema check failed")
+	}
+	valid := coerced.([]interface{})
+	return readInterfaceList(valid, readFunc)
+}
+
+func getInterfaceDeserializationFunc(controllerVersion version.Number) (interfaceDeserializationFunc, error) {
+	var deserialisationVersion version.Number
+	for v := range interfaceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no interface read func for version %s", controllerVersion)
+	}
+	return interfaceDeserializationFuncs[deserialisationVersion], nil
+}
+
+func readInterfaceList(sourceList []interface{}, readFunc interfaceDeserializationFunc) ([]*interface_, error) {
+	result := make([]*interface_, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for interface %d, %T", i, value)
+		}
+		read, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "interface %d", i)
+		}
+		result = append(result, read)
+	}
+	return result, nil
+}
+
+type interfaceDeserializationFunc func(map[string]interface{}) (*interface_, error)
+
+var interfaceDeserializationFuncs = map[version.Number]interfaceDeserializationFunc{
+	twoDotOh: interface_2_0,
+}
+
+func interface_2_0(source map[string]interface{}) (*interface_, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"id":      schema.ForceInt(),
+		"name":    schema.String(),
+		"type":    schema.String(),
+		"enabled": schema.Bool(),
+		"tags":    schema.OneOf(schema.Nil(""), schema.List(schema.String())),
+
+		"vlan":  schema.OneOf(schema.Nil(""), schema.StringMap(schema.Any())),
+		"links": schema.List(schema.StringMap(schema.Any())),
+
+		"mac_address":   schema.OneOf(schema.Nil(""), schema.String()),
+		"effective_mtu": schema.ForceInt(),
+
+		"parents":  schema.List(schema.String()),
+		"children": schema.List(schema.String()),
+	}
+	defaults := schema.Defaults{
+		"mac_address": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "interface 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	var vlan *vlan
+	// If it's not an attribute map then we know it's nil from the schema check.
+	if vlanMap, ok := valid["vlan"].(map[string]interface{}); ok {
+		vlan, err = vlan_2_0(vlanMap)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+
+	links, err := readLinkList(valid["links"].([]interface{}), link_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	macAddress, _ := valid["mac_address"].(string)
+	result := &interface_{
+		resourceURI: valid["resource_uri"].(string),
+
+		id:      valid["id"].(int),
+		name:    valid["name"].(string),
+		type_:   valid["type"].(string),
+		enabled: valid["enabled"].(bool),
+		tags:    convertToStringSlice(valid["tags"]),
+
+		vlan:  vlan,
+		links: links,
+
+		macAddress:   macAddress,
+		effectiveMTU: valid["effective_mtu"].(int),
+
+		parents:  convertToStringSlice(valid["parents"]),
+		children: convertToStringSlice(valid["children"]),
+	}
+	return result, nil
+}
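+
+// exampleReadInterface is an illustrative sketch, not part of the upstream
+// gomaasapi API: JSON decoded from the MAAS API arrives as an untyped
+// interface{}, and readInterface pairs the base string-map schema check with
+// the version-matched reader chosen by getInterfaceDeserializationFunc.
+func exampleReadInterface(parsedJSON interface{}) (*interface_, error) {
+	return readInterface(version.Number{Major: 2, Minor: 0}, parsedJSON)
+}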
diff --git a/switchq/vendor/github.com/juju/gomaasapi/interfaces.go b/switchq/vendor/github.com/juju/gomaasapi/interfaces.go
new file mode 100644
index 0000000..6b80115
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/interfaces.go
@@ -0,0 +1,362 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import "github.com/juju/utils/set"
+
+const (
+	// Capability constants.
+	NetworksManagement      = "networks-management"
+	StaticIPAddresses       = "static-ipaddresses"
+	IPv6DeploymentUbuntu    = "ipv6-deployment-ubuntu"
+	DevicesManagement       = "devices-management"
+	StorageDeploymentUbuntu = "storage-deployment-ubuntu"
+	NetworkDeploymentUbuntu = "network-deployment-ubuntu"
+)
+
+// Controller represents an API connection to a MAAS Controller. Since the API
+// is restful, there is no long held connection to the API server, but instead
+// HTTP calls are made and JSON response structures parsed.
+type Controller interface {
+	// Capabilities returns a set of capabilities as defined by the string
+	// constants.
+	Capabilities() set.Strings
+
+	// BootResources returns the boot images and other boot resources known
+	// to the MAAS controller.
+	BootResources() ([]BootResource, error)
+
+	// Fabrics returns the list of Fabrics defined in the MAAS controller.
+	Fabrics() ([]Fabric, error)
+
+	// Spaces returns the list of Spaces defined in the MAAS controller.
+	Spaces() ([]Space, error)
+
+	// Zones lists all the zones known to the MAAS controller.
+	Zones() ([]Zone, error)
+
+	// Machines returns a list of machines that match the params.
+	Machines(MachinesArgs) ([]Machine, error)
+
+	// AllocateMachine will attempt to allocate a machine to the user.
+	// If successful, the allocated machine is returned.
+	AllocateMachine(AllocateMachineArgs) (Machine, ConstraintMatches, error)
+
+	// ReleaseMachines will stop the specified machines, and release them
+	// from the user making them available to be allocated again.
+	ReleaseMachines(ReleaseMachinesArgs) error
+
+	// Devices returns a list of devices that match the params.
+	Devices(DevicesArgs) ([]Device, error)
+
+	// CreateDevice creates and returns a new Device.
+	CreateDevice(CreateDeviceArgs) (Device, error)
+
+	// Files returns all the files that match the specified prefix.
+	Files(prefix string) ([]File, error)
+
+	// GetFile returns a single file by its filename.
+	GetFile(filename string) (File, error)
+
+	// AddFile adds or replaces the content of the specified filename.
+	// If or when the MAAS api is able to return metadata about a single
+	// file without sending the content of the file, we can return a File
+	// instance here too.
+	AddFile(AddFileArgs) error
+}
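+
+// exampleListMachines is an illustrative sketch, not part of the upstream
+// gomaasapi API, of driving the Controller interface; obtaining the
+// Controller value (for example from this library's NewController
+// constructor, which lives outside this file) is assumed here. A zero
+// MachinesArgs places no restrictions on the query.
+func exampleListMachines(c Controller) ([]Machine, error) {
+	return c.Machines(MachinesArgs{})
+}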
+
+// File represents a file stored in the MAAS controller.
+type File interface {
+	// Filename is the name of the file. No path, just the filename.
+	Filename() string
+
+	// AnonymousURL is a URL that can be used to retrieve the contents of the
+	// file without credentials.
+	AnonymousURL() string
+
+	// Delete removes the file from the MAAS controller.
+	Delete() error
+
+	// ReadAll returns the content of the file.
+	ReadAll() ([]byte, error)
+}
+
+// Fabric represents a set of interconnected VLANs that are capable of mutual
+// communication. A fabric can be thought of as a logical grouping in which
+// VLANs can be considered unique.
+//
+// For example, a distributed network may have a fabric in London containing
+// VLAN 100, while a separate fabric in San Francisco may contain a VLAN 100,
+// whose attached subnets are completely different and unrelated.
+type Fabric interface {
+	ID() int
+	Name() string
+	ClassType() string
+
+	VLANs() []VLAN
+}
+
+// VLAN represents an instance of a Virtual LAN. VLANs are a common way to
+// create logically separate networks using the same physical infrastructure.
+//
+// Managed switches can assign VLANs to each port in either a “tagged” or an
+// “untagged” manner. A VLAN is said to be “untagged” on a particular port when
+// it is the default VLAN for that port, and requires no special configuration
+// in order to access.
+//
+// “Tagged” VLANs (traditionally used by network administrators in order to
+// aggregate multiple networks over inter-switch “trunk” lines) can also be used
+// with nodes in MAAS. That is, if a switch port is configured such that
+// “tagged” VLAN frames can be sent and received by a MAAS node, that MAAS node
+// can be configured to automatically bring up VLAN interfaces, so that the
+// deployed node can make use of them.
+//
+// A “Default VLAN” is created for every Fabric, to which every new VLAN-aware
+// object in the fabric will be associated by default (unless otherwise
+// specified).
+type VLAN interface {
+	ID() int
+	Name() string
+	Fabric() string
+
+	// VID is the VLAN ID. eth0.10 -> VID = 10.
+	VID() int
+	// MTU (maximum transmission unit) is the largest size packet or frame,
+	// specified in octets (eight-bit bytes), that can be sent.
+	MTU() int
+	DHCP() bool
+
+	PrimaryRack() string
+	SecondaryRack() string
+}
+
+// Zone represents a physical zone that a Machine is in. The meaning of a
+// physical zone is up to you: it could identify e.g. a server rack, a network,
+// or a data centre. Users can then allocate nodes from specific physical zones,
+// to suit their redundancy or performance requirements.
+type Zone interface {
+	Name() string
+	Description() string
+}
+
+// BootResource represents a boot image available to the MAAS controller.
+type BootResource interface {
+	ID() int
+	Name() string
+	Type() string
+	Architecture() string
+	SubArchitectures() set.Strings
+	KernelFlavor() string
+}
+
+// Device represents some form of device in MAAS.
+type Device interface {
+	// TODO: add domain
+	SystemID() string
+	Hostname() string
+	FQDN() string
+	IPAddresses() []string
+	Zone() Zone
+
+	// Parent returns the SystemID of the Parent. Most often this will be a
+	// Machine.
+	Parent() string
+
+	// Owner is the username of the user that created the device.
+	Owner() string
+
+	// InterfaceSet returns all the interfaces for the Device.
+	InterfaceSet() []Interface
+
+	// CreateInterface will create a physical interface for this machine.
+	CreateInterface(CreateInterfaceArgs) (Interface, error)
+
+	// Delete will remove this Device.
+	Delete() error
+}
+
+// Machine represents a physical machine.
+type Machine interface {
+	OwnerDataHolder
+
+	SystemID() string
+	Hostname() string
+	FQDN() string
+	Tags() []string
+
+	OperatingSystem() string
+	DistroSeries() string
+	Architecture() string
+	Memory() int
+	CPUCount() int
+
+	IPAddresses() []string
+	PowerState() string
+
+	// Devices returns a list of devices that match the params and have
+	// this Machine as the parent.
+	Devices(DevicesArgs) ([]Device, error)
+
+	// Consider bundling the status values into a single struct.
+	// but need to check for consistent representation if exposed on other
+	// entities.
+
+	StatusName() string
+	StatusMessage() string
+
+	// BootInterface returns the interface that was used to boot the Machine.
+	BootInterface() Interface
+	// InterfaceSet returns all the interfaces for the Machine.
+	InterfaceSet() []Interface
+	// Interface returns the interface for the machine that matches the id
+	// specified. If there is no match, nil is returned.
+	Interface(id int) Interface
+
+	// PhysicalBlockDevices returns all the physical block devices on the machine.
+	PhysicalBlockDevices() []BlockDevice
+	// PhysicalBlockDevice returns the physical block device for the machine
+	// that matches the id specified. If there is no match, nil is returned.
+	PhysicalBlockDevice(id int) BlockDevice
+
+	// BlockDevices returns all the physical and virtual block devices on the machine.
+	BlockDevices() []BlockDevice
+
+	Zone() Zone
+
+	// Start the machine and install the operating system specified in the args.
+	Start(StartArgs) error
+
+	// CreateDevice creates a new Device with this Machine as the parent.
+	// The device will have one interface that is linked to the specified subnet.
+	CreateDevice(CreateMachineDeviceArgs) (Device, error)
+}
+
+// Space is a name for a collection of Subnets.
+type Space interface {
+	ID() int
+	Name() string
+	Subnets() []Subnet
+}
+
+// Subnet refers to an IP range on a VLAN.
+type Subnet interface {
+	ID() int
+	Name() string
+	Space() string
+	VLAN() VLAN
+
+	Gateway() string
+	CIDR() string
+	// dns_mode
+
+	// DNSServers is a list of ip addresses of the DNS servers for the subnet.
+	// This list may be empty.
+	DNSServers() []string
+}
+
+// Interface represents a physical or virtual network interface on a Machine.
+type Interface interface {
+	ID() int
+	Name() string
+	// The parents of an interface are the names of interfaces that must exist
+	// for this interface to exist. For example, a parent of "eth0.100" would be
+	// "eth0". Parents may be empty.
+	Parents() []string
+	// The children interfaces are the names of those that are dependent on this
+	// interface existing. Children may be empty.
+	Children() []string
+	Type() string
+	Enabled() bool
+	Tags() []string
+
+	VLAN() VLAN
+	Links() []Link
+
+	MACAddress() string
+	EffectiveMTU() int
+
+	// Params is a JSON field, and defaults to an empty string, but is almost
+	// always a JSON object in practice. It is ignored until it is needed.
+
+	// Update the name, mac address or VLAN.
+	Update(UpdateInterfaceArgs) error
+
+	// Delete this interface.
+	Delete() error
+
+	// LinkSubnet will attempt to make this interface available on the specified
+	// Subnet.
+	LinkSubnet(LinkSubnetArgs) error
+
+	// UnlinkSubnet will remove the Link to the subnet, and release the IP
+	// address associated if there is one.
+	UnlinkSubnet(Subnet) error
+}
+
+// Link represents a network link between an Interface and a Subnet.
+type Link interface {
+	ID() int
+	Mode() string
+	Subnet() Subnet
+	// IPAddress returns the address if one has been assigned.
+	// If unavailable, the address will be empty.
+	IPAddress() string
+}
+
+// FileSystem represents a formatted filesystem mounted at a location.
+type FileSystem interface {
+	// Type is the format type, e.g. "ext4".
+	Type() string
+
+	MountPoint() string
+	Label() string
+	UUID() string
+}
+
+// Partition represents a partition of a block device. It may be mounted
+// as a filesystem.
+type Partition interface {
+	ID() int
+	Path() string
+	// FileSystem may be nil if not mounted.
+	FileSystem() FileSystem
+	UUID() string
+	// UsedFor is a human readable string.
+	UsedFor() string
+	// Size is the number of bytes in the partition.
+	Size() uint64
+}
+
+// BlockDevice represents an entire block device on the machine.
+type BlockDevice interface {
+	ID() int
+	Name() string
+	Model() string
+	Path() string
+	UsedFor() string
+	Tags() []string
+
+	BlockSize() uint64
+	UsedSize() uint64
+	Size() uint64
+
+	Partitions() []Partition
+
+	// There are some other attributes for block devices, but we can
+	// expose them on an as needed basis.
+}
+
+// OwnerDataHolder represents any MAAS object that can store key/value
+// data.
+type OwnerDataHolder interface {
+	// OwnerData returns a copy of the key/value data stored for this
+	// object.
+	OwnerData() map[string]string
+
+	// SetOwnerData updates the key/value data stored for this object
+	// with the values passed in. Existing keys that aren't specified
+	// in the map passed in will be left in place; to clear a key set
+	// its value to "". All owner data is cleared when the object is
+	// released.
+	SetOwnerData(map[string]string) error
+}
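+
+// exampleOwnerData is an illustrative sketch, not part of the upstream
+// gomaasapi API, of the merge semantics documented above: keys not named in
+// the map are left alone, and setting a value to "" clears that key.
+func exampleOwnerData(m Machine) error {
+	return m.SetOwnerData(map[string]string{
+		"deployed-by": "cord-automation", // added or updated
+		"stale-key":   "",                // cleared
+	})
+}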
diff --git a/switchq/vendor/github.com/juju/gomaasapi/jsonobject.go b/switchq/vendor/github.com/juju/gomaasapi/jsonobject.go
new file mode 100644
index 0000000..cdd3dc1
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/jsonobject.go
@@ -0,0 +1,215 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+)
+
+// JSONObject is a wrapper around a JSON structure which provides
+// methods to extract data from that structure.
+// A JSONObject provides a simple structure consisting of the data types
+// defined in JSON: string, number, object, list, and bool.  To get the
+// value you want out of a JSONObject, you must know (or figure out) which
+// kind of value you have, and then call the appropriate Get*() method to
+// get at it.  Reading an item as the wrong type will return an error.
+// For instance, if your JSONObject consists of a number, call GetFloat64()
+// to get the value as a float64.  If it's a list, call GetArray() to get
+// a slice of JSONObjects.  To read any given item from the slice, you'll
+// need to "Get" that as the right type as well.
+// There is one exception: a MAASObject is really a special kind of map,
+// so you can read it as either.
+// Reading a null item is also an error.  So before you try obj.Get*(),
+// first check obj.IsNil().
+type JSONObject struct {
+	// Parsed value.  May actually be any of the types a JSONObject can
+	// wrap, except raw bytes.  If the object can only be interpreted
+	// as raw bytes, this will be nil.
+	value interface{}
+	// Raw bytes, if this object was parsed directly from an API response.
+	// Is nil for sub-objects found within other objects.  An object that
+	// was parsed directly from a response can be both raw bytes and some
+	// other value at the same time.
+	// For example, "[]" looks like a JSON list, so you can read it as an
+	// array.  But it may also be the raw contents of a file that just
+	// happens to look like JSON, and so you can read it as raw bytes as
+	// well.
+	bytes []byte
+	// Client for further communication with the API.
+	client Client
+	// Is this a JSON null?
+	isNull bool
+}
+
+// Our JSON processor distinguishes a MAASObject from a jsonMap by the fact
+// that it contains a key "resource_uri".  (A regular map might contain the
+// same key through sheer coincidence, but never mind: you can still treat it
+// as a jsonMap and never notice the difference.)
+const resourceURI = "resource_uri"
+
+// maasify turns a completely untyped json.Unmarshal result into a JSONObject
+// (with the appropriate implementation of course).  This function is
+// recursive.  Maps and arrays are deep-copied, with each individual value
+// being converted to a JSONObject type.
+func maasify(client Client, value interface{}) JSONObject {
+	if value == nil {
+		return JSONObject{isNull: true}
+	}
+	switch value.(type) {
+	case string, float64, bool:
+		return JSONObject{value: value}
+	case map[string]interface{}:
+		original := value.(map[string]interface{})
+		result := make(map[string]JSONObject, len(original))
+		for key, value := range original {
+			result[key] = maasify(client, value)
+		}
+		return JSONObject{value: result, client: client}
+	case []interface{}:
+		original := value.([]interface{})
+		result := make([]JSONObject, len(original))
+		for index, value := range original {
+			result[index] = maasify(client, value)
+		}
+		return JSONObject{value: result}
+	}
+	msg := fmt.Sprintf("Unknown JSON type, can't be converted to JSONObject: %v", value)
+	panic(msg)
+}
+
+// Parse a JSON blob into a JSONObject.
+func Parse(client Client, input []byte) (JSONObject, error) {
+	var obj JSONObject
+	if input == nil {
+		panic(errors.New("Parse() called with nil input"))
+	}
+	var parsed interface{}
+	err := json.Unmarshal(input, &parsed)
+	if err == nil {
+		obj = maasify(client, parsed)
+		obj.bytes = input
+	} else {
+		switch err.(type) {
+		case *json.InvalidUTF8Error, *json.SyntaxError:
+			// This isn't JSON.  Treat it as raw binary data.
+		default:
+			return obj, err
+		}
+		obj = JSONObject{value: nil, client: client, bytes: input}
+	}
+	return obj, nil
+}
+
+// JSONObjectFromStruct takes a struct and converts it to a JSONObject
+func JSONObjectFromStruct(client Client, input interface{}) (JSONObject, error) {
+	j, err := json.MarshalIndent(input, "", "  ")
+	if err != nil {
+		return JSONObject{}, err
+	}
+	return Parse(client, j)
+}
+
+// Return error value for failed type conversion.
+func failConversion(wantedType string, obj JSONObject) error {
+	msg := fmt.Sprintf("Requested %v, got %T.", wantedType, obj.value)
+	return errors.New(msg)
+}
+
+// MarshalJSON tells the standard json package how to serialize a JSONObject
+// back to JSON.
+func (obj JSONObject) MarshalJSON() ([]byte, error) {
+	if obj.IsNil() {
+		return json.Marshal(nil)
+	}
+	return json.MarshalIndent(obj.value, "", "  ")
+}
+
+// With MarshalJSON, JSONObject implements json.Marshaler.
+var _ json.Marshaler = (*JSONObject)(nil)
+
+// IsNil tells you whether a JSONObject is a JSON "null."
+// There is one irregularity.  If the original JSON blob was actually raw
+// data, not JSON, then its IsNil will return false because the object
+// contains the binary data as a non-nil value.  But, if the original JSON
+// blob consisted of a null, then IsNil returns true even though you can
+// still retrieve binary data from it.
+func (obj JSONObject) IsNil() bool {
+	if obj.value != nil {
+		return false
+	}
+	if obj.bytes == nil {
+		return true
+	}
+	// This may be a JSON null.  We can't expect every JSON null to look
+	// the same; there may be leading or trailing space.
+	return obj.isNull
+}
+
+// GetString retrieves the object's value as a string.  If the value wasn't
+// a JSON string, that's an error.
+func (obj JSONObject) GetString() (value string, err error) {
+	value, ok := obj.value.(string)
+	if !ok {
+		err = failConversion("string", obj)
+	}
+	return
+}
+
+// GetFloat64 retrieves the object's value as a float64.  If the value wasn't
+// a JSON number, that's an error.
+func (obj JSONObject) GetFloat64() (value float64, err error) {
+	value, ok := obj.value.(float64)
+	if !ok {
+		err = failConversion("float64", obj)
+	}
+	return
+}
+
+// GetMap retrieves the object's value as a map.  If the value wasn't a JSON
+// object, that's an error.
+func (obj JSONObject) GetMap() (value map[string]JSONObject, err error) {
+	value, ok := obj.value.(map[string]JSONObject)
+	if !ok {
+		err = failConversion("map", obj)
+	}
+	return
+}
+
+// GetArray retrieves the object's value as an array.  If the value wasn't a
+// JSON list, that's an error.
+func (obj JSONObject) GetArray() (value []JSONObject, err error) {
+	value, ok := obj.value.([]JSONObject)
+	if !ok {
+		err = failConversion("array", obj)
+	}
+	return
+}
+
+// GetBool retrieves the object's value as a bool.  If the value wasn't a JSON
+// bool, that's an error.
+func (obj JSONObject) GetBool() (value bool, err error) {
+	value, ok := obj.value.(bool)
+	if !ok {
+		err = failConversion("bool", obj)
+	}
+	return
+}
+
+// GetBytes retrieves the object's value as raw bytes.  A JSONObject that was
+// parsed from the original input (as opposed to one that's embedded in
+// another JSONObject) can contain both the raw bytes and the parsed JSON
+// value, but either can be the case without the other.
+// If this object wasn't parsed directly from the original input, that's an
+// error.
+// If the object was parsed from an original input that just said "null", then
+// IsNil will return true but the raw bytes are still available from GetBytes.
+func (obj JSONObject) GetBytes() ([]byte, error) {
+	if obj.bytes == nil {
+		return nil, failConversion("bytes", obj)
+	}
+	return obj.bytes, nil
+}
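+
+// exampleParseAndRead is an illustrative sketch, not part of the upstream
+// gomaasapi API: every value inside a JSONObject must be unwrapped with the
+// Get*() method matching its JSON type, including values nested in maps.
+func exampleParseAndRead(client Client, body []byte) (string, error) {
+	obj, err := Parse(client, body)
+	if err != nil {
+		return "", err
+	}
+	attrs, err := obj.GetMap() // fails unless the payload was a JSON object
+	if err != nil {
+		return "", err
+	}
+	return attrs["hostname"].GetString() // fails unless "hostname" is a string
+}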
diff --git a/switchq/vendor/github.com/juju/gomaasapi/link.go b/switchq/vendor/github.com/juju/gomaasapi/link.go
new file mode 100644
index 0000000..9e930e1
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/link.go
@@ -0,0 +1,124 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type link struct {
+	id        int
+	mode      string
+	subnet    *subnet
+	ipAddress string
+}
+
+// NOTE: a lowercase 'l' receiver is too easily misread as the digit '1',
+// so 'k' is used instead.
+
+// ID implements Link.
+func (k *link) ID() int {
+	return k.id
+}
+
+// Mode implements Link.
+func (k *link) Mode() string {
+	return k.mode
+}
+
+// Subnet implements Link.
+func (k *link) Subnet() Subnet {
+	if k.subnet == nil {
+		return nil
+	}
+	return k.subnet
+}
+
+// IPAddress implements Link.
+func (k *link) IPAddress() string {
+	return k.ipAddress
+}
+
+func readLinks(controllerVersion version.Number, source interface{}) ([]*link, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "link base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range linkDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no link read func for version %s", controllerVersion)
+	}
+	readFunc := linkDeserializationFuncs[deserialisationVersion]
+	return readLinkList(valid, readFunc)
+}
+
+// readLinkList expects the values of the sourceList to be string maps.
+func readLinkList(sourceList []interface{}, readFunc linkDeserializationFunc) ([]*link, error) {
+	result := make([]*link, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for link %d, %T", i, value)
+		}
+		link, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "link %d", i)
+		}
+		result = append(result, link)
+	}
+	return result, nil
+}
+
+type linkDeserializationFunc func(map[string]interface{}) (*link, error)
+
+var linkDeserializationFuncs = map[version.Number]linkDeserializationFunc{
+	twoDotOh: link_2_0,
+}
+
+func link_2_0(source map[string]interface{}) (*link, error) {
+	fields := schema.Fields{
+		"id":         schema.ForceInt(),
+		"mode":       schema.String(),
+		"subnet":     schema.StringMap(schema.Any()),
+		"ip_address": schema.String(),
+	}
+	defaults := schema.Defaults{
+		"ip_address": "",
+		"subnet":     schema.Omit,
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "link 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	var subnet *subnet
+	if value, ok := valid["subnet"]; ok {
+		subnet, err = subnet_2_0(value.(map[string]interface{}))
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+
+	result := &link{
+		id:        valid["id"].(int),
+		mode:      valid["mode"].(string),
+		subnet:    subnet,
+		ipAddress: valid["ip_address"].(string),
+	}
+	return result, nil
+}
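+
+// exampleLink20NoSubnet is an illustrative sketch, not part of the upstream
+// gomaasapi API: "subnet" defaults to schema.Omit, so a payload without one
+// deserializes cleanly and the resulting link's Subnet() returns nil.
+func exampleLink20NoSubnet() (*link, error) {
+	return link_2_0(map[string]interface{}{
+		"id":   42,
+		"mode": "LINK_UP",
+	})
+}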
diff --git a/switchq/vendor/github.com/juju/gomaasapi/maas.go b/switchq/vendor/github.com/juju/gomaasapi/maas.go
new file mode 100644
index 0000000..cd6ce29
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/maas.go
@@ -0,0 +1,11 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+// NewMAAS returns an interface to the MAAS API as a *MAASObject.
+func NewMAAS(client Client) *MAASObject {
+	attrs := map[string]interface{}{resourceURI: client.APIURL.String()}
+	obj := newJSONMAASObject(attrs, client)
+	return &obj
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/maasobject.go b/switchq/vendor/github.com/juju/gomaasapi/maasobject.go
new file mode 100644
index 0000000..3978252
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/maasobject.go
@@ -0,0 +1,197 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/url"
+)
+
+// MAASObject represents a MAAS object as returned by the MAAS API, such as a
+// Node or a Tag.
+// You can extract a MAASObject out of a JSONObject using
+// JSONObject.GetMAASObject.  A MAAS API call will usually return either a
+// MAASObject or a list of MAASObjects.  The list itself would be wrapped in
+// a JSONObject, so if an API call returns a list of objects "l," you first
+// obtain the array using l.GetArray().  Then, for each item "i" in the array,
+// obtain the matching MAASObject using i.GetMAASObject().
+type MAASObject struct {
+	values map[string]JSONObject
+	client Client
+	uri    *url.URL
+}
+
+// newJSONMAASObject creates a new MAAS object.  It will panic if the given map
+// does not contain a valid URL for the 'resource_uri' key.
+func newJSONMAASObject(jmap map[string]interface{}, client Client) MAASObject {
+	obj, err := maasify(client, jmap).GetMAASObject()
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// MarshalJSON tells the standard json package how to serialize a MAASObject.
+func (obj MAASObject) MarshalJSON() ([]byte, error) {
+	return json.MarshalIndent(obj.GetMap(), "", "  ")
+}
+
+// With MarshalJSON, MAASObject implements json.Marshaler.
+var _ json.Marshaler = (*MAASObject)(nil)
+
+func marshalNode(node MAASObject) string {
+	res, _ := json.MarshalIndent(node, "", "  ")
+	return string(res)
+}
+
+var noResourceURI = errors.New("not a MAAS object: no 'resource_uri' key")
+
+// extractURI obtains the "resource_uri" string from a JSONObject map.
+func extractURI(attrs map[string]JSONObject) (*url.URL, error) {
+	uriEntry, ok := attrs[resourceURI]
+	if !ok {
+		return nil, noResourceURI
+	}
+	uri, err := uriEntry.GetString()
+	if err != nil {
+		return nil, fmt.Errorf("invalid resource_uri: %v", uri)
+	}
+	resourceURL, err := url.Parse(uri)
+	if err != nil {
+		return nil, fmt.Errorf("resource_uri does not contain a valid URL: %v", uri)
+	}
+	return resourceURL, nil
+}
+
+// JSONObject getter for a MAAS object.  From a decoding perspective, a
+// MAASObject is just like a map except it contains a key "resource_uri", and
+// it keeps track of the Client you got it from so that you can invoke API
+// methods directly on their MAAS objects.
+func (obj JSONObject) GetMAASObject() (MAASObject, error) {
+	attrs, err := obj.GetMap()
+	if err != nil {
+		return MAASObject{}, err
+	}
+	uri, err := extractURI(attrs)
+	if err != nil {
+		return MAASObject{}, err
+	}
+	return MAASObject{values: attrs, client: obj.client, uri: uri}, nil
+}
+
+// GetField extracts a string field from this MAAS object.
+func (obj MAASObject) GetField(name string) (string, error) {
+	return obj.values[name].GetString()
+}
+
+// URI is the resource URI for this MAAS object.  It is an absolute path, but
+// without a network part.
+func (obj MAASObject) URI() *url.URL {
+	// Duplicate the URL.
+	uri, err := url.Parse(obj.uri.String())
+	if err != nil {
+		panic(err)
+	}
+	return uri
+}
+
+// URL returns a full absolute URL (including network part) for this MAAS
+// object on the API.
+func (obj MAASObject) URL() *url.URL {
+	return obj.client.GetURL(obj.URI())
+}
+
+// GetMap returns all of the object's attributes in the form of a map.
+func (obj MAASObject) GetMap() map[string]JSONObject {
+	return obj.values
+}
+
+// GetSubObject returns a new MAASObject representing the API resource found
+// at a given sub-path of the current object's resource URI.
+func (obj MAASObject) GetSubObject(name string) MAASObject {
+	uri := obj.URI()
+	newURL := url.URL{Path: name}
+	resURL := uri.ResolveReference(&newURL)
+	resURL.Path = EnsureTrailingSlash(resURL.Path)
+	input := map[string]interface{}{resourceURI: resURL.String()}
+	return newJSONMAASObject(input, obj.client)
+}
+
+// NotImplemented is the error returned by API features that have not been
+// implemented.
+var NotImplemented = errors.New("Not implemented")
+
+// Get retrieves a fresh copy of this MAAS object from the API.
+func (obj MAASObject) Get() (MAASObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Get(uri, "", url.Values{})
+	if err != nil {
+		return MAASObject{}, err
+	}
+	jsonObj, err := Parse(obj.client, result)
+	if err != nil {
+		return MAASObject{}, err
+	}
+	return jsonObj.GetMAASObject()
+}
+
+// Post overwrites this object's existing value on the API with those given
+// in "params."  It returns the object's new value as received from the API.
+func (obj MAASObject) Post(params url.Values) (JSONObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Post(uri, "", params, nil)
+	if err != nil {
+		return JSONObject{}, err
+	}
+	return Parse(obj.client, result)
+}
+
+// Update modifies this object on the API, based on the values given in
+// "params."  It returns the object's new value as received from the API.
+func (obj MAASObject) Update(params url.Values) (MAASObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Put(uri, params)
+	if err != nil {
+		return MAASObject{}, err
+	}
+	jsonObj, err := Parse(obj.client, result)
+	if err != nil {
+		return MAASObject{}, err
+	}
+	return jsonObj.GetMAASObject()
+}
+
+// Delete removes this object on the API.
+func (obj MAASObject) Delete() error {
+	uri := obj.URI()
+	return obj.client.Delete(uri)
+}
+
+// CallGet invokes an idempotent API method on this object.
+func (obj MAASObject) CallGet(operation string, params url.Values) (JSONObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Get(uri, operation, params)
+	if err != nil {
+		return JSONObject{}, err
+	}
+	return Parse(obj.client, result)
+}
+
+// CallPost invokes a non-idempotent API method on this object.
+func (obj MAASObject) CallPost(operation string, params url.Values) (JSONObject, error) {
+	return obj.CallPostFiles(operation, params, nil)
+}
+
+// CallPostFiles invokes a non-idempotent API method on this object.  It is
+// similar to CallPost but has an extra parameter, 'files', which should
+// contain the files that will be uploaded to the API.
+func (obj MAASObject) CallPostFiles(operation string, params url.Values, files map[string][]byte) (JSONObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Post(uri, operation, params, files)
+	if err != nil {
+		return JSONObject{}, err
+	}
+	return Parse(obj.client, result)
+}
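+
+// exampleNodeQuery is an illustrative sketch, not part of the upstream
+// gomaasapi API, of the operation-style calls: GetSubObject resolves a
+// resource URI relative to this object, and CallGet invokes a read-only
+// operation on it ("list" is the MAAS 1.0-style node enumeration).
+func exampleNodeQuery(maas *MAASObject) (JSONObject, error) {
+	nodes := maas.GetSubObject("nodes")
+	return nodes.CallGet("list", url.Values{})
+}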
diff --git a/switchq/vendor/github.com/juju/gomaasapi/machine.go b/switchq/vendor/github.com/juju/gomaasapi/machine.go
new file mode 100644
index 0000000..8518d94
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/machine.go
@@ -0,0 +1,584 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type machine struct {
+	controller *controller
+
+	resourceURI string
+
+	systemID  string
+	hostname  string
+	fqdn      string
+	tags      []string
+	ownerData map[string]string
+
+	operatingSystem string
+	distroSeries    string
+	architecture    string
+	memory          int
+	cpuCount        int
+
+	ipAddresses []string
+	powerState  string
+
+	// NOTE: consider some form of status struct
+	statusName    string
+	statusMessage string
+
+	bootInterface *interface_
+	interfaceSet  []*interface_
+	zone          *zone
+	// The difference between these two lists is not clearly documented:
+	physicalBlockDevices []*blockdevice
+	blockDevices         []*blockdevice
+}
+
+func (m *machine) updateFrom(other *machine) {
+	m.resourceURI = other.resourceURI
+	m.systemID = other.systemID
+	m.hostname = other.hostname
+	m.fqdn = other.fqdn
+	m.operatingSystem = other.operatingSystem
+	m.distroSeries = other.distroSeries
+	m.architecture = other.architecture
+	m.memory = other.memory
+	m.cpuCount = other.cpuCount
+	m.ipAddresses = other.ipAddresses
+	m.powerState = other.powerState
+	m.statusName = other.statusName
+	m.statusMessage = other.statusMessage
+	m.zone = other.zone
+	m.tags = other.tags
+	m.ownerData = other.ownerData
+}
+
+// SystemID implements Machine.
+func (m *machine) SystemID() string {
+	return m.systemID
+}
+
+// Hostname implements Machine.
+func (m *machine) Hostname() string {
+	return m.hostname
+}
+
+// FQDN implements Machine.
+func (m *machine) FQDN() string {
+	return m.fqdn
+}
+
+// Tags implements Machine.
+func (m *machine) Tags() []string {
+	return m.tags
+}
+
+// IPAddresses implements Machine.
+func (m *machine) IPAddresses() []string {
+	return m.ipAddresses
+}
+
+// Memory implements Machine.
+func (m *machine) Memory() int {
+	return m.memory
+}
+
+// CPUCount implements Machine.
+func (m *machine) CPUCount() int {
+	return m.cpuCount
+}
+
+// PowerState implements Machine.
+func (m *machine) PowerState() string {
+	return m.powerState
+}
+
+// Zone implements Machine.
+func (m *machine) Zone() Zone {
+	if m.zone == nil {
+		return nil
+	}
+	return m.zone
+}
+
+// BootInterface implements Machine.
+func (m *machine) BootInterface() Interface {
+	if m.bootInterface == nil {
+		return nil
+	}
+	m.bootInterface.controller = m.controller
+	return m.bootInterface
+}
+
+// InterfaceSet implements Machine.
+func (m *machine) InterfaceSet() []Interface {
+	result := make([]Interface, len(m.interfaceSet))
+	for i, v := range m.interfaceSet {
+		v.controller = m.controller
+		result[i] = v
+	}
+	return result
+}
+
+// Interface implements Machine.
+func (m *machine) Interface(id int) Interface {
+	for _, iface := range m.interfaceSet {
+		if iface.ID() == id {
+			iface.controller = m.controller
+			return iface
+		}
+	}
+	return nil
+}
+
+// OperatingSystem implements Machine.
+func (m *machine) OperatingSystem() string {
+	return m.operatingSystem
+}
+
+// DistroSeries implements Machine.
+func (m *machine) DistroSeries() string {
+	return m.distroSeries
+}
+
+// Architecture implements Machine.
+func (m *machine) Architecture() string {
+	return m.architecture
+}
+
+// StatusName implements Machine.
+func (m *machine) StatusName() string {
+	return m.statusName
+}
+
+// StatusMessage implements Machine.
+func (m *machine) StatusMessage() string {
+	return m.statusMessage
+}
+
+// PhysicalBlockDevices implements Machine.
+func (m *machine) PhysicalBlockDevices() []BlockDevice {
+	result := make([]BlockDevice, len(m.physicalBlockDevices))
+	for i, v := range m.physicalBlockDevices {
+		result[i] = v
+	}
+	return result
+}
+
+// PhysicalBlockDevice implements Machine.
+func (m *machine) PhysicalBlockDevice(id int) BlockDevice {
+	for _, blockDevice := range m.physicalBlockDevices {
+		if blockDevice.ID() == id {
+			return blockDevice
+		}
+	}
+	return nil
+}
+
+// BlockDevices implements Machine.
+func (m *machine) BlockDevices() []BlockDevice {
+	result := make([]BlockDevice, len(m.blockDevices))
+	for i, v := range m.blockDevices {
+		result[i] = v
+	}
+	return result
+}
+
+// Devices implements Machine.
+func (m *machine) Devices(args DevicesArgs) ([]Device, error) {
+	// Perhaps in the future, MAAS will give us a way to query just for the
+	// devices for a particular parent.
+	devices, err := m.controller.Devices(args)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Device
+	for _, device := range devices {
+		if device.Parent() == m.SystemID() {
+			result = append(result, device)
+		}
+	}
+	return result, nil
+}
+
+// StartArgs is an argument struct for passing parameters to the Machine.Start
+// method.
+type StartArgs struct {
+	// UserData needs to be Base64 encoded user data for cloud-init.
+	UserData     string
+	DistroSeries string
+	Kernel       string
+	Comment      string
+}
+
+// Start implements Machine.
+func (m *machine) Start(args StartArgs) error {
+	params := NewURLParams()
+	params.MaybeAdd("user_data", args.UserData)
+	params.MaybeAdd("distro_series", args.DistroSeries)
+	params.MaybeAdd("hwe_kernel", args.Kernel)
+	params.MaybeAdd("comment", args.Comment)
+	result, err := m.controller.post(m.resourceURI, "deploy", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound, http.StatusConflict:
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			case http.StatusServiceUnavailable:
+				return errors.Wrap(err, NewCannotCompleteError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	machine, err := readMachine(m.controller.apiVersion, result)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	m.updateFrom(machine)
+	return nil
+}
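+
+// exampleDeploy is an illustrative sketch, not part of the upstream gomaasapi
+// API: UserData must already be base64-encoded cloud-init data, and the
+// deploy operation installs the requested release ("xenial" here is only an
+// example value).
+func exampleDeploy(m Machine, encodedUserData string) error {
+	return m.Start(StartArgs{
+		UserData:     encodedUserData, // e.g. base64.StdEncoding.EncodeToString(raw)
+		DistroSeries: "xenial",
+		Comment:      "deployed by cord automation",
+	})
+}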
+
+// CreateMachineDeviceArgs is an argument structure for Machine.CreateDevice.
+// Only InterfaceName and MACAddress fields are required, the others are only
+// used if set. If Subnet and VLAN are both set, Subnet.VLAN() must match the
+// given VLAN. On failure, returns an error satisfying errors.IsNotValid().
+type CreateMachineDeviceArgs struct {
+	Hostname      string
+	InterfaceName string
+	MACAddress    string
+	Subnet        Subnet
+	VLAN          VLAN
+}
+
+// Validate ensures that all required values are non-empty.
+func (a *CreateMachineDeviceArgs) Validate() error {
+	if a.InterfaceName == "" {
+		return errors.NotValidf("missing InterfaceName")
+	}
+
+	if a.MACAddress == "" {
+		return errors.NotValidf("missing MACAddress")
+	}
+
+	if a.Subnet != nil && a.VLAN != nil && a.Subnet.VLAN() != a.VLAN {
+		msg := fmt.Sprintf(
+			"given subnet %q on VLAN %d does not match given VLAN %d",
+			a.Subnet.CIDR(), a.Subnet.VLAN().ID(), a.VLAN.ID(),
+		)
+		return errors.NewNotValid(nil, msg)
+	}
+
+	return nil
+}
+
+// CreateDevice implements Machine
+func (m *machine) CreateDevice(args CreateMachineDeviceArgs) (_ Device, err error) {
+	if err := args.Validate(); err != nil {
+		return nil, errors.Trace(err)
+	}
+	device, err := m.controller.CreateDevice(CreateDeviceArgs{
+		Hostname:     args.Hostname,
+		MACAddresses: []string{args.MACAddress},
+		Parent:       m.SystemID(),
+	})
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	defer func(err *error) {
+		// If there is an error return, at least try to delete the device we just created.
+		if *err != nil {
+			if innerErr := device.Delete(); innerErr != nil {
+				logger.Warningf("could not delete device %q", device.SystemID())
+			}
+		}
+	}(&err)
+
+	// Update the VLAN to use for the interface, if given.
+	vlanToUse := args.VLAN
+	if vlanToUse == nil && args.Subnet != nil {
+		vlanToUse = args.Subnet.VLAN()
+	}
+
+	// There should be one interface created for each MAC Address, and since we
+	// only specified one, there should just be one response.
+	interfaces := device.InterfaceSet()
+	if count := len(interfaces); count != 1 {
+		err := errors.Errorf("unexpected interface count for device: %d", count)
+		return nil, NewUnexpectedError(err)
+	}
+	iface := interfaces[0]
+	nameToUse := args.InterfaceName
+
+	if err := m.updateDeviceInterface(iface, nameToUse, vlanToUse); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	if args.Subnet == nil {
+		// Nothing further to update.
+		return device, nil
+	}
+
+	if err := m.linkDeviceInterfaceToSubnet(iface, args.Subnet); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return device, nil
+}
+
+func (m *machine) updateDeviceInterface(iface Interface, nameToUse string, vlanToUse VLAN) error {
+	updateArgs := UpdateInterfaceArgs{}
+	updateArgs.Name = nameToUse
+
+	if vlanToUse != nil {
+		updateArgs.VLAN = vlanToUse
+	}
+
+	if err := iface.Update(updateArgs); err != nil {
+		return errors.Annotatef(err, "updating device interface %q failed", iface.Name())
+	}
+
+	return nil
+}
+
+func (m *machine) linkDeviceInterfaceToSubnet(iface Interface, subnetToUse Subnet) error {
+	err := iface.LinkSubnet(LinkSubnetArgs{
+		Mode:   LinkModeStatic,
+		Subnet: subnetToUse,
+	})
+	if err != nil {
+		return errors.Annotatef(
+			err, "linking device interface %q to subnet %q failed",
+			iface.Name(), subnetToUse.CIDR())
+	}
+
+	return nil
+}
+
+// OwnerData implements OwnerDataHolder.
+func (m *machine) OwnerData() map[string]string {
+	result := make(map[string]string)
+	for key, value := range m.ownerData {
+		result[key] = value
+	}
+	return result
+}
+
+// SetOwnerData implements OwnerDataHolder.
+func (m *machine) SetOwnerData(ownerData map[string]string) error {
+	params := make(url.Values)
+	for key, value := range ownerData {
+		params.Add(key, value)
+	}
+	result, err := m.controller.post(m.resourceURI, "set_owner_data", params)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	machine, err := readMachine(m.controller.apiVersion, result)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	m.updateFrom(machine)
+	return nil
+}
+
+func readMachine(controllerVersion version.Number, source interface{}) (*machine, error) {
+	readFunc, err := getMachineDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.StringMap(schema.Any())
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "machine base schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	return readFunc(valid)
+}
+
+func readMachines(controllerVersion version.Number, source interface{}) ([]*machine, error) {
+	readFunc, err := getMachineDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "machine base schema check failed")
+	}
+	valid := coerced.([]interface{})
+	return readMachineList(valid, readFunc)
+}
+
+func getMachineDeserializationFunc(controllerVersion version.Number) (machineDeserializationFunc, error) {
+	var deserialisationVersion version.Number
+	for v := range machineDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no machine read func for version %s", controllerVersion)
+	}
+	return machineDeserializationFuncs[deserialisationVersion], nil
+}
+
+func readMachineList(sourceList []interface{}, readFunc machineDeserializationFunc) ([]*machine, error) {
+	result := make([]*machine, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for machine %d, %T", i, value)
+		}
+		machine, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "machine %d", i)
+		}
+		result = append(result, machine)
+	}
+	return result, nil
+}
+
+type machineDeserializationFunc func(map[string]interface{}) (*machine, error)
+
+var machineDeserializationFuncs = map[version.Number]machineDeserializationFunc{
+	twoDotOh: machine_2_0,
+}
+
+func machine_2_0(source map[string]interface{}) (*machine, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"system_id":  schema.String(),
+		"hostname":   schema.String(),
+		"fqdn":       schema.String(),
+		"tag_names":  schema.List(schema.String()),
+		"owner_data": schema.StringMap(schema.String()),
+
+		"osystem":       schema.String(),
+		"distro_series": schema.String(),
+		"architecture":  schema.OneOf(schema.Nil(""), schema.String()),
+		"memory":        schema.ForceInt(),
+		"cpu_count":     schema.ForceInt(),
+
+		"ip_addresses":   schema.List(schema.String()),
+		"power_state":    schema.String(),
+		"status_name":    schema.String(),
+		"status_message": schema.OneOf(schema.Nil(""), schema.String()),
+
+		"boot_interface": schema.OneOf(schema.Nil(""), schema.StringMap(schema.Any())),
+		"interface_set":  schema.List(schema.StringMap(schema.Any())),
+		"zone":           schema.StringMap(schema.Any()),
+
+		"physicalblockdevice_set": schema.List(schema.StringMap(schema.Any())),
+		"blockdevice_set":         schema.List(schema.StringMap(schema.Any())),
+	}
+	defaults := schema.Defaults{
+		"architecture": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "machine 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	var bootInterface *interface_
+	if ifaceMap, ok := valid["boot_interface"].(map[string]interface{}); ok {
+		bootInterface, err = interface_2_0(ifaceMap)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+
+	interfaceSet, err := readInterfaceList(valid["interface_set"].([]interface{}), interface_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	zone, err := zone_2_0(valid["zone"].(map[string]interface{}))
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	physicalBlockDevices, err := readBlockDeviceList(valid["physicalblockdevice_set"].([]interface{}), blockdevice_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	blockDevices, err := readBlockDeviceList(valid["blockdevice_set"].([]interface{}), blockdevice_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	architecture, _ := valid["architecture"].(string)
+	statusMessage, _ := valid["status_message"].(string)
+	result := &machine{
+		resourceURI: valid["resource_uri"].(string),
+
+		systemID:  valid["system_id"].(string),
+		hostname:  valid["hostname"].(string),
+		fqdn:      valid["fqdn"].(string),
+		tags:      convertToStringSlice(valid["tag_names"]),
+		ownerData: convertToStringMap(valid["owner_data"]),
+
+		operatingSystem: valid["osystem"].(string),
+		distroSeries:    valid["distro_series"].(string),
+		architecture:    architecture,
+		memory:          valid["memory"].(int),
+		cpuCount:        valid["cpu_count"].(int),
+
+		ipAddresses:   convertToStringSlice(valid["ip_addresses"]),
+		powerState:    valid["power_state"].(string),
+		statusName:    valid["status_name"].(string),
+		statusMessage: statusMessage,
+
+		bootInterface:        bootInterface,
+		interfaceSet:         interfaceSet,
+		zone:                 zone,
+		physicalBlockDevices: physicalBlockDevices,
+		blockDevices:         blockDevices,
+	}
+
+	return result, nil
+}
+
+func convertToStringSlice(field interface{}) []string {
+	if field == nil {
+		return nil
+	}
+	fieldSlice := field.([]interface{})
+	result := make([]string, len(fieldSlice))
+	for i, value := range fieldSlice {
+		result[i] = value.(string)
+	}
+	return result
+}
+
+func convertToStringMap(field interface{}) map[string]string {
+	if field == nil {
+		return nil
+	}
+	// This function is only called after a schema Coerce, so it's
+	// safe.
+	fieldMap := field.(map[string]interface{})
+	result := make(map[string]string)
+	for key, value := range fieldMap {
+		result[key] = value.(string)
+	}
+	return result
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/oauth.go b/switchq/vendor/github.com/juju/gomaasapi/oauth.go
new file mode 100644
index 0000000..920960d
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/oauth.go
@@ -0,0 +1,80 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"crypto/rand"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Not a true UUID generator, but it at least produces a random string of the same length.
+func generateNonce() (string, error) {
+	randBytes := make([]byte, 16)
+	_, err := rand.Read(randBytes)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%16x", randBytes), nil
+}
+
+func generateTimestamp() string {
+	return strconv.Itoa(int(time.Now().Unix()))
+}
+
+type OAuthSigner interface {
+	OAuthSign(request *http.Request) error
+}
+
+type OAuthToken struct {
+	ConsumerKey    string
+	ConsumerSecret string
+	TokenKey       string
+	TokenSecret    string
+}
+
+// Trick to ensure *plainTextOAuthSigner implements the OAuthSigner interface.
+var _ OAuthSigner = (*plainTextOAuthSigner)(nil)
+
+type plainTextOAuthSigner struct {
+	token *OAuthToken
+	realm string
+}
+
+func NewPlainTestOAuthSigner(token *OAuthToken, realm string) (OAuthSigner, error) {
+	return &plainTextOAuthSigner{token, realm}, nil
+}
+
+// OAuthSign signs the provided request using the OAuth PLAINTEXT
+// method: http://oauth.net/core/1.0/#anchor22.
+func (signer plainTextOAuthSigner) OAuthSign(request *http.Request) error {
+
+	signature := signer.token.ConsumerSecret + `&` + signer.token.TokenSecret
+	nonce, err := generateNonce()
+	if err != nil {
+		return err
+	}
+	authData := map[string]string{
+		"realm":                  signer.realm,
+		"oauth_consumer_key":     signer.token.ConsumerKey,
+		"oauth_token":            signer.token.TokenKey,
+		"oauth_signature_method": "PLAINTEXT",
+		"oauth_signature":        signature,
+		"oauth_timestamp":        generateTimestamp(),
+		"oauth_nonce":            nonce,
+		"oauth_version":          "1.0",
+	}
+	// Build OAuth header.
+	var authHeader []string
+	for key, value := range authData {
+		authHeader = append(authHeader, fmt.Sprintf(`%s="%s"`, key, url.QueryEscape(value)))
+	}
+	strHeader := "OAuth " + strings.Join(authHeader, ", ")
+	request.Header.Add("Authorization", strHeader)
+	return nil
+}
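
A standalone sketch of the Authorization header this produces; the credentials are made up and only the core OAuth fields are shown (MAAS consumer secrets are typically empty, so the PLAINTEXT signature is often just "&<token-secret>"):

package main

import (
	"fmt"
	"net/http"
	"net/url"
	"strings"
)

func main() {
	consumerSecret, tokenSecret := "", "token-secret"
	signature := consumerSecret + "&" + tokenSecret
	authData := map[string]string{
		"oauth_consumer_key":     "consumer-key",
		"oauth_token":            "token-key",
		"oauth_signature_method": "PLAINTEXT",
		"oauth_signature":        signature,
	}
	// Parameter order inside an OAuth header is not significant.
	var parts []string
	for k, v := range authData {
		parts = append(parts, fmt.Sprintf("%s=%q", k, url.QueryEscape(v)))
	}
	req, err := http.NewRequest("GET", "http://maas.example/MAAS/api/2.0/machines/", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Add("Authorization", "OAuth "+strings.Join(parts, ", "))
	fmt.Println(req.Header.Get("Authorization"))
}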
diff --git a/switchq/vendor/github.com/juju/gomaasapi/partition.go b/switchq/vendor/github.com/juju/gomaasapi/partition.go
new file mode 100644
index 0000000..f6d6afa
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/partition.go
@@ -0,0 +1,145 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type partition struct {
+	resourceURI string
+
+	id   int
+	path string
+	uuid string
+
+	usedFor string
+	size    uint64
+
+	filesystem *filesystem
+}
+
+// ID implements Partition.
+func (p *partition) ID() int {
+	return p.id
+}
+
+// Path implements Partition.
+func (p *partition) Path() string {
+	return p.path
+}
+
+// FileSystem implements Partition.
+func (p *partition) FileSystem() FileSystem {
+	if p.filesystem == nil {
+		return nil
+	}
+	return p.filesystem
+}
+
+// UUID implements Partition.
+func (p *partition) UUID() string {
+	return p.uuid
+}
+
+// UsedFor implements Partition.
+func (p *partition) UsedFor() string {
+	return p.usedFor
+}
+
+// Size implements Partition.
+func (p *partition) Size() uint64 {
+	return p.size
+}
+
+func readPartitions(controllerVersion version.Number, source interface{}) ([]*partition, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "partition base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range partitionDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no partition read func for version %s", controllerVersion)
+	}
+	readFunc := partitionDeserializationFuncs[deserialisationVersion]
+	return readPartitionList(valid, readFunc)
+}
+
+// readPartitionList expects the values of the sourceList to be string maps.
+func readPartitionList(sourceList []interface{}, readFunc partitionDeserializationFunc) ([]*partition, error) {
+	result := make([]*partition, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for partition %d, %T", i, value)
+		}
+		partition, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "partition %d", i)
+		}
+		result = append(result, partition)
+	}
+	return result, nil
+}
+
+type partitionDeserializationFunc func(map[string]interface{}) (*partition, error)
+
+var partitionDeserializationFuncs = map[version.Number]partitionDeserializationFunc{
+	twoDotOh: partition_2_0,
+}
+
+func partition_2_0(source map[string]interface{}) (*partition, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"id":   schema.ForceInt(),
+		"path": schema.String(),
+		"uuid": schema.OneOf(schema.Nil(""), schema.String()),
+
+		"used_for": schema.String(),
+		"size":     schema.ForceUint(),
+
+		"filesystem": schema.OneOf(schema.Nil(""), schema.StringMap(schema.Any())),
+	}
+	defaults := schema.Defaults{
+		"uuid": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "partition 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	var filesystem *filesystem
+	if fsSource := valid["filesystem"]; fsSource != nil {
+		filesystem, err = filesystem2_0(fsSource.(map[string]interface{}))
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+	uuid, _ := valid["uuid"].(string)
+	result := &partition{
+		resourceURI: valid["resource_uri"].(string),
+		id:          valid["id"].(int),
+		path:        valid["path"].(string),
+		uuid:        uuid,
+		usedFor:     valid["used_for"].(string),
+		size:        valid["size"].(uint64),
+		filesystem:  filesystem,
+	}
+	return result, nil
+}
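
The coerce-then-assert pattern above is what makes the bare type assertions safe. A minimal sketch using github.com/juju/schema (the two fields are illustrative):

package main

import (
	"fmt"

	"github.com/juju/schema"
)

func main() {
	fields := schema.Fields{
		"id":   schema.ForceInt(),
		"path": schema.String(),
	}
	checker := schema.FieldMap(fields, nil) // no defaults
	source := map[string]interface{}{"id": 7, "path": "/dev/sda1"}
	// After a successful Coerce the assertions below cannot panic.
	coerced, err := checker.Coerce(source, nil)
	if err != nil {
		panic(err)
	}
	valid := coerced.(map[string]interface{})
	fmt.Println(valid["id"].(int), valid["path"].(string)) // 7 /dev/sda1
}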
diff --git a/switchq/vendor/github.com/juju/gomaasapi/space.go b/switchq/vendor/github.com/juju/gomaasapi/space.go
new file mode 100644
index 0000000..5b8b8cf
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/space.go
@@ -0,0 +1,115 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type space struct {
+	// Add the controller in when we need to do things with the space.
+	// controller Controller
+
+	resourceURI string
+
+	id   int
+	name string
+
+	subnets []*subnet
+}
+
+// ID implements Space.
+func (s *space) ID() int {
+	return s.id
+}
+
+// Name implements Space.
+func (s *space) Name() string {
+	return s.name
+}
+
+// Subnets implements Space.
+func (s *space) Subnets() []Subnet {
+	var result []Subnet
+	for _, subnet := range s.subnets {
+		result = append(result, subnet)
+	}
+	return result
+}
+
+func readSpaces(controllerVersion version.Number, source interface{}) ([]*space, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "space base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range spaceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no space read func for version %s", controllerVersion)
+	}
+	readFunc := spaceDeserializationFuncs[deserialisationVersion]
+	return readSpaceList(valid, readFunc)
+}
+
+// readSpaceList expects the values of the sourceList to be string maps.
+func readSpaceList(sourceList []interface{}, readFunc spaceDeserializationFunc) ([]*space, error) {
+	result := make([]*space, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for space %d, %T", i, value)
+		}
+		space, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "space %d", i)
+		}
+		result = append(result, space)
+	}
+	return result, nil
+}
+
+type spaceDeserializationFunc func(map[string]interface{}) (*space, error)
+
+var spaceDeserializationFuncs = map[version.Number]spaceDeserializationFunc{
+	twoDotOh: space_2_0,
+}
+
+func space_2_0(source map[string]interface{}) (*space, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+		"id":           schema.ForceInt(),
+		"name":         schema.String(),
+		"subnets":      schema.List(schema.StringMap(schema.Any())),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "space 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	subnets, err := readSubnetList(valid["subnets"].([]interface{}), subnet_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	result := &space{
+		resourceURI: valid["resource_uri"].(string),
+		id:          valid["id"].(int),
+		name:        valid["name"].(string),
+		subnets:     subnets,
+	}
+	return result, nil
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/subnet.go b/switchq/vendor/github.com/juju/gomaasapi/subnet.go
new file mode 100644
index 0000000..f509ccd
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/subnet.go
@@ -0,0 +1,152 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type subnet struct {
+	// Add the controller in when we need to do things with the subnet.
+	// controller Controller
+
+	resourceURI string
+
+	id    int
+	name  string
+	space string
+	vlan  *vlan
+
+	gateway string
+	cidr    string
+
+	dnsServers []string
+}
+
+// ID implements Subnet.
+func (s *subnet) ID() int {
+	return s.id
+}
+
+// Name implements Subnet.
+func (s *subnet) Name() string {
+	return s.name
+}
+
+// Space implements Subnet.
+func (s *subnet) Space() string {
+	return s.space
+}
+
+// VLAN implements Subnet.
+func (s *subnet) VLAN() VLAN {
+	if s.vlan == nil {
+		return nil
+	}
+	return s.vlan
+}
+
+// Gateway implements Subnet.
+func (s *subnet) Gateway() string {
+	return s.gateway
+}
+
+// CIDR implements Subnet.
+func (s *subnet) CIDR() string {
+	return s.cidr
+}
+
+// DNSServers implements Subnet.
+func (s *subnet) DNSServers() []string {
+	return s.dnsServers
+}
+
+func readSubnets(controllerVersion version.Number, source interface{}) ([]*subnet, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "subnet base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range subnetDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no subnet read func for version %s", controllerVersion)
+	}
+	readFunc := subnetDeserializationFuncs[deserialisationVersion]
+	return readSubnetList(valid, readFunc)
+}
+
+// readSubnetList expects the values of the sourceList to be string maps.
+func readSubnetList(sourceList []interface{}, readFunc subnetDeserializationFunc) ([]*subnet, error) {
+	result := make([]*subnet, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for subnet %d, %T", i, value)
+		}
+		subnet, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "subnet %d", i)
+		}
+		result = append(result, subnet)
+	}
+	return result, nil
+}
+
+type subnetDeserializationFunc func(map[string]interface{}) (*subnet, error)
+
+var subnetDeserializationFuncs = map[version.Number]subnetDeserializationFunc{
+	twoDotOh: subnet_2_0,
+}
+
+func subnet_2_0(source map[string]interface{}) (*subnet, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+		"id":           schema.ForceInt(),
+		"name":         schema.String(),
+		"space":        schema.String(),
+		"gateway_ip":   schema.OneOf(schema.Nil(""), schema.String()),
+		"cidr":         schema.String(),
+		"vlan":         schema.StringMap(schema.Any()),
+		"dns_servers":  schema.OneOf(schema.Nil(""), schema.List(schema.String())),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "subnet 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	vlan, err := vlan_2_0(valid["vlan"].(map[string]interface{}))
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	// Since the gateway_ip is optional, we use the two part cast assignment. If
+	// the cast fails, then we get the default value we care about, which is the
+	// empty string.
+	gateway, _ := valid["gateway_ip"].(string)
+
+	result := &subnet{
+		resourceURI: valid["resource_uri"].(string),
+		id:          valid["id"].(int),
+		name:        valid["name"].(string),
+		space:       valid["space"].(string),
+		vlan:        vlan,
+		gateway:     gateway,
+		cidr:        valid["cidr"].(string),
+		dnsServers:  convertToStringSlice(valid["dns_servers"]),
+	}
+	return result, nil
+}
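
The "two part cast assignment" mentioned above is the standard Go idiom for optional fields; a tiny sketch:

package main

import "fmt"

func main() {
	// When the value is nil (field absent), the assertion fails
	// and yields the zero value instead of panicking.
	valid := map[string]interface{}{"gateway_ip": nil}
	gateway, _ := valid["gateway_ip"].(string)
	fmt.Printf("%q\n", gateway) // ""
}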
diff --git a/switchq/vendor/github.com/juju/gomaasapi/testing.go b/switchq/vendor/github.com/juju/gomaasapi/testing.go
new file mode 100644
index 0000000..54d67aa
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/testing.go
@@ -0,0 +1,222 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+)
+
+type singleServingServer struct {
+	*httptest.Server
+	requestContent *string
+	requestHeader  *http.Header
+}
+
+// newSingleServingServer creates a single-serving test http server which will
+// return only one response as defined by the passed arguments.
+func newSingleServingServer(uri string, response string, code int) *singleServingServer {
+	var requestContent string
+	var requestHeader http.Header
+	var requested bool
+	handler := func(writer http.ResponseWriter, request *http.Request) {
+		if requested {
+			http.Error(writer, "Already requested", http.StatusServiceUnavailable)
+			return
+		}
+		res, err := readAndClose(request.Body)
+		if err != nil {
+			panic(err)
+		}
+		requestContent = string(res)
+		requestHeader = request.Header
+		if request.URL.String() != uri {
+			errorMsg := fmt.Sprintf("Error 404: page not found (expected '%v', got '%v').", uri, request.URL.String())
+			http.Error(writer, errorMsg, http.StatusNotFound)
+		} else {
+			writer.WriteHeader(code)
+			fmt.Fprint(writer, response)
+		}
+		requested = true
+	}
+	server := httptest.NewServer(http.HandlerFunc(handler))
+	return &singleServingServer{server, &requestContent, &requestHeader}
+}
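
Because the helper is unexported, only tests inside package gomaasapi can use it. A hypothetical sketch of such a test (URI and canned body are illustrative):

package gomaasapi

import (
	"io/ioutil"
	"net/http"
	"testing"
)

func TestSingleServingSketch(t *testing.T) {
	server := newSingleServingServer("/api/2.0/version/", `{"capabilities": []}`, http.StatusOK)
	defer server.Close()

	resp, err := http.Get(server.URL + "/api/2.0/version/")
	if err != nil {
		t.Fatal(err)
	}
	body, _ := ioutil.ReadAll(resp.Body)
	resp.Body.Close()
	if string(body) != `{"capabilities": []}` {
		t.Fatalf("unexpected body %q", body)
	}
	// A second request would be refused with 503 "Already requested".
}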
+
+type flakyServer struct {
+	*httptest.Server
+	nbRequests *int
+	requests   *[][]byte
+}
+
+// newFlakyServer creates a "flaky" test http server which will
+// return `nbFlakyResponses` responses with the given code and then a 200 response.
+func newFlakyServer(uri string, code int, nbFlakyResponses int) *flakyServer {
+	nbRequests := 0
+	requests := make([][]byte, nbFlakyResponses+1)
+	handler := func(writer http.ResponseWriter, request *http.Request) {
+		nbRequests++
+		body, err := readAndClose(request.Body)
+		if err != nil {
+			panic(err)
+		}
+		requests[nbRequests-1] = body
+		if request.URL.String() != uri {
+			errorMsg := fmt.Sprintf("Error 404: page not found (expected '%v', got '%v').", uri, request.URL.String())
+			http.Error(writer, errorMsg, http.StatusNotFound)
+		} else if nbRequests <= nbFlakyResponses {
+			if code == http.StatusServiceUnavailable {
+				writer.Header().Set("Retry-After", "0")
+			}
+			writer.WriteHeader(code)
+			fmt.Fprint(writer, "flaky")
+		} else {
+			writer.WriteHeader(http.StatusOK)
+			fmt.Fprint(writer, "ok")
+		}
+
+	}
+	server := httptest.NewServer(http.HandlerFunc(handler))
+	return &flakyServer{server, &nbRequests, &requests}
+}
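
A hypothetical in-package sketch of exercising retry logic against the flaky server: the first nbFlakyResponses requests get the given status, then a 200:

package gomaasapi

import (
	"net/http"
	"testing"
)

func TestFlakySketch(t *testing.T) {
	server := newFlakyServer("/api/2.0/machines/", http.StatusServiceUnavailable, 2)
	defer server.Close()

	for i, want := range []int{503, 503, 200} {
		resp, err := http.Get(server.URL + "/api/2.0/machines/")
		if err != nil {
			t.Fatal(err)
		}
		resp.Body.Close()
		if resp.StatusCode != want {
			t.Fatalf("request %d: got %d, want %d", i, resp.StatusCode, want)
		}
	}
}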
+
+type simpleResponse struct {
+	status int
+	body   string
+}
+
+type SimpleTestServer struct {
+	*httptest.Server
+
+	getResponses        map[string][]simpleResponse
+	getResponseIndex    map[string]int
+	putResponses        map[string][]simpleResponse
+	putResponseIndex    map[string]int
+	postResponses       map[string][]simpleResponse
+	postResponseIndex   map[string]int
+	deleteResponses     map[string][]simpleResponse
+	deleteResponseIndex map[string]int
+
+	requests []*http.Request
+}
+
+func NewSimpleServer() *SimpleTestServer {
+	server := &SimpleTestServer{
+		getResponses:        make(map[string][]simpleResponse),
+		getResponseIndex:    make(map[string]int),
+		putResponses:        make(map[string][]simpleResponse),
+		putResponseIndex:    make(map[string]int),
+		postResponses:       make(map[string][]simpleResponse),
+		postResponseIndex:   make(map[string]int),
+		deleteResponses:     make(map[string][]simpleResponse),
+		deleteResponseIndex: make(map[string]int),
+	}
+	server.Server = httptest.NewUnstartedServer(http.HandlerFunc(server.handler))
+	return server
+}
+
+func (s *SimpleTestServer) AddGetResponse(path string, status int, body string) {
+	logger.Debugf("add get response for: %s, %d", path, status)
+	s.getResponses[path] = append(s.getResponses[path], simpleResponse{status: status, body: body})
+}
+
+func (s *SimpleTestServer) AddPutResponse(path string, status int, body string) {
+	logger.Debugf("add put response for: %s, %d", path, status)
+	s.putResponses[path] = append(s.putResponses[path], simpleResponse{status: status, body: body})
+}
+
+func (s *SimpleTestServer) AddPostResponse(path string, status int, body string) {
+	logger.Debugf("add post response for: %s, %d", path, status)
+	s.postResponses[path] = append(s.postResponses[path], simpleResponse{status: status, body: body})
+}
+
+func (s *SimpleTestServer) AddDeleteResponse(path string, status int, body string) {
+	logger.Debugf("add delete response for: %s, %d", path, status)
+	s.deleteResponses[path] = append(s.deleteResponses[path], simpleResponse{status: status, body: body})
+}
+
+func (s *SimpleTestServer) LastRequest() *http.Request {
+	pos := len(s.requests) - 1
+	if pos < 0 {
+		return nil
+	}
+	return s.requests[pos]
+}
+
+func (s *SimpleTestServer) LastNRequests(n int) []*http.Request {
+	start := len(s.requests) - n
+	if start < 0 {
+		start = 0
+	}
+	return s.requests[start:]
+}
+
+func (s *SimpleTestServer) RequestCount() int {
+	return len(s.requests)
+}
+
+func (s *SimpleTestServer) ResetRequests() {
+	s.requests = nil
+}
+
+func (s *SimpleTestServer) handler(writer http.ResponseWriter, request *http.Request) {
+	method := request.Method
+	var (
+		err           error
+		responses     map[string][]simpleResponse
+		responseIndex map[string]int
+	)
+	switch method {
+	case "GET":
+		responses = s.getResponses
+		responseIndex = s.getResponseIndex
+		_, err = readAndClose(request.Body)
+		if err != nil {
+			panic(err) // it is a test, panic should be fine
+		}
+	case "PUT":
+		responses = s.putResponses
+		responseIndex = s.putResponseIndex
+		err = request.ParseForm()
+		if err != nil {
+			panic(err)
+		}
+	case "POST":
+		responses = s.postResponses
+		responseIndex = s.postResponseIndex
+		contentType := request.Header.Get("Content-Type")
+		if strings.HasPrefix(contentType, "multipart/form-data;") {
+			err = request.ParseMultipartForm(2 << 20)
+		} else {
+			err = request.ParseForm()
+		}
+		if err != nil {
+			panic(err)
+		}
+	case "DELETE":
+		responses = s.deleteResponses
+		responseIndex = s.deleteResponseIndex
+		_, err := readAndClose(request.Body)
+		if err != nil {
+			panic(err)
+		}
+	default:
+		panic("unsupported method " + method)
+	}
+	s.requests = append(s.requests, request)
+	uri := request.URL.String()
+	testResponses, found := responses[uri]
+	if !found {
+		errorMsg := fmt.Sprintf("Error 404: page not found ('%v').", uri)
+		http.Error(writer, errorMsg, http.StatusNotFound)
+	} else {
+		index := responseIndex[uri]
+		response := testResponses[index]
+		responseIndex[uri] = index + 1
+
+		writer.WriteHeader(response.status)
+		fmt.Fprint(writer, response.body)
+	}
+}
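
Putting SimpleTestServer together: queue canned responses per method and path, start the (initially unstarted) server, then inspect the recorded requests. Note the path key includes the query string, since the handler matches on request.URL.String(). A sketch with an illustrative path and body:

package gomaasapi

import (
	"net/http"
	"testing"
)

func TestSimpleServerSketch(t *testing.T) {
	server := NewSimpleServer()
	server.AddGetResponse("/api/2.0/users/?op=whoami", http.StatusOK, `{"username": "admin"}`)
	server.Start()
	defer server.Close()

	if _, err := http.Get(server.URL + "/api/2.0/users/?op=whoami"); err != nil {
		t.Fatal(err)
	}
	if server.RequestCount() != 1 {
		t.Fatalf("got %d requests", server.RequestCount())
	}
	if got := server.LastRequest().Method; got != "GET" {
		t.Fatalf("unexpected method %s", got)
	}
}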
diff --git a/switchq/vendor/github.com/juju/gomaasapi/testservice.go b/switchq/vendor/github.com/juju/gomaasapi/testservice.go
new file mode 100644
index 0000000..aa582da
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/testservice.go
@@ -0,0 +1,1672 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"mime/multipart"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"text/template"
+	"time"
+
+	"gopkg.in/mgo.v2/bson"
+)
+
+// TestMAASObject is a MAASObject backed by a fake MAAS server.
+type TestMAASObject struct {
+	MAASObject
+	TestServer *TestServer
+}
+
+// checkError is a shorthand helper that panics if err is not nil.
+func checkError(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
+
+// NewTestMAAS returns a TestMAASObject that implements the MAASObject
+// interface and thus can be used as a test object instead of the one returned
+// by gomaasapi.NewMAAS().
+func NewTestMAAS(version string) *TestMAASObject {
+	server := NewTestServer(version)
+	authClient, err := NewAnonymousClient(server.URL, version)
+	checkError(err)
+	maas := NewMAAS(*authClient)
+	return &TestMAASObject{*maas, server}
+}
+
+// Close shuts down the test server.
+func (testMAASObject *TestMAASObject) Close() {
+	testMAASObject.TestServer.Close()
+}
+
+// A TestServer is an HTTP server listening on a system-chosen port on the
+// local loopback interface, which simulates the behavior of a MAAS server.
+// It is intended for use in end-to-end HTTP tests using the gomaasapi
+// library.
+type TestServer struct {
+	*httptest.Server
+	serveMux   *http.ServeMux
+	client     Client
+	nodes      map[string]MAASObject
+	ownedNodes map[string]bool
+	// mapping system_id -> list of operations performed.
+	nodeOperations map[string][]string
+	// list of operations performed at the /nodes/ level.
+	nodesOperations []string
+	// mapping system_id -> list of Values passed when performing
+	// operations
+	nodeOperationRequestValues map[string][]url.Values
+	// list of Values passed when performing operations at the
+	// /nodes/ level.
+	nodesOperationRequestValues []url.Values
+	nodeMetadata                map[string]Node
+	files                       map[string]MAASObject
+	networks                    map[string]MAASObject
+	networksPerNode             map[string][]string
+	ipAddressesPerNetwork       map[string][]string
+	version                     string
+	macAddressesPerNetwork      map[string]map[string]JSONObject
+	nodeDetails                 map[string]string
+	zones                       map[string]JSONObject
+	// bootImages is a map of nodegroup UUIDs to boot-image objects.
+	bootImages map[string][]JSONObject
+	// nodegroupsInterfaces is a map of nodegroup UUIDs to interface
+	// objects.
+	nodegroupsInterfaces map[string][]JSONObject
+
+	// versionJSON is the response to the /version/ endpoint listing the
+	// capabilities of the MAAS server.
+	versionJSON string
+
+	// devices is a map of device UUIDs to devices.
+	devices map[string]*TestDevice
+
+	subnets        map[uint]TestSubnet
+	subnetNameToID map[string]uint
+	nextSubnet     uint
+	spaces         map[uint]*TestSpace
+	spaceNameToID  map[string]uint
+	nextSpace      uint
+	vlans          map[int]TestVLAN
+	nextVLAN       int
+}
+
+type TestDevice struct {
+	IPAddresses  []string
+	SystemId     string
+	MACAddresses []string
+	Parent       string
+	Hostname     string
+
+	// Not part of the device definition but used by the template.
+	APIVersion string
+}
+
+func getNodesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/nodes/", version)
+}
+
+func getNodeURL(version, systemId string) string {
+	return fmt.Sprintf("/api/%s/nodes/%s/", version, systemId)
+}
+
+func getNodeURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/nodes/([^/]*)/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getDevicesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/devices/", version)
+}
+
+func getDeviceURL(version, systemId string) string {
+	return fmt.Sprintf("/api/%s/devices/%s/", version, systemId)
+}
+
+func getDeviceURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/devices/([^/]*)/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getFilesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/files/", version)
+}
+
+func getFileURL(version, filename string) string {
+	// Use a url.URL so the filename is correctly percent-escaped.
+	url := url.URL{}
+	url.Path = fmt.Sprintf("/api/%s/files/%s/", version, filename)
+	return url.String()
+}
+
+func getFileURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/files/(.*)/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getNetworksEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/networks/", version)
+}
+
+func getNetworkURL(version, name string) string {
+	return fmt.Sprintf("/api/%s/networks/%s/", version, name)
+}
+
+func getNetworkURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/networks/(.*)/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getIPAddressesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/ipaddresses/", version)
+}
+
+func getMACAddressURL(version, systemId, macAddress string) string {
+	return fmt.Sprintf("/api/%s/nodes/%s/macs/%s/", version, systemId, url.QueryEscape(macAddress))
+}
+
+func getVersionURL(version string) string {
+	return fmt.Sprintf("/api/%s/version/", version)
+}
+
+func getNodegroupsEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/nodegroups/", version)
+}
+
+func getNodegroupURL(version, uuid string) string {
+	return fmt.Sprintf("/api/%s/nodegroups/%s/", version, uuid)
+}
+
+func getNodegroupsInterfacesURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/nodegroups/([^/]*)/interfaces/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getBootimagesURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/nodegroups/([^/]*)/boot-images/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getZonesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/zones/", version)
+}
+
+// Clear clears all the fake data stored and recorded by the test server
+// (nodes, recorded operations, etc.).
+func (server *TestServer) Clear() {
+	server.nodes = make(map[string]MAASObject)
+	server.ownedNodes = make(map[string]bool)
+	server.nodesOperations = make([]string, 0)
+	server.nodeOperations = make(map[string][]string)
+	server.nodesOperationRequestValues = make([]url.Values, 0)
+	server.nodeOperationRequestValues = make(map[string][]url.Values)
+	server.nodeMetadata = make(map[string]Node)
+	server.files = make(map[string]MAASObject)
+	server.networks = make(map[string]MAASObject)
+	server.networksPerNode = make(map[string][]string)
+	server.ipAddressesPerNetwork = make(map[string][]string)
+	server.macAddressesPerNetwork = make(map[string]map[string]JSONObject)
+	server.nodeDetails = make(map[string]string)
+	server.bootImages = make(map[string][]JSONObject)
+	server.nodegroupsInterfaces = make(map[string][]JSONObject)
+	server.zones = make(map[string]JSONObject)
+	server.versionJSON = `{"capabilities": ["networks-management","static-ipaddresses","devices-management","network-deployment-ubuntu"]}`
+	server.devices = make(map[string]*TestDevice)
+	server.subnets = make(map[uint]TestSubnet)
+	server.subnetNameToID = make(map[string]uint)
+	server.nextSubnet = 1
+	server.spaces = make(map[uint]*TestSpace)
+	server.spaceNameToID = make(map[string]uint)
+	server.nextSpace = 1
+	server.vlans = make(map[int]TestVLAN)
+	server.nextVLAN = 1
+}
+
+// SetVersionJSON sets the JSON response (capabilities) returned from the
+// /version/ endpoint.
+func (server *TestServer) SetVersionJSON(json string) {
+	server.versionJSON = json
+}
+
+// NodesOperations returns the list of operations performed at the /nodes/
+// level.
+func (server *TestServer) NodesOperations() []string {
+	return server.nodesOperations
+}
+
+// NodeOperations returns the map containing the list of the operations
+// performed for each node.
+func (server *TestServer) NodeOperations() map[string][]string {
+	return server.nodeOperations
+}
+
+// NodesOperationRequestValues returns the list of url.Values extracted
+// from the request used when performing operations at the /nodes/ level.
+func (server *TestServer) NodesOperationRequestValues() []url.Values {
+	return server.nodesOperationRequestValues
+}
+
+// NodeOperationRequestValues returns the map containing the list of the
+// url.Values extracted from the request used when performing operations
+// on nodes.
+func (server *TestServer) NodeOperationRequestValues() map[string][]url.Values {
+	return server.nodeOperationRequestValues
+}
+
+func parseRequestValues(request *http.Request) url.Values {
+	var requestValues url.Values
+	if request.Header.Get("Content-Type") == "application/x-www-form-urlencoded" {
+		if request.PostForm == nil {
+			if err := request.ParseForm(); err != nil {
+				panic(err)
+			}
+		}
+		requestValues = request.PostForm
+	}
+	return requestValues
+}
+
+func (server *TestServer) addNodesOperation(operation string, request *http.Request) url.Values {
+	requestValues := parseRequestValues(request)
+	server.nodesOperations = append(server.nodesOperations, operation)
+	server.nodesOperationRequestValues = append(server.nodesOperationRequestValues, requestValues)
+	return requestValues
+}
+
+func (server *TestServer) addNodeOperation(systemId, operation string, request *http.Request) url.Values {
+	operations, present := server.nodeOperations[systemId]
+	operationRequestValues, present2 := server.nodeOperationRequestValues[systemId]
+	if present != present2 {
+		panic("inconsistent state: nodeOperations and nodeOperationRequestValues don't have the same keys.")
+	}
+	requestValues := parseRequestValues(request)
+	if !present {
+		operations = []string{operation}
+		operationRequestValues = []url.Values{requestValues}
+	} else {
+		operations = append(operations, operation)
+		operationRequestValues = append(operationRequestValues, requestValues)
+	}
+	server.nodeOperations[systemId] = operations
+	server.nodeOperationRequestValues[systemId] = operationRequestValues
+	return requestValues
+}
+
+// NewNode creates a MAAS node.  The provided string should be a valid json
+// string representing a map and contain a string value for the key
+// 'system_id'.  e.g. `{"system_id": "mysystemid"}`.
+// If one of these conditions is not met, NewNode panics.
+func (server *TestServer) NewNode(jsonText string) MAASObject {
+	var attrs map[string]interface{}
+	err := json.Unmarshal([]byte(jsonText), &attrs)
+	checkError(err)
+	systemIdEntry, hasSystemId := attrs["system_id"]
+	if !hasSystemId {
+		panic("The given map json string does not contain a 'system_id' value.")
+	}
+	systemId := systemIdEntry.(string)
+	attrs[resourceURI] = getNodeURL(server.version, systemId)
+	if _, hasStatus := attrs["status"]; !hasStatus {
+		attrs["status"] = NodeStatusDeployed
+	}
+	obj := newJSONMAASObject(attrs, server.client)
+	server.nodes[systemId] = obj
+	return obj
+}
+
+// Nodes returns a map associating all the nodes' system ids with the nodes'
+// objects.
+func (server *TestServer) Nodes() map[string]MAASObject {
+	return server.nodes
+}
+
+// OwnedNodes returns a map whose keys represent the nodes that are currently
+// allocated.
+func (server *TestServer) OwnedNodes() map[string]bool {
+	return server.ownedNodes
+}
+
+// NewFile creates a file in the test MAAS server.
+func (server *TestServer) NewFile(filename string, filecontent []byte) MAASObject {
+	attrs := make(map[string]interface{})
+	attrs[resourceURI] = getFileURL(server.version, filename)
+	base64Content := base64.StdEncoding.EncodeToString(filecontent)
+	attrs["content"] = base64Content
+	attrs["filename"] = filename
+
+	// Allocate an arbitrary URL here.  It would be nice if the caller
+	// could do this, but that would change the API and require many
+	// changes.
+	escapedName := url.QueryEscape(filename)
+	attrs["anon_resource_uri"] = "/maas/1.0/files/?op=get_by_key&key=" + escapedName + "_key"
+
+	obj := newJSONMAASObject(attrs, server.client)
+	server.files[filename] = obj
+	return obj
+}
+
+func (server *TestServer) Files() map[string]MAASObject {
+	return server.files
+}
+
+// ChangeNode updates a node with the given key/value.
+func (server *TestServer) ChangeNode(systemId, key, value string) {
+	node, found := server.nodes[systemId]
+	if !found {
+		panic("No node with such 'system_id'.")
+	}
+	node.GetMap()[key] = maasify(server.client, value)
+}
+
+// NewIPAddress creates a new static IP address reservation for the
+// given network/subnet and ipAddress. While the networks API is being
+// deprecated, try the given name as both a network and a subnet.
+func (server *TestServer) NewIPAddress(ipAddress, networkOrSubnet string) {
+	_, foundNetwork := server.networks[networkOrSubnet]
+	subnetID, foundSubnet := server.subnetNameToID[networkOrSubnet]
+
+	if !foundNetwork && !foundSubnet {
+		panic("No such network or subnet: " + networkOrSubnet)
+	}
+	if foundNetwork {
+		ips, found := server.ipAddressesPerNetwork[networkOrSubnet]
+		if found {
+			ips = append(ips, ipAddress)
+		} else {
+			ips = []string{ipAddress}
+		}
+		server.ipAddressesPerNetwork[networkOrSubnet] = ips
+	} else {
+		subnet := server.subnets[subnetID]
+		netIp := net.ParseIP(ipAddress)
+		if netIp == nil {
+			panic(ipAddress + " is invalid")
+		}
+		ip := IPFromNetIP(netIp)
+		ip.Purpose = []string{"assigned-ip"}
+		subnet.InUseIPAddresses = append(subnet.InUseIPAddresses, ip)
+		server.subnets[subnetID] = subnet
+	}
+}
+
+// RemoveIPAddress removes the given existing ipAddress and returns
+// whether it was actually removed.
+func (server *TestServer) RemoveIPAddress(ipAddress string) bool {
+	for network, ips := range server.ipAddressesPerNetwork {
+		for i, ip := range ips {
+			if ip == ipAddress {
+				ips = append(ips[:i], ips[i+1:]...)
+				server.ipAddressesPerNetwork[network] = ips
+				return true
+			}
+		}
+	}
+	for _, device := range server.devices {
+		for i, addr := range device.IPAddresses {
+			if addr == ipAddress {
+				device.IPAddresses = append(device.IPAddresses[:i], device.IPAddresses[i+1:]...)
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// IPAddresses returns the map with network names as keys and slices
+// of IP addresses belonging to each network as values.
+func (server *TestServer) IPAddresses() map[string][]string {
+	return server.ipAddressesPerNetwork
+}
+
+// NewNetwork creates a network in the test MAAS server
+func (server *TestServer) NewNetwork(jsonText string) MAASObject {
+	var attrs map[string]interface{}
+	err := json.Unmarshal([]byte(jsonText), &attrs)
+	checkError(err)
+	nameEntry, hasName := attrs["name"]
+	_, hasIP := attrs["ip"]
+	_, hasNetmask := attrs["netmask"]
+	if !hasName || !hasIP || !hasNetmask {
+		panic("The given map json string does not contain a 'name', 'ip', or 'netmask' value.")
+	}
+	// TODO(gz): Sanity checking done on other fields
+	name := nameEntry.(string)
+	attrs[resourceURI] = getNetworkURL(server.version, name)
+	obj := newJSONMAASObject(attrs, server.client)
+	server.networks[name] = obj
+	return obj
+}
+
+// NewNodegroupInterface adds a nodegroup-interface for the specified
+// nodegroup in the test MAAS server.
+func (server *TestServer) NewNodegroupInterface(uuid, jsonText string) JSONObject {
+	_, ok := server.bootImages[uuid]
+	if !ok {
+		panic("no nodegroup with the given UUID")
+	}
+	var attrs map[string]interface{}
+	err := json.Unmarshal([]byte(jsonText), &attrs)
+	checkError(err)
+	requiredMembers := []string{"ip_range_high", "ip_range_low", "broadcast_ip", "static_ip_range_low", "static_ip_range_high", "name", "ip", "subnet_mask", "management", "interface"}
+	for _, member := range requiredMembers {
+		_, hasMember := attrs[member]
+		if !hasMember {
+			panic(fmt.Sprintf("The given map json string does not contain a required %q", member))
+		}
+	}
+	obj := maasify(server.client, attrs)
+	server.nodegroupsInterfaces[uuid] = append(server.nodegroupsInterfaces[uuid], obj)
+	return obj
+}
+
+func (server *TestServer) ConnectNodeToNetwork(systemId, name string) {
+	_, hasNode := server.nodes[systemId]
+	if !hasNode {
+		panic("no node with the given system id")
+	}
+	_, hasNetwork := server.networks[name]
+	if !hasNetwork {
+		panic("no network with the given name")
+	}
+	networkNames := server.networksPerNode[systemId]
+	server.networksPerNode[systemId] = append(networkNames, name)
+}
+
+func (server *TestServer) ConnectNodeToNetworkWithMACAddress(systemId, networkName, macAddress string) {
+	node, hasNode := server.nodes[systemId]
+	if !hasNode {
+		panic("no node with the given system id")
+	}
+	if _, hasNetwork := server.networks[networkName]; !hasNetwork {
+		panic("no network with the given name")
+	}
+	networkNames := server.networksPerNode[systemId]
+	server.networksPerNode[systemId] = append(networkNames, networkName)
+	attrs := make(map[string]interface{})
+	attrs[resourceURI] = getMACAddressURL(server.version, systemId, macAddress)
+	attrs["mac_address"] = macAddress
+	array := []JSONObject{}
+	if set, ok := node.GetMap()["macaddress_set"]; ok {
+		var err error
+		array, err = set.GetArray()
+		if err != nil {
+			panic(err)
+		}
+	}
+	array = append(array, maasify(server.client, attrs))
+	node.GetMap()["macaddress_set"] = JSONObject{value: array, client: server.client}
+	if _, ok := server.macAddressesPerNetwork[networkName]; !ok {
+		server.macAddressesPerNetwork[networkName] = map[string]JSONObject{}
+	}
+	server.macAddressesPerNetwork[networkName][systemId] = maasify(server.client, attrs)
+}
+
+// AddBootImage adds a boot-image object to the specified nodegroup.
+func (server *TestServer) AddBootImage(nodegroupUUID string, jsonText string) {
+	var attrs map[string]interface{}
+	err := json.Unmarshal([]byte(jsonText), &attrs)
+	checkError(err)
+	if _, ok := attrs["architecture"]; !ok {
+		panic("The boot-image json string does not contain an 'architecture' value.")
+	}
+	if _, ok := attrs["release"]; !ok {
+		panic("The boot-image json string does not contain a 'release' value.")
+	}
+	obj := maasify(server.client, attrs)
+	server.bootImages[nodegroupUUID] = append(server.bootImages[nodegroupUUID], obj)
+}
+
+// AddZone adds a physical zone to the server.
+func (server *TestServer) AddZone(name, description string) {
+	attrs := map[string]interface{}{
+		"name":        name,
+		"description": description,
+	}
+	obj := maasify(server.client, attrs)
+	server.zones[name] = obj
+}
+
+func (server *TestServer) AddDevice(device *TestDevice) {
+	server.devices[device.SystemId] = device
+}
+
+func (server *TestServer) Devices() map[string]*TestDevice {
+	return server.devices
+}
+
+// NewTestServer starts and returns a new MAAS test server. The caller should call Close when finished, to shut it down.
+func NewTestServer(version string) *TestServer {
+	server := &TestServer{version: version}
+
+	serveMux := http.NewServeMux()
+	devicesURL := getDevicesEndpoint(server.version)
+	// Register handler for '/api/<version>/devices/*'.
+	serveMux.HandleFunc(devicesURL, func(w http.ResponseWriter, r *http.Request) {
+		devicesHandler(server, w, r)
+	})
+	nodesURL := getNodesEndpoint(server.version)
+	// Register handler for '/api/<version>/nodes/*'.
+	serveMux.HandleFunc(nodesURL, func(w http.ResponseWriter, r *http.Request) {
+		nodesHandler(server, w, r)
+	})
+	filesURL := getFilesEndpoint(server.version)
+	// Register handler for '/api/<version>/files/*'.
+	serveMux.HandleFunc(filesURL, func(w http.ResponseWriter, r *http.Request) {
+		filesHandler(server, w, r)
+	})
+	networksURL := getNetworksEndpoint(server.version)
+	// Register handler for '/api/<version>/networks/'.
+	serveMux.HandleFunc(networksURL, func(w http.ResponseWriter, r *http.Request) {
+		networksHandler(server, w, r)
+	})
+	ipAddressesURL := getIPAddressesEndpoint(server.version)
+	// Register handler for '/api/<version>/ipaddresses/'.
+	serveMux.HandleFunc(ipAddressesURL, func(w http.ResponseWriter, r *http.Request) {
+		ipAddressesHandler(server, w, r)
+	})
+	versionURL := getVersionURL(server.version)
+	// Register handler for '/api/<version>/version/'.
+	serveMux.HandleFunc(versionURL, func(w http.ResponseWriter, r *http.Request) {
+		versionHandler(server, w, r)
+	})
+	// Register handler for '/api/<version>/nodegroups/*'.
+	nodegroupsURL := getNodegroupsEndpoint(server.version)
+	serveMux.HandleFunc(nodegroupsURL, func(w http.ResponseWriter, r *http.Request) {
+		nodegroupsHandler(server, w, r)
+	})
+
+	// Register handler for '/api/<version>/zones/*'.
+	zonesURL := getZonesEndpoint(server.version)
+	serveMux.HandleFunc(zonesURL, func(w http.ResponseWriter, r *http.Request) {
+		zonesHandler(server, w, r)
+	})
+
+	subnetsURL := getSubnetsEndpoint(server.version)
+	serveMux.HandleFunc(subnetsURL, func(w http.ResponseWriter, r *http.Request) {
+		subnetsHandler(server, w, r)
+	})
+
+	spacesURL := getSpacesEndpoint(server.version)
+	serveMux.HandleFunc(spacesURL, func(w http.ResponseWriter, r *http.Request) {
+		spacesHandler(server, w, r)
+	})
+
+	vlansURL := getVLANsEndpoint(server.version)
+	serveMux.HandleFunc(vlansURL, func(w http.ResponseWriter, r *http.Request) {
+		vlansHandler(server, w, r)
+	})
+
+	var mu sync.Mutex
+	singleFile := func(w http.ResponseWriter, req *http.Request) {
+		mu.Lock()
+		defer mu.Unlock()
+		serveMux.ServeHTTP(w, req)
+	}
+
+	newServer := httptest.NewServer(http.HandlerFunc(singleFile))
+	client, err := NewAnonymousClient(newServer.URL, "1.0")
+	checkError(err)
+	server.Server = newServer
+	server.serveMux = serveMux
+	server.client = *client
+	server.Clear()
+	return server
+}
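
A hypothetical sketch of the fake MAAS in a test: create it, seed a node, and check what was stored (the JSON is illustrative):

package gomaasapi

import "testing"

func TestFakeMAASSketch(t *testing.T) {
	maas := NewTestMAAS("1.0")
	defer maas.Close()

	maas.TestServer.NewNode(`{"system_id": "node-1", "hostname": "host1"}`)
	if n := len(maas.TestServer.Nodes()); n != 1 {
		t.Fatalf("expected 1 node, got %d", n)
	}
}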
+
+// devicesHandler handles requests for '/api/<version>/devices/*'.
+func devicesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	deviceURLRE := getDeviceURLRE(server.version)
+	deviceURLMatch := deviceURLRE.FindStringSubmatch(r.URL.Path)
+	devicesURL := getDevicesEndpoint(server.version)
+	switch {
+	case r.URL.Path == devicesURL:
+		devicesTopLevelHandler(server, w, r, op)
+	case deviceURLMatch != nil:
+		// Request for a single device.
+		deviceHandler(server, w, r, deviceURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// devicesTopLevelHandler handles a request for /api/<version>/devices/
+// (with no device id following as part of the path).
+func devicesTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
+	switch {
+	case r.Method == "GET" && op == "list":
+		// Device listing operation.
+		deviceListingHandler(server, w, r)
+	case r.Method == "POST" && op == "new":
+		newDeviceHandler(server, w, r)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+func macMatches(mac string, device *TestDevice) bool {
+	return contains(device.MACAddresses, mac)
+}
+
+// deviceListingHandler handles requests for '/devices/'.
+func deviceListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	// TODO(mfoord): support filtering by hostname and id
+	macs, hasMac := values["mac_address"]
+	var matchedDevices []*TestDevice
+	if !hasMac {
+		for _, device := range server.devices {
+			matchedDevices = append(matchedDevices, device)
+		}
+	} else {
+		for _, mac := range macs {
+			for _, device := range server.devices {
+				if macMatches(mac, device) {
+					matchedDevices = append(matchedDevices, device)
+				}
+			}
+		}
+	}
+	deviceChunks := make([]string, len(matchedDevices))
+	for i := range matchedDevices {
+		deviceChunks[i] = renderDevice(matchedDevices[i])
+	}
+	json := fmt.Sprintf("[%v]", strings.Join(deviceChunks, ", "))
+
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, json)
+}
+
+var templateFuncs = template.FuncMap{
+	"quotedList": func(items []string) string {
+		var pieces []string
+		for _, item := range items {
+			pieces = append(pieces, fmt.Sprintf("%q", item))
+		}
+		return strings.Join(pieces, ", ")
+	},
+	"last": func(items []string) []string {
+		if len(items) == 0 {
+			return []string{}
+		}
+		return items[len(items)-1:]
+	},
+	"allButLast": func(items []string) []string {
+		if len(items) < 2 {
+			return []string{}
+		}
+		return items[0 : len(items)-1]
+	},
+}
+
+const (
+	// The json template for generating new devices.
+	// TODO(mfoord): set resource_uri in MAC addresses
+	deviceTemplate = `{
+	"macaddress_set": [{{range .MACAddresses | allButLast}}
+	    {
+		"mac_address": "{{.}}"
+	    },{{end}}{{range .MACAddresses | last}}
+	    {
+		"mac_address": "{{.}}"
+	    }{{end}}
+	],
+	"zone": {
+	    "resource_uri": "/MAAS/api/{{.APIVersion}}/zones/default/",
+	    "name": "default",
+	    "description": ""
+	},
+	"parent": "{{.Parent}}",
+	"ip_addresses": [{{.IPAddresses | quotedList }}],
+	"hostname": "{{.Hostname}}",
+	"tag_names": [],
+	"owner": "maas-admin",
+	"system_id": "{{.SystemId}}",
+	"resource_uri": "/MAAS/api/{{.APIVersion}}/devices/{{.SystemId}}/"
+}`
+)
+
+func renderDevice(device *TestDevice) string {
+	t := template.New("Device template")
+	t = t.Funcs(templateFuncs)
+	t, err := t.Parse(deviceTemplate)
+	checkError(err)
+	var buf bytes.Buffer
+	err = t.Execute(&buf, device)
+	checkError(err)
+	return buf.String()
+}
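
The allButLast/last pair exists to emit a JSON array without a trailing comma. A standalone sketch of the same trick:

package main

import (
	"os"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		"last": func(items []string) []string {
			if len(items) == 0 {
				return []string{}
			}
			return items[len(items)-1:]
		},
		"allButLast": func(items []string) []string {
			if len(items) < 2 {
				return []string{}
			}
			return items[:len(items)-1]
		},
	}
	t := template.Must(template.New("macs").Funcs(funcs).Parse(
		`[{{range . | allButLast}}"{{.}}", {{end}}{{range . | last}}"{{.}}"{{end}}]`))
	if err := t.Execute(os.Stdout, []string{"aa:bb", "cc:dd", "ee:ff"}); err != nil {
		panic(err)
	}
	// Prints: ["aa:bb", "cc:dd", "ee:ff"]
}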
+
+func getValue(values url.Values, value string) (string, bool) {
+	result, hasResult := values[value]
+	if !hasResult || len(result) != 1 || result[0] == "" {
+		return "", false
+	}
+	return result[0], true
+}
+
+func getValues(values url.Values, key string) ([]string, bool) {
+	result, hasResult := values[key]
+	if !hasResult {
+		return nil, false
+	}
+	var output []string
+	for _, val := range result {
+		if val != "" {
+			output = append(output, val)
+		}
+	}
+	if len(output) == 0 {
+		return nil, false
+	}
+	return output, true
+}
+
+// newDeviceHandler creates, stores and returns new devices.
+func newDeviceHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	err := r.ParseForm()
+	checkError(err)
+	values := r.PostForm
+
+	// TODO(mfoord): generate a "proper" UUID for the system id.
+	uuid, err := generateNonce()
+	checkError(err)
+	systemId := fmt.Sprintf("node-%v", uuid)
+	// At least one MAC address must be specified.
+	// TODO(mfoord) we only support a single MAC in the test server.
+	macs, hasMacs := getValues(values, "mac_addresses")
+
+	// hostname and parent are optional.
+	// TODO(mfoord): we require both to be set in the test server.
+	hostname, hasHostname := getValue(values, "hostname")
+	parent, hasParent := getValue(values, "parent")
+	if !hasHostname || !hasMacs || !hasParent {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	device := &TestDevice{
+		MACAddresses: macs,
+		APIVersion:   server.version,
+		Parent:       parent,
+		Hostname:     hostname,
+		SystemId:     systemId,
+	}
+
+	deviceJSON := renderDevice(device)
+	server.devices[systemId] = device
+
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, deviceJSON)
+}
+
+// deviceHandler handles requests for '/api/<version>/devices/<system_id>/'.
+func deviceHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string, operation string) {
+	device, ok := server.devices[systemId]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	if r.Method == "GET" {
+		deviceJSON := renderDevice(device)
+		if operation == "" {
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, deviceJSON)
+			return
+		} else {
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	}
+	if r.Method == "POST" {
+		if operation == "claim_sticky_ip_address" {
+			err := r.ParseForm()
+			checkError(err)
+			values := r.PostForm
+			// TODO(mfoord): support optional mac_address parameter
+			// TODO(mfoord): requested_address should be optional
+			// and we should generate one if it isn't provided.
+			address, hasAddress := getValue(values, "requested_address")
+			if !hasAddress {
+				w.WriteHeader(http.StatusBadRequest)
+				return
+			}
+			device.IPAddresses = append(device.IPAddresses, address)
+			deviceJSON := renderDevice(device)
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, deviceJSON)
+			return
+		} else {
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	} else if r.Method == "DELETE" {
+		delete(server.devices, systemId)
+		w.WriteHeader(http.StatusNoContent)
+		return
+
+	}
+
+	// TODO(mfoord): support PUT method for updating device
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
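
A hypothetical in-package sketch of the claim_sticky_ip_address flow against the fake device endpoint (device id and address are illustrative):

package gomaasapi

import (
	"net/http"
	"net/url"
	"strings"
	"testing"
)

func TestClaimStickyIPSketch(t *testing.T) {
	server := NewTestServer("1.0")
	defer server.Close()
	server.AddDevice(&TestDevice{SystemId: "node-1", APIVersion: "1.0"})

	form := url.Values{"requested_address": {"192.168.1.10"}}
	resp, err := http.Post(
		server.URL+"/api/1.0/devices/node-1/?op=claim_sticky_ip_address",
		"application/x-www-form-urlencoded",
		strings.NewReader(form.Encode()))
	if err != nil {
		t.Fatal(err)
	}
	resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		t.Fatalf("got status %d", resp.StatusCode)
	}
	if got := server.Devices()["node-1"].IPAddresses; len(got) != 1 || got[0] != "192.168.1.10" {
		t.Fatalf("unexpected addresses %v", got)
	}
}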
+
+// nodesHandler handles requests for '/api/<version>/nodes/*'.
+func nodesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	nodeURLRE := getNodeURLRE(server.version)
+	nodeURLMatch := nodeURLRE.FindStringSubmatch(r.URL.Path)
+	nodesURL := getNodesEndpoint(server.version)
+	switch {
+	case r.URL.Path == nodesURL:
+		nodesTopLevelHandler(server, w, r, op)
+	case nodeURLMatch != nil:
+		// Request for a single node.
+		nodeHandler(server, w, r, nodeURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// nodeHandler handles requests for '/api/<version>/nodes/<system_id>/'.
+func nodeHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string, operation string) {
+	node, ok := server.nodes[systemId]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	UUID, UUIDError := node.values["system_id"].GetString()
+	if UUIDError == nil {
+		i, err := JSONObjectFromStruct(server.client, server.nodeMetadata[UUID].Interfaces)
+		checkError(err)
+		node.values["interface_set"] = i
+	}
+
+	if r.Method == "GET" {
+		if operation == "" {
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, marshalNode(node))
+			return
+		} else if operation == "details" {
+			nodeDetailsHandler(server, w, r, systemId)
+			return
+		} else {
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	}
+	if r.Method == "POST" {
+		// The only operations supported are "start", "stop" and "release".
+		if operation == "start" || operation == "stop" || operation == "release" {
+			// Record operation on node.
+			server.addNodeOperation(systemId, operation, r)
+
+			if operation == "release" {
+				delete(server.OwnedNodes(), systemId)
+			}
+
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, marshalNode(node))
+			return
+		}
+
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+	if r.Method == "DELETE" {
+		delete(server.nodes, systemId)
+		w.WriteHeader(http.StatusOK)
+		return
+	}
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
+
+func contains(slice []string, val string) bool {
+	for _, item := range slice {
+		if item == val {
+			return true
+		}
+	}
+	return false
+}
+
+// nodeListingHandler handles requests for '/nodes/'.
+func nodeListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	ids, hasId := values["id"]
+	var convertedNodes = []map[string]JSONObject{}
+	for systemId, node := range server.nodes {
+		if !hasId || contains(ids, systemId) {
+			convertedNodes = append(convertedNodes, node.GetMap())
+		}
+	}
+	res, err := json.MarshalIndent(convertedNodes, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// nodeDeploymentStatusHandler handles requests for '/nodes/?op=deployment_status'.
+func nodeDeploymentStatusHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	nodes := values["nodes"]
+	var nodeStatus = make(map[string]interface{})
+	for _, systemId := range nodes {
+		node := server.nodes[systemId]
+		field, err := node.GetField("status")
+		if err != nil {
+			continue
+		}
+		switch field {
+		case NodeStatusDeployed:
+			nodeStatus[systemId] = "Deployed"
+		case NodeStatusFailedDeployment:
+			nodeStatus[systemId] = "Failed deployment"
+		default:
+			nodeStatus[systemId] = "Not in Deployment"
+		}
+	}
+	obj := maasify(server.client, nodeStatus)
+	res, err := json.MarshalIndent(obj, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// findFreeNode looks for a node that is currently available, and
+// matches the specified filter.
+func findFreeNode(server *TestServer, filter url.Values) *MAASObject {
+	for systemID, node := range server.Nodes() {
+		_, present := server.OwnedNodes()[systemID]
+		if !present {
+			var agentName, nodeName, zoneName, mem, cpuCores, arch string
+			for k := range filter {
+				switch k {
+				case "agent_name":
+					agentName = filter.Get(k)
+				case "name":
+					nodeName = filter.Get(k)
+				case "zone":
+					zoneName = filter.Get(k)
+				case "mem":
+					mem = filter.Get(k)
+				case "arch":
+					arch = filter.Get(k)
+				case "cpu-cores":
+					cpuCores = filter.Get(k)
+				}
+			}
+			if nodeName != "" && !matchField(node, "hostname", nodeName) {
+				continue
+			}
+			if zoneName != "" && !matchField(node, "zone", zoneName) {
+				continue
+			}
+			if mem != "" && !matchNumericField(node, "memory", mem) {
+				continue
+			}
+			if arch != "" && !matchArchitecture(node, "architecture", arch) {
+				continue
+			}
+			if cpuCores != "" && !matchNumericField(node, "cpu_count", cpuCores) {
+				continue
+			}
+			if agentName != "" {
+				agentNameObj := maasify(server.client, agentName)
+				node.GetMap()["agent_name"] = agentNameObj
+			} else {
+				delete(node.GetMap(), "agent_name")
+			}
+			return &node
+		}
+	}
+	return nil
+}
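+
+// Illustrative sketch (not part of the upstream source): given the filter
+// semantics above, an acquire request such as
+//
+//	params := url.Values{}
+//	params.Set("mem", "2048")   // matchNumericField: node memory >= 2048
+//	params.Set("arch", "amd64") // matchArchitecture: base arch before any '/'
+//	node := findFreeNode(server, params)
+//
+// returns the first unowned node satisfying every populated constraint, or
+// nil when none match.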
+
+func matchArchitecture(node MAASObject, k, v string) bool {
+	field, err := node.GetField(k)
+	if err != nil {
+		return false
+	}
+	baseArch := strings.Split(field, "/")
+	return v == baseArch[0]
+}
+
+func matchNumericField(node MAASObject, k, v string) bool {
+	field, ok := node.GetMap()[k]
+	if !ok {
+		return false
+	}
+	nodeVal, err := field.GetFloat64()
+	if err != nil {
+		return false
+	}
+	constraintVal, err := strconv.ParseFloat(v, 64)
+	if err != nil {
+		return false
+	}
+	return constraintVal <= nodeVal
+}
+
+func matchField(node MAASObject, k, v string) bool {
+	field, err := node.GetField(k)
+	if err != nil {
+		return false
+	}
+	return field == v
+}
+
+// nodesAcquireHandler simulates acquiring a node.
+func nodesAcquireHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	requestValues := server.addNodesOperation("acquire", r)
+	node := findFreeNode(server, requestValues)
+	if node == nil {
+		w.WriteHeader(http.StatusConflict)
+	} else {
+		systemId, err := node.GetField("system_id")
+		checkError(err)
+		server.OwnedNodes()[systemId] = true
+		res, err := json.MarshalIndent(node, "", "  ")
+		checkError(err)
+		// Record operation.
+		server.addNodeOperation(systemId, "acquire", r)
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprint(w, string(res))
+	}
+}
+
+// nodesReleaseHandler simulates releasing multiple nodes.
+func nodesReleaseHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	server.addNodesOperation("release", r)
+	values := server.NodesOperationRequestValues()
+	systemIds := values[len(values)-1]["nodes"]
+	var unknown []string
+	for _, systemId := range systemIds {
+		if _, ok := server.Nodes()[systemId]; !ok {
+			unknown = append(unknown, systemId)
+		}
+	}
+	if len(unknown) > 0 {
+		w.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(w, "Unknown node(s): %s.", strings.Join(unknown, ", "))
+		return
+	}
+	var releasedNodes = []map[string]JSONObject{}
+	for _, systemId := range systemIds {
+		if _, ok := server.OwnedNodes()[systemId]; !ok {
+			continue
+		}
+		delete(server.OwnedNodes(), systemId)
+		node := server.Nodes()[systemId]
+		releasedNodes = append(releasedNodes, node.GetMap())
+	}
+	res, err := json.MarshalIndent(releasedNodes, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// nodesTopLevelHandler handles a request for /api/<version>/nodes/
+// (with no node id following as part of the path).
+func nodesTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
+	switch {
+	case r.Method == "GET" && op == "list":
+		// Node listing operation.
+		nodeListingHandler(server, w, r)
+	case r.Method == "GET" && op == "deployment_status":
+		// Node deployment_status operation.
+		nodeDeploymentStatusHandler(server, w, r)
+	case r.Method == "POST" && op == "acquire":
+		nodesAcquireHandler(server, w, r)
+	case r.Method == "POST" && op == "release":
+		nodesReleaseHandler(server, w, r)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+// AddNodeDetails stores node details, expected in XML format.
+func (server *TestServer) AddNodeDetails(systemId, xmlText string) {
+	_, hasNode := server.nodes[systemId]
+	if !hasNode {
+		panic("no node with the given system id")
+	}
+	server.nodeDetails[systemId] = xmlText
+}
+
+const lldpXML = `<?xml version="1.0" encoding="UTF-8"?>
+<lldp label="LLDP neighbors"/>`
+
+// nodeDetailsHandler handles requests for '/api/<version>/nodes/<system_id>/?op=details'.
+func nodeDetailsHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string) {
+	attrs := make(map[string]interface{})
+	attrs["lldp"] = lldpXML
+	xmlText := server.nodeDetails[systemId]
+	attrs["lshw"] = []byte(xmlText)
+	res, err := bson.Marshal(attrs)
+	checkError(err)
+	w.Header().Set("Content-Type", "application/bson")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// filesHandler handles requests for '/api/<version>/files/*'.
+func filesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	fileURLRE := getFileURLRE(server.version)
+	fileURLMatch := fileURLRE.FindStringSubmatch(r.URL.Path)
+	fileListingURL := getFilesEndpoint(server.version)
+	switch {
+	case r.Method == "GET" && op == "list" && r.URL.Path == fileListingURL:
+		// File listing operation.
+		fileListingHandler(server, w, r)
+	case op == "get" && r.Method == "GET" && r.URL.Path == fileListingURL:
+		getFileHandler(server, w, r)
+	case op == "add" && r.Method == "POST" && r.URL.Path == fileListingURL:
+		addFileHandler(server, w, r)
+	case fileURLMatch != nil:
+		// Request for a single file.
+		fileHandler(server, w, r, fileURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+
+}
+
+// listFilenames returns the names of those uploaded files whose names start
+// with the given prefix, sorted lexicographically.
+func listFilenames(server *TestServer, prefix string) []string {
+	var filenames = make([]string, 0)
+	for filename := range server.files {
+		if strings.HasPrefix(filename, prefix) {
+			filenames = append(filenames, filename)
+		}
+	}
+	sort.Strings(filenames)
+	return filenames
+}
+
+// stripContent copies a map of attributes representing an uploaded file,
+// but with the "content" attribute removed.
+func stripContent(original map[string]JSONObject) map[string]JSONObject {
+	newMap := make(map[string]JSONObject, len(original)-1)
+	for key, value := range original {
+		if key != "content" {
+			newMap[key] = value
+		}
+	}
+	return newMap
+}
+
+// fileListingHandler handles requests for '/api/<version>/files/?op=list'.
+func fileListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	prefix := values.Get("prefix")
+	filenames := listFilenames(server, prefix)
+
+	// Build a sorted list of the files as map[string]JSONObject objects.
+	convertedFiles := make([]map[string]JSONObject, 0)
+	for _, filename := range filenames {
+		// The "content" attribute is not in the listing.
+		fileMap := stripContent(server.files[filename].GetMap())
+		convertedFiles = append(convertedFiles, fileMap)
+	}
+	res, err := json.MarshalIndent(convertedFiles, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// fileHandler handles requests for '/api/<version>/files/<filename>/'.
+func fileHandler(server *TestServer, w http.ResponseWriter, r *http.Request, filename string, operation string) {
+	switch {
+	case r.Method == "DELETE":
+		delete(server.files, filename)
+		w.WriteHeader(http.StatusOK)
+	case r.Method == "GET":
+		// Retrieve a file's information (including content) as a JSON
+		// object.
+		file, ok := server.files[filename]
+		if !ok {
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+		jsonText, err := json.MarshalIndent(file, "", "  ")
+		if err != nil {
+			panic(err)
+		}
+		w.WriteHeader(http.StatusOK)
+		w.Write(jsonText)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// InternalError replies to the request with an HTTP 500 internal error.
+func InternalError(w http.ResponseWriter, r *http.Request, err error) {
+	http.Error(w, err.Error(), http.StatusInternalServerError)
+}
+
+// getFileHandler handles requests for
+// '/api/<version>/files/?op=get&filename=filename'.
+func getFileHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	filename := values.Get("filename")
+	file, found := server.files[filename]
+	if !found {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	base64Content, err := file.GetField("content")
+	if err != nil {
+		InternalError(w, r, err)
+		return
+	}
+	content, err := base64.StdEncoding.DecodeString(base64Content)
+	if err != nil {
+		InternalError(w, r, err)
+		return
+	}
+	w.Write(content)
+}
+
+func readMultipart(upload *multipart.FileHeader) ([]byte, error) {
+	file, err := upload.Open()
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	reader := bufio.NewReader(file)
+	return ioutil.ReadAll(reader)
+}
+
+// addFileHandler handles requests for '/api/<version>/files/?op=add&filename=filename'.
+func addFileHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	err := r.ParseMultipartForm(10000000)
+	checkError(err)
+
+	filename := r.Form.Get("filename")
+	if filename == "" {
+		panic("upload has no filename")
+	}
+
+	uploads := r.MultipartForm.File
+	if len(uploads) != 1 {
+		panic("the payload should contain one file and one file only")
+	}
+	var upload *multipart.FileHeader
+	for _, uploadContent := range uploads {
+		upload = uploadContent[0]
+	}
+	content, err := readMultipart(upload)
+	checkError(err)
+	server.NewFile(filename, content)
+	w.WriteHeader(http.StatusOK)
+}
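+
+// Illustrative sketch (an assumption, not upstream code; serverURL and the
+// version path are placeholders): an upload this handler accepts can be
+// built with mime/multipart:
+//
+//	var buf bytes.Buffer
+//	mw := multipart.NewWriter(&buf)
+//	mw.WriteField("filename", "config.yaml")
+//	fw, _ := mw.CreateFormFile("file", "config.yaml")
+//	fw.Write([]byte("contents"))
+//	mw.Close()
+//	http.Post(serverURL+"/api/1.0/files/?op=add", mw.FormDataContentType(), &buf)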
+
+// networkListConnectedMACSHandler handles requests for '/api/<version>/networks/<network>/?op=list_connected_macs'
+func networkListConnectedMACSHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	networkURLRE := getNetworkURLRE(server.version)
+	networkURLREMatch := networkURLRE.FindStringSubmatch(r.URL.Path)
+	if networkURLREMatch == nil {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	networkName := networkURLREMatch[1]
+	convertedMacAddresses := []map[string]JSONObject{}
+	if macAddresses, ok := server.macAddressesPerNetwork[networkName]; ok {
+		for _, macAddress := range macAddresses {
+			m, err := macAddress.GetMap()
+			checkError(err)
+			convertedMacAddresses = append(convertedMacAddresses, m)
+		}
+	}
+	res, err := json.MarshalIndent(convertedMacAddresses, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// networksHandler handles requests for '/api/<version>/networks/?node=system_id'.
+func networksHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		panic("only networks GET operation implemented")
+	}
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	systemId := values.Get("node")
+	if op == "list_connected_macs" {
+		networkListConnectedMACSHandler(server, w, r)
+		return
+	}
+	if op != "" {
+		panic("only list_connected_macs and default operations implemented")
+	}
+	if systemId == "" {
+		panic("network missing associated node system id")
+	}
+	networks := []MAASObject{}
+	if networkNames, hasNetworks := server.networksPerNode[systemId]; hasNetworks {
+		networks = make([]MAASObject, len(networkNames))
+		for i, networkName := range networkNames {
+			networks[i] = server.networks[networkName]
+		}
+	}
+	res, err := json.MarshalIndent(networks, "", "  ")
+	checkError(err)
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// ipAddressesHandler handles requests for '/api/<version>/ipaddresses/'.
+func ipAddressesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	err := r.ParseForm()
+	checkError(err)
+	values := r.Form
+	op := values.Get("op")
+
+	switch r.Method {
+	case "GET":
+		if op != "" {
+			panic("expected empty op for GET, got " + op)
+		}
+		listIPAddressesHandler(server, w, r)
+		return
+	case "POST":
+		switch op {
+		case "reserve":
+			reserveIPAddressHandler(server, w, r, values.Get("network"), values.Get("requested_address"))
+			return
+		case "release":
+			releaseIPAddressHandler(server, w, r, values.Get("ip"))
+			return
+		default:
+			panic("expected op=release|reserve for POST, got " + op)
+		}
+	}
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
+
+func marshalIPAddress(server *TestServer, ipAddress string) (JSONObject, error) {
+	jsonTemplate := `{"alloc_type": 4, "ip": %q, "resource_uri": %q, "created": %q}`
+	uri := getIPAddressesEndpoint(server.version)
+	now := time.Now().UTC().Format(time.RFC3339)
+	bytes := []byte(fmt.Sprintf(jsonTemplate, ipAddress, uri, now))
+	return Parse(server.client, bytes)
+}
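+
+// For illustration, a reserved address rendered by the template above looks
+// roughly like (URI version and timestamp vary):
+//
+//	{"alloc_type": 4, "ip": "10.0.0.5", "resource_uri": "/api/1.0/ipaddresses/", "created": "2017-01-01T00:00:00Z"}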
+
+func badRequestError(w http.ResponseWriter, err error) {
+	w.WriteHeader(http.StatusBadRequest)
+	fmt.Fprint(w, err.Error())
+}
+
+func listIPAddressesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	results := []MAASObject{}
+	for _, ips := range server.IPAddresses() {
+		for _, ip := range ips {
+			jsonObj, err := marshalIPAddress(server, ip)
+			if err != nil {
+				badRequestError(w, err)
+				return
+			}
+			maasObj, err := jsonObj.GetMAASObject()
+			if err != nil {
+				badRequestError(w, err)
+				return
+			}
+			results = append(results, maasObj)
+		}
+	}
+	res, err := json.MarshalIndent(results, "", "  ")
+	checkError(err)
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+func reserveIPAddressHandler(server *TestServer, w http.ResponseWriter, r *http.Request, network, reqAddress string) {
+	_, ipNet, err := net.ParseCIDR(network)
+	if err != nil {
+		badRequestError(w, fmt.Errorf("Invalid network parameter %s", network))
+		return
+	}
+	if reqAddress != "" {
+		// Validate "requested_address" parameter.
+		reqIP := net.ParseIP(reqAddress)
+		if reqIP == nil {
+			badRequestError(w, fmt.Errorf("failed to detect a valid IP address from u'%s'", reqAddress))
+			return
+		}
+		if !ipNet.Contains(reqIP) {
+			badRequestError(w, fmt.Errorf("%s is not inside the range %s", reqAddress, ipNet.String()))
+			return
+		}
+	}
+	// Find the network name matching the parsed CIDR.
+	foundNetworkName := ""
+	for netName, netObj := range server.networks {
+		// Get the "ip" and "netmask" attributes of the network.
+		netIP, err := netObj.GetField("ip")
+		checkError(err)
+		netMask, err := netObj.GetField("netmask")
+		checkError(err)
+
+		// Convert the netmask string to net.IPMask.
+		parts := strings.Split(netMask, ".")
+		ipMask := make(net.IPMask, len(parts))
+		for i, part := range parts {
+			intPart, err := strconv.Atoi(part)
+			checkError(err)
+			ipMask[i] = byte(intPart)
+		}
+		netNet := &net.IPNet{IP: net.ParseIP(netIP), Mask: ipMask}
+		if netNet.String() == network {
+			// Exact match found.
+			foundNetworkName = netName
+			break
+		}
+	}
+	if foundNetworkName == "" {
+		badRequestError(w, fmt.Errorf("No network found matching %s", network))
+		return
+	}
+	ips, found := server.ipAddressesPerNetwork[foundNetworkName]
+	if !found {
+		// This will be the first address.
+		ips = []string{}
+	}
+	reservedIP := ""
+	if reqAddress != "" {
+		// Use what the user provided. NOTE: Because this is testing
+		// code, no duplicates check is done.
+		reservedIP = reqAddress
+	} else {
+		// Generate an IP in the network range by incrementing the
+		// last byte of the network's IP.
+		firstIP := ipNet.IP
+		firstIP[len(firstIP)-1] += byte(len(ips) + 1)
+		reservedIP = firstIP.String()
+	}
+	ips = append(ips, reservedIP)
+	server.ipAddressesPerNetwork[foundNetworkName] = ips
+	jsonObj, err := marshalIPAddress(server, reservedIP)
+	checkError(err)
+	maasObj, err := jsonObj.GetMAASObject()
+	checkError(err)
+	res, err := json.MarshalIndent(maasObj, "", "  ")
+	checkError(err)
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
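+
+// Worked example of the generation branch above: for network 10.0.0.0/24
+// with two addresses already reserved, the last byte of 10.0.0.0 is bumped
+// by len(ips)+1 = 3, so the reserved address is 10.0.0.3.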
+
+func releaseIPAddressHandler(server *TestServer, w http.ResponseWriter, r *http.Request, ip string) {
+	if netIP := net.ParseIP(ip); netIP == nil {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	if server.RemoveIPAddress(ip) {
+		w.WriteHeader(http.StatusOK)
+		return
+	}
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
+
+// versionHandler handles requests for '/api/<version>/version/'.
+func versionHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		panic("only version GET operation implemented")
+	}
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, server.versionJSON)
+}
+
+// nodegroupsHandler handles requests for '/api/<version>/nodegroups/*'.
+func nodegroupsHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	bootimagesURLRE := getBootimagesURLRE(server.version)
+	bootimagesURLMatch := bootimagesURLRE.FindStringSubmatch(r.URL.Path)
+	nodegroupsInterfacesURLRE := getNodegroupsInterfacesURLRE(server.version)
+	nodegroupsInterfacesURLMatch := nodegroupsInterfacesURLRE.FindStringSubmatch(r.URL.Path)
+	nodegroupsURL := getNodegroupsEndpoint(server.version)
+	switch {
+	case r.URL.Path == nodegroupsURL:
+		nodegroupsTopLevelHandler(server, w, r, op)
+	case bootimagesURLMatch != nil:
+		bootimagesHandler(server, w, r, bootimagesURLMatch[1], op)
+	case nodegroupsInterfacesURLMatch != nil:
+		nodegroupsInterfacesHandler(server, w, r, nodegroupsInterfacesURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// nodegroupsTopLevelHandler handles requests for '/api/<version>/nodegroups/'.
+func nodegroupsTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
+	if r.Method != "GET" || op != "list" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	nodegroups := []JSONObject{}
+	for uuid := range server.bootImages {
+		attrs := map[string]interface{}{
+			"uuid":      uuid,
+			resourceURI: getNodegroupURL(server.version, uuid),
+		}
+		obj := maasify(server.client, attrs)
+		nodegroups = append(nodegroups, obj)
+	}
+
+	res, err := json.MarshalIndent(nodegroups, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// bootimagesHandler handles requests for '/api/<version>/nodegroups/<uuid>/boot-images/'.
+func bootimagesHandler(server *TestServer, w http.ResponseWriter, r *http.Request, nodegroupUUID, op string) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	bootImages, ok := server.bootImages[nodegroupUUID]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+
+	res, err := json.MarshalIndent(bootImages, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// nodegroupsInterfacesHandler handles requests for '/api/<version>/nodegroups/<uuid>/interfaces/'
+func nodegroupsInterfacesHandler(server *TestServer, w http.ResponseWriter, r *http.Request, nodegroupUUID, op string) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+	_, ok := server.bootImages[nodegroupUUID]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+
+	interfaces, ok := server.nodegroupsInterfaces[nodegroupUUID]
+	if !ok {
+		// we already checked the nodegroup exists, so return an empty list
+		interfaces = []JSONObject{}
+	}
+	res, err := json.MarshalIndent(interfaces, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// zonesHandler handles requests for '/api/<version>/zones/'.
+func zonesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	if len(server.zones) == 0 {
+		// Until a zone is registered, behave as if the endpoint
+		// does not exist. This way we can simulate older MAAS
+		// servers that do not support zones.
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+
+	zones := make([]JSONObject, 0, len(server.zones))
+	for _, zone := range server.zones {
+		zones = append(zones, zone)
+	}
+	res, err := json.MarshalIndent(zones, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/testservice_spaces.go b/switchq/vendor/github.com/juju/gomaasapi/testservice_spaces.go
new file mode 100644
index 0000000..c6c1617
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/testservice_spaces.go
@@ -0,0 +1,132 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"regexp"
+)
+
+func getSpacesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/spaces/", version)
+}
+
+// TestSpace is the MAAS API space representation
+type TestSpace struct {
+	Name        string       `json:"name"`
+	Subnets     []TestSubnet `json:"subnets"`
+	ResourceURI string       `json:"resource_uri"`
+	ID          uint         `json:"id"`
+}
+
+// spacesHandler handles requests for '/api/<version>/spaces/'.
+func spacesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	if op != "" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	spacesURLRE := regexp.MustCompile(`/spaces/(.+?)/`)
+	spacesURLMatch := spacesURLRE.FindStringSubmatch(r.URL.Path)
+	spacesURL := getSpacesEndpoint(server.version)
+
+	var ID uint
+	var gotID bool
+	if spacesURLMatch != nil {
+		ID, err = NameOrIDToID(spacesURLMatch[1], server.spaceNameToID, 1, uint(len(server.spaces)))
+
+		if err != nil {
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		gotID = true
+	}
+
+	switch r.Method {
+	case "GET":
+		w.Header().Set("Content-Type", "application/vnd.api+json")
+		if len(server.spaces) == 0 {
+			// Until a space is registered, behave as if the endpoint
+			// does not exist. This way we can simulate older MAAS
+			// servers that do not support spaces.
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		if r.URL.Path == spacesURL {
+			var spaces []*TestSpace
+			// Iterating by id rather than a dictionary iteration
+			// preserves the order of the spaces in the result.
+			for i := uint(1); i < server.nextSpace; i++ {
+				s, ok := server.spaces[i]
+				if ok {
+					server.setSubnetsOnSpace(s)
+					spaces = append(spaces, s)
+				}
+			}
+			err = json.NewEncoder(w).Encode(spaces)
+		} else if !gotID {
+			w.WriteHeader(http.StatusBadRequest)
+		} else {
+			err = json.NewEncoder(w).Encode(server.spaces[ID])
+		}
+		checkError(err)
+	case "POST":
+		//server.NewSpace(r.Body)
+	case "PUT":
+		//server.UpdateSpace(r.Body)
+	case "DELETE":
+		delete(server.spaces, ID)
+		w.WriteHeader(http.StatusOK)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+// CreateSpace is used to create new spaces on the server.
+type CreateSpace struct {
+	Name string `json:"name"`
+}
+
+func decodePostedSpace(spaceJSON io.Reader) CreateSpace {
+	var postedSpace CreateSpace
+	decoder := json.NewDecoder(spaceJSON)
+	err := decoder.Decode(&postedSpace)
+	checkError(err)
+	return postedSpace
+}
+
+// NewSpace creates a space in the test server
+func (server *TestServer) NewSpace(spaceJSON io.Reader) *TestSpace {
+	postedSpace := decodePostedSpace(spaceJSON)
+	newSpace := &TestSpace{Name: postedSpace.Name}
+	newSpace.ID = server.nextSpace
+	newSpace.ResourceURI = fmt.Sprintf("/api/%s/spaces/%d/", server.version, int(server.nextSpace))
+	server.spaces[server.nextSpace] = newSpace
+	server.spaceNameToID[newSpace.Name] = newSpace.ID
+
+	server.nextSpace++
+	return newSpace
+}
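+
+// Illustrative usage (a sketch, assuming a TestServer created elsewhere and
+// space IDs starting at 1):
+//
+//	space := server.NewSpace(strings.NewReader(`{"name": "backbone"}`))
+//	// space.ID == 1; GET /api/<version>/spaces/ now lists it.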
+
+// setSubnetsOnSpace fetches the subnets for the specified space and adds them
+// to it.
+func (server *TestServer) setSubnetsOnSpace(space *TestSpace) {
+	subnets := []TestSubnet{}
+	for i := uint(1); i < server.nextSubnet; i++ {
+		subnet, ok := server.subnets[i]
+		if ok && subnet.Space == space.Name {
+			subnets = append(subnets, subnet)
+		}
+	}
+	space.Subnets = subnets
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/testservice_subnets.go b/switchq/vendor/github.com/juju/gomaasapi/testservice_subnets.go
new file mode 100644
index 0000000..5438669
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/testservice_subnets.go
@@ -0,0 +1,396 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"sort"
+	"strings"
+)
+
+func getSubnetsEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/subnets/", version)
+}
+
+// CreateSubnet is used to receive new subnets via the MAAS API
+type CreateSubnet struct {
+	DNSServers []string `json:"dns_servers"`
+	Name       string   `json:"name"`
+	Space      string   `json:"space"`
+	GatewayIP  string   `json:"gateway_ip"`
+	CIDR       string   `json:"cidr"`
+
+	// VLAN this subnet belongs to. Currently ignored.
+	// TODO: Defaults to the default VLAN
+	// for the provided fabric or defaults to the default VLAN
+	// in the default fabric.
+	VLAN *uint `json:"vlan"`
+
+	// Fabric for the subnet. Currently ignored.
+	// TODO: Defaults to the fabric the provided
+	// VLAN belongs to or defaults to the default fabric.
+	Fabric *uint `json:"fabric"`
+
+	// VID of the VLAN this subnet belongs to. Currently ignored.
+	// TODO: Only used when vlan
+	// is not provided. Picks the VLAN with this VID in the provided
+	// fabric or the default fabric if one is not given.
+	VID *uint `json:"vid"`
+
+	// This is used for updates (PUT) and is ignored by create (POST)
+	ID uint `json:"id"`
+}
+
+// TestSubnet is the MAAS API subnet representation
+type TestSubnet struct {
+	DNSServers []string `json:"dns_servers"`
+	Name       string   `json:"name"`
+	Space      string   `json:"space"`
+	VLAN       TestVLAN `json:"vlan"`
+	GatewayIP  string   `json:"gateway_ip"`
+	CIDR       string   `json:"cidr"`
+
+	ResourceURI        string         `json:"resource_uri"`
+	ID                 uint           `json:"id"`
+	InUseIPAddresses   []IP           `json:"-"`
+	FixedAddressRanges []AddressRange `json:"-"`
+}
+
+// AddFixedAddressRange adds an AddressRange to the list of fixed address ranges
+// that subnet stores.
+func (server *TestServer) AddFixedAddressRange(subnetID uint, ar AddressRange) {
+	subnet := server.subnets[subnetID]
+	ar.startUint = IPFromString(ar.Start).UInt64()
+	ar.endUint = IPFromString(ar.End).UInt64()
+	subnet.FixedAddressRanges = append(subnet.FixedAddressRanges, ar)
+	server.subnets[subnetID] = subnet
+}
+
+// subnetsHandler handles requests for '/api/<version>/subnets/'.
+func subnetsHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	var err error
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	includeRangesString := strings.ToLower(values.Get("include_ranges"))
+	subnetsURLRE := regexp.MustCompile(`/subnets/(.+?)/`)
+	subnetsURLMatch := subnetsURLRE.FindStringSubmatch(r.URL.Path)
+	subnetsURL := getSubnetsEndpoint(server.version)
+
+	var ID uint
+	var gotID bool
+	if subnetsURLMatch != nil {
+		ID, err = NameOrIDToID(subnetsURLMatch[1], server.subnetNameToID, 1, uint(len(server.subnets)))
+
+		if err != nil {
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		gotID = true
+	}
+
+	var includeRanges bool
+	switch includeRangesString {
+	case "true", "yes", "1":
+		includeRanges = true
+	}
+
+	switch r.Method {
+	case "GET":
+		w.Header().Set("Content-Type", "application/vnd.api+json")
+		if len(server.subnets) == 0 {
+			// Until a subnet is registered, behave as if the endpoint
+			// does not exist. This way we can simulate older MAAS
+			// servers that do not support subnets.
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		if r.URL.Path == subnetsURL {
+			var subnets []TestSubnet
+			for i := uint(1); i < server.nextSubnet; i++ {
+				s, ok := server.subnets[i]
+				if ok {
+					subnets = append(subnets, s)
+				}
+			}
+			PrettyJsonWriter(subnets, w)
+		} else if !gotID {
+			w.WriteHeader(http.StatusBadRequest)
+		} else {
+			switch op {
+			case "unreserved_ip_ranges":
+				PrettyJsonWriter(server.subnetUnreservedIPRanges(server.subnets[ID]), w)
+			case "reserved_ip_ranges":
+				PrettyJsonWriter(server.subnetReservedIPRanges(server.subnets[ID]), w)
+			case "statistics":
+				PrettyJsonWriter(server.subnetStatistics(server.subnets[ID], includeRanges), w)
+			default:
+				PrettyJsonWriter(server.subnets[ID], w)
+			}
+		}
+		checkError(err)
+	case "POST":
+		server.NewSubnet(r.Body)
+	case "PUT":
+		server.UpdateSubnet(r.Body)
+	case "DELETE":
+		delete(server.subnets, ID)
+		w.WriteHeader(http.StatusOK)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+type addressList []IP
+
+func (a addressList) Len() int           { return len(a) }
+func (a addressList) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a addressList) Less(i, j int) bool { return a[i].UInt64() < a[j].UInt64() }
+
+// AddressRange is used to generate reserved IP address range lists
+type AddressRange struct {
+	Start        string `json:"start"`
+	startUint    uint64
+	End          string `json:"end"`
+	endUint      uint64
+	Purpose      []string `json:"purpose,omitempty"`
+	NumAddresses uint     `json:"num_addresses"`
+}
+
+// AddressRangeList is a list of AddressRange
+type AddressRangeList struct {
+	ar []AddressRange
+}
+
+// Append appends a new AddressRange to an AddressRangeList
+func (ranges *AddressRangeList) Append(startIP, endIP IP) {
+	var i AddressRange
+	i.Start, i.End = startIP.String(), endIP.String()
+	i.startUint, i.endUint = startIP.UInt64(), endIP.UInt64()
+	i.NumAddresses = uint(1 + endIP.UInt64() - startIP.UInt64())
+	i.Purpose = startIP.Purpose
+	ranges.ar = append(ranges.ar, i)
+}
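+
+// For example, appending the range 10.0.0.10 .. 10.0.0.19 produces one
+// AddressRange with NumAddresses == 10 (1 + end - start).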
+
+func appendRangesToIPList(subnet TestSubnet, ipAddresses *[]IP) {
+	for _, r := range subnet.FixedAddressRanges {
+		for v := r.startUint; v <= r.endUint; v++ {
+			ip := IPFromInt64(v)
+			ip.Purpose = r.Purpose
+			*ipAddresses = append(*ipAddresses, ip)
+		}
+	}
+}
+
+func (server *TestServer) subnetUnreservedIPRanges(subnet TestSubnet) []AddressRange {
+	// Make a sorted copy of subnet.InUseIPAddresses
+	ipAddresses := make([]IP, len(subnet.InUseIPAddresses))
+	copy(ipAddresses, subnet.InUseIPAddresses)
+	appendRangesToIPList(subnet, &ipAddresses)
+	sort.Sort(addressList(ipAddresses))
+
+	// We need the first and last address in the subnet
+	var ranges AddressRangeList
+	var startIP, endIP, lastUsableIP IP
+
+	_, ipNet, err := net.ParseCIDR(subnet.CIDR)
+	checkError(err)
+	startIP = IPFromNetIP(ipNet.IP)
+	// Start with the lowest usable address in the range, which is 1 above
+	// what net.ParseCIDR will give back.
+	startIP.SetUInt64(startIP.UInt64() + 1)
+
+	ones, bits := ipNet.Mask.Size()
+	set := ^((^uint64(0)) << uint(bits-ones))
+
+	// The last usable address is one below the broadcast address, which is
+	// what you get by bitwise ORing 'set' with any IP address in the subnet.
+	lastUsableIP.SetUInt64((startIP.UInt64() | set) - 1)
+
+	for _, endIP = range ipAddresses {
+		end := endIP.UInt64()
+
+		if endIP.UInt64() == startIP.UInt64() {
+			if endIP.UInt64() != lastUsableIP.UInt64() {
+				startIP.SetUInt64(end + 1)
+			}
+			continue
+		}
+
+		if end == lastUsableIP.UInt64() {
+			continue
+		}
+
+		ranges.Append(startIP, IPFromInt64(end-1))
+		startIP.SetUInt64(end + 1)
+	}
+
+	if startIP.UInt64() != lastUsableIP.UInt64() {
+		ranges.Append(startIP, lastUsableIP)
+	}
+
+	return ranges.ar
+}
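+
+// Worked example of the mask arithmetic above: for a /24, ones=24 and
+// bits=32, so set = ^(^uint64(0) << 8) = 0xff. ORing any in-subnet address
+// with set gives the broadcast address (e.g. 10.0.0.255), and one below
+// that is the last usable host.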
+
+func (server *TestServer) subnetReservedIPRanges(subnet TestSubnet) []AddressRange {
+	var ranges AddressRangeList
+	var startIP, thisIP IP
+
+	// Make a sorted copy of subnet.InUseIPAddresses
+	ipAddresses := make([]IP, len(subnet.InUseIPAddresses))
+	copy(ipAddresses, subnet.InUseIPAddresses)
+	appendRangesToIPList(subnet, &ipAddresses)
+	sort.Sort(addressList(ipAddresses))
+	if len(ipAddresses) == 0 {
+		ar := ranges.ar
+		if ar == nil {
+			ar = []AddressRange{}
+		}
+		return ar
+	}
+
+	startIP = ipAddresses[0]
+	lastIP := ipAddresses[0]
+	for _, thisIP = range ipAddresses {
+		// Treat differing purpose lengths as a mismatch; this also guards
+		// the index into startIP.Purpose below.
+		purposeMismatch := len(thisIP.Purpose) != len(startIP.Purpose)
+		if !purposeMismatch {
+			for i, p := range thisIP.Purpose {
+				if startIP.Purpose[i] != p {
+					purposeMismatch = true
+				}
+			}
+		}
+		if (thisIP.UInt64() != lastIP.UInt64() && thisIP.UInt64() != lastIP.UInt64()+1) || purposeMismatch {
+			ranges.Append(startIP, lastIP)
+			startIP = thisIP
+		}
+		lastIP = thisIP
+	}
+
+	if len(ranges.ar) == 0 || ranges.ar[len(ranges.ar)-1].endUint != lastIP.UInt64() {
+		ranges.Append(startIP, lastIP)
+	}
+
+	return ranges.ar
+}
+
+// SubnetStats holds statistics about a subnet
+type SubnetStats struct {
+	NumAvailable     uint           `json:"num_available"`
+	LargestAvailable uint           `json:"largest_available"`
+	NumUnavailable   uint           `json:"num_unavailable"`
+	TotalAddresses   uint           `json:"total_addresses"`
+	Usage            float32        `json:"usage"`
+	UsageString      string         `json:"usage_string"`
+	Ranges           []AddressRange `json:"ranges"`
+}
+
+func (server *TestServer) subnetStatistics(subnet TestSubnet, includeRanges bool) SubnetStats {
+	var stats SubnetStats
+	_, ipNet, err := net.ParseCIDR(subnet.CIDR)
+	checkError(err)
+
+	ones, bits := ipNet.Mask.Size()
+	stats.TotalAddresses = (1 << uint(bits-ones)) - 2
+	stats.NumUnavailable = uint(len(subnet.InUseIPAddresses))
+	stats.NumAvailable = stats.TotalAddresses - stats.NumUnavailable
+	stats.Usage = float32(stats.NumUnavailable) / float32(stats.TotalAddresses)
+	stats.UsageString = fmt.Sprintf("%0.1f%%", stats.Usage*100)
+
+	// Calculate stats.LargestAvailable - the largest contiguous block of IP addresses available.
+	unreserved := server.subnetUnreservedIPRanges(subnet)
+	for _, addressRange := range unreserved {
+		if addressRange.NumAddresses > stats.LargestAvailable {
+			stats.LargestAvailable = addressRange.NumAddresses
+		}
+	}
+
+	if includeRanges {
+		stats.Ranges = unreserved
+	}
+
+	return stats
+}
+
+func decodePostedSubnet(subnetJSON io.Reader) CreateSubnet {
+	var postedSubnet CreateSubnet
+	decoder := json.NewDecoder(subnetJSON)
+	err := decoder.Decode(&postedSubnet)
+	checkError(err)
+	if postedSubnet.DNSServers == nil {
+		postedSubnet.DNSServers = []string{}
+	}
+	return postedSubnet
+}
+
+// UpdateSubnet updates an existing subnet in the test server
+func (server *TestServer) UpdateSubnet(subnetJSON io.Reader) TestSubnet {
+	postedSubnet := decodePostedSubnet(subnetJSON)
+	updatedSubnet := subnetFromCreateSubnet(postedSubnet)
+	server.subnets[updatedSubnet.ID] = updatedSubnet
+	return updatedSubnet
+}
+
+// NewSubnet creates a subnet in the test server
+func (server *TestServer) NewSubnet(subnetJSON io.Reader) *TestSubnet {
+	postedSubnet := decodePostedSubnet(subnetJSON)
+	newSubnet := subnetFromCreateSubnet(postedSubnet)
+	newSubnet.ID = server.nextSubnet
+	server.subnets[server.nextSubnet] = newSubnet
+	server.subnetNameToID[newSubnet.Name] = newSubnet.ID
+
+	server.nextSubnet++
+	return &newSubnet
+}
+
+// NodeNetworkInterface represents a network interface attached to a node
+type NodeNetworkInterface struct {
+	Name  string        `json:"name"`
+	Links []NetworkLink `json:"links"`
+}
+
+// Node represents a node
+type Node struct {
+	SystemID   string                 `json:"system_id"`
+	Interfaces []NodeNetworkInterface `json:"interface_set"`
+}
+
+// NetworkLink represents a MAAS network link
+type NetworkLink struct {
+	ID     uint        `json:"id"`
+	Mode   string      `json:"mode"`
+	Subnet *TestSubnet `json:"subnet"`
+}
+
+// SetNodeNetworkLink records that the given node + interface are in subnet
+func (server *TestServer) SetNodeNetworkLink(SystemID string, nodeNetworkInterface NodeNetworkInterface) {
+	for i, ni := range server.nodeMetadata[SystemID].Interfaces {
+		if ni.Name == nodeNetworkInterface.Name {
+			server.nodeMetadata[SystemID].Interfaces[i] = nodeNetworkInterface
+			return
+		}
+	}
+	n := server.nodeMetadata[SystemID]
+	n.Interfaces = append(n.Interfaces, nodeNetworkInterface)
+	server.nodeMetadata[SystemID] = n
+}
+
+// subnetFromCreateSubnet builds a TestSubnet from a posted CreateSubnet
+func subnetFromCreateSubnet(postedSubnet CreateSubnet) TestSubnet {
+	var newSubnet TestSubnet
+	newSubnet.DNSServers = postedSubnet.DNSServers
+	newSubnet.Name = postedSubnet.Name
+	newSubnet.Space = postedSubnet.Space
+	//TODO: newSubnet.VLAN = server.postedSubnetVLAN
+	newSubnet.GatewayIP = postedSubnet.GatewayIP
+	newSubnet.CIDR = postedSubnet.CIDR
+	newSubnet.ID = postedSubnet.ID
+	return newSubnet
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/testservice_utils.go b/switchq/vendor/github.com/juju/gomaasapi/testservice_utils.go
new file mode 100644
index 0000000..8f941f1
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/testservice_utils.go
@@ -0,0 +1,119 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"errors"
+	"net"
+	"net/http"
+	"strconv"
+)
+
+// NameOrIDToID takes a string that contains either an integer ID or the
+// name of a thing. It returns the integer ID contained or mapped to, or an
+// error if the value is unknown or out of range.
+func NameOrIDToID(v string, nameToID map[string]uint, minID, maxID uint) (ID uint, err error) {
+	ID, ok := nameToID[v]
+	if !ok {
+		intID, err := strconv.Atoi(v)
+		if err != nil {
+			return 0, err
+		}
+		ID = uint(intID)
+	}
+
+	if ID < minID || ID > maxID {
+		return 0, errors.New("ID out of range")
+	}
+
+	return ID, nil
+}
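+
+// Illustrative usage (a sketch):
+//
+//	id, err := NameOrIDToID("backbone", map[string]uint{"backbone": 1}, 1, 5) // id == 1
+//	id, err = NameOrIDToID("3", map[string]uint{}, 1, 5)                      // id == 3
+//	_, err = NameOrIDToID("9", map[string]uint{}, 1, 5)                       // err != nil: out of range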
+
+// IP is an enhanced net.IP
+type IP struct {
+	netIP   net.IP
+	Purpose []string
+}
+
+// IPFromNetIP creates a IP from a net.IP.
+func IPFromNetIP(netIP net.IP) IP {
+	var ip IP
+	ip.netIP = netIP
+	return ip
+}
+
+// IPFromString creates a new IP from a string IP address representation
+func IPFromString(v string) IP {
+	return IPFromNetIP(net.ParseIP(v))
+}
+
+// IPFromInt64 creates a new IP from a uint64 IP address representation
+func IPFromInt64(v uint64) IP {
+	var ip IP
+	ip.SetUInt64(v)
+	return ip
+}
+
+// To4 converts the IPv4 address ip to a 4-byte representation. If ip is not
+// an IPv4 address, To4 returns nil.
+func (ip IP) To4() net.IP {
+	return ip.netIP.To4()
+}
+
+// To16 converts the IP address ip to a 16-byte representation. If ip is not
+// an IP address (it is the wrong length), To16 returns nil.
+func (ip IP) To16() net.IP {
+	return ip.netIP.To16()
+}
+
+func (ip IP) String() string {
+	return ip.netIP.String()
+}
+
+// UInt64 returns a uint64 holding the IP address
+func (ip IP) UInt64() uint64 {
+	if len(ip.netIP) == 0 {
+		return uint64(0)
+	}
+
+	if ip.To4() != nil {
+		return uint64(binary.BigEndian.Uint32([]byte(ip.To4())))
+	}
+
+	return binary.BigEndian.Uint64([]byte(ip.To16()))
+}
+
+// SetUInt64 sets the IP value to v
+func (ip *IP) SetUInt64(v uint64) {
+	if len(ip.netIP) == 0 {
+		// If we don't have allocated storage, make an educated guess
+		// at whether the address we received is an IPv4 or IPv6 address.
+		if v == (v & 0x00000000ffffFFFF) {
+			// Guessing IPv4
+			ip.netIP = net.ParseIP("0.0.0.0")
+		} else {
+			ip.netIP = net.ParseIP("2001:4860:0:2001::68")
+		}
+	}
+
+	bb := new(bytes.Buffer)
+	var first int
+	if ip.To4() != nil {
+		binary.Write(bb, binary.BigEndian, uint32(v))
+		first = len(ip.netIP) - 4
+	} else {
+		binary.Write(bb, binary.BigEndian, v)
+	}
+	copy(ip.netIP[first:], bb.Bytes())
+}
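+
+// For example, IPFromString("10.0.0.1").UInt64() == 0x0a000001, and
+// IPFromInt64(0x0a000001).String() == "10.0.0.1", so IPv4 values round-trip
+// through the uint64 form.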
+
+// PrettyJsonWriter marshals thing as indented JSON and writes it to w.
+func PrettyJsonWriter(thing interface{}, w http.ResponseWriter) {
+	var out bytes.Buffer
+	b, err := json.MarshalIndent(thing, "", "  ")
+	checkError(err)
+	out.Write(b)
+	out.WriteTo(w)
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/testservice_vlan.go b/switchq/vendor/github.com/juju/gomaasapi/testservice_vlan.go
new file mode 100644
index 0000000..e81eaaa
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/testservice_vlan.go
@@ -0,0 +1,33 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func getVLANsEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/vlans/", version)
+}
+
+// TestVLAN is the MAAS API VLAN representation
+type TestVLAN struct {
+	Name   string `json:"name"`
+	Fabric string `json:"fabric"`
+	VID    uint   `json:"vid"`
+
+	ResourceURI string `json:"resource_uri"`
+	ID          uint   `json:"id"`
+}
+
+// PostedVLAN is the MAAS API posted VLAN representation
+type PostedVLAN struct {
+	Name string `json:"name"`
+	VID  uint   `json:"vid"`
+}
+
+func vlansHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	//TODO
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/urlparams.go b/switchq/vendor/github.com/juju/gomaasapi/urlparams.go
new file mode 100644
index 0000000..a6bab6e
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/urlparams.go
@@ -0,0 +1,48 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// URLParams wraps url.Values to easily add values, but skipping empty ones.
+type URLParams struct {
+	Values url.Values
+}
+
+// NewURLParams allocates a new URLParams type.
+func NewURLParams() *URLParams {
+	return &URLParams{Values: make(url.Values)}
+}
+
+// MaybeAdd adds the (name, value) pair iff value is not empty.
+func (p *URLParams) MaybeAdd(name, value string) {
+	if value != "" {
+		p.Values.Add(name, value)
+	}
+}
+
+// MaybeAddInt adds the (name, value) pair iff value is not zero.
+func (p *URLParams) MaybeAddInt(name string, value int) {
+	if value != 0 {
+		p.Values.Add(name, fmt.Sprint(value))
+	}
+}
+
+// MaybeAddBool adds the (name, value) pair iff value is true.
+func (p *URLParams) MaybeAddBool(name string, value bool) {
+	if value {
+		p.Values.Add(name, fmt.Sprint(value))
+	}
+}
+
+// MaybeAddMany adds the (name, value) for each value in values iff
+// value is not empty.
+func (p *URLParams) MaybeAddMany(name string, values []string) {
+	for _, value := range values {
+		p.MaybeAdd(name, value)
+	}
+}
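+
+// Illustrative usage (a sketch): building a sparse query string.
+//
+//	params := NewURLParams()
+//	params.MaybeAdd("name", "node-1")
+//	params.MaybeAdd("zone", "")        // skipped: empty value
+//	params.MaybeAddInt("cpu_count", 0) // skipped: zero value
+//	params.Values.Encode()             // returns "name=node-1"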
diff --git a/switchq/vendor/github.com/juju/gomaasapi/util.go b/switchq/vendor/github.com/juju/gomaasapi/util.go
new file mode 100644
index 0000000..3f95ac9
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/util.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"strings"
+)
+
+// JoinURLs joins a base URL and a subpath together.
+// Regardless of whether baseURL ends in a trailing slash (or even multiple
+// trailing slashes), or whether there are any leading slashes at the beginning
+// of path, the two will always be joined together by a single slash.
+func JoinURLs(baseURL, path string) string {
+	return strings.TrimRight(baseURL, "/") + "/" + strings.TrimLeft(path, "/")
+}
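+
+// For example, JoinURLs("http://host/MAAS/", "/api/1.0/nodes/") and
+// JoinURLs("http://host/MAAS", "api/1.0/nodes/") both return
+// "http://host/MAAS/api/1.0/nodes/".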
+
+// EnsureTrailingSlash appends a slash at the end of the given string unless
+// there already is one.
+// This is used to create the kind of normalized URLs that Django expects.
+// (to avoid Django's redirection when a URL does not end with a slash.)
+func EnsureTrailingSlash(URL string) string {
+	if strings.HasSuffix(URL, "/") {
+		return URL
+	}
+	return URL + "/"
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/vlan.go b/switchq/vendor/github.com/juju/gomaasapi/vlan.go
new file mode 100644
index 0000000..c509d42
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/vlan.go
@@ -0,0 +1,154 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type vlan struct {
+	// Add the controller in when we need to do things with the vlan.
+	// controller Controller
+
+	resourceURI string
+
+	id     int
+	name   string
+	fabric string
+
+	vid  int
+	mtu  int
+	dhcp bool
+
+	primaryRack   string
+	secondaryRack string
+}
+
+// ID implements VLAN.
+func (v *vlan) ID() int {
+	return v.id
+}
+
+// Name implements VLAN.
+func (v *vlan) Name() string {
+	return v.name
+}
+
+// Fabric implements VLAN.
+func (v *vlan) Fabric() string {
+	return v.fabric
+}
+
+// VID implements VLAN.
+func (v *vlan) VID() int {
+	return v.vid
+}
+
+// MTU implements VLAN.
+func (v *vlan) MTU() int {
+	return v.mtu
+}
+
+// DHCP implements VLAN.
+func (v *vlan) DHCP() bool {
+	return v.dhcp
+}
+
+// PrimaryRack implements VLAN.
+func (v *vlan) PrimaryRack() string {
+	return v.primaryRack
+}
+
+// SecondaryRack implements VLAN.
+func (v *vlan) SecondaryRack() string {
+	return v.secondaryRack
+}
+
+func readVLANs(controllerVersion version.Number, source interface{}) ([]*vlan, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "vlan base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range vlanDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no vlan read func for version %s", controllerVersion)
+	}
+	readFunc := vlanDeserializationFuncs[deserialisationVersion]
+	return readVLANList(valid, readFunc)
+}
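+
+// For example, with only the 2.0 deserialiser registered, a controller
+// reporting 2.1 still selects vlan_2_0 (the newest func not newer than the
+// controller), while a 1.9 controller yields the "no vlan read func" error.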
+
+func readVLANList(sourceList []interface{}, readFunc vlanDeserializationFunc) ([]*vlan, error) {
+	result := make([]*vlan, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for vlan %d, %T", i, value)
+		}
+		vlan, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "vlan %d", i)
+		}
+		result = append(result, vlan)
+	}
+	return result, nil
+}
+
+type vlanDeserializationFunc func(map[string]interface{}) (*vlan, error)
+
+var vlanDeserializationFuncs = map[version.Number]vlanDeserializationFunc{
+	twoDotOh: vlan_2_0,
+}
+
+func vlan_2_0(source map[string]interface{}) (*vlan, error) {
+	fields := schema.Fields{
+		"id":           schema.ForceInt(),
+		"resource_uri": schema.String(),
+		"name":         schema.OneOf(schema.Nil(""), schema.String()),
+		"fabric":       schema.String(),
+		"vid":          schema.ForceInt(),
+		"mtu":          schema.ForceInt(),
+		"dhcp_on":      schema.Bool(),
+		// racks are not always set.
+		"primary_rack":   schema.OneOf(schema.Nil(""), schema.String()),
+		"secondary_rack": schema.OneOf(schema.Nil(""), schema.String()),
+	}
+	checker := schema.FieldMap(fields, nil)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "vlan 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	// Since the primary and secondary racks are optional, we use the two
+	// part cast assignment. If the cast fails, then we get the default value
+	// we care about, which is the empty string.
+	primary_rack, _ := valid["primary_rack"].(string)
+	secondary_rack, _ := valid["secondary_rack"].(string)
+	name, _ := valid["name"].(string)
+
+	result := &vlan{
+		resourceURI:   valid["resource_uri"].(string),
+		id:            valid["id"].(int),
+		name:          name,
+		fabric:        valid["fabric"].(string),
+		vid:           valid["vid"].(int),
+		mtu:           valid["mtu"].(int),
+		dhcp:          valid["dhcp_on"].(bool),
+		primaryRack:   primary_rack,
+		secondaryRack: secondary_rack,
+	}
+	return result, nil
+}
diff --git a/switchq/vendor/github.com/juju/gomaasapi/zone.go b/switchq/vendor/github.com/juju/gomaasapi/zone.go
new file mode 100644
index 0000000..6f10cb4
--- /dev/null
+++ b/switchq/vendor/github.com/juju/gomaasapi/zone.go
@@ -0,0 +1,97 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type zone struct {
+	// Add the controller in when we need to do things with the zone.
+	// controller Controller
+
+	resourceURI string
+
+	name        string
+	description string
+}
+
+// Name implements Zone.
+func (z *zone) Name() string {
+	return z.name
+}
+
+// Description implements Zone.
+func (z *zone) Description() string {
+	return z.description
+}
+
+func readZones(controllerVersion version.Number, source interface{}) ([]*zone, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "zone base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range zoneDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no zone read func for version %s", controllerVersion)
+	}
+	readFunc := zoneDeserializationFuncs[deserialisationVersion]
+	return readZoneList(valid, readFunc)
+}
+
+// readZoneList expects the values of the sourceList to be string maps.
+func readZoneList(sourceList []interface{}, readFunc zoneDeserializationFunc) ([]*zone, error) {
+	result := make([]*zone, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for zone %d, %T", i, value)
+		}
+		zone, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "zone %d", i)
+		}
+		result = append(result, zone)
+	}
+	return result, nil
+}
+
+type zoneDeserializationFunc func(map[string]interface{}) (*zone, error)
+
+var zoneDeserializationFuncs = map[version.Number]zoneDeserializationFunc{
+	twoDotOh: zone_2_0,
+}
+
+func zone_2_0(source map[string]interface{}) (*zone, error) {
+	fields := schema.Fields{
+		"name":         schema.String(),
+		"description":  schema.String(),
+		"resource_uri": schema.String(),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "zone 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	result := &zone{
+		name:        valid["name"].(string),
+		description: valid["description"].(string),
+		resourceURI: valid["resource_uri"].(string),
+	}
+	return result, nil
+}
diff --git a/switchq/vendor/github.com/juju/loggo/LICENSE b/switchq/vendor/github.com/juju/loggo/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/switchq/vendor/github.com/juju/loggo/Makefile b/switchq/vendor/github.com/juju/loggo/Makefile
new file mode 100644
index 0000000..89afa49
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/Makefile
@@ -0,0 +1,11 @@
+default: check
+
+check:
+	go test
+
+docs:
+	godoc2md github.com/juju/loggo > README.md
+	sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/loggo?status.svg)](https://godoc.org/github.com/juju/loggo)|' README.md 
+
+
+.PHONY: default check docs
diff --git a/switchq/vendor/github.com/juju/loggo/README.md b/switchq/vendor/github.com/juju/loggo/README.md
new file mode 100644
index 0000000..a73a9db
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/README.md
@@ -0,0 +1,711 @@
+
+# loggo
+    import "github.com/juju/loggo"
+
+[![GoDoc](https://godoc.org/github.com/juju/loggo?status.svg)](https://godoc.org/github.com/juju/loggo)
+
+### Module level logging for Go
+This package provides an alternative to the standard library log package.
+
+The actual logging functions never return errors.  If you are logging
+something, you really don't want to have to worry about the logging
+itself failing.
+
+Modules have names that are defined by dotted strings.
+
+
+	"first.second.third"
+
+There is a root module that has the name `""`.  Each module
+(except the root module) has a parent, identified by the part of
+the name without the last dotted value.
+* the parent of "first.second.third" is "first.second"
+* the parent of "first.second" is "first"
+* the parent of "first" is "" (the root module)
+
+Each module can specify its own severity level.  Logging calls that are of
+a lower severity than the module's effective severity level are not written
+out.
+
+Loggers are created using the GetLogger function.
+
+
+	logger := loggo.GetLogger("foo.bar")
+
+By default there is one writer registered, which will write to Stderr,
+and the root module, which will only emit warnings and above.
+If you want to keep the default writer but have it emit all logging
+levels, raise the root module's level:
+
+
+	loggo.GetLogger("").SetLogLevel(loggo.TRACE)
+
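+For example, a minimal sketch of logging through a module logger (the
+module name and messages are illustrative):
+
+``` go
+logger := loggo.GetLogger("foo.bar")
+logger.Warningf("%d retries failed", 5)    // emitted under the default WARNING root level
+logger.Debugf("skipped unless a lower level is configured")
+```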
+
+
+
+## Constants
+``` go
+const DefaultWriterName = "default"
+```
+DefaultWriterName is the name of the default writer for
+a Context.
+
+
+## Variables
+``` go
+var (
+    // SeverityColor defines the colors for the levels output by the ColorWriter.
+    SeverityColor = map[Level]*ansiterm.Context{
+        TRACE:   ansiterm.Foreground(ansiterm.Default),
+        DEBUG:   ansiterm.Foreground(ansiterm.Green),
+        INFO:    ansiterm.Foreground(ansiterm.BrightBlue),
+        WARNING: ansiterm.Foreground(ansiterm.Yellow),
+        ERROR:   ansiterm.Foreground(ansiterm.BrightRed),
+        CRITICAL: &ansiterm.Context{
+            Foreground: ansiterm.White,
+            Background: ansiterm.Red,
+        },
+    }
+    // LocationColor defines the colors for the location output by the ColorWriter.
+    LocationColor = ansiterm.Foreground(ansiterm.BrightBlue)
+)
+```
+``` go
+var TimeFormat = initTimeFormat()
+```
+TimeFormat is the time format used for the default writer.
+This can be set with the environment variable LOGGO_TIME_FORMAT.
+
+
+## func ConfigureLoggers
+``` go
+func ConfigureLoggers(specification string) error
+```
+ConfigureLoggers configures loggers according to the given string
+specification, which specifies a set of modules and their associated
+logging levels.  Loggers are colon- or semicolon-separated; each
+module is specified as <modulename>=<level>.  White space outside of
+module names and levels is ignored.  The root module is specified
+with the name "<root>".
+
+An example specification:
+
+
+	`<root>=ERROR; foo.bar=WARNING`
+
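+For example, applying such a specification at startup (a sketch; the
+module name is illustrative):
+
+``` go
+if err := loggo.ConfigureLoggers("<root>=ERROR; foo.bar=WARNING"); err != nil {
+	panic(err) // the specification was malformed or used an unknown level
+}
+```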
+
+## func DefaultFormatter
+``` go
+func DefaultFormatter(entry Entry) string
+```
+DefaultFormatter returns the parameters separated by spaces except for
+filename and line which are separated by a colon.  The timestamp is shown
+to second resolution in UTC. For example:
+
+
+	2016-07-02 15:04:05
+
+
+## func LoggerInfo
+``` go
+func LoggerInfo() string
+```
+LoggerInfo returns information about the configured loggers and their
+logging levels. The information is returned in the format expected by
+ConfigureLoggers. Loggers with UNSPECIFIED level will not
+be included.
+
+
+## func RegisterWriter
+``` go
+func RegisterWriter(name string, writer Writer) error
+```
+RegisterWriter adds the writer to the list of writers in the DefaultContext
+that get notified when logging.  If there is already a registered writer
+with that name, an error is returned.
+
+
+## func ResetLogging
+``` go
+func ResetLogging()
+```
+ResetLogging iterates through the known modules and sets the levels of all
+to UNSPECIFIED, except for <root> which is set to WARNING. The call also
+removes all writers from the DefaultContext.
+
+
+## func ResetWriters
+``` go
+func ResetWriters()
+```
+ResetWriters removes all writers from the DefaultContext.
+
+
+
+## type Config
+``` go
+type Config map[string]Level
+```
+Config is a mapping of logger module names to logging severity levels.
+
+
+
+
+
+
+
+
+
+### func ParseConfigString
+``` go
+func ParseConfigString(specification string) (Config, error)
+```
+ParseConfigString parses a logger configuration string into a map of logger
+names and their associated log level. This method is provided to allow
+other programs to pre-validate a configuration string rather than just
+calling ConfigureLoggers.
+
+Logging modules are colon- or semicolon-separated; each module is specified
+as <modulename>=<level>.  White space outside of module names and levels is
+ignored.  The root module is specified with the name "<root>".
+
+As a special case, a log level may be specified on its own.
+This is equivalent to specifying the level of the root module,
+so "DEBUG" is equivalent to `<root>=DEBUG`
+
+An example specification:
+
+
+	`<root>=ERROR; foo.bar=WARNING`
+
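+For example, validating a specification before applying it (a sketch,
+written as the body of a function returning error):
+
+``` go
+cfg, err := loggo.ParseConfigString("<root>=ERROR; foo.bar=WARNING")
+if err != nil {
+	return err // reject the configuration without touching any logger
+}
+loggo.DefaultContext().ApplyConfig(cfg)
+```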
+
+
+
+### func (Config) String
+``` go
+func (c Config) String() string
+```
+String returns a logger configuration string that may be parsed
+using ParseConfigString.
+
+
+
+## type Context
+``` go
+type Context struct {
+    // contains filtered or unexported fields
+}
+```
+Context produces loggers for a hierarchy of modules. The context holds
+a collection of hierarchical loggers and their writers.
+
+
+
+
+
+
+
+
+
+### func DefaultContext
+``` go
+func DefaultContext() *Context
+```
+DefaultContext returns the global default logging context.
+
+
+### func NewContext
+``` go
+func NewContext(rootLevel Level) *Context
+```
+NewContext returns a new Context with no writers set.
+If the root level is UNSPECIFIED, WARNING is used.
+
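+A minimal sketch of an isolated context (the writer and module names are
+illustrative and "os" is assumed imported):
+
+``` go
+ctx := loggo.NewContext(loggo.INFO)
+ctx.AddWriter("stderr", loggo.NewSimpleWriter(os.Stderr, loggo.DefaultFormatter))
+logger := ctx.GetLogger("myapp.db")
+logger.Infof("connected") // INFO meets the context's root level
+```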
+
+
+
+### func (\*Context) AddWriter
+``` go
+func (c *Context) AddWriter(name string, writer Writer) error
+```
+AddWriter adds a writer to the list to be called for each logging call.
+The name cannot be empty, and the writer cannot be nil. If a writer
+already exists with the specified name, an error is returned.
+
+
+
+### func (\*Context) ApplyConfig
+``` go
+func (c *Context) ApplyConfig(config Config)
+```
+ApplyConfig configures the logging modules according to the provided config.
+
+
+
+### func (\*Context) CompleteConfig
+``` go
+func (c *Context) CompleteConfig() Config
+```
+CompleteConfig returns all the loggers and their defined levels,
+even if that level is UNSPECIFIED.
+
+
+
+### func (\*Context) Config
+``` go
+func (c *Context) Config() Config
+```
+Config returns the current configuration of the Loggers. Loggers
+with UNSPECIFIED level will not be included.
+
+
+
+### func (\*Context) GetLogger
+``` go
+func (c *Context) GetLogger(name string) Logger
+```
+GetLogger returns a Logger for the given module name, creating it and
+its parents if necessary.
+
+
+
+### func (\*Context) RemoveWriter
+``` go
+func (c *Context) RemoveWriter(name string) (Writer, error)
+```
+RemoveWriter removes the specified writer. If a writer is not found with
+the specified name an error is returned. The writer that was removed is also
+returned.
+
+
+
+### func (\*Context) ReplaceWriter
+``` go
+func (c *Context) ReplaceWriter(name string, writer Writer) (Writer, error)
+```
+ReplaceWriter is a convenience method that does the equivalent of RemoveWriter
+followed by AddWriter with the same name. The replaced writer is returned.
+
+
+
+### func (\*Context) ResetLoggerLevels
+``` go
+func (c *Context) ResetLoggerLevels()
+```
+ResetLoggerLevels iterates through the known logging modules and sets the
+levels of all to UNSPECIFIED, except for <root> which is set to WARNING.
+
+
+
+### func (\*Context) ResetWriters
+``` go
+func (c *Context) ResetWriters()
+```
+ResetWriters is generally only used in testing and removes all the writers.
+
+
+
+## type Entry
+``` go
+type Entry struct {
+    // Level is the severity of the log message.
+    Level Level
+    // Module is the dotted module name from the logger.
+    Module string
+    // Filename is the full path of the file that logged the message.
+    Filename string
+    // Line is the line number of the Filename.
+    Line int
+    // Timestamp is when the log message was created.
+    Timestamp time.Time
+    // Message is the formatted string from the log call.
+    Message string
+}
+```
+Entry represents a single log message.
+
+
+
+
+
+
+
+
+
+
+
+## type Level
+``` go
+type Level uint32
+```
+Level holds a severity level.
+
+
+
+``` go
+const (
+    UNSPECIFIED Level = iota
+    TRACE
+    DEBUG
+    INFO
+    WARNING
+    ERROR
+    CRITICAL
+)
+```
+The severity levels. Higher values are considered more
+important.
+
+
+
+
+
+
+
+### func ParseLevel
+``` go
+func ParseLevel(level string) (Level, bool)
+```
+ParseLevel converts a string representation of a logging level to a
+Level. It returns the level and whether it was valid or not.
+
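+For example, reading a level from the environment (a sketch; the
+MY_LOG_LEVEL variable is hypothetical and "os" is assumed imported):
+
+``` go
+level, ok := loggo.ParseLevel(os.Getenv("MY_LOG_LEVEL")) // hypothetical variable
+if !ok {
+	level = loggo.WARNING // fall back for an unknown or empty value
+}
+loggo.GetLogger("").SetLogLevel(level)
+```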
+
+
+
+### func (Level) Short
+``` go
+func (level Level) Short() string
+```
+Short returns a five character string to use in
+aligned logging output.
+
+
+
+### func (Level) String
+``` go
+func (level Level) String() string
+```
+String implements Stringer.
+
+
+
+## type Logger
+``` go
+type Logger struct {
+    // contains filtered or unexported fields
+}
+```
+A Logger represents a logging module. It has an associated logging
+level which can be changed; messages of lesser severity will
+be dropped. Loggers have a hierarchical relationship - see
+the package documentation.
+
+The zero Logger value is usable - any messages logged
+to it will be sent to the root Logger.
+
+
+
+
+
+
+
+
+
+### func GetLogger
+``` go
+func GetLogger(name string) Logger
+```
+GetLogger returns a Logger for the given module name,
+creating it and its parents if necessary.
+
+
+
+
+### func (Logger) Criticalf
+``` go
+func (logger Logger) Criticalf(message string, args ...interface{})
+```
+Criticalf logs the printf-formatted message at critical level.
+
+
+
+### func (Logger) Debugf
+``` go
+func (logger Logger) Debugf(message string, args ...interface{})
+```
+Debugf logs the printf-formatted message at debug level.
+
+
+
+### func (Logger) EffectiveLogLevel
+``` go
+func (logger Logger) EffectiveLogLevel() Level
+```
+EffectiveLogLevel returns the effective min log level of
+the receiver - that is, messages with a lesser severity
+level will be discarded.
+
+If the log level of the receiver is unspecified,
+it will be taken from the effective log level of its
+parent.
+
+
+
+### func (Logger) Errorf
+``` go
+func (logger Logger) Errorf(message string, args ...interface{})
+```
+Errorf logs the printf-formatted message at error level.
+
+
+
+### func (Logger) Infof
+``` go
+func (logger Logger) Infof(message string, args ...interface{})
+```
+Infof logs the printf-formatted message at info level.
+
+
+
+### func (Logger) IsDebugEnabled
+``` go
+func (logger Logger) IsDebugEnabled() bool
+```
+IsDebugEnabled returns whether logging is enabled
+at debug level.
+
+
+
+### func (Logger) IsErrorEnabled
+``` go
+func (logger Logger) IsErrorEnabled() bool
+```
+IsErrorEnabled returns whether logging is enabled
+at error level.
+
+
+
+### func (Logger) IsInfoEnabled
+``` go
+func (logger Logger) IsInfoEnabled() bool
+```
+IsInfoEnabled returns whether logging is enabled
+at info level.
+
+
+
+### func (Logger) IsLevelEnabled
+``` go
+func (logger Logger) IsLevelEnabled(level Level) bool
+```
+IsLevelEnabled returns whether logging is enabled
+for the given log level.
+
+
+
+### func (Logger) IsTraceEnabled
+``` go
+func (logger Logger) IsTraceEnabled() bool
+```
+IsTraceEnabled returns whether logging is enabled
+at trace level.
+
+
+
+### func (Logger) IsWarningEnabled
+``` go
+func (logger Logger) IsWarningEnabled() bool
+```
+IsWarningEnabled returns whether logging is enabled
+at warning level.
+
+
+
+### func (Logger) LogCallf
+``` go
+func (logger Logger) LogCallf(calldepth int, level Level, message string, args ...interface{})
+```
+LogCallf logs a printf-formatted message at the given level.
+The location of the call is indicated by the calldepth argument.
+A calldepth of 1 means the function that called this function.
+A message will be discarded if level is less than the
+effective log level of the logger.
+Note that the writers may also filter out messages that
+are less than their registered minimum severity level.
+
+
+
+### func (Logger) LogLevel
+``` go
+func (logger Logger) LogLevel() Level
+```
+LogLevel returns the configured min log level of the logger.
+
+
+
+### func (Logger) Logf
+``` go
+func (logger Logger) Logf(level Level, message string, args ...interface{})
+```
+Logf logs a printf-formatted message at the given level.
+A message will be discarded if level is less than the
+effective log level of the logger.
+Note that the writers may also filter out messages that
+are less than their registered minimum severity level.
+
+
+
+### func (Logger) Name
+``` go
+func (logger Logger) Name() string
+```
+Name returns the logger's module name.
+
+
+
+### func (Logger) SetLogLevel
+``` go
+func (logger Logger) SetLogLevel(level Level)
+```
+SetLogLevel sets the severity level of the given logger.
+The root logger cannot be set to UNSPECIFIED level.
+See EffectiveLogLevel for how this affects the
+actual messages logged.
+
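+For example, quieting a chatty subsystem at runtime (a sketch; the module
+name is illustrative):
+
+``` go
+loggo.GetLogger("foo.bar").SetLogLevel(loggo.ERROR)
+```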
+
+
+### func (Logger) Tracef
+``` go
+func (logger Logger) Tracef(message string, args ...interface{})
+```
+Tracef logs the printf-formatted message at trace level.
+
+
+
+### func (Logger) Warningf
+``` go
+func (logger Logger) Warningf(message string, args ...interface{})
+```
+Warningf logs the printf-formatted message at warning level.
+
+
+
+## type TestWriter
+``` go
+type TestWriter struct {
+    // contains filtered or unexported fields
+}
+```
+TestWriter is a useful Writer for testing purposes.  Each component of the
+logging message is stored in the Log array.
+
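+A sketch of capturing log output in a test (the writer name is
+illustrative and "fmt" is assumed imported):
+
+``` go
+writer := &loggo.TestWriter{}
+loggo.RegisterWriter("test", writer)
+loggo.GetLogger("foo").Warningf("boom")
+for _, entry := range writer.Log() { // copies of the captured entries
+	fmt.Println(entry.Module, entry.Message)
+}
+```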
+
+
+
+
+
+
+
+
+
+
+### func (\*TestWriter) Clear
+``` go
+func (writer *TestWriter) Clear()
+```
+Clear removes any saved log messages.
+
+
+
+### func (\*TestWriter) Log
+``` go
+func (writer *TestWriter) Log() []Entry
+```
+Log returns a copy of the current logged values.
+
+
+
+### func (\*TestWriter) Write
+``` go
+func (writer *TestWriter) Write(entry Entry)
+```
+Write stores the entry, with its Filename reduced to the base name, in the list returned by Log.
+
+
+
+## type Writer
+``` go
+type Writer interface {
+    // Write writes a message to the Writer with the given level and module
+    // name. The filename and line hold the file name and line number of the
+    // code that is generating the log message; the time stamp holds the time
+    // the log message was generated, and message holds the log message
+    // itself.
+    Write(entry Entry)
+}
+```
+Writer is implemented by any recipient of log messages.
+
+
+
+
+
+
+
+
+
+### func NewColorWriter
+``` go
+func NewColorWriter(writer io.Writer) Writer
+```
+NewColorWriter will write out colored severity levels if the writer is
+outputting to a terminal.
+
+
+### func NewMinimumLevelWriter
+``` go
+func NewMinimumLevelWriter(writer Writer, minLevel Level) Writer
+```
+NewMinimumLevelWriter returns a Writer that will only pass on the Write calls
+to the provided writer if the log level is at or above the specified
+minimum level.
+
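+For example, letting only errors reach a particular writer regardless of
+logger levels (a sketch; the writer name is illustrative and "os" is
+assumed imported):
+
+``` go
+base := loggo.NewSimpleWriter(os.Stderr, loggo.DefaultFormatter)
+loggo.RegisterWriter("errors-only", loggo.NewMinimumLevelWriter(base, loggo.ERROR))
+```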
+
+### func NewSimpleWriter
+``` go
+func NewSimpleWriter(writer io.Writer, formatter func(entry Entry) string) Writer
+```
+NewSimpleWriter returns a new writer that writes log messages to the given
+io.Writer, formatting the messages with the given formatter.
+
+
+### func RemoveWriter
+``` go
+func RemoveWriter(name string) (Writer, error)
+```
+RemoveWriter removes the Writer identified by 'name' and returns it.
+If the Writer is not found, an error is returned.
+
+
+### func ReplaceDefaultWriter
+``` go
+func ReplaceDefaultWriter(writer Writer) (Writer, error)
+```
+ReplaceDefaultWriter is a convenience method that does the equivalent of
+RemoveWriter and then RegisterWriter with the name "default".  The previous
+default writer, if any, is returned.
+
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
diff --git a/switchq/vendor/github.com/juju/loggo/config.go b/switchq/vendor/github.com/juju/loggo/config.go
new file mode 100644
index 0000000..1b3eaa5
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/config.go
@@ -0,0 +1,96 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Config is a mapping of logger module names to logging severity levels.
+type Config map[string]Level
+
+// String returns a logger configuration string that may be parsed
+// using ParseConfigString.
+func (c Config) String() string {
+	if c == nil {
+		return ""
+	}
+	// output in alphabetical order.
+	names := []string{}
+	for name := range c {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	var entries []string
+	for _, name := range names {
+		level := c[name]
+		if name == "" {
+			name = rootString
+		}
+		entry := fmt.Sprintf("%s=%s", name, level)
+		entries = append(entries, entry)
+	}
+	return strings.Join(entries, ";")
+}
+
+func parseConfigValue(value string) (string, Level, error) {
+	pair := strings.SplitN(value, "=", 2)
+	if len(pair) < 2 {
+		return "", UNSPECIFIED, fmt.Errorf("config value expected '=', found %q", value)
+	}
+	name := strings.TrimSpace(pair[0])
+	if name == "" {
+		return "", UNSPECIFIED, fmt.Errorf("config value %q has missing module name", value)
+	}
+
+	levelStr := strings.TrimSpace(pair[1])
+	level, ok := ParseLevel(levelStr)
+	if !ok {
+		return "", UNSPECIFIED, fmt.Errorf("unknown severity level %q", levelStr)
+	}
+	if name == rootString {
+		name = ""
+	}
+	return name, level, nil
+}
+
+// ParseConfigString parses a logger configuration string into a map of logger
+// names and their associated log level. This method is provided to allow
+// other programs to pre-validate a configuration string rather than just
+// calling ConfigureLoggers.
+//
+// Logging modules are colon- or semicolon-separated; each module is specified
+// as <modulename>=<level>.  White space outside of module names and levels is
+// ignored.  The root module is specified with the name "<root>".
+//
+// As a special case, a log level may be specified on its own.
+// This is equivalent to specifying the level of the root module,
+// so "DEBUG" is equivalent to `<root>=DEBUG`
+//
+// An example specification:
+//	`<root>=ERROR; foo.bar=WARNING`
+func ParseConfigString(specification string) (Config, error) {
+	specification = strings.TrimSpace(specification)
+	if specification == "" {
+		return nil, nil
+	}
+	cfg := make(Config)
+	if level, ok := ParseLevel(specification); ok {
+		cfg[""] = level
+		return cfg, nil
+	}
+
+	values := strings.FieldsFunc(specification, func(r rune) bool { return r == ';' || r == ':' })
+	for _, value := range values {
+		name, level, err := parseConfigValue(value)
+		if err != nil {
+			return nil, err
+		}
+		cfg[name] = level
+	}
+	return cfg, nil
+}
diff --git a/switchq/vendor/github.com/juju/loggo/context.go b/switchq/vendor/github.com/juju/loggo/context.go
new file mode 100644
index 0000000..f5739d9
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/context.go
@@ -0,0 +1,198 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+// Context produces loggers for a hierarchy of modules. The context holds
+// a collection of hierarchical loggers and their writers.
+type Context struct {
+	root *module
+
+	// Perhaps have one mutex?
+	modulesMutex sync.Mutex
+	modules      map[string]*module
+
+	writersMutex sync.Mutex
+	writers      map[string]Writer
+
+	// writeMuxtex is used to serialise write operations.
+	writeMutex sync.Mutex
+}
+
+// NewContext returns a new Context with no writers set.
+// If the root level is UNSPECIFIED, WARNING is used.
+func NewContext(rootLevel Level) *Context {
+	if rootLevel < TRACE || rootLevel > CRITICAL {
+		rootLevel = WARNING
+	}
+	context := &Context{
+		modules: make(map[string]*module),
+		writers: make(map[string]Writer),
+	}
+	context.root = &module{
+		level:   rootLevel,
+		context: context,
+	}
+	context.modules[""] = context.root
+	return context
+}
+
+// GetLogger returns a Logger for the given module name, creating it and
+// its parents if necessary.
+func (c *Context) GetLogger(name string) Logger {
+	name = strings.TrimSpace(strings.ToLower(name))
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+	return Logger{c.getLoggerModule(name)}
+}
+
+func (c *Context) getLoggerModule(name string) *module {
+	if name == rootString {
+		name = ""
+	}
+	impl, found := c.modules[name]
+	if found {
+		return impl
+	}
+	parentName := ""
+	if i := strings.LastIndex(name, "."); i >= 0 {
+		parentName = name[0:i]
+	}
+	parent := c.getLoggerModule(parentName)
+	impl = &module{name, UNSPECIFIED, parent, c}
+	c.modules[name] = impl
+	return impl
+}
+
+// Config returns the current configuration of the Loggers. Loggers
+// with UNSPECIFIED level will not be included.
+func (c *Context) Config() Config {
+	result := make(Config)
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+
+	for name, module := range c.modules {
+		if module.level != UNSPECIFIED {
+			result[name] = module.level
+		}
+	}
+	return result
+}
+
+// CompleteConfig returns all the loggers and their defined levels,
+// even if that level is UNSPECIFIED.
+func (c *Context) CompleteConfig() Config {
+	result := make(Config)
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+
+	for name, module := range c.modules {
+		result[name] = module.level
+	}
+	return result
+}
+
+// ApplyConfig configures the logging modules according to the provided config.
+func (c *Context) ApplyConfig(config Config) {
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+	for name, level := range config {
+		module := c.getLoggerModule(name)
+		module.setLevel(level)
+	}
+}
+
+// ResetLoggerLevels iterates through the known logging modules and sets the
+// levels of all to UNSPECIFIED, except for <root> which is set to WARNING.
+func (c *Context) ResetLoggerLevels() {
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+	// Setting the root module to UNSPECIFIED will set it to WARNING.
+	for _, module := range c.modules {
+		module.setLevel(UNSPECIFIED)
+	}
+}
+
+func (c *Context) write(entry Entry) {
+	c.writeMutex.Lock()
+	defer c.writeMutex.Unlock()
+	for _, writer := range c.getWriters() {
+		writer.Write(entry)
+	}
+}
+
+func (c *Context) getWriters() []Writer {
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	var result []Writer
+	for _, writer := range c.writers {
+		result = append(result, writer)
+	}
+	return result
+}
+
+// AddWriter adds a writer to the list to be called for each logging call.
+// The name cannot be empty, and the writer cannot be nil. If a writer
+// already exists with the specified name, an error is returned.
+func (c *Context) AddWriter(name string, writer Writer) error {
+	if name == "" {
+		return fmt.Errorf("name cannot be empty")
+	}
+	if writer == nil {
+		return fmt.Errorf("writer cannot be nil")
+	}
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	if _, found := c.writers[name]; found {
+		return fmt.Errorf("context already has a writer named %q", name)
+	}
+	c.writers[name] = writer
+	return nil
+}
+
+// RemoveWriter removes the specified writer. If a writer is not found with
+// the specified name an error is returned. The writer that was removed is also
+// returned.
+func (c *Context) RemoveWriter(name string) (Writer, error) {
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	reg, found := c.writers[name]
+	if !found {
+		return nil, fmt.Errorf("context has no writer named %q", name)
+	}
+	delete(c.writers, name)
+	return reg, nil
+}
+
+// ReplaceWriter is a convenience method that does the equivalent of RemoveWriter
+// followed by AddWriter with the same name. The replaced writer is returned.
+func (c *Context) ReplaceWriter(name string, writer Writer) (Writer, error) {
+	if name == "" {
+		return nil, fmt.Errorf("name cannot be empty")
+	}
+	if writer == nil {
+		return nil, fmt.Errorf("writer cannot be nil")
+	}
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	reg, found := c.writers[name]
+	if !found {
+		return nil, fmt.Errorf("context has no writer named %q", name)
+	}
+	oldWriter := reg
+	c.writers[name] = writer
+	return oldWriter, nil
+}
+
+// ResetWriters is generally only used in testing and removes all the writers.
+func (c *Context) ResetWriters() {
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	c.writers = make(map[string]Writer)
+}
diff --git a/switchq/vendor/github.com/juju/loggo/dependencies.tsv b/switchq/vendor/github.com/juju/loggo/dependencies.tsv
new file mode 100644
index 0000000..20daf32
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/dependencies.tsv
@@ -0,0 +1,5 @@
+github.com/juju/ansiterm	git	c368f42cb4b32a70389cded05c7345d9ccdce889	2016-08-17T02:52:20Z
+github.com/lunixbochs/vtclean	git	4fbf7632a2c6d3fbdb9931439bdbbeded02cbe36	2016-01-25T03:51:06Z
+github.com/mattn/go-colorable	git	ed8eb9e318d7a84ce5915b495b7d35e0cfe7b5a8	2016-07-31T23:54:17Z
+github.com/mattn/go-isatty	git	66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8	2016-08-06T12:27:52Z
+gopkg.in/check.v1	git	4f90aeace3a26ad7021961c297b22c42160c7b25	2016-01-05T16:49:36Z
diff --git a/switchq/vendor/github.com/juju/loggo/doc.go b/switchq/vendor/github.com/juju/loggo/doc.go
new file mode 100644
index 0000000..bb19cd5
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/doc.go
@@ -0,0 +1,42 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+/*
+[godoc-link-here]
+
+Module level logging for Go
+
+This package provides an alternative to the standard library log package.
+
+The actual logging functions never return errors.  If you are logging
+something, you really don't want to have to worry about the logging
+itself failing.
+
+Modules have names that are defined by dotted strings.
+	"first.second.third"
+
+There is a root module that has the name `""`.  Each module
+(except the root module) has a parent, identified by the part of
+the name without the last dotted value.
+* the parent of "first.second.third" is "first.second"
+* the parent of "first.second" is "first"
+* the parent of "first" is "" (the root module)
+
+Each module can specify its own severity level.  Logging calls that are of
+a lower severity than the module's effective severity level are not written
+out.
+
+Loggers are created using the GetLogger function.
+	logger := loggo.GetLogger("foo.bar")
+
+By default there is one writer registered, which will write to Stderr,
+and the root module, which will only emit warnings and above.
+If you want to keep the default writer but have it emit all logging
+levels, raise the root module's level:
+
+	loggo.GetLogger("").SetLogLevel(loggo.TRACE)
+
+*/
+package loggo
diff --git a/switchq/vendor/github.com/juju/loggo/entry.go b/switchq/vendor/github.com/juju/loggo/entry.go
new file mode 100644
index 0000000..b61aa54
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/entry.go
@@ -0,0 +1,22 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import "time"
+
+// Entry represents a single log message.
+type Entry struct {
+	// Level is the severity of the log message.
+	Level Level
+	// Module is the dotted module name from the logger.
+	Module string
+	// Filename is the full path of the file that logged the message.
+	Filename string
+	// Line is the line number of the Filename.
+	Line int
+	// Timestamp is when the log message was created.
+	Timestamp time.Time
+	// Message is the formatted string from the log call.
+	Message string
+}
diff --git a/switchq/vendor/github.com/juju/loggo/formatter.go b/switchq/vendor/github.com/juju/loggo/formatter.go
new file mode 100644
index 0000000..ef8aa7a
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/formatter.go
@@ -0,0 +1,38 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"time"
+)
+
+// DefaultFormatter returns the parameters separated by spaces except for
+// filename and line which are separated by a colon.  The timestamp is shown
+// to second resolution in UTC. For example:
+//   2016-07-02 15:04:05
+func DefaultFormatter(entry Entry) string {
+	ts := entry.Timestamp.In(time.UTC).Format("2006-01-02 15:04:05")
+	// Just get the basename from the filename
+	filename := filepath.Base(entry.Filename)
+	return fmt.Sprintf("%s %s %s %s:%d %s", ts, entry.Level, entry.Module, filename, entry.Line, entry.Message)
+}
+
+// TimeFormat is the time format used for the default writer.
+// This can be set with the environment variable LOGGO_TIME_FORMAT.
+var TimeFormat = initTimeFormat()
+
+func initTimeFormat() string {
+	format := os.Getenv("LOGGO_TIME_FORMAT")
+	if format != "" {
+		return format
+	}
+	return "15:04:05"
+}
+
+func formatTime(ts time.Time) string {
+	return ts.Format(TimeFormat)
+}
diff --git a/switchq/vendor/github.com/juju/loggo/global.go b/switchq/vendor/github.com/juju/loggo/global.go
new file mode 100644
index 0000000..7cf95ca
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/global.go
@@ -0,0 +1,85 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+var (
+	defaultContext = newDefaultContext()
+)
+
+func newDefaultContext() *Context {
+	ctx := NewContext(WARNING)
+	ctx.AddWriter(DefaultWriterName, defaultWriter())
+	return ctx
+}
+
+// DefaultContext returns the global default logging context.
+func DefaultContext() *Context {
+	return defaultContext
+}
+
+// LoggerInfo returns information about the configured loggers and their
+// logging levels. The information is returned in the format expected by
+// ConfigureLoggers. Loggers with UNSPECIFIED level will not
+// be included.
+func LoggerInfo() string {
+	return defaultContext.Config().String()
+}
+
+// GetLogger returns a Logger for the given module name,
+// creating it and its parents if necessary.
+func GetLogger(name string) Logger {
+	return defaultContext.GetLogger(name)
+}
+
+// ResetLogging iterates through the known modules and sets the levels of all
+// to UNSPECIFIED, except for <root> which is set to WARNING. The call also
+// removes all writers from the DefaultContext.
+func ResetLogging() {
+	defaultContext.ResetLoggerLevels()
+	defaultContext.ResetWriters()
+}
+
+// ResetWriters removes all writers from the DefaultContext.
+func ResetWriters() {
+	defaultContext.ResetWriters()
+}
+
+// ReplaceDefaultWriter is a convenience method that does the equivalent of
+// RemoveWriter and then RegisterWriter with the name "default".  The previous
+// default writer, if any, is returned.
+func ReplaceDefaultWriter(writer Writer) (Writer, error) {
+	return defaultContext.ReplaceWriter(DefaultWriterName, writer)
+}
+
+// RegisterWriter adds the writer to the list of writers in the DefaultContext
+// that get notified when logging.  If there is already a registered writer
+// with that name, an error is returned.
+func RegisterWriter(name string, writer Writer) error {
+	return defaultContext.AddWriter(name, writer)
+}
+
+// RemoveWriter removes the Writer identified by 'name' and returns it.
+// If the Writer is not found, an error is returned.
+func RemoveWriter(name string) (Writer, error) {
+	return defaultContext.RemoveWriter(name)
+}
+
+// ConfigureLoggers configures loggers according to the given string
+// specification, which specifies a set of modules and their associated
+// logging levels.  Loggers are colon- or semicolon-separated; each
+// module is specified as <modulename>=<level>.  White space outside of
+// module names and levels is ignored.  The root module is specified
+// with the name "<root>".
+//
+// An example specification:
+//	`<root>=ERROR; foo.bar=WARNING`
+func ConfigureLoggers(specification string) error {
+	config, err := ParseConfigString(specification)
+	if err != nil {
+		return err
+	}
+	defaultContext.ApplyConfig(config)
+	return nil
+}
diff --git a/switchq/vendor/github.com/juju/loggo/level.go b/switchq/vendor/github.com/juju/loggo/level.go
new file mode 100644
index 0000000..f6a5c4f
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/level.go
@@ -0,0 +1,102 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"strings"
+	"sync/atomic"
+)
+
+// The severity levels. Higher values are considered more
+// important.
+const (
+	UNSPECIFIED Level = iota
+	TRACE
+	DEBUG
+	INFO
+	WARNING
+	ERROR
+	CRITICAL
+)
+
+// Level holds a severity level.
+type Level uint32
+
+// ParseLevel converts a string representation of a logging level to a
+// Level. It returns the level and whether it was valid or not.
+func ParseLevel(level string) (Level, bool) {
+	level = strings.ToUpper(level)
+	switch level {
+	case "UNSPECIFIED":
+		return UNSPECIFIED, true
+	case "TRACE":
+		return TRACE, true
+	case "DEBUG":
+		return DEBUG, true
+	case "INFO":
+		return INFO, true
+	case "WARN", "WARNING":
+		return WARNING, true
+	case "ERROR":
+		return ERROR, true
+	case "CRITICAL":
+		return CRITICAL, true
+	default:
+		return UNSPECIFIED, false
+	}
+}
+
+// String implements Stringer.
+func (level Level) String() string {
+	switch level {
+	case UNSPECIFIED:
+		return "UNSPECIFIED"
+	case TRACE:
+		return "TRACE"
+	case DEBUG:
+		return "DEBUG"
+	case INFO:
+		return "INFO"
+	case WARNING:
+		return "WARNING"
+	case ERROR:
+		return "ERROR"
+	case CRITICAL:
+		return "CRITICAL"
+	default:
+		return "<unknown>"
+	}
+}
+
+// Short returns a five character string to use in
+// aligned logging output.
+func (level Level) Short() string {
+	switch level {
+	case TRACE:
+		return "TRACE"
+	case DEBUG:
+		return "DEBUG"
+	case INFO:
+		return "INFO "
+	case WARNING:
+		return "WARN "
+	case ERROR:
+		return "ERROR"
+	case CRITICAL:
+		return "CRITC"
+	default:
+		return "     "
+	}
+}
+
+// get atomically gets the value of the given level.
+func (level *Level) get() Level {
+	return Level(atomic.LoadUint32((*uint32)(level)))
+}
+
+// set atomically sets the value of the receiver
+// to the given level.
+func (level *Level) set(newLevel Level) {
+	atomic.StoreUint32((*uint32)(level), uint32(newLevel))
+}
diff --git a/switchq/vendor/github.com/juju/loggo/logger.go b/switchq/vendor/github.com/juju/loggo/logger.go
new file mode 100644
index 0000000..fbdfd9e
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/logger.go
@@ -0,0 +1,176 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"runtime"
+	"time"
+)
+
+// A Logger represents a logging module. It has an associated logging
+// level which can be changed; messages of lesser severity will
+// be dropped. Loggers have a hierarchical relationship - see
+// the package documentation.
+//
+// The zero Logger value is usable - any messages logged
+// to it will be sent to the root Logger.
+type Logger struct {
+	impl *module
+}
+
+func (logger Logger) getModule() *module {
+	if logger.impl == nil {
+		return defaultContext.root
+	}
+	return logger.impl
+}
+
+// Name returns the logger's module name.
+func (logger Logger) Name() string {
+	return logger.getModule().Name()
+}
+
+// LogLevel returns the configured min log level of the logger.
+func (logger Logger) LogLevel() Level {
+	return logger.getModule().level
+}
+
+// EffectiveLogLevel returns the effective min log level of
+// the receiver - that is, messages with a lesser severity
+// level will be discarded.
+//
+// If the log level of the receiver is unspecified,
+// it will be taken from the effective log level of its
+// parent.
+func (logger Logger) EffectiveLogLevel() Level {
+	return logger.getModule().getEffectiveLogLevel()
+}
+
+// SetLogLevel sets the severity level of the given logger.
+// The root logger cannot be set to UNSPECIFIED level.
+// See EffectiveLogLevel for how this affects the
+// actual messages logged.
+func (logger Logger) SetLogLevel(level Level) {
+	logger.getModule().setLevel(level)
+}
+
+// Logf logs a printf-formatted message at the given level.
+// A message will be discarded if level is less than the
+// effective log level of the logger.
+// Note that the writers may also filter out messages that
+// are less than their registered minimum severity level.
+func (logger Logger) Logf(level Level, message string, args ...interface{}) {
+	logger.LogCallf(2, level, message, args...)
+}
+
+// LogCallf logs a printf-formatted message at the given level.
+// The location of the call is indicated by the calldepth argument.
+// A calldepth of 1 means the function that called this function.
+// A message will be discarded if level is less than the
+// effective log level of the logger.
+// Note that the writers may also filter out messages that
+// are less than their registered minimum severity level.
+func (logger Logger) LogCallf(calldepth int, level Level, message string, args ...interface{}) {
+	module := logger.getModule()
+	if !module.willWrite(level) {
+		return
+	}
+	// Gather time, and filename, line number.
+	now := time.Now() // get this early.
+	// Param to Caller is the call depth.  Since this method is called from
+	// the Logger methods, we want the place that those were called from.
+	_, file, line, ok := runtime.Caller(calldepth + 1)
+	if !ok {
+		file = "???"
+		line = 0
+	}
+	// Trim newline off format string, following usual
+	// Go logging conventions.
+	if len(message) > 0 && message[len(message)-1] == '\n' {
+		message = message[0 : len(message)-1]
+	}
+
+	// To avoid having a proliferation of Info/Infof methods,
+	// only use Sprintf if there are any args, and rely on the
+	// `go vet` tool for the obvious cases where someone has forgotten
+	// to provide an arg.
+	formattedMessage := message
+	if len(args) > 0 {
+		formattedMessage = fmt.Sprintf(message, args...)
+	}
+	module.write(Entry{
+		Level:     level,
+		Filename:  file,
+		Line:      line,
+		Timestamp: now,
+		Message:   formattedMessage,
+	})
+}
+
+// Criticalf logs the printf-formatted message at critical level.
+func (logger Logger) Criticalf(message string, args ...interface{}) {
+	logger.Logf(CRITICAL, message, args...)
+}
+
+// Errorf logs the printf-formatted message at error level.
+func (logger Logger) Errorf(message string, args ...interface{}) {
+	logger.Logf(ERROR, message, args...)
+}
+
+// Warningf logs the printf-formatted message at warning level.
+func (logger Logger) Warningf(message string, args ...interface{}) {
+	logger.Logf(WARNING, message, args...)
+}
+
+// Infof logs the printf-formatted message at info level.
+func (logger Logger) Infof(message string, args ...interface{}) {
+	logger.Logf(INFO, message, args...)
+}
+
+// Debugf logs the printf-formatted message at debug level.
+func (logger Logger) Debugf(message string, args ...interface{}) {
+	logger.Logf(DEBUG, message, args...)
+}
+
+// Tracef logs the printf-formatted message at trace level.
+func (logger Logger) Tracef(message string, args ...interface{}) {
+	logger.Logf(TRACE, message, args...)
+}
+
+// IsLevelEnabled returns whether logging is enabled
+// for the given log level.
+func (logger Logger) IsLevelEnabled(level Level) bool {
+	return logger.getModule().willWrite(level)
+}
+
+// IsErrorEnabled returns whether logging is enabled
+// at error level.
+func (logger Logger) IsErrorEnabled() bool {
+	return logger.IsLevelEnabled(ERROR)
+}
+
+// IsWarningEnabled returns whether logging is enabled
+// at warning level.
+func (logger Logger) IsWarningEnabled() bool {
+	return logger.IsLevelEnabled(WARNING)
+}
+
+// IsInfoEnabled returns whether logging is enabled
+// at info level.
+func (logger Logger) IsInfoEnabled() bool {
+	return logger.IsLevelEnabled(INFO)
+}
+
+// IsDebugEnabled returns whether logging is enabled
+// at debug level.
+func (logger Logger) IsDebugEnabled() bool {
+	return logger.IsLevelEnabled(DEBUG)
+}
+
+// IsTraceEnabled returns whether logging is enabled
+// at trace level.
+func (logger Logger) IsTraceEnabled() bool {
+	return logger.IsLevelEnabled(TRACE)
+}
diff --git a/switchq/vendor/github.com/juju/loggo/module.go b/switchq/vendor/github.com/juju/loggo/module.go
new file mode 100644
index 0000000..8153be5
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/module.go
@@ -0,0 +1,61 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+// Do not change the root module's name from "": getLoggerModule relies on
+// this when resolving parent modules.
+const (
+	rootString       = "<root>"
+	defaultRootLevel = WARNING
+	defaultLevel     = UNSPECIFIED
+)
+
+type module struct {
+	name    string
+	level   Level
+	parent  *module
+	context *Context
+}
+
+// Name returns the module's name.
+func (module *module) Name() string {
+	if module.name == "" {
+		return rootString
+	}
+	return module.name
+}
+
+func (m *module) willWrite(level Level) bool {
+	if level < TRACE || level > CRITICAL {
+		return false
+	}
+	return level >= m.getEffectiveLogLevel()
+}
+
+func (module *module) getEffectiveLogLevel() Level {
+	// Note: the root module is guaranteed to have a
+	// specified logging level, so acts as a suitable sentinel
+	// for this loop.
+	for {
+		if level := module.level.get(); level != UNSPECIFIED {
+			return level
+		}
+		module = module.parent
+	}
+	panic("unreachable")
+}
+
+// setLevel sets the severity level of the given module.
+// The root module cannot be set to UNSPECIFIED level.
+func (module *module) setLevel(level Level) {
+	// The root module can't be unspecified.
+	if module.name == "" && level == UNSPECIFIED {
+		level = WARNING
+	}
+	module.level.set(level)
+}
+
+func (m *module) write(entry Entry) {
+	entry.Module = m.name
+	m.context.write(entry)
+}
diff --git a/switchq/vendor/github.com/juju/loggo/testwriter.go b/switchq/vendor/github.com/juju/loggo/testwriter.go
new file mode 100644
index 0000000..b20e470
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/testwriter.go
@@ -0,0 +1,40 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"path"
+	"sync"
+)
+
+// TestWriter is a useful Writer for testing purposes.  Each component of the
+// logging message is stored in the Log array.
+type TestWriter struct {
+	mu  sync.Mutex
+	log []Entry
+}
+
+// Write stores the entry, with its Filename reduced to the base name, in the list returned by Log.
+func (writer *TestWriter) Write(entry Entry) {
+	writer.mu.Lock()
+	defer writer.mu.Unlock()
+	entry.Filename = path.Base(entry.Filename)
+	writer.log = append(writer.log, entry)
+}
+
+// Clear removes any saved log messages.
+func (writer *TestWriter) Clear() {
+	writer.mu.Lock()
+	defer writer.mu.Unlock()
+	writer.log = nil
+}
+
+// Log returns a copy of the current logged values.
+func (writer *TestWriter) Log() []Entry {
+	writer.mu.Lock()
+	defer writer.mu.Unlock()
+	v := make([]Entry, len(writer.log))
+	copy(v, writer.log)
+	return v
+}
diff --git a/switchq/vendor/github.com/juju/loggo/writer.go b/switchq/vendor/github.com/juju/loggo/writer.go
new file mode 100644
index 0000000..b3fe3e5
--- /dev/null
+++ b/switchq/vendor/github.com/juju/loggo/writer.go
@@ -0,0 +1,113 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/juju/ansiterm"
+)
+
+// DefaultWriterName is the name of the default writer for
+// a Context.
+const DefaultWriterName = "default"
+
+// Writer is implemented by any recipient of log messages.
+type Writer interface {
+	// Write writes a message to the Writer with the given level and module
+	// name. The filename and line hold the file name and line number of the
+	// code that is generating the log message; the time stamp holds the time
+	// the log message was generated, and message holds the log message
+	// itself.
+	Write(entry Entry)
+}
+
+// NewMinimumLevelWriter returns a Writer that will only pass on the Write calls
+// to the provided writer if the log level is at or above the specified
+// minimum level.
+func NewMinimumLevelWriter(writer Writer, minLevel Level) Writer {
+	return &minLevelWriter{
+		writer: writer,
+		level:  minLevel,
+	}
+}
+
+type minLevelWriter struct {
+	writer Writer
+	level  Level
+}
+
+// Write writes the log record.
+func (w minLevelWriter) Write(entry Entry) {
+	if entry.Level < w.level {
+		return
+	}
+	w.writer.Write(entry)
+}
+
+type simpleWriter struct {
+	writer    io.Writer
+	formatter func(entry Entry) string
+}
+
+// NewSimpleWriter returns a new writer that writes log messages to the given
+// io.Writer, formatting the messages with the given formatter.
+func NewSimpleWriter(writer io.Writer, formatter func(entry Entry) string) Writer {
+	if formatter == nil {
+		formatter = DefaultFormatter
+	}
+	return &simpleWriter{writer, formatter}
+}
+
+func (simple *simpleWriter) Write(entry Entry) {
+	logLine := simple.formatter(entry)
+	fmt.Fprintln(simple.writer, logLine)
+}
+
+func defaultWriter() Writer {
+	return NewColorWriter(os.Stderr)
+}
+
+type colorWriter struct {
+	writer *ansiterm.Writer
+}
+
+var (
+	// SeverityColor defines the colors for the levels output by the ColorWriter.
+	SeverityColor = map[Level]*ansiterm.Context{
+		TRACE:   ansiterm.Foreground(ansiterm.Default),
+		DEBUG:   ansiterm.Foreground(ansiterm.Green),
+		INFO:    ansiterm.Foreground(ansiterm.BrightBlue),
+		WARNING: ansiterm.Foreground(ansiterm.Yellow),
+		ERROR:   ansiterm.Foreground(ansiterm.BrightRed),
+		CRITICAL: &ansiterm.Context{
+			Foreground: ansiterm.White,
+			Background: ansiterm.Red,
+		},
+	}
+	// LocationColor defines the colors for the location output by the ColorWriter.
+	LocationColor = ansiterm.Foreground(ansiterm.BrightBlue)
+)
+
+// NewColorWriter will write out colored severity levels if the writer is
+// outputting to a terminal.
+func NewColorWriter(writer io.Writer) Writer {
+	return &colorWriter{ansiterm.NewWriter(writer)}
+}
+
+// Write implements Writer.
+func (w *colorWriter) Write(entry Entry) {
+	ts := formatTime(entry.Timestamp)
+	// Just get the basename from the filename
+	filename := filepath.Base(entry.Filename)
+
+	fmt.Fprintf(w.writer, "%s ", ts)
+	SeverityColor[entry.Level].Fprintf(w.writer, entry.Level.Short())
+	fmt.Fprintf(w.writer, " %s ", entry.Module)
+	LocationColor.Fprintf(w.writer, "%s:%d ", filename, entry.Line)
+	fmt.Fprintln(w.writer, entry.Message)
+}
diff --git a/switchq/vendor/github.com/juju/schema/LICENSE b/switchq/vendor/github.com/juju/schema/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/switchq/vendor/github.com/juju/schema/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/switchq/vendor/github.com/juju/schema/README.md b/switchq/vendor/github.com/juju/schema/README.md
new file mode 100644
index 0000000..78ad8a1
--- /dev/null
+++ b/switchq/vendor/github.com/juju/schema/README.md
@@ -0,0 +1,5 @@
+juju/schema
+===========
+
+This package provides helpers for coercing dynamically typed data structures
+into known forms.
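
A hedged sketch of the package's core pattern, building a Checker and then coercing untyped input through it; the "name"/"port" fields are invented for illustration:

    package main

    import (
        "fmt"

        "github.com/juju/schema"
    )

    func main() {
        checker := schema.FieldMap(
            schema.Fields{
                "name": schema.String(),
                "port": schema.Int(),
            },
            schema.Defaults{"port": 4242},
        )
        // Missing keys are filled in from the defaults; integer kinds are
        // normalized to int64 by the Int checker.
        out, err := checker.Coerce(map[string]interface{}{"name": "switchq"}, nil)
        fmt.Println(out, err) // map[name:switchq port:4242] <nil>
    }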
diff --git a/switchq/vendor/github.com/juju/schema/checker.go b/switchq/vendor/github.com/juju/schema/checker.go
new file mode 100644
index 0000000..8682136
--- /dev/null
+++ b/switchq/vendor/github.com/juju/schema/checker.go
@@ -0,0 +1,71 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"strings"
+)
+
+// The Coerce method of the Checker interface is called recursively when
+// v is being validated.  If err is nil, newv is used as the new value
+// at the recursion point.  If err is non-nil, v is taken as invalid and
+// may either be ignored or cause an error, depending on where in the
+// schema checking process the error happened. Checkers like OneOf may continue
+// with an alternative, for instance.
+type Checker interface {
+	Coerce(v interface{}, path []string) (newv interface{}, err error)
+}
+
+// Any returns a Checker that succeeds with any input value and
+// results in the value itself unprocessed.
+func Any() Checker {
+	return anyC{}
+}
+
+type anyC struct{}
+
+func (c anyC) Coerce(v interface{}, path []string) (interface{}, error) {
+	return v, nil
+}
+
+// OneOf returns a Checker that attempts to Coerce the value with each
+// of the provided checkers. The value returned by the first checker
+// that succeeds will be returned by the OneOf checker itself.  If no
+// checker succeeds, OneOf will return an error on coercion.
+func OneOf(options ...Checker) Checker {
+	return oneOfC{options}
+}
+
+type oneOfC struct {
+	options []Checker
+}
+
+func (c oneOfC) Coerce(v interface{}, path []string) (interface{}, error) {
+	for _, o := range c.options {
+		newv, err := o.Coerce(v, path)
+		if err == nil {
+			return newv, nil
+		}
+	}
+	return nil, error_{"", v, path}
+}
+
+// pathAsPrefix returns a string consisting of the path elements
+// suitable for using as the prefix of an error message. If path
+// starts with a ".", the dot is omitted.
+func pathAsPrefix(path []string) string {
+	if len(path) == 0 {
+		return ""
+	}
+	var s string
+	if path[0] == "." {
+		s = strings.Join(path[1:], "")
+	} else {
+		s = strings.Join(path, "")
+	}
+	if s == "" {
+		return ""
+	}
+	return s + ": "
+}
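
Usage note: OneOf tries its checkers in order and keeps the first success. A rough example with invented values:

    // Int fails to parse "true", so OneOf falls through to Bool.
    c := schema.OneOf(schema.Int(), schema.Bool())
    v, err := c.Coerce("true", nil)
    fmt.Println(v, err) // true <nil>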
diff --git a/switchq/vendor/github.com/juju/schema/const.go b/switchq/vendor/github.com/juju/schema/const.go
new file mode 100644
index 0000000..cbd03b8
--- /dev/null
+++ b/switchq/vendor/github.com/juju/schema/const.go
@@ -0,0 +1,57 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Const returns a Checker that only succeeds if the input matches
+// value exactly.  The value is compared with reflect.DeepEqual.
+func Const(value interface{}) Checker {
+	return constC{value}
+}
+
+type constC struct {
+	value interface{}
+}
+
+func (c constC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if reflect.DeepEqual(v, c.value) {
+		return v, nil
+	}
+	return nil, error_{fmt.Sprintf("%#v", c.value), v, path}
+}
+
+// Nil returns a Checker that only succeeds if the input is nil. To tweak the
+// error message, valueLabel can contain a label of the value being checked to
+// be empty, e.g. "my special name". If valueLabel is "", "value" will be used
+// as a label instead.
+//
+// Example 1:
+// schema.Nil("widget").Coerce(42, nil) will return an error message
+// like `expected empty widget, got int(42)`.
+//
+// Example 2:
+// schema.Nil("").Coerce("", nil) will return an error message like
+// `expected empty value, got string("")`.
+func Nil(valueLabel string) Checker {
+	if valueLabel == "" {
+		valueLabel = "value"
+	}
+	return nilC{valueLabel}
+}
+
+type nilC struct {
+	valueLabel string
+}
+
+func (c nilC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if reflect.DeepEqual(v, nil) {
+		return v, nil
+	}
+	label := fmt.Sprintf("empty %s", c.valueLabel)
+	return nil, error_{label, v, path}
+}
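
Const pairs naturally with OneOf to express an enumeration. A small sketch with made-up values:

    mode := schema.OneOf(schema.Const("on"), schema.Const("off"))
    if _, err := mode.Coerce("auto", nil); err != nil {
        fmt.Println(err) // unexpected value "auto"
    }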
diff --git a/switchq/vendor/github.com/juju/schema/errors.go b/switchq/vendor/github.com/juju/schema/errors.go
new file mode 100644
index 0000000..f62f58e
--- /dev/null
+++ b/switchq/vendor/github.com/juju/schema/errors.go
@@ -0,0 +1,25 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+)
+
+type error_ struct {
+	want string
+	got  interface{}
+	path []string
+}
+
+func (e error_) Error() string {
+	path := pathAsPrefix(e.path)
+	if e.want == "" {
+		return fmt.Sprintf("%sunexpected value %#v", path, e.got)
+	}
+	if e.got == nil {
+		return fmt.Sprintf("%sexpected %s, got nothing", path, e.want)
+	}
+	return fmt.Sprintf("%sexpected %s, got %T(%#v)", path, e.want, e.got, e.got)
+}
diff --git a/switchq/vendor/github.com/juju/schema/fieldmap.go b/switchq/vendor/github.com/juju/schema/fieldmap.go
new file mode 100644
index 0000000..765f7e3
--- /dev/null
+++ b/switchq/vendor/github.com/juju/schema/fieldmap.go
@@ -0,0 +1,170 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Omit is a marker for FieldMap and StructFieldMap defaults parameter.
+// If a field is not present in the map and defaults to Omit, the missing
+// field will be omitted from the coerced map as well.
+var Omit omit
+
+type omit struct{}
+
+type Fields map[string]Checker
+type Defaults map[string]interface{}
+
+// FieldMap returns a Checker that accepts a map value with defined
+// string keys. Every key has an independent checker associated,
+// and processing will only succeed if all the values succeed
+// individually. If a field fails to be processed, processing stops
+// and returns with the underlying error.
+//
+// Fields in defaults will be set to the provided value if not present
+// in the coerced map. If the default value is schema.Omit, the
+// missing field will be omitted from the coerced map.
+//
+// The coerced output value has type map[string]interface{}.
+func FieldMap(fields Fields, defaults Defaults) Checker {
+	return fieldMapC{fields, defaults, false}
+}
+
+// StrictFieldMap returns a Checker that acts as the one returned by FieldMap,
+// but the Checker returns an error if it encounters an unknown key.
+func StrictFieldMap(fields Fields, defaults Defaults) Checker {
+	return fieldMapC{fields, defaults, true}
+}
+
+type fieldMapC struct {
+	fields   Fields
+	defaults Defaults
+	strict   bool
+}
+
+var stringType = reflect.TypeOf("")
+
+func hasStrictStringKeys(rv reflect.Value) bool {
+	if rv.Type().Key() == stringType {
+		return true
+	}
+	if rv.Type().Key().Kind() != reflect.Interface {
+		return false
+	}
+	for _, k := range rv.MapKeys() {
+		if k.Elem().Type() != stringType {
+			return false
+		}
+	}
+	return true
+}
+
+func (c fieldMapC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Map {
+		return nil, error_{"map", v, path}
+	}
+	if !hasStrictStringKeys(rv) {
+		return nil, error_{"map[string]", v, path}
+	}
+
+	if c.strict {
+		for _, k := range rv.MapKeys() {
+			ks := k.String()
+			if _, ok := c.fields[ks]; !ok {
+				return nil, fmt.Errorf("%sunknown key %q (value %#v)", pathAsPrefix(path), ks, rv.MapIndex(k).Interface())
+			}
+		}
+	}
+
+	vpath := append(path, ".", "?")
+
+	out := make(map[string]interface{}, rv.Len())
+	for k, checker := range c.fields {
+		valuev := rv.MapIndex(reflect.ValueOf(k))
+		var value interface{}
+		if valuev.IsValid() {
+			value = valuev.Interface()
+		} else if dflt, ok := c.defaults[k]; ok {
+			if dflt == Omit {
+				continue
+			}
+			value = dflt
+		}
+		vpath[len(vpath)-1] = k
+		newv, err := checker.Coerce(value, vpath)
+		if err != nil {
+			return nil, err
+		}
+		out[k] = newv
+	}
+	for k, v := range c.defaults {
+		if v == Omit {
+			continue
+		}
+		if _, ok := out[k]; !ok {
+			checker, ok := c.fields[k]
+			if !ok {
+				return nil, fmt.Errorf("got default value for unknown field %q", k)
+			}
+			vpath[len(vpath)-1] = k
+			newv, err := checker.Coerce(v, vpath)
+			if err != nil {
+				return nil, err
+			}
+			out[k] = newv
+		}
+	}
+	return out, nil
+}
+
+// FieldMapSet returns a Checker that accepts a map value checked
+// against one of several FieldMap checkers.  The actual checker
+// used is the first one whose checker associated with the selector
+// field processes the map correctly. If no checker processes
+// the selector value correctly, an error is returned.
+//
+// The coerced output value has type map[string]interface{}.
+func FieldMapSet(selector string, maps []Checker) Checker {
+	fmaps := make([]fieldMapC, len(maps))
+	for i, m := range maps {
+		if fmap, ok := m.(fieldMapC); ok {
+			if checker, _ := fmap.fields[selector]; checker == nil {
+				panic("FieldMapSet has a FieldMap with a missing selector")
+			}
+			fmaps[i] = fmap
+		} else {
+			panic("FieldMapSet got a non-FieldMap checker")
+		}
+	}
+	return mapSetC{selector, fmaps}
+}
+
+type mapSetC struct {
+	selector string
+	fmaps    []fieldMapC
+}
+
+func (c mapSetC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Map {
+		return nil, error_{"map", v, path}
+	}
+
+	var selector interface{}
+	selectorv := rv.MapIndex(reflect.ValueOf(c.selector))
+	if selectorv.IsValid() {
+		selector = selectorv.Interface()
+		for _, fmap := range c.fmaps {
+			_, err := fmap.fields[c.selector].Coerce(selector, path)
+			if err != nil {
+				continue
+			}
+			return fmap.Coerce(v, path)
+		}
+	}
+	return nil, error_{"supported selector", selector, append(path, ".", c.selector)}
+}
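
A sketch of FieldMapSet dispatching on a selector field; the "type"/"port" schema is invented for illustration:

    tcp := schema.FieldMap(schema.Fields{
        "type": schema.Const("tcp"),
        "port": schema.Int(),
    }, nil)
    udp := schema.FieldMap(schema.Fields{
        "type": schema.Const("udp"),
        "port": schema.Int(),
    }, nil)
    endpoint := schema.FieldMapSet("type", []schema.Checker{tcp, udp})

    // The udp FieldMap wins because its "type" checker accepts "udp".
    out, err := endpoint.Coerce(map[string]interface{}{"type": "udp", "port": 53}, nil)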
diff --git a/switchq/vendor/github.com/juju/schema/lists.go b/switchq/vendor/github.com/juju/schema/lists.go
new file mode 100644
index 0000000..635425a
--- /dev/null
+++ b/switchq/vendor/github.com/juju/schema/lists.go
@@ -0,0 +1,44 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"reflect"
+	"strconv"
+)
+
+// List returns a Checker that accepts a slice value with values
+// that are processed with the elem checker.  If any element of the
+// provided slice value fails to be processed, processing will stop
+// and return with the obtained error.
+//
+// The coerced output value has type []interface{}.
+func List(elem Checker) Checker {
+	return listC{elem}
+}
+
+type listC struct {
+	elem Checker
+}
+
+func (c listC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Slice {
+		return nil, error_{"list", v, path}
+	}
+
+	path = append(path, "[", "?", "]")
+
+	l := rv.Len()
+	out := make([]interface{}, 0, l)
+	for i := 0; i != l; i++ {
+		path[len(path)-2] = strconv.Itoa(i)
+		elem, err := c.elem.Coerce(rv.Index(i).Interface(), path)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, elem)
+	}
+	return out, nil
+}
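
For example, combining List with Int coerces mixed representations into a uniform []interface{} of int64 values (inputs invented):

    ports, err := schema.List(schema.Int()).Coerce([]interface{}{80, "8080"}, nil)
    fmt.Println(ports, err) // [80 8080] <nil>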
diff --git a/switchq/vendor/github.com/juju/schema/maps.go b/switchq/vendor/github.com/juju/schema/maps.go
new file mode 100644
index 0000000..31242a0
--- /dev/null
+++ b/switchq/vendor/github.com/juju/schema/maps.go
@@ -0,0 +1,93 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Map returns a Checker that accepts a map value. Every key and value
+// in the map is processed with the respective checker, and if any
+// value fails to be coerced, processing stops and returns with the
+// underlying error.
+//
+// The coerced output value has type map[interface{}]interface{}.
+func Map(key Checker, value Checker) Checker {
+	return mapC{key, value}
+}
+
+type mapC struct {
+	key   Checker
+	value Checker
+}
+
+func (c mapC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Map {
+		return nil, error_{"map", v, path}
+	}
+
+	vpath := append(path, ".", "?")
+
+	l := rv.Len()
+	out := make(map[interface{}]interface{}, l)
+	keys := rv.MapKeys()
+	for i := 0; i != l; i++ {
+		k := keys[i]
+		newk, err := c.key.Coerce(k.Interface(), path)
+		if err != nil {
+			return nil, err
+		}
+		vpath[len(vpath)-1] = fmt.Sprint(k.Interface())
+		newv, err := c.value.Coerce(rv.MapIndex(k).Interface(), vpath)
+		if err != nil {
+			return nil, err
+		}
+		out[newk] = newv
+	}
+	return out, nil
+}
+
+// StringMap returns a Checker that accepts a map value. Every key in
+// the map must be a string, and every value in the map is processed
+// with the provided checker. If any value fails to be coerced,
+// processing stops and returns with the underlying error.
+//
+// The coerced output value has type map[string]interface{}.
+func StringMap(value Checker) Checker {
+	return stringMapC{value}
+}
+
+type stringMapC struct {
+	value Checker
+}
+
+func (c stringMapC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Map {
+		return nil, error_{"map", v, path}
+	}
+
+	vpath := append(path, ".", "?")
+	key := String()
+
+	l := rv.Len()
+	out := make(map[string]interface{}, l)
+	keys := rv.MapKeys()
+	for i := 0; i != l; i++ {
+		k := keys[i]
+		newk, err := key.Coerce(k.Interface(), path)
+		if err != nil {
+			return nil, err
+		}
+		vpath[len(vpath)-1] = fmt.Sprint(k.Interface())
+		newv, err := c.value.Coerce(rv.MapIndex(k).Interface(), vpath)
+		if err != nil {
+			return nil, err
+		}
+		out[newk.(string)] = newv
+	}
+	return out, nil
+}
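
A quick sketch of StringMap, which suits JSON/YAML-style documents whose keys are known to be strings (keys and values invented):

    labels, err := schema.StringMap(schema.String()).Coerce(
        map[interface{}]interface{}{"role": "leaf", "rack": "r1"}, nil)
    // On success labels holds a map[string]interface{}.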
diff --git a/switchq/vendor/github.com/juju/schema/numeric.go b/switchq/vendor/github.com/juju/schema/numeric.go
new file mode 100644
index 0000000..ec88e56
--- /dev/null
+++ b/switchq/vendor/github.com/juju/schema/numeric.go
@@ -0,0 +1,197 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"reflect"
+	"strconv"
+)
+
+// Bool returns a Checker that accepts boolean values only.
+func Bool() Checker {
+	return boolC{}
+}
+
+type boolC struct{}
+
+func (c boolC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil {
+		switch reflect.TypeOf(v).Kind() {
+		case reflect.Bool:
+			return v, nil
+		case reflect.String:
+			val, err := strconv.ParseBool(reflect.ValueOf(v).String())
+			if err == nil {
+				return val, nil
+			}
+		}
+	}
+	return nil, error_{"bool", v, path}
+}
+
+// Int returns a Checker that accepts any integer value, and returns
+// the same value consistently typed as an int64.
+func Int() Checker {
+	return intC{}
+}
+
+type intC struct{}
+
+func (c intC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"int", v, path}
+	}
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.Int:
+	case reflect.Int8:
+	case reflect.Int16:
+	case reflect.Int32:
+	case reflect.Int64:
+	case reflect.String:
+		val, err := strconv.ParseInt(reflect.ValueOf(v).String(), 0, 64)
+		if err == nil {
+			return val, nil
+		} else {
+			return nil, error_{"int", v, path}
+		}
+	default:
+		return nil, error_{"int", v, path}
+	}
+	return reflect.ValueOf(v).Int(), nil
+}
+
+// Uint returns a Checker that accepts any integer or unsigned value, and
+// returns the same value consistently typed as a uint64. If the integer
+// value is negative an error is raised.
+func Uint() Checker {
+	return uintC{}
+}
+
+type uintC struct{}
+
+func (c uintC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"uint", v, path}
+	}
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return reflect.ValueOf(v).Uint(), nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		val := reflect.ValueOf(v).Int()
+		if val < 0 {
+			return nil, error_{"uint", v, path}
+		}
+		// All positive int64 values fit into uint64.
+		return uint64(val), nil
+	case reflect.String:
+		val, err := strconv.ParseUint(reflect.ValueOf(v).String(), 0, 64)
+		if err == nil {
+			return val, nil
+		} else {
+			return nil, error_{"uint", v, path}
+		}
+	default:
+		return nil, error_{"uint", v, path}
+	}
+}
+
+// ForceInt returns a Checker that accepts any integer or float value, and
+// returns the same value consistently typed as an int. This is required
+// in order to handle the interface{}/float64 type conversion performed by
+// the JSON serializer used as part of the API infrastructure.
+func ForceInt() Checker {
+	return forceIntC{}
+}
+
+type forceIntC struct{}
+
+func (c forceIntC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil {
+		switch vv := reflect.TypeOf(v); vv.Kind() {
+		case reflect.String:
+			vstr := reflect.ValueOf(v).String()
+			intValue, err := strconv.ParseInt(vstr, 0, 64)
+			if err == nil {
+				return int(intValue), nil
+			}
+			floatValue, err := strconv.ParseFloat(vstr, 64)
+			if err == nil {
+				return int(floatValue), nil
+			}
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			return int(reflect.ValueOf(v).Int()), nil
+		case reflect.Float32, reflect.Float64:
+			return int(reflect.ValueOf(v).Float()), nil
+		}
+	}
+	return nil, error_{"number", v, path}
+}
+
+// ForceUint returns a Checker that accepts any integer or float value, and
+// returns the same value consistently typed as a uint64. This is required
+// in order to handle the interface{}/float64 type conversion performed by
+// the JSON serializer used as part of the API infrastructure. If the integer
+// value is negative an error is raised.
+func ForceUint() Checker {
+	return forceUintC{}
+}
+
+type forceUintC struct{}
+
+func (c forceUintC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil {
+		switch vv := reflect.TypeOf(v); vv.Kind() {
+		case reflect.String:
+			vstr := reflect.ValueOf(v).String()
+			intValue, err := strconv.ParseUint(vstr, 0, 64)
+			if err == nil {
+				return intValue, nil
+			}
+			floatValue, err := strconv.ParseFloat(vstr, 64)
+			if err == nil {
+				if floatValue < 0 {
+					return nil, error_{"uint", v, path}
+				}
+				return uint64(floatValue), nil
+			}
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			return reflect.ValueOf(v).Uint(), nil
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			val := reflect.ValueOf(v).Int()
+			if val < 0 {
+				return nil, error_{"uint", v, path}
+			}
+			// All positive int64 values fit into uint64.
+			return uint64(val), nil
+		case reflect.Float32, reflect.Float64:
+			val := reflect.ValueOf(v).Float()
+			if val < 0 {
+				return nil, error_{"uint", v, path}
+			}
+			return uint64(val), nil
+		}
+	}
+	return nil, error_{"uint", v, path}
+}
+
+// Float returns a Checker that accepts any float value, and returns
+// the same value consistently typed as a float64.
+func Float() Checker {
+	return floatC{}
+}
+
+type floatC struct{}
+
+func (c floatC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"float", v, path}
+	}
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.Float32:
+	case reflect.Float64:
+	default:
+		return nil, error_{"float", v, path}
+	}
+	return reflect.ValueOf(v).Float(), nil
+}
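
The Force variants exist for JSON round-trips, where every number arrives as a float64. A hedged sketch, assuming the encoding/json and fmt imports:

    // encoding/json decodes all numbers into float64, which Int() rejects;
    // ForceInt coerces the float back to a plain int.
    var doc map[string]interface{}
    _ = json.Unmarshal([]byte(`{"retries": 3}`), &doc)
    n, _ := schema.ForceInt().Coerce(doc["retries"], nil)
    fmt.Println(n) // 3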
diff --git a/switchq/vendor/github.com/juju/schema/size.go b/switchq/vendor/github.com/juju/schema/size.go
new file mode 100644
index 0000000..1ad0caa
--- /dev/null
+++ b/switchq/vendor/github.com/juju/schema/size.go
@@ -0,0 +1,42 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"github.com/juju/utils"
+	"reflect"
+)
+
+// Size returns a Checker that accepts a string value, and returns
+// the parsed string as a size in mebibytes; see https://godoc.org/github.com/juju/utils#ParseSize
+func Size() Checker {
+	return sizeC{}
+}
+
+type sizeC struct{}
+
+// Coerce implements Checker Coerce method.
+func (c sizeC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"string", v, path}
+	}
+
+	typeOf := reflect.TypeOf(v).Kind()
+	if typeOf != reflect.String {
+		return nil, error_{"string", v, path}
+	}
+
+	value := reflect.ValueOf(v).String()
+	if value == "" {
+		return nil, error_{"empty string", v, path}
+	}
+
+	v, err := utils.ParseSize(value)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return v, nil
+}
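
Assuming utils.ParseSize keeps its behaviour of returning mebibytes as a uint64, usage looks roughly like:

    mib, err := schema.Size().Coerce("2G", nil)
    fmt.Println(mib, err) // 2048 <nil>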
diff --git a/switchq/vendor/github.com/juju/schema/strings.go b/switchq/vendor/github.com/juju/schema/strings.go
new file mode 100644
index 0000000..75f6120
--- /dev/null
+++ b/switchq/vendor/github.com/juju/schema/strings.go
@@ -0,0 +1,155 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+	"regexp"
+)
+
+// String returns a Checker that accepts a string value only and returns
+// it unprocessed.
+func String() Checker {
+	return stringC{}
+}
+
+type stringC struct{}
+
+func (c stringC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil && reflect.TypeOf(v).Kind() == reflect.String {
+		return reflect.ValueOf(v).String(), nil
+	}
+	return nil, error_{"string", v, path}
+}
+
+// URL returns a Checker that accepts a string value that must be parseable as a
+// URL, and returns a *url.URL.
+func URL() Checker {
+	return urlC{}
+}
+
+type urlC struct{}
+
+func (c urlC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil && reflect.TypeOf(v).Kind() == reflect.String {
+		s := reflect.ValueOf(v).String()
+		u, err := url.Parse(s)
+		if err != nil {
+			return nil, error_{"valid url", s, path}
+		}
+		return u, nil
+	}
+	return nil, error_{"url string", v, path}
+}
+
+// SimpleRegexp returns a checker that accepts a string value that is
+// a valid regular expression and returns it unprocessed.
+func SimpleRegexp() Checker {
+	return sregexpC{}
+}
+
+type sregexpC struct{}
+
+func (c sregexpC) Coerce(v interface{}, path []string) (interface{}, error) {
+	// XXX The regexp package happens to be extremely simple right now.
+	//     Once exp/regexp goes mainstream, we'll have to update this
+	//     logic to use a more widely accepted regexp subset.
+	if v != nil && reflect.TypeOf(v).Kind() == reflect.String {
+		s := reflect.ValueOf(v).String()
+		_, err := regexp.Compile(s)
+		if err != nil {
+			return nil, error_{"valid regexp", s, path}
+		}
+		return v, nil
+	}
+	return nil, error_{"regexp string", v, path}
+}
+
+// UUID returns a Checker that accepts a string value matching the UUID
+// format and returns it unprocessed.
+func UUID() Checker {
+	return uuidC{}
+}
+
+type uuidC struct{}
+
+var uuidregex = regexp.MustCompile(`[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}`)
+
+func (c uuidC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil && reflect.TypeOf(v).Kind() == reflect.String {
+		uuid := reflect.ValueOf(v).String()
+		if uuidregex.MatchString(uuid) {
+			return uuid, nil
+		}
+	}
+	return nil, error_{"uuid", v, path}
+}
+
+// Stringified returns a checker that accepts a bool/int/float/string
+// value and returns its string representation. Other value types may be
+// supported by passing in their checkers.
+func Stringified(checkers ...Checker) Checker {
+	return stringifiedC{
+		checkers: checkers,
+	}
+}
+
+type stringifiedC struct {
+	checkers []Checker
+}
+
+func (c stringifiedC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if newStr, err := String().Coerce(v, path); err == nil {
+		return newStr, nil
+	}
+	_, err := OneOf(append(c.checkers,
+		Bool(),
+		Int(),
+		Float(),
+		String(),
+		URL(),
+	)...).Coerce(v, path)
+	if err != nil {
+		return nil, err
+	}
+	return fmt.Sprintf("%#v", v), nil
+}
+
+// NonEmptyString returns a Checker that only accepts non-empty strings. To
+// tweak the error message, valueLabel can contain a label of the value being
+// checked, e.g. "my special name". If valueLabel is "", "string" will be used
+// as a label instead.
+//
+// Example 1:
+// schema.NonEmptyString("widget").Coerce("", nil) will return an error message
+// like `expected non-empty widget, got string("")`.
+//
+// Example 2:
+// schema.NonEmptyString("").Coerce("", nil) will return an error message like
+// `expected non-empty string, got string("")`.
+func NonEmptyString(valueLabel string) Checker {
+	if valueLabel == "" {
+		valueLabel = "string"
+	}
+	return nonEmptyStringC{valueLabel}
+}
+
+type nonEmptyStringC struct {
+	valueLabel string
+}
+
+func (c nonEmptyStringC) Coerce(v interface{}, path []string) (interface{}, error) {
+	label := fmt.Sprintf("non-empty %s", c.valueLabel)
+	invalidError := error_{label, v, path}
+
+	if v == nil || reflect.TypeOf(v).Kind() != reflect.String {
+		return nil, invalidError
+	}
+	if stringValue := reflect.ValueOf(v).String(); stringValue != "" {
+		return stringValue, nil
+	}
+	return nil, invalidError
+}
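
Two small usage sketches (the "hostname" label is invented): NonEmptyString's label feeds the error message, and Stringified renders the default scalar types to strings:

    _, err := schema.NonEmptyString("hostname").Coerce("", nil)
    fmt.Println(err) // expected non-empty hostname, got string("")

    s, _ := schema.Stringified().Coerce(true, nil)
    fmt.Println(s) // true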
diff --git a/switchq/vendor/github.com/juju/schema/time.go b/switchq/vendor/github.com/juju/schema/time.go
new file mode 100644
index 0000000..9521a2a
--- /dev/null
+++ b/switchq/vendor/github.com/juju/schema/time.go
@@ -0,0 +1,41 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"reflect"
+	"time"
+)
+
+// Time returns a Checker that accepts a string value, and returns
+// the parsed time.Time value. Empty strings are considered empty times.
+func Time() Checker {
+	return timeC{}
+}
+
+type timeC struct{}
+
+// Coerce implements Checker Coerce method.
+func (c timeC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"string or time.Time", v, path}
+	}
+	var empty time.Time
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.TypeOf(empty).Kind():
+		return v, nil
+	case reflect.String:
+		vstr := reflect.ValueOf(v).String()
+		if vstr == "" {
+			return empty, nil
+		}
+		v, err := time.Parse(time.RFC3339Nano, vstr)
+		if err != nil {
+			return nil, err
+		}
+		return v, nil
+	default:
+		return nil, error_{"string or time.Time", v, path}
+	}
+}
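
For instance (timestamp invented):

    t, err := schema.Time().Coerce("2017-01-02T15:04:05Z", nil)
    // t is a time.Time; an empty input string coerces to the zero time.Time.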
diff --git a/switchq/vendor/github.com/juju/schema/time_duration.go b/switchq/vendor/github.com/juju/schema/time_duration.go
new file mode 100644
index 0000000..71e75fe
--- /dev/null
+++ b/switchq/vendor/github.com/juju/schema/time_duration.go
@@ -0,0 +1,42 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"reflect"
+	"time"
+)
+
+// TimeDuration returns a Checker that accepts a string value, and returns
+// the parsed time.Duration value. Empty strings are considered zero durations.
+func TimeDuration() Checker {
+	return timeDurationC{}
+}
+
+type timeDurationC struct{}
+
+// Coerce implements Checker Coerce method.
+func (c timeDurationC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"string or time.Duration", v, path}
+	}
+
+	var empty time.Duration
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.TypeOf(empty).Kind():
+		return v, nil
+	case reflect.String:
+		vstr := reflect.ValueOf(v).String()
+		if vstr == "" {
+			return empty, nil
+		}
+		v, err := time.ParseDuration(vstr)
+		if err != nil {
+			return nil, err
+		}
+		return v, nil
+	default:
+		return nil, error_{"string or time.Duration", v, path}
+	}
+}
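
And the duration counterpart, for example:

    d, err := schema.TimeDuration().Coerce("150ms", nil)
    // d is a time.Duration; "" coerces to the zero duration.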
diff --git a/switchq/vendor/github.com/juju/utils/LICENSE b/switchq/vendor/github.com/juju/utils/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/switchq/vendor/github.com/juju/utils/LICENSE.golang b/switchq/vendor/github.com/juju/utils/LICENSE.golang
new file mode 100644
index 0000000..953076d
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/LICENSE.golang
@@ -0,0 +1,32 @@
+This licence applies to the following files:
+
+* filepath/stdlib.go
+* filepath/stdlibmatch.go
+
+Copyright (c) 2010 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/switchq/vendor/github.com/juju/utils/Makefile b/switchq/vendor/github.com/juju/utils/Makefile
new file mode 100644
index 0000000..9c69f32
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/Makefile
@@ -0,0 +1,10 @@
+PROJECT := github.com/juju/utils
+
+check-licence:
+	@(fgrep -rl "Licensed under the LGPLv3" .;\
+		fgrep -rl "MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT" .;\
+		find . -name "*.go") | sed -e 's,\./,,' | sort | uniq -u | \
+		xargs -I {} echo FAIL: licence missed: {}
+
+check: check-licence
+	go test $(PROJECT)/...
\ No newline at end of file
diff --git a/switchq/vendor/github.com/juju/utils/README.md b/switchq/vendor/github.com/juju/utils/README.md
new file mode 100644
index 0000000..ef08948
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/README.md
@@ -0,0 +1,4 @@
+juju/utils
+============
+
+This package provides general utility packages and functions.
diff --git a/switchq/vendor/github.com/juju/utils/attempt.go b/switchq/vendor/github.com/juju/utils/attempt.go
new file mode 100644
index 0000000..3becab2
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/attempt.go
@@ -0,0 +1,80 @@
+// Copyright 2011, 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"time"
+)
+
+// The Attempt and AttemptStrategy types are copied from those in launchpad.net/goamz/aws.
+
+// AttemptStrategy represents a strategy for waiting for an action
+// to complete successfully.
+type AttemptStrategy struct {
+	Total time.Duration // total duration of attempt.
+	Delay time.Duration // interval between each try in the burst.
+	Min   int           // minimum number of retries; overrides Total
+}
+
+type Attempt struct {
+	strategy AttemptStrategy
+	last     time.Time
+	end      time.Time
+	force    bool
+	count    int
+}
+
+// Start begins a new sequence of attempts for the given strategy.
+func (s AttemptStrategy) Start() *Attempt {
+	now := time.Now()
+	return &Attempt{
+		strategy: s,
+		last:     now,
+		end:      now.Add(s.Total),
+		force:    true,
+	}
+}
+
+// Next waits until it is time to perform the next attempt or returns
+// false if it is time to stop trying.
+// It always returns true the first time it is called - we are guaranteed to
+// make at least one attempt.
+func (a *Attempt) Next() bool {
+	now := time.Now()
+	sleep := a.nextSleep(now)
+	if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
+		return false
+	}
+	a.force = false
+	if sleep > 0 && a.count > 0 {
+		time.Sleep(sleep)
+		now = time.Now()
+	}
+	a.count++
+	a.last = now
+	return true
+}
+
+func (a *Attempt) nextSleep(now time.Time) time.Duration {
+	sleep := a.strategy.Delay - now.Sub(a.last)
+	if sleep < 0 {
+		return 0
+	}
+	return sleep
+}
+
+// HasNext returns whether another attempt will be made if the current
+// one fails. If it returns true, the following call to Next is
+// guaranteed to return true.
+func (a *Attempt) HasNext() bool {
+	if a.force || a.strategy.Min > a.count {
+		return true
+	}
+	now := time.Now()
+	if now.Add(a.nextSleep(now)).Before(a.end) {
+		a.force = true
+		return true
+	}
+	return false
+}
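
The usual retry loop built on these types looks roughly like the following, with doSomething standing in for the real operation:

    attempt := utils.AttemptStrategy{
        Total: 10 * time.Second,       // keep retrying for up to ten seconds
        Delay: 250 * time.Millisecond, // sleeping this long between tries
    }
    var err error
    for a := attempt.Start(); a.Next(); {
        if err = doSomething(); err == nil {
            break
        }
    }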
diff --git a/switchq/vendor/github.com/juju/utils/clock/clock.go b/switchq/vendor/github.com/juju/utils/clock/clock.go
new file mode 100644
index 0000000..59a511d
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/clock/clock.go
@@ -0,0 +1,53 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package clock
+
+import "time"
+
+// Clock provides an interface for dealing with clocks.
+type Clock interface {
+	// Now returns the current clock time.
+	Now() time.Time
+
+	// After waits for the duration to elapse and then sends the
+	// current time on the returned channel.
+	After(time.Duration) <-chan time.Time
+
+	// AfterFunc waits for the duration to elapse and then calls f in its own goroutine.
+	// It returns a Timer that can be used to cancel the call using its Stop method.
+	AfterFunc(d time.Duration, f func()) Timer
+
+	// NewTimer creates a new Timer that will send the current time
+	// on its channel after at least duration d.
+	NewTimer(d time.Duration) Timer
+}
+
+// Alarm returns a channel that will have the time sent on it at some point
+// after the supplied time occurs.
+//
+// This is short for c.After(t.Sub(c.Now())).
+func Alarm(c Clock, t time.Time) <-chan time.Time {
+	return c.After(t.Sub(c.Now()))
+}
+
+// The Timer type represents a single event.
+// A Timer must be created with AfterFunc.
+// This interface follows time.Timer's methods but provides easier mocking.
+type Timer interface {
+	// When the Timer expires, the current time will be sent on the
+	// channel returned from Chan, unless the Timer was created by
+	// AfterFunc.
+	Chan() <-chan time.Time
+
+	// Reset changes the timer to expire after duration d.
+	// It returns true if the timer had been active, false if
+	// the timer had expired or been stopped.
+	Reset(time.Duration) bool
+
+	// Stop prevents the Timer from firing. It returns true if
+	// the call stops the timer, false if the timer has already expired or been stopped.
+	// Stop does not close the channel, to prevent a read
+	// from the channel succeeding incorrectly.
+	Stop() bool
+}
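
The interface exists so time can be injected. A hedged sketch of a helper written against Clock rather than the time package directly (poll is an invented name):

    // poll invokes check every interval until it reports success, taking a
    // Clock so tests can substitute a controllable fake.
    func poll(c clock.Clock, interval time.Duration, check func() bool) {
        for !check() {
            <-c.After(interval)
        }
    }

    // Production callers pass clock.WallClock; tests pass a fake Clock.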
diff --git a/switchq/vendor/github.com/juju/utils/clock/wall.go b/switchq/vendor/github.com/juju/utils/clock/wall.go
new file mode 100644
index 0000000..9bfc351
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/clock/wall.go
@@ -0,0 +1,47 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package clock
+
+import (
+	"time"
+)
+
+// WallClock exposes wall-clock time via the Clock interface.
+var WallClock wallClock
+
+// ensure that WallClock does actually implement the Clock interface.
+var _ Clock = WallClock
+
+// wallClock exposes wall-clock time as returned by time.Now.
+type wallClock struct{}
+
+// Now is part of the Clock interface.
+func (wallClock) Now() time.Time {
+	return time.Now()
+}
+
+// After implements Clock.After.
+func (wallClock) After(d time.Duration) <-chan time.Time {
+	return time.After(d)
+}
+
+// AfterFunc implements Clock.AfterFunc.
+func (wallClock) AfterFunc(d time.Duration, f func()) Timer {
+	return wallTimer{time.AfterFunc(d, f)}
+}
+
+// NewTimer implements Clock.NewTimer.
+func (wallClock) NewTimer(d time.Duration) Timer {
+	return wallTimer{time.NewTimer(d)}
+}
+
+// wallTimer implements the Timer interface.
+type wallTimer struct {
+	*time.Timer
+}
+
+// Chan implements Timer.Chan.
+func (t wallTimer) Chan() <-chan time.Time {
+	return t.C
+}
diff --git a/switchq/vendor/github.com/juju/utils/command.go b/switchq/vendor/github.com/juju/utils/command.go
new file mode 100644
index 0000000..4bd51cd
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/command.go
@@ -0,0 +1,19 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"os/exec"
+)
+
+// RunCommand executes the command and returns the combined output.
+func RunCommand(command string, args ...string) (output string, err error) {
+	cmd := exec.Command(command, args...)
+	out, err := cmd.CombinedOutput()
+	output = string(out)
+	if err != nil {
+		return output, err
+	}
+	return output, nil
+}
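
Because the combined output is returned even on failure, it is worth including in the error path. A sketch; the ovs-vsctl invocation is purely illustrative:

    out, err := utils.RunCommand("ovs-vsctl", "show")
    if err != nil {
        // out still carries the combined stdout and stderr.
        log.Printf("ovs-vsctl failed: %v: %s", err, out)
    }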
diff --git a/switchq/vendor/github.com/juju/utils/dependencies.tsv b/switchq/vendor/github.com/juju/utils/dependencies.tsv
new file mode 100644
index 0000000..76d098e
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/dependencies.tsv
@@ -0,0 +1,24 @@
+github.com/juju/cmd	git	7c57a7d5a20602e4563a83f2d530283ca0e6f481	2016-08-10T12:53:08Z
+github.com/juju/errors	git	1b5e39b83d1835fa480e0c2ddefb040ee82d58b3	2015-09-16T12:56:42Z
+github.com/juju/gnuflag	git	4e76c56581859c14d9d87e1ddbe29e1c0f10195f	2016-08-09T16:52:14Z
+github.com/juju/httpprof	git	14bf14c307672fd2456bdbf35d19cf0ccd3cf565	2014-12-17T16:00:36Z
+github.com/juju/httprequest	git	89d547093c45e293599088cc63e805c6f1205dc0	2016-03-02T10:09:58Z
+github.com/juju/loggo	git	15901ae4de786d05edae84a27c93d3fbef66c91e	2016-08-04T22:15:26Z
+github.com/juju/retry	git	62c62032529169c7ec02fa48f93349604c345e1f	2015-10-29T02:48:21Z
+github.com/juju/testing	git	7177264a582e2c00d08277eaa91d88f8eb0fd869	2016-09-26T12:59:16Z
+github.com/juju/version	git	4ae6172c00626779a5a462c3e3d22fc0e889431a	2016-06-03T19:49:58Z
+github.com/julienschmidt/httprouter	git	77a895ad01ebc98a4dc95d8355bc825ce80a56f6	2015-10-13T22:55:20Z
+github.com/masterzen/azure-sdk-for-go	git	ee4f0065d00cd12b542f18f5bc45799e88163b12	2016-07-20T05:16:58Z
+github.com/masterzen/simplexml	git	4572e39b1ab9fe03ee513ce6fc7e289e98482190	2016-06-08T18:30:07Z
+github.com/masterzen/winrm	git	7a535cd943fccaeed196718896beec3fb51aff41	2016-08-04T09:38:27Z
+github.com/masterzen/xmlpath	git	13f4951698adc0fa9c1dda3e275d489a24201161	2014-02-18T18:59:01Z
+github.com/nu7hatch/gouuid	git	179d4d0c4d8d407a32af483c2354df1d2c91e6c3	2016-02-18t18:59:01Z
+golang.org/x/crypto	git	aedad9a179ec1ea11b7064c57cbc6dc30d7724ec	2015-08-30T18:06:42Z
+golang.org/x/net	git	ea47fc708ee3e20177f3ca3716217c4ab75942cb	2015-08-29T23:03:18Z
+golang.org/x/text	git	b01949dc0793a9af5e4cb3fce4d42999e76e8ca1	2016-05-25T23:07:23Z
+gopkg.in/check.v1	git	4f90aeace3a26ad7021961c297b22c42160c7b25	2016-01-05T16:49:36Z
+gopkg.in/errgo.v1	git	66cb46252b94c1f3d65646f54ee8043ab38d766c	2015-10-07T15:31:57Z
+gopkg.in/juju/names.v2	git	e38bc90539f22af61a9c656d35068bd5f0a5b30a	2016-05-25T23:07:23Z
+gopkg.in/mgo.v2	git	4d04138ffef2791c479c0c8bbffc30b34081b8d9	2015-10-26T16:34:53Z
+gopkg.in/tomb.v1	git	dd632973f1e7218eb1089048e0798ec9ae7dceb8	2014-10-24T13:56:13Z
+gopkg.in/yaml.v2	git	a83829b6f1293c91addabc89d0571c246397bbf4	2016-03-01T20:40:22Z
diff --git a/switchq/vendor/github.com/juju/utils/file.go b/switchq/vendor/github.com/juju/utils/file.go
new file mode 100644
index 0000000..75fe5a6
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/file.go
@@ -0,0 +1,149 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"regexp"
+)
+
+// UserHomeDir returns the home directory for the specified user, or the
+// home directory for the current user if the specified user is empty.
+func UserHomeDir(userName string) (hDir string, err error) {
+	if userName == "" {
+		// TODO (wallyworld) - fix tests on Windows
+		// Ordinarily, we'd always use user.Current() to get the current user
+		// and then get the HomeDir from that. But our tests rely on poking
+		// a value into $HOME in order to override the normal home dir for the
+		// current user. So we're forced to use Home() to make the tests pass.
+		// All of our tests currently construct paths with the default user in
+		// mind eg "~/foo".
+		return Home(), nil
+	}
+	hDir, err = homeDir(userName)
+	if err != nil {
+		return "", err
+	}
+	return hDir, nil
+}
+
+// Only match paths starting with ~ (~user/test, ~/test). This will prevent
+// accidental expansion on Windows when short form paths are present (C:\users\ADMINI~1\test)
+var userHomePathRegexp = regexp.MustCompile("(^~(?P<user>[^/]*))(?P<path>.*)")
+
+// NormalizePath expands a path containing ~ to its absolute form,
+// and removes any .. or . path elements.
+func NormalizePath(dir string) (string, error) {
+	if userHomePathRegexp.MatchString(dir) {
+		user := userHomePathRegexp.ReplaceAllString(dir, "$user")
+		userHomeDir, err := UserHomeDir(user)
+		if err != nil {
+			return "", err
+		}
+		dir = userHomePathRegexp.ReplaceAllString(dir, fmt.Sprintf("%s$path", userHomeDir))
+	}
+	return filepath.Clean(dir), nil
+}
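+
+// Illustrative sketch (editorial note; the home directory shown is an
+// assumption): "~" is expanded via UserHomeDir and the result is cleaned:
+//
+//  p, err := NormalizePath("~/a/../b")
+//  // with $HOME=/home/ubuntu: p == "/home/ubuntu/b", err == nil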
+
+// EnsureBaseDir ensures that path is always prefixed by baseDir,
+// allowing for the fact that path might have a Windows drive letter in
+// it.
+func EnsureBaseDir(baseDir, path string) string {
+	if baseDir == "" {
+		return path
+	}
+	volume := filepath.VolumeName(path)
+	return filepath.Join(baseDir, path[len(volume):])
+}
+
+// JoinServerPath joins any number of path elements into a single path, adding
+// a path separator (based on the current juju server OS) if necessary. The
+// result is Cleaned; in particular, all empty strings are ignored.
+func JoinServerPath(elem ...string) string {
+	return path.Join(elem...)
+}
+
+// UniqueDirectory returns "path/name" if that directory doesn't exist.  If it
+// does, the method starts appending .1, .2, etc until a unique name is found.
+func UniqueDirectory(path, name string) (string, error) {
+	dir := filepath.Join(path, name)
+	_, err := os.Stat(dir)
+	if os.IsNotExist(err) {
+		return dir, nil
+	}
+	for i := 1; ; i++ {
+		dir := filepath.Join(path, fmt.Sprintf("%s.%d", name, i))
+		_, err := os.Stat(dir)
+		if os.IsNotExist(err) {
+			return dir, nil
+		} else if err != nil {
+			return "", err
+		}
+	}
+}
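+
+// Illustrative sketch (editorial note; paths are hypothetical): if
+// "/tmp/run" already exists but "/tmp/run.1" does not:
+//
+//  dir, err := UniqueDirectory("/tmp", "run")
+//  // dir == "/tmp/run.1", err == nil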
+
+// CopyFile writes the contents of the given source file to dest.
+func CopyFile(dest, source string) error {
+	df, err := os.Create(dest)
+	if err != nil {
+		return err
+	}
+	f, err := os.Open(source)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = io.Copy(df, f)
+	return err
+}
+
+// AtomicWriteFileAndChange atomically writes the filename with the
+// given contents and calls the given function after the contents were
+// written, but before the file is renamed.
+func AtomicWriteFileAndChange(filename string, contents []byte, change func(*os.File) error) (err error) {
+	dir, file := filepath.Split(filename)
+	f, err := ioutil.TempFile(dir, file)
+	if err != nil {
+		return fmt.Errorf("cannot create temp file: %v", err)
+	}
+	defer f.Close()
+	defer func() {
+		if err != nil {
+			// Don't leave the temp file lying around on error.
+			// Close the file before removing. Trying to remove an open file on
+			// Windows will fail.
+			f.Close()
+			os.Remove(f.Name())
+		}
+	}()
+	if _, err := f.Write(contents); err != nil {
+		return fmt.Errorf("cannot write %q contents: %v", filename, err)
+	}
+	if err := change(f); err != nil {
+		return err
+	}
+	f.Close()
+	if err := ReplaceFile(f.Name(), filename); err != nil {
+		return fmt.Errorf("cannot replace %q with %q: %v", f.Name(), filename, err)
+	}
+	return nil
+}
+
+// AtomicWriteFile atomically writes the filename with the given
+// contents and permissions, replacing any existing file at the same
+// path.
+func AtomicWriteFile(filename string, contents []byte, perms os.FileMode) (err error) {
+	return AtomicWriteFileAndChange(filename, contents, func(f *os.File) error {
+		// (*os.File).Chmod() is not implemented on Windows; os.Chmod() is.
+		if err := os.Chmod(f.Name(), perms); err != nil {
+			return fmt.Errorf("cannot set permissions: %v", err)
+		}
+		return nil
+	})
+}
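+
+// Usage sketch (editorial example; the path and contents are hypothetical):
+// the temp-file-then-rename sequence above means readers never observe a
+// partially written file:
+//
+//  err := AtomicWriteFile("/etc/svc/conf.yaml", []byte("key: value\n"), 0600)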
diff --git a/switchq/vendor/github.com/juju/utils/file_unix.go b/switchq/vendor/github.com/juju/utils/file_unix.go
new file mode 100644
index 0000000..4380290
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/file_unix.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// +build !windows
+
+package utils
+
+import (
+	"fmt"
+	"os"
+	"os/user"
+	"strconv"
+	"strings"
+
+	"github.com/juju/errors"
+)
+
+func homeDir(userName string) (string, error) {
+	u, err := user.Lookup(userName)
+	if err != nil {
+		return "", errors.NewUserNotFound(err, "no such user")
+	}
+	return u.HomeDir, nil
+}
+
+// MoveFile atomically moves the source file to the destination, returning
+// whether the file was moved successfully. If the destination already exists,
+// it returns an error rather than overwrite it.
+//
+// On unix systems the move may succeed and yet return an error, if the
+// source file cannot be unlinked after being linked to the destination.
+func MoveFile(source, destination string) (bool, error) {
+	err := os.Link(source, destination)
+	if err != nil {
+		return false, err
+	}
+	err = os.Remove(source)
+	if err != nil {
+		return true, err
+	}
+	return true, nil
+}
+
+// ReplaceFile atomically replaces the destination file or directory
+// with the source. The errors that are returned are identical to
+// those returned by os.Rename.
+func ReplaceFile(source, destination string) error {
+	return os.Rename(source, destination)
+}
+
+// MakeFileURL returns a file URL if an absolute path is passed in;
+// otherwise it returns the input unchanged.
+func MakeFileURL(in string) string {
+	if strings.HasPrefix(in, "/") {
+		return "file://" + in
+	}
+	return in
+}
+
+// ChownPath sets the uid and gid of path to match that of the user
+// specified.
+func ChownPath(path, username string) error {
+	u, err := user.Lookup(username)
+	if err != nil {
+		return fmt.Errorf("cannot lookup %q user id: %v", username, err)
+	}
+	uid, err := strconv.Atoi(u.Uid)
+	if err != nil {
+		return fmt.Errorf("invalid user id %q: %v", u.Uid, err)
+	}
+	gid, err := strconv.Atoi(u.Gid)
+	if err != nil {
+		return fmt.Errorf("invalid group id %q: %v", u.Gid, err)
+	}
+	return os.Chown(path, uid, gid)
+}
diff --git a/switchq/vendor/github.com/juju/utils/file_windows.go b/switchq/vendor/github.com/juju/utils/file_windows.go
new file mode 100644
index 0000000..9bb3936
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/file_windows.go
@@ -0,0 +1,142 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// +build windows
+
+package utils
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+	"unsafe"
+
+	"github.com/juju/errors"
+)
+
+const (
+	movefile_replace_existing = 0x1
+	movefile_write_through    = 0x8
+)
+
+//sys moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) = MoveFileExW
+
+// MoveFile atomically moves the source file to the destination, returning
+// whether the file was moved successfully. If the destination already exists,
+// it returns an error rather than overwrite it.
+func MoveFile(source, destination string) (bool, error) {
+	src, err := syscall.UTF16PtrFromString(source)
+	if err != nil {
+		return false, &os.LinkError{"move", source, destination, err}
+	}
+	dest, err := syscall.UTF16PtrFromString(destination)
+	if err != nil {
+		return false, &os.LinkError{"move", source, destination, err}
+	}
+
+	// see http://msdn.microsoft.com/en-us/library/windows/desktop/aa365240(v=vs.85).aspx
+	if err := moveFileEx(src, dest, movefile_write_through); err != nil {
+		return false, &os.LinkError{"move", source, destination, err}
+	}
+	return true, nil
+
+}
+
+// ReplaceFile atomically replaces the destination file or directory with the source.
+// The errors that are returned are identical to those returned by os.Rename.
+func ReplaceFile(source, destination string) error {
+	src, err := syscall.UTF16PtrFromString(source)
+	if err != nil {
+		return &os.LinkError{"replace", source, destination, err}
+	}
+	dest, err := syscall.UTF16PtrFromString(destination)
+	if err != nil {
+		return &os.LinkError{"replace", source, destination, err}
+	}
+
+	// see http://msdn.microsoft.com/en-us/library/windows/desktop/aa365240(v=vs.85).aspx
+	if err := moveFileEx(src, dest, movefile_replace_existing|movefile_write_through); err != nil {
+		return &os.LinkError{"replace", source, destination, err}
+	}
+	return nil
+}
+
+// MakeFileURL returns a proper file URL for the given path/directory
+func MakeFileURL(in string) string {
+	in = filepath.ToSlash(in)
+	// On Windows the path should start with <letter>: to be considered
+	// valid, so we can't do anything with less than that.
+	if len(in) < 2 {
+		return in
+	}
+	if string(in[1]) != ":" {
+		return in
+	}
+	// Since Go 1.6 the http client will only accept this format.
+	return "file://" + in
+}
+
+func getUserSID(username string) (string, error) {
+	sid, _, _, e := syscall.LookupSID("", username)
+	if e != nil {
+		return "", e
+	}
+	sidStr, err := sid.String()
+	return sidStr, err
+}
+
+func readRegString(h syscall.Handle, key string) (value string, err error) {
+	var typ uint32
+	var buf uint32
+
+	// Get size of registry key
+	err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(key), nil, &typ, nil, &buf)
+	if err != nil {
+		return value, err
+	}
+
+	n := make([]uint16, buf/2+1)
+	err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(key), nil, &typ, (*byte)(unsafe.Pointer(&n[0])), &buf)
+	if err != nil {
+		return value, err
+	}
+	return syscall.UTF16ToString(n[:]), err
+}
+
+func homeFromRegistry(sid string) (string, error) {
+	var h syscall.Handle
+	// This key will exist on all platforms we support the agent on (Windows Server 2008 and above).
+	keyPath := fmt.Sprintf("Software\\Microsoft\\Windows NT\\CurrentVersion\\ProfileList\\%s", sid)
+	err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
+		syscall.StringToUTF16Ptr(keyPath),
+		0, syscall.KEY_READ, &h)
+	if err != nil {
+		return "", err
+	}
+	defer syscall.RegCloseKey(h)
+	str, err := readRegString(h, "ProfileImagePath")
+	if err != nil {
+		return "", err
+	}
+	return str, nil
+}
+
+// homeDir returns a local user home dir on Windows.
+// user.Lookup() does not populate Gid and HomeDir on Windows,
+// so we get it from the registry.
+func homeDir(user string) (string, error) {
+	u, err := getUserSID(user)
+	if err != nil {
+		return "", errors.NewUserNotFound(err, "no such user")
+	}
+	return homeFromRegistry(u)
+}
+
+// ChownPath is not implemented for Windows.
+func ChownPath(path, username string) error {
+	// This only exists to allow building on Windows. User lookup and
+	// file ownership needs to be handled in a completely different
+	// way and hasn't yet been implemented.
+	return nil
+}
diff --git a/switchq/vendor/github.com/juju/utils/gomaxprocs.go b/switchq/vendor/github.com/juju/utils/gomaxprocs.go
new file mode 100644
index 0000000..5977a86
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/gomaxprocs.go
@@ -0,0 +1,26 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"os"
+	"runtime"
+)
+
+var gomaxprocs = runtime.GOMAXPROCS
+var numCPU = runtime.NumCPU
+
+// UseMultipleCPUs sets GOMAXPROCS to the number of CPU cores unless it has
+// already been overridden by the GOMAXPROCS environment variable.
+func UseMultipleCPUs() {
+	if envGOMAXPROCS := os.Getenv("GOMAXPROCS"); envGOMAXPROCS != "" {
+		n := gomaxprocs(0)
+		logger.Debugf("GOMAXPROCS already set in environment to %q, %d internally",
+			envGOMAXPROCS, n)
+		return
+	}
+	n := numCPU()
+	logger.Debugf("setting GOMAXPROCS to %d", n)
+	gomaxprocs(n)
+}
diff --git a/switchq/vendor/github.com/juju/utils/home_unix.go b/switchq/vendor/github.com/juju/utils/home_unix.go
new file mode 100644
index 0000000..6b450be
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/home_unix.go
@@ -0,0 +1,19 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// +build !windows
+
+package utils
+
+import (
+	"os"
+)
+
+// Home returns the os-specific home path as specified in the environment.
+func Home() string {
+	return os.Getenv("HOME")
+}
+
+// SetHome sets the os-specific home path in the environment.
+func SetHome(s string) error {
+	return os.Setenv("HOME", s)
+}
diff --git a/switchq/vendor/github.com/juju/utils/home_windows.go b/switchq/vendor/github.com/juju/utils/home_windows.go
new file mode 100644
index 0000000..e61225c
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/home_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"os"
+	"path/filepath"
+)
+
+// Home returns the os-specific home path as specified in the environment.
+func Home() string {
+	return filepath.Join(os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"))
+}
+
+// SetHome sets the os-specific home path in the environment.
+func SetHome(s string) error {
+	v := filepath.VolumeName(s)
+	if v != "" {
+		if err := os.Setenv("HOMEDRIVE", v); err != nil {
+			return err
+		}
+	}
+	return os.Setenv("HOMEPATH", s[len(v):])
+}
diff --git a/switchq/vendor/github.com/juju/utils/http-1_4.go b/switchq/vendor/github.com/juju/utils/http-1_4.go
new file mode 100644
index 0000000..87bd5b1
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/http-1_4.go
@@ -0,0 +1,24 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+//+build !go1.7
+
+package utils
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+)
+
+// installHTTPDialShim patches the default HTTP transport so
+// that it fails when an attempt is made to dial a non-local
+// host.
+func installHTTPDialShim(t *http.Transport) {
+	t.Dial = func(network, addr string) (net.Conn, error) {
+		if !OutgoingAccessAllowed && !isLocalAddr(addr) {
+			return nil, fmt.Errorf("access to address %q not allowed", addr)
+		}
+		return net.Dial(network, addr)
+	}
+}
diff --git a/switchq/vendor/github.com/juju/utils/http-1_7.go b/switchq/vendor/github.com/juju/utils/http-1_7.go
new file mode 100644
index 0000000..d676dcb
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/http-1_7.go
@@ -0,0 +1,35 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+//+build go1.7
+
+package utils
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"time"
+)
+
+var ctxtDialer = &net.Dialer{
+	Timeout:   30 * time.Second,
+	KeepAlive: 30 * time.Second,
+}
+
+// installHTTPDialShim patches the default HTTP transport so
+// that it fails when an attempt is made to dial a non-local
+// host.
+//
+// Note that this is Go version dependent because in Go 1.7 and above,
+// the DialContext field was introduced (and set in http.DefaultTransport)
+// which overrides the Dial field.
+func installHTTPDialShim(t *http.Transport) {
+	t.DialContext = func(ctxt context.Context, network, addr string) (net.Conn, error) {
+		if !OutgoingAccessAllowed && !isLocalAddr(addr) {
+			return nil, fmt.Errorf("access to address %q not allowed", addr)
+		}
+		return ctxtDialer.DialContext(ctxt, network, addr)
+	}
+}
diff --git a/switchq/vendor/github.com/juju/utils/http.go b/switchq/vendor/github.com/juju/utils/http.go
new file mode 100644
index 0000000..2f5ddf3
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/http.go
@@ -0,0 +1,117 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"crypto/tls"
+	"encoding/base64"
+	"fmt"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+)
+
+var insecureClient = (*http.Client)(nil)
+var insecureClientMutex = sync.Mutex{}
+
+func init() {
+	defaultTransport := http.DefaultTransport.(*http.Transport)
+	installHTTPDialShim(defaultTransport)
+	registerFileProtocol(defaultTransport)
+}
+
+// registerFileProtocol registers support for file:// URLs on the given transport.
+func registerFileProtocol(transport *http.Transport) {
+	transport.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
+}
+
+// SSLHostnameVerification is used as a switch for when a given provider might
+// use self-signed credentials and we should not try to verify the hostname on
+// the TLS/SSL certificates.
+type SSLHostnameVerification bool
+
+const (
+	// VerifySSLHostnames ensures we verify that the hostname on the
+	// certificate matches the host we are connecting to and that the
+	// certificate is properly signed.
+	VerifySSLHostnames = SSLHostnameVerification(true)
+	// NoVerifySSLHostnames informs us to skip verifying that the hostname
+	// matches a valid certificate.
+	NoVerifySSLHostnames = SSLHostnameVerification(false)
+)
+
+// GetHTTPClient returns either a standard http client or a
+// non-validating client, depending on the value of verify.
+func GetHTTPClient(verify SSLHostnameVerification) *http.Client {
+	if verify == VerifySSLHostnames {
+		return GetValidatingHTTPClient()
+	}
+	return GetNonValidatingHTTPClient()
+}
+
+// GetValidatingHTTPClient returns a new http.Client that
+// verifies the server's certificate chain and hostname.
+func GetValidatingHTTPClient() *http.Client {
+	return &http.Client{}
+}
+
+// GetNonValidatingHTTPClient returns a new http.Client that
+// does not verify the server's certificate chain and hostname.
+func GetNonValidatingHTTPClient() *http.Client {
+	return &http.Client{
+		Transport: NewHttpTLSTransport(&tls.Config{
+			InsecureSkipVerify: true,
+		}),
+	}
+}
+
+// BasicAuthHeader creates a header that contains just the "Authorization"
+// entry.  The implementation was originally taken from net/http, but it is
+// needed outside of the http request object in order to use it with our
+// websockets. See section 2 (end of page 4) of http://www.ietf.org/rfc/rfc2617.txt:
+// "To receive authorization, the client sends the userid and password,
+// separated by a single colon (":") character, within a base64 encoded string
+// in the credentials."
+func BasicAuthHeader(username, password string) http.Header {
+	auth := username + ":" + password
+	encoded := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
+	return http.Header{
+		"Authorization": {encoded},
+	}
+}
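+
+// Illustrative sketch (editorial note): the header value is the base64
+// encoding of "user:pass":
+//
+//  h := BasicAuthHeader("user", "pass")
+//  // h.Get("Authorization") == "Basic dXNlcjpwYXNz"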
+
+// ParseBasicAuth attempts to find an Authorization header in the supplied
+// http.Header and if found parses it as a Basic header. See 2 (end of page 4)
+// http://www.ietf.org/rfc/rfc2617.txt "To receive authorization, the client
+// sends the userid and password, separated by a single colon (":") character,
+// within a base64 encoded string in the credentials."
+func ParseBasicAuthHeader(h http.Header) (userid, password string, err error) {
+	parts := strings.Fields(h.Get("Authorization"))
+	if len(parts) != 2 || parts[0] != "Basic" {
+		return "", "", fmt.Errorf("invalid or missing HTTP auth header")
+	}
+	// Challenge is a base64-encoded "tag:pass" string.
+	// See RFC 2617, Section 2.
+	challenge, err := base64.StdEncoding.DecodeString(parts[1])
+	if err != nil {
+		return "", "", fmt.Errorf("invalid HTTP auth encoding")
+	}
+	tokens := strings.SplitN(string(challenge), ":", 2)
+	if len(tokens) != 2 {
+		return "", "", fmt.Errorf("invalid HTTP auth contents")
+	}
+	return tokens[0], tokens[1], nil
+}
+
+// OutgoingAccessAllowed determines whether connections other than
+// localhost can be dialled.
+var OutgoingAccessAllowed = true
+
+func isLocalAddr(addr string) bool {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		return false
+	}
+	return host == "localhost" || net.ParseIP(host).IsLoopback()
+}
diff --git a/switchq/vendor/github.com/juju/utils/isubuntu.go b/switchq/vendor/github.com/juju/utils/isubuntu.go
new file mode 100644
index 0000000..d85ed9a
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/isubuntu.go
@@ -0,0 +1,17 @@
+// Copyright 2011, 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"strings"
+)
+
+// IsUbuntu executes lsb_release to see if the host OS is Ubuntu.
+func IsUbuntu() bool {
+	out, err := RunCommand("lsb_release", "-i", "-s")
+	if err != nil {
+		return false
+	}
+	return strings.TrimSpace(out) == "Ubuntu"
+}
diff --git a/switchq/vendor/github.com/juju/utils/limiter.go b/switchq/vendor/github.com/juju/utils/limiter.go
new file mode 100644
index 0000000..60bd066
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/limiter.go
@@ -0,0 +1,59 @@
+// Copyright 2011, 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+)
+
+type empty struct{}
+type limiter chan empty
+
+// Limiter represents a limited resource (e.g. a semaphore).
+type Limiter interface {
+	// Acquire another unit of the resource.
+	// Acquire returns false to indicate there is no more availability,
+	// until another entity calls Release.
+	Acquire() bool
+	// AcquireWait requests a unit of resource, but blocks until one is
+	// available.
+	AcquireWait()
+	// Release returns a unit of the resource. Calling Release when there
+	// are no units Acquired is an error.
+	Release() error
+}
+
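+// NewLimiter returns a Limiter backed by a buffered channel that allows at
+// most max units to be acquired at any one time.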
+func NewLimiter(max int) Limiter {
+	return make(limiter, max)
+}
+
+// Acquire requests some resources that you can return later.
+// It returns 'true' if there are resources available, but false if they are
+// not. Callers are responsible for calling Release if this returns true, but
+// should not release if this returns false.
+func (l limiter) Acquire() bool {
+	e := empty{}
+	select {
+	case l <- e:
+		return true
+	default:
+		return false
+	}
+}
+
+// AcquireWait waits for the resource to become available before returning.
+func (l limiter) AcquireWait() {
+	e := empty{}
+	l <- e
+}
+
+// Release returns the resource to the available pool.
+func (l limiter) Release() error {
+	select {
+	case <-l:
+		return nil
+	default:
+		return fmt.Errorf("Release without an associated Acquire")
+	}
+}
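+
+// Usage sketch (editorial example; jobs and process are hypothetical
+// placeholders): bounding concurrent work with the channel-based semaphore
+// above:
+//
+//  limiter := NewLimiter(10)
+//  for _, job := range jobs {
+//      limiter.AcquireWait()
+//      go func(j string) {
+//          defer limiter.Release()
+//          process(j)
+//      }(job)
+//  }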
diff --git a/switchq/vendor/github.com/juju/utils/multireader.go b/switchq/vendor/github.com/juju/utils/multireader.go
new file mode 100644
index 0000000..b8431f9
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/multireader.go
@@ -0,0 +1,189 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"io"
+	"sort"
+
+	"github.com/juju/errors"
+)
+
+// SizeReaderAt combines io.ReaderAt with a Size method.
+type SizeReaderAt interface {
+	// Size returns the size of the data readable
+	// from the reader.
+	Size() int64
+	io.ReaderAt
+}
+
+// NewMultiReaderAt is like io.MultiReader but produces a ReaderAt
+// (and Size), instead of just a reader.
+//
+// Note: this implementation was taken from a talk given
+// by Brad Fitzpatrick at OSCON 2013.
+//
+// http://talks.golang.org/2013/oscon-dl.slide#49
+// https://github.com/golang/talks/blob/master/2013/oscon-dl/server-compose.go
+func NewMultiReaderAt(parts ...SizeReaderAt) SizeReaderAt {
+	m := &multiReaderAt{
+		parts: make([]offsetAndSource, 0, len(parts)),
+	}
+	var off int64
+	for _, p := range parts {
+		m.parts = append(m.parts, offsetAndSource{off, p})
+		off += p.Size()
+	}
+	m.size = off
+	return m
+}
+
+type offsetAndSource struct {
+	off int64
+	SizeReaderAt
+}
+
+type multiReaderAt struct {
+	parts []offsetAndSource
+	size  int64
+}
+
+func (m *multiReaderAt) Size() int64 {
+	return m.size
+}
+
+func (m *multiReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
+	wantN := len(p)
+
+	// Skip past the requested offset.
+	skipParts := sort.Search(len(m.parts), func(i int) bool {
+		// This function returns whether parts[i] will
+		// contribute any bytes to our output.
+		part := m.parts[i]
+		return part.off+part.Size() > off
+	})
+	parts := m.parts[skipParts:]
+
+	// How far to skip in the first part.
+	needSkip := off
+	if len(parts) > 0 {
+		needSkip -= parts[0].off
+	}
+
+	for len(parts) > 0 && len(p) > 0 {
+		readP := p
+		partSize := parts[0].Size()
+		if int64(len(readP)) > partSize-needSkip {
+			readP = readP[:partSize-needSkip]
+		}
+		pn, err0 := parts[0].ReadAt(readP, needSkip)
+		if err0 != nil {
+			return n, err0
+		}
+		n += pn
+		p = p[pn:]
+		if int64(pn)+needSkip == partSize {
+			parts = parts[1:]
+		}
+		needSkip = 0
+	}
+
+	if n != wantN {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+// NewMultiReaderSeeker returns an io.ReadSeeker that combines
+// all the given readers into a single one. It assumes that
+// all the seekers are initially positioned at the start.
+func NewMultiReaderSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
+	sreaders := make([]SizeReaderAt, len(readers))
+	for i, r := range readers {
+		r1, err := newSizeReaderAt(r)
+		if err != nil {
+			panic(err)
+		}
+		sreaders[i] = r1
+	}
+	return &readSeeker{
+		r: NewMultiReaderAt(sreaders...),
+	}
+}
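+
+// Illustrative sketch (editorial note): two strings.Readers combined into a
+// single seekable stream:
+//
+//  r := NewMultiReaderSeeker(
+//      strings.NewReader("hello "),
+//      strings.NewReader("world"),
+//  )
+//  r.Seek(6, 0)
+//  // subsequent reads from r yield "world"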
+
+// newSizeReaderAt adapts an io.ReadSeeker to a SizeReaderAt.
+// Note that it doesn't strictly adhere to the ReaderAt
+// contract because it's not safe to call ReadAt concurrently.
+// This doesn't matter because io.ReadSeeker doesn't
+// need to be thread-safe and this is only used in that
+// context.
+func newSizeReaderAt(r io.ReadSeeker) (SizeReaderAt, error) {
+	size, err := r.Seek(0, 2)
+	if err != nil {
+		return nil, err
+	}
+	return &sizeReaderAt{
+		r:    r,
+		size: size,
+		off:  size,
+	}, nil
+}
+
+// sizeReaderAt adapts an io.ReadSeeker to a SizeReaderAt.
+type sizeReaderAt struct {
+	r    io.ReadSeeker
+	size int64
+	off  int64
+}
+
+// ReadAt implements SizeReaderAt.ReadAt.
+func (r *sizeReaderAt) ReadAt(buf []byte, off int64) (n int, err error) {
+	if off != r.off {
+		_, err = r.r.Seek(off, 0)
+		if err != nil {
+			return 0, err
+		}
+		r.off = off
+	}
+	n, err = io.ReadFull(r.r, buf)
+	r.off += int64(n)
+	return n, err
+}
+
+// Size implements SizeReaderAt.Size.
+func (r *sizeReaderAt) Size() int64 {
+	return r.size
+}
+
+// readSeeker adapts a SizeReaderAt to an io.ReadSeeker.
+type readSeeker struct {
+	r   SizeReaderAt
+	off int64
+}
+
+// Seek implements io.Seeker.Seek.
+func (r *readSeeker) Seek(off int64, whence int) (int64, error) {
+	switch whence {
+	case 0:
+	case 1:
+		off += r.off
+	case 2:
+		off = r.r.Size() + off
+	}
+	if off < 0 {
+		return 0, errors.New("negative position")
+	}
+	r.off = off
+	return off, nil
+}
+
+// Read implements io.Reader.Read.
+func (r *readSeeker) Read(buf []byte) (int, error) {
+	n, err := r.r.ReadAt(buf, r.off)
+	r.off += int64(n)
+	if err == io.ErrUnexpectedEOF {
+		err = io.EOF
+	}
+	return n, err
+}
diff --git a/switchq/vendor/github.com/juju/utils/naturalsort.go b/switchq/vendor/github.com/juju/utils/naturalsort.go
new file mode 100644
index 0000000..d337093
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/naturalsort.go
@@ -0,0 +1,95 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"unicode"
+)
+
+// SortStringsNaturally sorts strings according to their natural sort order.
+func SortStringsNaturally(s []string) []string {
+	sort.Sort(naturally(s))
+	return s
+}
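+
+// Illustrative sketch (editorial note): natural sorting orders embedded
+// numbers numerically rather than lexically:
+//
+//  s := []string{"node10", "node2", "node1"}
+//  SortStringsNaturally(s)
+//  // s == []string{"node1", "node2", "node10"}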
+
+type naturally []string
+
+func (n naturally) Len() int {
+	return len(n)
+}
+
+func (n naturally) Swap(a, b int) {
+	n[a], n[b] = n[b], n[a]
+}
+
+// Less sorts by non-numeric prefix and numeric suffix
+// when one exists.
+func (n naturally) Less(a, b int) bool {
+	aVal := n[a]
+	bVal := n[b]
+
+	for {
+		// If bVal is empty, then aVal can't be less than it.
+		if bVal == "" {
+			return false
+		}
+		// If aVal is empty here, then it must be less than bVal.
+		if aVal == "" {
+			return true
+		}
+
+		aPrefix, aNumber, aRemainder := splitAtNumber(aVal)
+		bPrefix, bNumber, bRemainder := splitAtNumber(bVal)
+		if aPrefix != bPrefix {
+			return aPrefix < bPrefix
+		}
+		if aNumber != bNumber {
+			return aNumber < bNumber
+		}
+
+		// Everything is the same so far, try again with the remainder.
+		aVal = aRemainder
+		bVal = bRemainder
+	}
+}
+
+// splitAtNumber splits given string at the first digit, returning the
+// prefix before the number, the integer represented by the first
+// series of digits, and the remainder of the string after the first
+// series of digits. If no digits are present, the number is returned
+// as -1 and the remainder is empty.
+func splitAtNumber(str string) (string, int, string) {
+	i := indexOfDigit(str)
+	if i == -1 {
+		// no numbers
+		return str, -1, ""
+	}
+	j := i + indexOfNonDigit(str[i:])
+	n, err := strconv.Atoi(str[i:j])
+	if err != nil {
+		panic(fmt.Sprintf("parsing number %v: %v", str[i:j], err)) // should never happen
+	}
+	return str[:i], n, str[j:]
+}
+
+func indexOfDigit(str string) int {
+	for i, rune := range str {
+		if unicode.IsDigit(rune) {
+			return i
+		}
+	}
+	return -1
+}
+
+func indexOfNonDigit(str string) int {
+	for i, rune := range str {
+		if !unicode.IsDigit(rune) {
+			return i
+		}
+	}
+	return len(str)
+}
diff --git a/switchq/vendor/github.com/juju/utils/network.go b/switchq/vendor/github.com/juju/utils/network.go
new file mode 100644
index 0000000..505a6e9
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/network.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/juju/loggo"
+)
+
+var logger = loggo.GetLogger("juju.utils")
+
+// GetIPv4Address iterates through the addresses expecting the format from
+// func (ifi *net.Interface) Addrs() ([]net.Addr, error)
+func GetIPv4Address(addresses []net.Addr) (string, error) {
+	for _, addr := range addresses {
+		ip, _, err := net.ParseCIDR(addr.String())
+		if err != nil {
+			return "", err
+		}
+		ipv4 := ip.To4()
+		if ipv4 == nil {
+			continue
+		}
+		return ipv4.String(), nil
+	}
+	return "", fmt.Errorf("no addresses match")
+}
+
+// GetAddressForInterface looks for the network interface
+// and returns the IPv4 address from the possible addresses.
+func GetAddressForInterface(interfaceName string) (string, error) {
+	iface, err := net.InterfaceByName(interfaceName)
+	if err != nil {
+		logger.Errorf("cannot find network interface %q: %v", interfaceName, err)
+		return "", err
+	}
+	addrs, err := iface.Addrs()
+	if err != nil {
+		logger.Errorf("cannot get addresses for network interface %q: %v", interfaceName, err)
+		return "", err
+	}
+	return GetIPv4Address(addrs)
+}
diff --git a/switchq/vendor/github.com/juju/utils/os.go b/switchq/vendor/github.com/juju/utils/os.go
new file mode 100644
index 0000000..146f793
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/os.go
@@ -0,0 +1,41 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+// These are the names of the operating systems recognized by Go.
+const (
+	OSWindows   = "windows"
+	OSDarwin    = "darwin"
+	OSDragonfly = "dragonfly"
+	OSFreebsd   = "freebsd"
+	OSLinux     = "linux"
+	OSNacl      = "nacl"
+	OSNetbsd    = "netbsd"
+	OSOpenbsd   = "openbsd"
+	OSSolaris   = "solaris"
+)
+
+// OSUnix is the list of unix-like operating systems recognized by Go.
+// See http://golang.org/src/path/filepath/path_unix.go.
+var OSUnix = []string{
+	OSDarwin,
+	OSDragonfly,
+	OSFreebsd,
+	OSLinux,
+	OSNacl,
+	OSNetbsd,
+	OSOpenbsd,
+	OSSolaris,
+}
+
+// OSIsUnix determines whether or not the given OS name is one of the
+// unix-like operating systems recognized by Go.
+func OSIsUnix(os string) bool {
+	for _, goos := range OSUnix {
+		if os == goos {
+			return true
+		}
+	}
+	return false
+}
diff --git a/switchq/vendor/github.com/juju/utils/password.go b/switchq/vendor/github.com/juju/utils/password.go
new file mode 100644
index 0000000..914e8e4
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/password.go
@@ -0,0 +1,92 @@
+// Copyright 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"crypto/rand"
+	"crypto/sha512"
+	"encoding/base64"
+	"fmt"
+	"io"
+
+	"golang.org/x/crypto/pbkdf2"
+)
+
+// CompatSalt exists because Juju 1.16 and older used a hard-coded salt to
+// compute the password hash for all users and agents.
+var CompatSalt = string([]byte{0x75, 0x82, 0x81, 0xca})
+
+const randomPasswordBytes = 18
+
+// MinAgentPasswordLength describes how long agent passwords should be. We
+// require this length because we assume enough entropy in the Agent password
+// that it is safe to not do extra rounds of iterated hashing.
+var MinAgentPasswordLength = base64.StdEncoding.EncodedLen(randomPasswordBytes)
+
+// RandomBytes returns n random bytes.
+func RandomBytes(n int) ([]byte, error) {
+	buf := make([]byte, n)
+	_, err := io.ReadFull(rand.Reader, buf)
+	if err != nil {
+		return nil, fmt.Errorf("cannot read random bytes: %v", err)
+	}
+	return buf, nil
+}
+
+// RandomPassword generates a random base64-encoded password.
+func RandomPassword() (string, error) {
+	b, err := RandomBytes(randomPasswordBytes)
+	if err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(b), nil
+}
+
+// RandomSalt generates random base64 data suitable for use as a password
+// salt. The pbkdf2 guideline is to use 8 bytes of salt, so we encode 12 raw
+// bytes into 16 base64 bytes. (The alternative is 6 raw into 8 base64.)
+func RandomSalt() (string, error) {
+	b, err := RandomBytes(12)
+	if err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(b), nil
+}
+
+// FastInsecureHash specifies whether a fast, insecure version of the hash
+// algorithm will be used.  Changing this will cause PasswordHash to
+// produce incompatible passwords.  It should only be changed for
+// testing purposes - to make tests run faster.
+var FastInsecureHash = false
+
+// UserPasswordHash returns a base64-encoded one-way hash of the password that
+// is computationally hard to crack by iterating through possible passwords.
+func UserPasswordHash(password string, salt string) string {
+	if salt == "" {
+		panic("salt is not allowed to be empty")
+	}
+	iter := 8192
+	if FastInsecureHash {
+		iter = 1
+	}
+	// Generate 18 byte passwords because we know that MongoDB
+	// uses the MD5 sum of the password anyway, so there's
+	// no point in using more bytes. (18 so we don't get base 64
+	// padding characters).
+	h := pbkdf2.Key([]byte(password), []byte(salt), iter, 18, sha512.New)
+	return base64.StdEncoding.EncodeToString(h)
+}
+
+// AgentPasswordHash returns base64-encoded one-way hash of password. This is
+// not suitable for User passwords because those will have limited entropy (see
+// UserPasswordHash). However, since we generate long random passwords for
+// agents, we can trust that there is sufficient entropy to prevent brute force
+// search. And using a faster hash allows us to restart the state machines and
+// have 1000s of agents log in in a reasonable amount of time.
+func AgentPasswordHash(password string) string {
+	sum := sha512.New()
+	sum.Write([]byte(password))
+	h := sum.Sum(nil)
+	return base64.StdEncoding.EncodeToString(h[:18])
+}
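+
+// Usage sketch (editorial example; the password literal is hypothetical): a
+// typical pairing of the helpers above when storing a user password:
+//
+//  salt, err := RandomSalt()
+//  if err != nil {
+//      return err
+//  }
+//  hash := UserPasswordHash("secret", salt)
+//  // store salt and hash; on login, recompute the hash and compare.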
diff --git a/switchq/vendor/github.com/juju/utils/randomstring.go b/switchq/vendor/github.com/juju/utils/randomstring.go
new file mode 100644
index 0000000..662f514
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/randomstring.go
@@ -0,0 +1,42 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// These can be used as sane default arguments for RandomString.
+var (
+	LowerAlpha = []rune("abcdefghijklmnopqrstuvwxyz")
+	UpperAlpha = []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+	Digits     = []rune("0123456789")
+)
+
+var (
+	randomStringMu   sync.Mutex
+	randomStringRand *rand.Rand
+)
+
+func init() {
+	randomStringRand = rand.New(
+		rand.NewSource(time.Now().UnixNano()),
+	)
+}
+
+// RandomString will return a string of length n that will only
+// contain runes inside validRunes
+func RandomString(n int, validRunes []rune) string {
+	randomStringMu.Lock()
+	defer randomStringMu.Unlock()
+
+	runes := make([]rune, n)
+	for i := range runes {
+		runes[i] = validRunes[randomStringRand.Intn(len(validRunes))]
+	}
+
+	return string(runes)
+}
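+
+// Illustrative sketch (editorial note): an 8-character lowercase token drawn
+// from the LowerAlpha rune set defined above:
+//
+//  token := RandomString(8, LowerAlpha)
+//  // e.g. "qzjwhxka" (non-deterministic)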
diff --git a/switchq/vendor/github.com/juju/utils/relativeurl.go b/switchq/vendor/github.com/juju/utils/relativeurl.go
new file mode 100644
index 0000000..b70f1d5
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/relativeurl.go
@@ -0,0 +1,62 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"strings"
+
+	"github.com/juju/errors"
+)
+
+// RelativeURLPath returns a relative URL path that is lexically
+// equivalent to targPath when interpreted by url.URL.ResolveReference.
+// On success, the returned path will always be non-empty and relative
+// to basePath, even if basePath and targPath share no elements.
+//
+// It is assumed that both basePath and targPath are normalized
+// (have no . or .. elements).
+//
+// An error is returned if basePath or targPath are not absolute paths.
+func RelativeURLPath(basePath, targPath string) (string, error) {
+	if !strings.HasPrefix(basePath, "/") {
+		return "", errors.New("non-absolute base URL")
+	}
+	if !strings.HasPrefix(targPath, "/") {
+		return "", errors.New("non-absolute target URL")
+	}
+	baseParts := strings.Split(basePath, "/")
+	targParts := strings.Split(targPath, "/")
+
+	// For the purposes of dotdot, the last elements of
+	// the paths are irrelevant. We save the last part
+	// of the target path for later.
+	lastElem := targParts[len(targParts)-1]
+	baseParts = baseParts[0 : len(baseParts)-1]
+	targParts = targParts[0 : len(targParts)-1]
+
+	// Find the common prefix between the two paths:
+	var i int
+	for ; i < len(baseParts); i++ {
+		if i >= len(targParts) || baseParts[i] != targParts[i] {
+			break
+		}
+	}
+	dotdotCount := len(baseParts) - i
+	targOnly := targParts[i:]
+	result := make([]string, 0, dotdotCount+len(targOnly)+1)
+	for i := 0; i < dotdotCount; i++ {
+		result = append(result, "..")
+	}
+	result = append(result, targOnly...)
+	result = append(result, lastElem)
+	final := strings.Join(result, "/")
+	if final == "" {
+		// If the final result is empty, the last element must
+		// have been empty, so the target was slash terminated
+		// and there were no previous elements, so "."
+		// is appropriate.
+		final = "."
+	}
+	return final, nil
+}
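+
+// Illustrative sketch (editorial note): for base "/a/b/c" and target
+// "/a/x/y" the common prefix is "/a", one ".." climbs out of "b", and the
+// result resolves back to the target:
+//
+//  rel, err := RelativeURLPath("/a/b/c", "/a/x/y")
+//  // rel == "../x/y", err == nil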
diff --git a/switchq/vendor/github.com/juju/utils/set/ints.go b/switchq/vendor/github.com/juju/utils/set/ints.go
new file mode 100644
index 0000000..02009ea
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/set/ints.go
@@ -0,0 +1,112 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package set
+
+import (
+	"sort"
+)
+
+// Ints represents the classic "set" data structure, and contains ints.
+type Ints map[int]bool
+
+// NewInts creates and initializes an Ints and populates it with
+// initial values as specified in the parameters.
+func NewInts(initial ...int) Ints {
+	result := make(Ints)
+	for _, value := range initial {
+		result.Add(value)
+	}
+	return result
+}
+
+// Size returns the number of elements in the set.
+func (is Ints) Size() int {
+	return len(is)
+}
+
+// IsEmpty is true for empty or uninitialized sets.
+func (is Ints) IsEmpty() bool {
+	return len(is) == 0
+}
+
+// Add puts a value into the set.
+func (is Ints) Add(value int) {
+	if is == nil {
+		panic("uninitialised set")
+	}
+	is[value] = true
+}
+
+// Remove takes a value out of the set. If value wasn't in the set to start
+// with, this method silently succeeds.
+func (is Ints) Remove(value int) {
+	delete(is, value)
+}
+
+// Contains returns true if the value is in the set, and false otherwise.
+func (is Ints) Contains(value int) bool {
+	_, exists := is[value]
+	return exists
+}
+
+// Values returns an unordered slice containing all the values in the set.
+func (is Ints) Values() []int {
+	result := make([]int, len(is))
+	i := 0
+	for key := range is {
+		result[i] = key
+		i++
+	}
+	return result
+}
+
+// SortedValues returns an ordered slice containing all the values in the set.
+func (is Ints) SortedValues() []int {
+	values := is.Values()
+	sort.Ints(values)
+	return values
+}
+
+// Union returns a new Ints representing a union of the elements in the
+// method target and the parameter.
+func (is Ints) Union(other Ints) Ints {
+	result := make(Ints)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range is {
+		result[value] = true
+	}
+	for value := range other {
+		result[value] = true
+	}
+	return result
+}
+
+// Intersection returns a new Ints representing an intersection of the elements in the
+// method target and the parameter.
+func (is Ints) Intersection(other Ints) Ints {
+	result := make(Ints)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range is {
+		if other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
+
+// Difference returns a new Ints representing all the values in the
+// target that are not in the parameter.
+func (is Ints) Difference(other Ints) Ints {
+	result := make(Ints)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range is {
+		if !other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
diff --git a/switchq/vendor/github.com/juju/utils/set/strings.go b/switchq/vendor/github.com/juju/utils/set/strings.go
new file mode 100644
index 0000000..b89f932
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/set/strings.go
@@ -0,0 +1,112 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package set
+
+import (
+	"sort"
+)
+
+// Strings represents the classic "set" data structure, and contains strings.
+type Strings map[string]bool
+
+// NewStrings creates and initializes a Strings and populates it with
+// initial values as specified in the parameters.
+func NewStrings(initial ...string) Strings {
+	result := make(Strings)
+	for _, value := range initial {
+		result.Add(value)
+	}
+	return result
+}
+
+// Size returns the number of elements in the set.
+func (s Strings) Size() int {
+	return len(s)
+}
+
+// IsEmpty is true for empty or uninitialized sets.
+func (s Strings) IsEmpty() bool {
+	return len(s) == 0
+}
+
+// Add puts a value into the set.
+func (s Strings) Add(value string) {
+	if s == nil {
+		panic("uninitialised set")
+	}
+	s[value] = true
+}
+
+// Remove takes a value out of the set. If value wasn't in the set to start
+// with, this method silently succeeds.
+func (s Strings) Remove(value string) {
+	delete(s, value)
+}
+
+// Contains returns true if the value is in the set, and false otherwise.
+func (s Strings) Contains(value string) bool {
+	_, exists := s[value]
+	return exists
+}
+
+// Values returns an unordered slice containing all the values in the set.
+func (s Strings) Values() []string {
+	result := make([]string, len(s))
+	i := 0
+	for key := range s {
+		result[i] = key
+		i++
+	}
+	return result
+}
+
+// SortedValues returns an ordered slice containing all the values in the set.
+func (s Strings) SortedValues() []string {
+	values := s.Values()
+	sort.Strings(values)
+	return values
+}
+
+// Union returns a new Strings representing a union of the elements in the
+// method target and the parameter.
+func (s Strings) Union(other Strings) Strings {
+	result := make(Strings)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range s {
+		result[value] = true
+	}
+	for value := range other {
+		result[value] = true
+	}
+	return result
+}
+
+// Intersection returns a new Strings representing an intersection of the elements in the
+// method target and the parameter.
+func (s Strings) Intersection(other Strings) Strings {
+	result := make(Strings)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range s {
+		if other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
+
+// Difference returns a new Strings representing all the values in the
+// target that are not in the parameter.
+func (s Strings) Difference(other Strings) Strings {
+	result := make(Strings)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range s {
+		if !other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
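+
+// Usage sketch (editorial example): typical set algebra with the Strings
+// type defined above:
+//
+//  a := NewStrings("one", "two")
+//  b := NewStrings("two", "three")
+//  a.Union(b).SortedValues()        // [one three two]
+//  a.Intersection(b).SortedValues() // [two]
+//  a.Difference(b).SortedValues()   // [one]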
diff --git a/switchq/vendor/github.com/juju/utils/set/tags.go b/switchq/vendor/github.com/juju/utils/set/tags.go
new file mode 100644
index 0000000..0fb6f87
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/set/tags.go
@@ -0,0 +1,150 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package set
+
+import (
+	"sort"
+
+	"github.com/juju/errors"
+	"gopkg.in/juju/names.v2"
+)
+
+// Tags represents the Set data structure; it implements tagSet
+// and contains names.Tags.
+type Tags map[names.Tag]bool
+
+// NewTags creates and initializes a Tags and populates it with
+// initial values as specified in the parameters.
+func NewTags(initial ...names.Tag) Tags {
+	result := make(Tags)
+	for _, value := range initial {
+		result.Add(value)
+	}
+	return result
+}
+
+// NewTagsFromStrings creates and initializes a Tags and populates it
+// by using names.ParseTag on the initial values specified in the parameters.
+func NewTagsFromStrings(initial ...string) (Tags, error) {
+	result := make(Tags)
+	for _, value := range initial {
+		tag, err := names.ParseTag(value)
+		if err != nil {
+			return result, errors.Trace(err)
+		}
+		result.Add(tag)
+	}
+	return result, nil
+}
+
+// Size returns the number of elements in the set.
+func (t Tags) Size() int {
+	return len(t)
+}
+
+// IsEmpty is true for empty or uninitialized sets.
+func (t Tags) IsEmpty() bool {
+	return len(t) == 0
+}
+
+// Add puts a value into the set.
+func (t Tags) Add(value names.Tag) {
+	if t == nil {
+		panic("uninitialised set")
+	}
+	t[value] = true
+}
+
+// Remove takes a value out of the set.  If value wasn't in the set to start
+// with, this method silently succeeds.
+func (t Tags) Remove(value names.Tag) {
+	delete(t, value)
+}
+
+// Contains returns true if the value is in the set, and false otherwise.
+func (t Tags) Contains(value names.Tag) bool {
+	_, exists := t[value]
+	return exists
+}
+
+// Values returns an unordered slice containing all the values in the set.
+func (t Tags) Values() []names.Tag {
+	result := make([]names.Tag, len(t))
+	i := 0
+	for key := range t {
+		result[i] = key
+		i++
+	}
+	return result
+}
+
+// stringValues returns a list of strings that represent a names.Tag.
+// Used internally by the SortedValues method.
+func (t Tags) stringValues() []string {
+	result := make([]string, t.Size())
+	i := 0
+	for key := range t {
+		result[i] = key.String()
+		i++
+	}
+	return result
+}
+
+// SortedValues returns an ordered slice containing all the values in the set.
+func (t Tags) SortedValues() []names.Tag {
+	values := t.stringValues()
+	sort.Strings(values)
+
+	result := make([]names.Tag, len(values))
+	for i, value := range values {
+		// We already know only good strings can live in the Tags set
+		// so we can safely ignore the error here.
+		tag, _ := names.ParseTag(value)
+		result[i] = tag
+	}
+	return result
+}
+
+// Union returns a new Tags representing a union of the elements in the
+// method target and the parameter.
+func (t Tags) Union(other Tags) Tags {
+	result := make(Tags)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range t {
+		result[value] = true
+	}
+	for value := range other {
+		result[value] = true
+	}
+	return result
+}
+
+// Intersection returns a new Tags representing an intersection of the elements in the
+// method target and the parameter.
+func (t Tags) Intersection(other Tags) Tags {
+	result := make(Tags)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range t {
+		if other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
+
+// Difference returns a new Tags representing all the values in the
+// target that are not in the parameter.
+func (t Tags) Difference(other Tags) Tags {
+	result := make(Tags)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range t {
+		if !other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
diff --git a/switchq/vendor/github.com/juju/utils/size.go b/switchq/vendor/github.com/juju/utils/size.go
new file mode 100644
index 0000000..8f6f88d
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/size.go
@@ -0,0 +1,78 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"math"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"github.com/juju/errors"
+)
+
+// ParseSize parses the string as a size, in mebibytes.
+//
+// The string must be a non-negative number with
+// an optional multiplier suffix (M, G, T, P, E, Z, or Y).
+// If the suffix is not specified, "M" is implied.
+func ParseSize(str string) (MB uint64, err error) {
+	// Find the first non-digit/period:
+	i := strings.IndexFunc(str, func(r rune) bool {
+		return r != '.' && !unicode.IsDigit(r)
+	})
+	var multiplier float64 = 1
+	if i > 0 {
+		suffix := str[i:]
+		multiplier = 0
+		for j := 0; j < len(sizeSuffixes); j++ {
+			base := string(sizeSuffixes[j])
+			// M, MB, or MiB are all valid.
+			switch suffix {
+			case base, base + "B", base + "iB":
+				multiplier = float64(sizeSuffixMultiplier(j))
+				break
+			}
+		}
+		if multiplier == 0 {
+			return 0, errors.Errorf("invalid multiplier suffix %q, expected one of %s", suffix, []byte(sizeSuffixes))
+		}
+		str = str[:i]
+	}
+
+	val, err := strconv.ParseFloat(str, 64)
+	if err != nil || val < 0 {
+		return 0, errors.Errorf("expected a non-negative number, got %q", str)
+	}
+	val *= multiplier
+	return uint64(math.Ceil(val)), nil
+}
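+
+// Illustrative sketch (editorial note): some inputs and the mebibyte values
+// the parser above yields:
+//
+//  ParseSize("100")  // 100, nil  ("M" implied)
+//  ParseSize("2.5G") // 2560, nil (2.5 * 1024, rounded up)
+//  ParseSize("1GiB") // 1024, nil
+//  ParseSize("10x")  // 0, invalid multiplier suffix error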
+
+var sizeSuffixes = "MGTPEZY"
+
+func sizeSuffixMultiplier(i int) int {
+	return 1 << uint(i*10)
+}
+
+// SizeTracker tracks the number of bytes passing through
+// its Write method (which is otherwise a no-op).
+//
+// Use SizeTracker with io.MultiWriter() to track number of bytes
+// written. Use with io.TeeReader() to track number of bytes read.
+type SizeTracker struct {
+	// size is the number of bytes written so far.
+	size int64
+}
+
+// Size returns the number of bytes written so far.
+func (st SizeTracker) Size() int64 {
+	return st.size
+}
+
+// Write implements io.Writer.
+func (st *SizeTracker) Write(data []byte) (n int, err error) {
+	n = len(data)
+	st.size += int64(n)
+	return n, nil
+}
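+
+// Usage sketch (editorial example; src and dst are hypothetical readers and
+// writers): counting bytes written while copying, via io.MultiWriter as
+// suggested in the type's doc comment:
+//
+//  var st SizeTracker
+//  w := io.MultiWriter(dst, &st)
+//  _, err := io.Copy(w, src)
+//  // st.Size() reports how many bytes were written to dst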
diff --git a/switchq/vendor/github.com/juju/utils/systemerrmessages_unix.go b/switchq/vendor/github.com/juju/utils/systemerrmessages_unix.go
new file mode 100644
index 0000000..7a0edd4
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/systemerrmessages_unix.go
@@ -0,0 +1,16 @@
+// Copyright 2014 Canonical Ltd.
+// Copyright 2014 Cloudbase Solutions SRL
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// +build !windows
+
+package utils
+
+// The following are strings/regex-es which match common Unix error messages
+// that may be returned in case of failed calls to the system.
+// Any extra leading/trailing regex-es are left to be added by the developer.
+const (
+	NoSuchUserErrRegexp = `user: unknown user [a-z0-9_-]*`
+	NoSuchFileErrRegexp = `no such file or directory`
+	MkdirFailErrRegexp  = `.* not a directory`
+)
diff --git a/switchq/vendor/github.com/juju/utils/systemerrmessages_windows.go b/switchq/vendor/github.com/juju/utils/systemerrmessages_windows.go
new file mode 100644
index 0000000..b24d453
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/systemerrmessages_windows.go
@@ -0,0 +1,14 @@
+// Copyright 2014 Canonical Ltd.
+// Copyright 2014 Cloudbase Solutions SRL
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+// The following are strings/regex-es which match common Windows error messages
+// that may be returned in case of failed calls to the system.
+// Any extra leading/trailing regex-es are left to be added by the developer.
+const (
+	NoSuchUserErrRegexp = `No mapping between account names and security IDs was done\.`
+	NoSuchFileErrRegexp = `The system cannot find the (file|path) specified\.`
+	MkdirFailErrRegexp  = `mkdir .*` + NoSuchFileErrRegexp
+)
diff --git a/switchq/vendor/github.com/juju/utils/timeit.go b/switchq/vendor/github.com/juju/utils/timeit.go
new file mode 100644
index 0000000..172b593
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/timeit.go
@@ -0,0 +1,57 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+	"os"
+	"time"
+)
+
+type timer struct {
+	action     string
+	start      time.Time
+	depth      int
+	duration   time.Duration
+	subActions []*timer
+}
+
+func (t *timer) String() string {
+	this := fmt.Sprintf("%.3fs %*s%s\n", t.duration.Seconds(), t.depth, "", t.action)
+	for _, sub := range t.subActions {
+		this += sub.String()
+	}
+	return this
+}
+
+var stack []*timer
+
+// Timeit starts a timer, used for tracking time spent.
+// Generally used with defer, as in:
+//  defer utils.Timeit("my func")()
+// which will track how much time is spent in your function. Alternatively,
+// if you want to track the time spent in a function you are calling,
+// you would use:
+//  toc := utils.Timeit("anotherFunc()")
+//  anotherFunc()
+//  toc()
+// This tracks nested calls by indenting the output, and will print out the
+// full stack of timing when we reach the top of the stack.
+func Timeit(action string) func() {
+	cur := &timer{action: action, start: time.Now(), depth: len(stack)}
+	if len(stack) != 0 {
+		tip := stack[len(stack)-1]
+		tip.subActions = append(tip.subActions, cur)
+	}
+	stack = append(stack, cur)
+	return func() {
+		cur.duration = time.Since(cur.start)
+		if len(stack) == 0 || cur == stack[0] {
+			fmt.Fprint(os.Stderr, cur)
+			stack = nil
+		} else {
+			stack = stack[0 : len(stack)-1]
+		}
+	}
+}
diff --git a/switchq/vendor/github.com/juju/utils/timer.go b/switchq/vendor/github.com/juju/utils/timer.go
new file mode 100644
index 0000000..6b32f09
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/timer.go
@@ -0,0 +1,124 @@
+// Copyright 2015 Canonical Ltd.
+// Copyright 2015 Cloudbase Solutions SRL
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"math/rand"
+	"time"
+
+	"github.com/juju/utils/clock"
+)
+
+// Countdown implements a timer that will call a provided function
+// after an internally stored duration. The steps as well as min and max
+// durations are declared upon initialization and depend on
+// the particular implementation.
+//
+// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427
+type Countdown interface {
+	// Reset stops the timer and resets its duration to the minimum one.
+	// Start must be called to start the timer again.
+	Reset()
+
+	// Start starts the internal timer.
+	// At the end of the timer, if Reset hasn't been called in the meantime,
+	// Func will be called and the duration is increased for the next call.
+	Start()
+}
+
+// NewBackoffTimer creates and initializes a new BackoffTimer.
+// A backoff timer starts at min and gets multiplied by factor
+// until it reaches max. Jitter determines whether a small
+// randomization is added to the duration.
+//
+// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427
+func NewBackoffTimer(config BackoffTimerConfig) *BackoffTimer {
+	return &BackoffTimer{
+		config:          config,
+		currentDuration: config.Min,
+	}
+}
+
+// BackoffTimer implements Countdown.
+// A backoff timer starts at min and gets multiplied by factor
+// until it reaches max. Jitter determines whether a small
+// randomization is added to the duration.
+//
+// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427
+type BackoffTimer struct {
+	config BackoffTimerConfig
+
+	timer           clock.Timer
+	currentDuration time.Duration
+}
+
+// BackoffTimerConfig is a helper struct for backoff timer
+// that encapsulates config information.
+//
+// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427
+type BackoffTimerConfig struct {
+	// The minimum duration after which Func is called.
+	Min time.Duration
+
+	// The maximum duration after which Func is called.
+	Max time.Duration
+
+	// Determines whether a small randomization is applied to
+	// the duration.
+	Jitter bool
+
+	// The factor by which you want the duration to increase
+	// every time.
+	Factor int64
+
+	// Func is the function that will be called when the countdown reaches 0.
+	Func func()
+
+	// Clock provides the AfterFunc function used to call func.
+	// It is exposed here so it's easier to mock it in tests.
+	Clock clock.Clock
+}
+
+// Start implements the Countdown interface.
+// Any existing timer execution is stopped before
+// a new one is created.
+func (t *BackoffTimer) Start() {
+	if t.timer != nil {
+		t.timer.Stop()
+	}
+	t.timer = t.config.Clock.AfterFunc(t.currentDuration, t.config.Func)
+
+	// Since it's a backoff timer we will increase
+	// the duration after each signal.
+	t.increaseDuration()
+}
+
+// Reset implements the Countdown interface.
+func (t *BackoffTimer) Reset() {
+	if t.timer != nil {
+		t.timer.Stop()
+	}
+	if t.currentDuration > t.config.Min {
+		t.currentDuration = t.config.Min
+	}
+}
+
+// increaseDuration will increase the duration based on
+// the current value and the factor. If Jitter is true
+// it will add a jitter of up to 3% (in either direction) to the final value.
+func (t *BackoffTimer) increaseDuration() {
+	current := int64(t.currentDuration)
+	nextDuration := time.Duration(current * t.config.Factor)
+	if t.config.Jitter {
+		// Get a factor in [-1; 1].
+		randFactor := (rand.Float64() * 2) - 1
+		jitter := float64(nextDuration) * randFactor * 0.03
+		nextDuration = nextDuration + time.Duration(jitter)
+	}
+	if nextDuration > t.config.Max {
+		nextDuration = t.config.Max
+	}
+	t.currentDuration = nextDuration
+}
diff --git a/switchq/vendor/github.com/juju/utils/tls.go b/switchq/vendor/github.com/juju/utils/tls.go
new file mode 100644
index 0000000..b805af8
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/tls.go
@@ -0,0 +1,72 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"crypto/tls"
+	"net/http"
+	"time"
+)
+
+// NewHttpTLSTransport returns a new http.Transport constructed with the TLS config
+// and the necessary parameters for Juju.
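+//
+// A minimal usage sketch (SecureTLSConfig is defined later in this file):
+//  client := &http.Client{Transport: NewHttpTLSTransport(SecureTLSConfig())}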
+func NewHttpTLSTransport(tlsConfig *tls.Config) *http.Transport {
+	// See https://code.google.com/p/go/issues/detail?id=4677
+	// We need to force the connection to close each time so that we don't
+	// hit the above Go bug.
+	transport := &http.Transport{
+		Proxy:               http.ProxyFromEnvironment,
+		TLSClientConfig:     tlsConfig,
+		DisableKeepAlives:   true,
+		TLSHandshakeTimeout: 10 * time.Second,
+	}
+	installHTTPDialShim(transport)
+	registerFileProtocol(transport)
+	return transport
+}
+
+// knownGoodCipherSuites contains the list of secure cipher suites to use
+// with tls.Config. This list matches those that Go 1.6 implements from
+// https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations.
+//
+// https://tools.ietf.org/html/rfc7525#section-4.2 excludes RSA exchange completely
+// so we could be more strict if all our clients will support
+// TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256/384. Unfortunately Go's crypto library
+// is limited and doesn't support DHE-RSA-AES256-GCM-SHA384 and
+// DHE-RSA-AES256-SHA256, which are part of the recommended set.
+//
+// Unfortunately we can't drop the RSA algorithms because our servers aren't
+// generating ECDHE keys.
+var knownGoodCipherSuites = []uint16{
+	// These are technically useless for Juju, since we use an RSA certificate,
+	// but they also don't hurt anything, and supporting an ECDSA certificate
+	// could be useful in the future.
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+
+	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+
+	// Windows doesn't support GCM currently, so we need these for RSA support.
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+
+	// We need this so that we have at least one suite in common
+	// with the default gnutls installed for precise and trusty.
+	tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+}
+
+// SecureTLSConfig returns a tls.Config that conforms to Juju's security
+// standards, so as to avoid known security vulnerabilities in certain
+// configurations.
+//
+// Currently it excludes RC4 implementations from the available ciphersuites,
+// requires ciphersuites that provide forward secrecy, and sets the minimum TLS
+// version to 1.2.
+func SecureTLSConfig() *tls.Config {
+	return &tls.Config{
+		CipherSuites: knownGoodCipherSuites,
+		MinVersion:   tls.VersionTLS12,
+	}
+}
diff --git a/switchq/vendor/github.com/juju/utils/trivial.go b/switchq/vendor/github.com/juju/utils/trivial.go
new file mode 100644
index 0000000..642e213
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/trivial.go
@@ -0,0 +1,147 @@
+// Copyright 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"encoding/hex"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"unicode"
+)
+
+// TODO(ericsnow) Move the quoting helpers into the shell package?
+
+// ShQuote quotes s so that when read by bash, no metacharacters
+// within s will be interpreted as such.
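+//
+// For example, ShQuote("it's") returns `'it'"'"'s'`.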
+func ShQuote(s string) string {
+	// single-quote becomes single-quote, double-quote, single-quote, double-quote, single-quote
+	return `'` + strings.Replace(s, `'`, `'"'"'`, -1) + `'`
+}
+
+// WinPSQuote quotes s so that when read by powershell, no metacharacters
+// within s will be interpreted as such.
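+//
+// For example, WinPSQuote("it's") returns `'it"s'`.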
+func WinPSQuote(s string) string {
+	// See http://ss64.com/ps/syntax-esc.html#quotes.
+	// Double quotes inside single quotes are fine; doubled single quotes
+	// inside single quotes are not. Using double quotes inside single-quoted
+	// strings ensures no expansion happens.
+	return `'` + strings.Replace(s, `'`, `"`, -1) + `'`
+}
+
+// WinCmdQuote quotes s so that when read by cmd.exe, no metacharacters
+// within s will be interpreted as such.
+func WinCmdQuote(s string) string {
+	// See http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx.
+	quoted := winCmdQuote(s)
+	return winCmdEscapeMeta(quoted)
+}
+
+func winCmdQuote(s string) string {
+	var escaped string
+	for _, c := range s {
+		switch c {
+		case '\\', '"':
+			escaped += `\`
+		}
+		escaped += string(c)
+	}
+	return `"` + escaped + `"`
+}
+
+func winCmdEscapeMeta(str string) string {
+	const meta = `()%!^"<>&|`
+	var newStr string
+	for _, c := range str {
+		if strings.Contains(meta, string(c)) {
+			newStr += "^"
+		}
+		newStr += string(c)
+	}
+	return newStr
+}
+
+// CommandString flattens a sequence of command arguments into a
+// string suitable for executing in a shell, escaping slashes,
+// variables and quotes as necessary; each argument is double-quoted
+// if and only if necessary.
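+//
+// For example, CommandString("a b", "c") returns `"a b" c`.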
+func CommandString(args ...string) string {
+	var buf bytes.Buffer
+	for i, arg := range args {
+		needsQuotes := false
+		var argBuf bytes.Buffer
+		for _, r := range arg {
+			if unicode.IsSpace(r) {
+				needsQuotes = true
+			} else if r == '"' || r == '$' || r == '\\' {
+				needsQuotes = true
+				argBuf.WriteByte('\\')
+			}
+			argBuf.WriteRune(r)
+		}
+		if i > 0 {
+			buf.WriteByte(' ')
+		}
+		if needsQuotes {
+			buf.WriteByte('"')
+			argBuf.WriteTo(&buf)
+			buf.WriteByte('"')
+		} else {
+			argBuf.WriteTo(&buf)
+		}
+	}
+	return buf.String()
+}
+
+// Gzip compresses the given data.
+func Gzip(data []byte) []byte {
+	var buf bytes.Buffer
+	w := gzip.NewWriter(&buf)
+	if _, err := w.Write(data); err != nil {
+		// Compression should never fail unless it fails
+		// to write to the underlying writer, which is a bytes.Buffer
+		// that never fails.
+		panic(err)
+	}
+	if err := w.Close(); err != nil {
+		panic(err)
+	}
+	return buf.Bytes()
+}
+
+// Gunzip uncompresses the given data.
+func Gunzip(data []byte) ([]byte, error) {
+	r, err := gzip.NewReader(bytes.NewReader(data))
+	if err != nil {
+		return nil, err
+	}
+	return ioutil.ReadAll(r)
+}
+
+// ReadSHA256 returns the SHA256 hash of the contents read from source
+// (hex encoded) and the size of the source in bytes.
+func ReadSHA256(source io.Reader) (string, int64, error) {
+	hash := sha256.New()
+	size, err := io.Copy(hash, source)
+	if err != nil {
+		return "", 0, err
+	}
+	digest := hex.EncodeToString(hash.Sum(nil))
+	return digest, size, nil
+}
+
+// ReadFileSHA256 is like ReadSHA256 but reads the contents of the
+// given file.
+func ReadFileSHA256(filename string) (string, int64, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return "", 0, err
+	}
+	defer f.Close()
+	return ReadSHA256(f)
+}
diff --git a/switchq/vendor/github.com/juju/utils/username.go b/switchq/vendor/github.com/juju/utils/username.go
new file mode 100644
index 0000000..5107e3b
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/username.go
@@ -0,0 +1,77 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"os"
+	"os/user"
+
+	"github.com/juju/errors"
+)
+
+// ResolveSudo returns the original username if sudo was used. The
+// original username is extracted from the OS environment.
+func ResolveSudo(username string) string {
+	return resolveSudo(username, os.Getenv)
+}
+
+func resolveSudo(username string, getenvFunc func(string) string) string {
+	if username != "root" {
+		return username
+	}
+	// sudo was probably called, get the original user.
+	if username := getenvFunc("SUDO_USER"); username != "" {
+		return username
+	}
+	return username
+}
+
+// EnvUsername returns the username from the OS environment.
+func EnvUsername() (string, error) {
+	return os.Getenv("USER"), nil
+}
+
+// OSUsername returns the username of the current OS user (based on UID).
+func OSUsername() (string, error) {
+	u, err := user.Current()
+	if err != nil {
+		return "", errors.Trace(err)
+	}
+	return u.Username, nil
+}
+
+// ResolveUsername returns the username determined by the provided
+// functions. The functions are tried in the same order in which they
+// were passed in. An error returned from any of them is immediately
+// returned. If an empty string is returned then that signals that the
+// function did not find the username and the next function is tried.
+// Once a username is found, the provided resolveSudo func (if any) is
+// called with that username and the result is returned. If no username
+// is found then errors.NotFound is returned.
+func ResolveUsername(resolveSudo func(string) string, usernameFuncs ...func() (string, error)) (string, error) {
+	for _, usernameFunc := range usernameFuncs {
+		username, err := usernameFunc()
+		if err != nil {
+			return "", errors.Trace(err)
+		}
+		if username != "" {
+			if resolveSudo != nil {
+				if original := resolveSudo(username); original != "" {
+					username = original
+				}
+			}
+			return username, nil
+		}
+	}
+	return "", errors.NotFoundf("username")
+}
+
+// LocalUsername determines the current username on the local host.
+func LocalUsername() (string, error) {
+	username, err := ResolveUsername(ResolveSudo, EnvUsername, OSUsername)
+	if err != nil {
+		return "", errors.Annotatef(err, "cannot get current user from the environment: %v", os.Environ())
+	}
+	return username, nil
+}
diff --git a/switchq/vendor/github.com/juju/utils/uuid.go b/switchq/vendor/github.com/juju/utils/uuid.go
new file mode 100644
index 0000000..2404efc
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/uuid.go
@@ -0,0 +1,90 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"regexp"
+	"strings"
+)
+
+// UUID represents a universally unique identifier with 16 octets.
+type UUID [16]byte
+
+// regex for validating that the UUID matches RFC 4122.
+// This package generates version 4 UUIDs but
+// accepts any UUID version.
+// http://www.ietf.org/rfc/rfc4122.txt
+var (
+	block1 = "[0-9a-f]{8}"
+	block2 = "[0-9a-f]{4}"
+	block3 = "[0-9a-f]{4}"
+	block4 = "[0-9a-f]{4}"
+	block5 = "[0-9a-f]{12}"
+
+	UUIDSnippet = block1 + "-" + block2 + "-" + block3 + "-" + block4 + "-" + block5
+	validUUID   = regexp.MustCompile("^" + UUIDSnippet + "$")
+)
+
+func UUIDFromString(s string) (UUID, error) {
+	if !IsValidUUIDString(s) {
+		return UUID{}, fmt.Errorf("invalid UUID: %q", s)
+	}
+	s = strings.Replace(s, "-", "", 4)
+	raw, err := hex.DecodeString(s)
+	if err != nil {
+		return UUID{}, err
+	}
+	var uuid UUID
+	copy(uuid[:], raw)
+	return uuid, nil
+}
+
+// IsValidUUIDString returns true if the given string matches the
+// canonical UUID format (any version is accepted, per the regexp above).
+func IsValidUUIDString(s string) bool {
+	return validUUID.MatchString(s)
+}
+
+// MustNewUUID returns a new uuid, if an error occurs it panics.
+func MustNewUUID() UUID {
+	uuid, err := NewUUID()
+	if err != nil {
+		panic(err)
+	}
+	return uuid
+}
+
+// NewUUID generates a new version 4 UUID relying only on random numbers.
+func NewUUID() (UUID, error) {
+	uuid := UUID{}
+	if _, err := io.ReadFull(rand.Reader, []byte(uuid[0:16])); err != nil {
+		return UUID{}, err
+	}
+	// Set version (4) and variant (2) according to RFC 4122.
+	var version byte = 4 << 4
+	var variant byte = 8 << 4
+	uuid[6] = version | (uuid[6] & 15)
+	uuid[8] = variant | (uuid[8] & 15)
+	return uuid, nil
+}
+
+// Copy returns a copy of the UUID.
+func (uuid UUID) Copy() UUID {
+	uuidCopy := uuid
+	return uuidCopy
+}
+
+// Raw returns a copy of the UUID bytes.
+func (uuid UUID) Raw() [16]byte {
+	return [16]byte(uuid)
+}
+
+// String returns a hexadecimal string representation with
+// standardized separators.
+func (uuid UUID) String() string {
+	return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:16])
+}
diff --git a/switchq/vendor/github.com/juju/utils/yaml.go b/switchq/vendor/github.com/juju/utils/yaml.go
new file mode 100644
index 0000000..2d443a0
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/yaml.go
@@ -0,0 +1,107 @@
+// Copyright 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/juju/errors"
+
+	"gopkg.in/yaml.v2"
+)
+
+// WriteYaml marshals obj as yaml to a temporary file in the same directory
+// as path, then atomically replaces path with the temporary file.
+func WriteYaml(path string, obj interface{}) error {
+	data, err := yaml.Marshal(obj)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	dir := filepath.Dir(path)
+	f, err := ioutil.TempFile(dir, "juju")
+	if err != nil {
+		return errors.Trace(err)
+	}
+	tmp := f.Name()
+	if _, err := f.Write(data); err != nil {
+		f.Close()      // don't leak file handle
+		os.Remove(tmp) // don't leak half written files on disk
+		return errors.Trace(err)
+	}
+	// Explicitly close the file before moving it. This is needed on Windows
+	// where the OS will not allow us to move a file that still has an open
+	// file handle. Must check the error on close because filesystems can delay
+	// reporting errors until the file is closed.
+	if err := f.Close(); err != nil {
+		os.Remove(tmp) // don't leak half written files on disk
+		return errors.Trace(err)
+	}
+
+	// ioutil.TempFile creates files 0600, but this function has a contract
+	// that files will be world readable, 0644 after replacement.
+	if err := os.Chmod(tmp, 0644); err != nil {
+		os.Remove(tmp) // remove file with incorrect permissions.
+		return errors.Trace(err)
+	}
+
+	return ReplaceFile(tmp, path)
+}
+
+// ReadYaml unmarshals the yaml contained in the file at path into obj. See
+// goyaml.Unmarshal. If path is not found, the error returned will be compatible
+// with os.IsNotExist.
+func ReadYaml(path string, obj interface{}) error {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return err // cannot wrap here because callers check for NotFound.
+	}
+	return yaml.Unmarshal(data, obj)
+}
+
+// ConformYAML ensures all keys of any nested maps are strings. This is
+// necessary because YAML unmarshals nested maps as
+// map[interface{}]interface{}, which cannot be serialized by json or bson.
+// It also handles []interface{}. cf. gopkg.in/juju/charm.v4/actions.go cleanse
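+//
+// For example, map[interface{}]interface{}{"a": 1} is conformed to
+// map[string]interface{}{"a": 1}, while a map with a non-string key
+// yields an error.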
+func ConformYAML(input interface{}) (interface{}, error) {
+	switch typedInput := input.(type) {
+
+	case map[string]interface{}:
+		newMap := make(map[string]interface{})
+		for key, value := range typedInput {
+			newValue, err := ConformYAML(value)
+			if err != nil {
+				return nil, err
+			}
+			newMap[key] = newValue
+		}
+		return newMap, nil
+
+	case map[interface{}]interface{}:
+		newMap := make(map[string]interface{})
+		for key, value := range typedInput {
+			typedKey, ok := key.(string)
+			if !ok {
+				return nil, errors.New("map keyed with non-string value")
+			}
+			newMap[typedKey] = value
+		}
+		return ConformYAML(newMap)
+
+	case []interface{}:
+		newSlice := make([]interface{}, len(typedInput))
+		for i, sliceValue := range typedInput {
+			newSliceValue, err := ConformYAML(sliceValue)
+			if err != nil {
+				return nil, errors.New("map keyed with non-string value")
+			}
+			newSlice[i] = newSliceValue
+		}
+		return newSlice, nil
+
+	default:
+		return input, nil
+	}
+}
diff --git a/switchq/vendor/github.com/juju/utils/zfile_windows.go b/switchq/vendor/github.com/juju/utils/zfile_windows.go
new file mode 100644
index 0000000..b1a50f1
--- /dev/null
+++ b/switchq/vendor/github.com/juju/utils/zfile_windows.go
@@ -0,0 +1,28 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// mksyscall_windows.pl -l32 file_windows.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+package utils
+
+import "unsafe"
+import "syscall"
+
+var (
+	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+	procMoveFileExW = modkernel32.NewProc("MoveFileExW")
+)
+
+func moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(lpExistingFileName)), uintptr(unsafe.Pointer(lpNewFileName)), uintptr(dwFlags))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = error(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
diff --git a/switchq/vendor/github.com/juju/version/LICENSE b/switchq/vendor/github.com/juju/version/LICENSE
new file mode 100644
index 0000000..5cec73f
--- /dev/null
+++ b/switchq/vendor/github.com/juju/version/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply. 
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/switchq/vendor/github.com/juju/version/README.md b/switchq/vendor/github.com/juju/version/README.md
new file mode 100644
index 0000000..2d1b48b
--- /dev/null
+++ b/switchq/vendor/github.com/juju/version/README.md
@@ -0,0 +1,3 @@
+# Version [![GoDoc](https://godoc.org/github.com/juju/version?status.svg)](https://godoc.org/github.com/juju/version) 
+version is a Go package for intelligent version comparisons.
+
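+A minimal comparison sketch (an illustrative example based on the API in
+version.go below, not part of the upstream README):
+
+```Go
+package main
+
+import (
+	"fmt"
+
+	"github.com/juju/version"
+)
+
+func main() {
+	v1 := version.MustParse("2.0.1")
+	v2 := version.MustParse("2.1.0")
+	fmt.Println(v1.Compare(v2)) // prints -1
+}
+```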
diff --git a/switchq/vendor/github.com/juju/version/version.go b/switchq/vendor/github.com/juju/version/version.go
new file mode 100644
index 0000000..026e99f
--- /dev/null
+++ b/switchq/vendor/github.com/juju/version/version.go
@@ -0,0 +1,297 @@
+// Copyright 2012, 2013 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package version implements version parsing.
+package version
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"gopkg.in/mgo.v2/bson"
+)
+
+// Number represents a version number.
+type Number struct {
+	Major int
+	Minor int
+	Tag   string
+	Patch int
+	Build int
+}
+
+// Zero is occasionally convenient and readable.
+// Please don't change its value.
+var Zero = Number{}
+
+// Binary specifies a binary version of juju.
+type Binary struct {
+	Number
+	Series string
+	Arch   string
+}
+
+// String returns the string representation of the binary version.
+func (b Binary) String() string {
+	return fmt.Sprintf("%v-%s-%s", b.Number, b.Series, b.Arch)
+}
+
+// GetBSON implements bson.Getter.
+func (b Binary) GetBSON() (interface{}, error) {
+	return b.String(), nil
+}
+
+// SetBSON implements bson.Setter.
+func (b *Binary) SetBSON(raw bson.Raw) error {
+	var s string
+	err := raw.Unmarshal(&s)
+	if err != nil {
+		return err
+	}
+	v, err := ParseBinary(s)
+	if err != nil {
+		return err
+	}
+	*b = v
+	return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (b Binary) MarshalJSON() ([]byte, error) {
+	return json.Marshal(b.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (b *Binary) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	v, err := ParseBinary(s)
+	if err != nil {
+		return err
+	}
+	*b = v
+	return nil
+}
+
+// MarshalYAML implements yaml.v2.Marshaller interface.
+func (b Binary) MarshalYAML() (interface{}, error) {
+	return b.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaller interface.
+func (b *Binary) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var vstr string
+	err := unmarshal(&vstr)
+	if err != nil {
+		return err
+	}
+	v, err := ParseBinary(vstr)
+	if err != nil {
+		return err
+	}
+	*b = v
+	return nil
+}
+
+var (
+	binaryPat = regexp.MustCompile(`^(\d{1,9})\.(\d{1,9})(?:\.|-([a-z]+))(\d{1,9})(\.\d{1,9})?-([^-]+)-([^-]+)$`)
+	numberPat = regexp.MustCompile(`^(\d{1,9})\.(\d{1,9})(?:\.|-([a-z]+))(\d{1,9})(\.\d{1,9})?$`)
+)
+
+// MustParse parses a version and panics if it does
+// not parse correctly.
+func MustParse(s string) Number {
+	v, err := Parse(s)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// MustParseBinary parses a binary version and panics if it does
+// not parse correctly.
+func MustParseBinary(s string) Binary {
+	b, err := ParseBinary(s)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
+// ParseBinary parses a binary version of the form "1.2.3-series-arch".
+func ParseBinary(s string) (Binary, error) {
+	m := binaryPat.FindStringSubmatch(s)
+	if m == nil {
+		return Binary{}, fmt.Errorf("invalid binary version %q", s)
+	}
+	var b Binary
+	b.Major = atoi(m[1])
+	b.Minor = atoi(m[2])
+	b.Tag = m[3]
+	b.Patch = atoi(m[4])
+	if m[5] != "" {
+		b.Build = atoi(m[5][1:])
+	}
+	b.Series = m[6]
+	b.Arch = m[7]
+	return b, nil
+}
+
+// Parse parses the version, which is of the form 1.2.3
+// giving the major, minor and patch versions
+// respectively.
+func Parse(s string) (Number, error) {
+	m := numberPat.FindStringSubmatch(s)
+	if m == nil {
+		return Number{}, fmt.Errorf("invalid version %q", s)
+	}
+	var n Number
+	n.Major = atoi(m[1])
+	n.Minor = atoi(m[2])
+	n.Tag = m[3]
+	n.Patch = atoi(m[4])
+	if m[5] != "" {
+		n.Build = atoi(m[5][1:])
+	}
+	return n, nil
+}
+
+// atoi is the same as strconv.Atoi but assumes that
+// the string has been verified to be a valid integer.
+func atoi(s string) int {
+	n, err := strconv.Atoi(s)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+// String returns the string representation of this Number.
+func (n Number) String() string {
+	var s string
+	if n.Tag == "" {
+		s = fmt.Sprintf("%d.%d.%d", n.Major, n.Minor, n.Patch)
+	} else {
+		s = fmt.Sprintf("%d.%d-%s%d", n.Major, n.Minor, n.Tag, n.Patch)
+	}
+	if n.Build > 0 {
+		s += fmt.Sprintf(".%d", n.Build)
+	}
+	return s
+}
+
+// Compare returns -1, 0 or 1 depending on whether
+// n is less than, equal to or greater than other.
+// The comparison compares Major, then Minor, then Tag, then Patch, then Build,
+// using the first difference found to decide the ordering.
+func (n Number) Compare(other Number) int {
+	if n == other {
+		return 0
+	}
+	less := false
+	switch {
+	case n.Major != other.Major:
+		less = n.Major < other.Major
+	case n.Minor != other.Minor:
+		less = n.Minor < other.Minor
+	case n.Tag != other.Tag:
+		switch {
+		case n.Tag == "":
+			less = false
+		case other.Tag == "":
+			less = true
+		default:
+			less = n.Tag < other.Tag
+		}
+	case n.Patch != other.Patch:
+		less = n.Patch < other.Patch
+	case n.Build != other.Build:
+		less = n.Build < other.Build
+	}
+	if less {
+		return -1
+	}
+	return 1
+}
+
+// GetBSON implements bson.Getter.
+func (n Number) GetBSON() (interface{}, error) {
+	return n.String(), nil
+}
+
+// SetBSON implements bson.Setter.
+func (n *Number) SetBSON(raw bson.Raw) error {
+	var s string
+	err := raw.Unmarshal(&s)
+	if err != nil {
+		return err
+	}
+	v, err := Parse(s)
+	if err != nil {
+		return err
+	}
+	*n = v
+	return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (n Number) MarshalJSON() ([]byte, error) {
+	return json.Marshal(n.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (n *Number) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	v, err := Parse(s)
+	if err != nil {
+		return err
+	}
+	*n = v
+	return nil
+}
+
+// MarshalYAML implements yaml.v2.Marshaller interface
+func (n Number) MarshalYAML() (interface{}, error) {
+	return n.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaller interface
+func (n *Number) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var vstr string
+	err := unmarshal(&vstr)
+	if err != nil {
+		return err
+	}
+	v, err := Parse(vstr)
+	if err != nil {
+		return err
+	}
+	*n = v
+	return nil
+}
+
+// ParseMajorMinor takes an argument of the form "major.minor" and returns ints major and minor.
+func ParseMajorMinor(vers string) (int, int, error) {
+	parts := strings.Split(vers, ".")
+	major, err := strconv.Atoi(parts[0])
+	minor := -1
+	if err != nil {
+		return -1, -1, fmt.Errorf("invalid major version number %s: %v", parts[0], err)
+	}
+	if len(parts) == 2 {
+		minor, err = strconv.Atoi(parts[1])
+		if err != nil {
+			return -1, -1, fmt.Errorf("invalid minor version number %s: %v", parts[1], err)
+		}
+	} else if len(parts) > 2 {
+		return -1, -1, fmt.Errorf("invalid major.minor version number %s", vers)
+	}
+	return major, minor, nil
+}
diff --git a/switchq/vendor/github.com/kelseyhightower/envconfig/LICENSE b/switchq/vendor/github.com/kelseyhightower/envconfig/LICENSE
new file mode 100644
index 0000000..4bfa7a8
--- /dev/null
+++ b/switchq/vendor/github.com/kelseyhightower/envconfig/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Kelsey Hightower
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/switchq/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS b/switchq/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
new file mode 100644
index 0000000..6527a9f
--- /dev/null
+++ b/switchq/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
@@ -0,0 +1,2 @@
+Kelsey Hightower kelsey.hightower@gmail.com github.com/kelseyhightower
+Travis Parker    travis.parker@gmail.com    github.com/teepark
diff --git a/switchq/vendor/github.com/kelseyhightower/envconfig/README.md b/switchq/vendor/github.com/kelseyhightower/envconfig/README.md
new file mode 100644
index 0000000..09de74b
--- /dev/null
+++ b/switchq/vendor/github.com/kelseyhightower/envconfig/README.md
@@ -0,0 +1,175 @@
+# envconfig
+
+[![Build Status](https://travis-ci.org/kelseyhightower/envconfig.png)](https://travis-ci.org/kelseyhightower/envconfig)
+
+```Go
+import "github.com/kelseyhightower/envconfig"
+```
+
+## Documentation
+
+See [godoc](http://godoc.org/github.com/kelseyhightower/envconfig)
+
+## Usage
+
+Set some environment variables:
+
+```Bash
+export MYAPP_DEBUG=false
+export MYAPP_PORT=8080
+export MYAPP_USER=Kelsey
+export MYAPP_RATE="0.5"
+export MYAPP_TIMEOUT="3m"
+export MYAPP_USERS="rob,ken,robert"
+```
+
+Write some code:
+
+```Go
+package main
+
+import (
+    "fmt"
+    "log"
+    "time"
+
+    "github.com/kelseyhightower/envconfig"
+)
+
+type Specification struct {
+    Debug   bool
+    Port    int
+    User    string
+    Users   []string
+    Rate    float32
+    Timeout time.Duration
+}
+
+func main() {
+    var s Specification
+    err := envconfig.Process("myapp", &s)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+    format := "Debug: %v\nPort: %d\nUser: %s\nRate: %f\nTimeout: %s\n"
+    _, err = fmt.Printf(format, s.Debug, s.Port, s.User, s.Rate, s.Timeout)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+
+    fmt.Println("Users:")
+    for _, u := range s.Users {
+        fmt.Printf("  %s\n", u)
+    }
+}
+```
+
+Results:
+
+```Bash
+Debug: false
+Port: 8080
+User: Kelsey
+Rate: 0.500000
+Timeout: 3m0s
+Users:
+  rob
+  ken
+  robert
+```
+
+## Struct Tag Support
+
+Envconfig supports the use of struct tags to specify alternate, default, and required
+environment variables.
+
+For example, consider the following struct:
+
+```Go
+type Specification struct {
+    ManualOverride1 string `envconfig:"manual_override_1"`
+    DefaultVar      string `default:"foobar"`
+    RequiredVar     string `required:"true"`
+    IgnoredVar      string `ignored:"true"`
+    AutoSplitVar    string `split_words:"true"`
+}
+```
+
+Envconfig has automatic support for CamelCased struct elements when the
+`split_words:"true"` tag is supplied. Without this tag, `AutoSplitVar` above
+would look for an environment variable called `MYAPP_AUTOSPLITVAR`. With the
+setting applied it will look for `MYAPP_AUTO_SPLIT_VAR`. Note that numbers
+will get globbed into the previous word. If the setting does not do the
+right thing, you may use a manual override.
+
+Envconfig will process the value for `ManualOverride1` by populating it with the
+value for `MYAPP_MANUAL_OVERRIDE_1`. Without this struct tag, it would have
+instead looked up `MYAPP_MANUALOVERRIDE1`. With the `split_words:"true"` tag
+it would have looked up `MYAPP_MANUAL_OVERRIDE1`.
+
+```Bash
+export MYAPP_MANUAL_OVERRIDE_1="this will be the value"
+
+# export MYAPP_MANUALOVERRIDE1="and this will not"
+```
+
+If envconfig can't find an environment variable value for `MYAPP_DEFAULTVAR`,
+it will populate it with "foobar" as a default value.
+
+If envconfig can't find an environment variable value for `MYAPP_REQUIREDVAR`,
+it will return an error when asked to process the struct.
+
+If envconfig can't find an environment variable in the form `PREFIX_MYVAR`, and there
+is a struct tag defined, it will try to populate your variable with an environment
+variable that directly matches the envconfig tag in your struct definition:
+
+```shell
+export SERVICE_HOST=127.0.0.1
+export MYAPP_DEBUG=true
+```
+```Go
+type Specification struct {
+    ServiceHost string `envconfig:"SERVICE_HOST"`
+    Debug       bool
+}
+```
+
+Envconfig won't process a field with the "ignored" tag set to "true", even if a corresponding
+environment variable is set.
+
+## Supported Struct Field Types
+
+envconfig supports these struct field types:
+
+  * string
+  * int, int8, int16, int32, int64
+  * uint, uint8, uint16, uint32, uint64
+  * bool
+  * float32, float64
+  * time.Duration
+  * slices of any supported type (from comma-separated values)
+  * [encoding.TextUnmarshaler](https://golang.org/pkg/encoding/#TextUnmarshaler)
+
+Embedded structs using these fields are also supported.
+
+## Custom Decoders
+
+Any field whose type (or pointer-to-type) implements `envconfig.Decoder` can
+control its own deserialization:
+
+```Bash
+export DNS_SERVER=8.8.8.8
+```
+
+```Go
+type IPDecoder net.IP
+
+func (ipd *IPDecoder) Decode(value string) error {
+    *ipd = IPDecoder(net.ParseIP(value))
+    return nil
+}
+
+type DNSConfig struct {
+    Address IPDecoder `envconfig:"DNS_SERVER"`
+}
+```
+
+Also, envconfig will use a `Set(string) error` method like from the
+[flag.Value](https://godoc.org/flag#Value) interface if implemented.
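+
+For example (an illustrative sketch; `Port` is a made-up type, not part of
+envconfig):
+
+```Go
+type Port int
+
+func (p *Port) Set(value string) error {
+    n, err := strconv.Atoi(value)
+    if err != nil {
+        return err
+    }
+    *p = Port(n)
+    return nil
+}
+
+type ServerConfig struct {
+    ListenPort Port `envconfig:"LISTEN_PORT"`
+}
+```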
diff --git a/switchq/vendor/github.com/kelseyhightower/envconfig/doc.go b/switchq/vendor/github.com/kelseyhightower/envconfig/doc.go
new file mode 100644
index 0000000..f28561c
--- /dev/null
+++ b/switchq/vendor/github.com/kelseyhightower/envconfig/doc.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+// Package envconfig implements decoding of environment variables based on a user
+// defined specification. A typical use is using environment variables for
+// configuration settings.
+package envconfig
diff --git a/switchq/vendor/github.com/kelseyhightower/envconfig/env_os.go b/switchq/vendor/github.com/kelseyhightower/envconfig/env_os.go
new file mode 100644
index 0000000..a6a014a
--- /dev/null
+++ b/switchq/vendor/github.com/kelseyhightower/envconfig/env_os.go
@@ -0,0 +1,7 @@
+// +build appengine
+
+package envconfig
+
+import "os"
+
+var lookupEnv = os.LookupEnv
diff --git a/switchq/vendor/github.com/kelseyhightower/envconfig/env_syscall.go b/switchq/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
new file mode 100644
index 0000000..9d98085
--- /dev/null
+++ b/switchq/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
@@ -0,0 +1,7 @@
+// +build !appengine
+
+package envconfig
+
+import "syscall"
+
+var lookupEnv = syscall.Getenv
diff --git a/switchq/vendor/github.com/kelseyhightower/envconfig/envconfig.go b/switchq/vendor/github.com/kelseyhightower/envconfig/envconfig.go
new file mode 100644
index 0000000..3ad5e7d
--- /dev/null
+++ b/switchq/vendor/github.com/kelseyhightower/envconfig/envconfig.go
@@ -0,0 +1,298 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// ErrInvalidSpecification indicates that a specification is of the wrong type.
+var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
+
+// A ParseError occurs when an environment variable cannot be converted to
+// the type required by a struct field during assignment.
+type ParseError struct {
+	KeyName   string
+	FieldName string
+	TypeName  string
+	Value     string
+	Err       error
+}
+
+// Decoder has the same semantics as Setter, but takes higher precedence.
+// It is provided for historical compatibility.
+type Decoder interface {
+	Decode(value string) error
+}
+
+// Setter is implemented by types that can self-deserialize values.
+// Any type that implements flag.Value also implements Setter.
+type Setter interface {
+	Set(value string) error
+}
+
+func (e *ParseError) Error() string {
+	return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s. details: %[5]s", e.KeyName, e.FieldName, e.Value, e.TypeName, e.Err)
+}
+
+// varInfo maintains information about the configuration variable
+type varInfo struct {
+	Name  string
+	Alt   string
+	Key   string
+	Field reflect.Value
+	Tags  reflect.StructTag
+}
+
+// gatherInfo gathers information about the specified struct.
+func gatherInfo(prefix string, spec interface{}) ([]varInfo, error) {
+	expr := regexp.MustCompile("([^A-Z]+|[A-Z][^A-Z]+|[A-Z]+)")
+	s := reflect.ValueOf(spec)
+
+	if s.Kind() != reflect.Ptr {
+		return nil, ErrInvalidSpecification
+	}
+	s = s.Elem()
+	if s.Kind() != reflect.Struct {
+		return nil, ErrInvalidSpecification
+	}
+	typeOfSpec := s.Type()
+
+	// over allocate an info array, we will extend if needed later
+	infos := make([]varInfo, 0, s.NumField())
+	for i := 0; i < s.NumField(); i++ {
+		f := s.Field(i)
+		ftype := typeOfSpec.Field(i)
+		if !f.CanSet() || ftype.Tag.Get("ignored") == "true" {
+			continue
+		}
+
+		for f.Kind() == reflect.Ptr {
+			if f.IsNil() {
+				if f.Type().Elem().Kind() != reflect.Struct {
+					// nil pointer to a non-struct: leave it alone
+					break
+				}
+				// nil pointer to struct: create a zero instance
+				f.Set(reflect.New(f.Type().Elem()))
+			}
+			f = f.Elem()
+		}
+
+		// Capture information about the config variable
+		info := varInfo{
+			Name:  ftype.Name,
+			Field: f,
+			Tags:  ftype.Tag,
+			Alt:   strings.ToUpper(ftype.Tag.Get("envconfig")),
+		}
+
+		// Default to the field name as the env var name (will be upcased)
+		info.Key = info.Name
+
+		// Best effort to un-pick camel casing as separate words
+		if ftype.Tag.Get("split_words") == "true" {
+			words := expr.FindAllStringSubmatch(ftype.Name, -1)
+			if len(words) > 0 {
+				var name []string
+				for _, words := range words {
+					name = append(name, words[0])
+				}
+
+				info.Key = strings.Join(name, "_")
+			}
+		}
+		if info.Alt != "" {
+			info.Key = info.Alt
+		}
+		if prefix != "" {
+			info.Key = fmt.Sprintf("%s_%s", prefix, info.Key)
+		}
+		info.Key = strings.ToUpper(info.Key)
+		infos = append(infos, info)
+
+		if f.Kind() == reflect.Struct {
+			// honor Decode if present
+			if decoderFrom(f) == nil && setterFrom(f) == nil && textUnmarshaler(f) == nil {
+				innerPrefix := prefix
+				if !ftype.Anonymous {
+					innerPrefix = info.Key
+				}
+
+				embeddedPtr := f.Addr().Interface()
+				embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr)
+				if err != nil {
+					return nil, err
+				}
+				infos = append(infos[:len(infos)-1], embeddedInfos...)
+
+				continue
+			}
+		}
+	}
+	return infos, nil
+}
+
+// Process populates the specified struct based on environment variables
+func Process(prefix string, spec interface{}) error {
+	infos, err := gatherInfo(prefix, spec)
+
+	for _, info := range infos {
+
+		// `os.Getenv` cannot differentiate between an explicitly set empty value
+		// and an unset value. `os.LookupEnv` is preferred to `syscall.Getenv`,
+		// but it is only available in go1.5 or newer. We're using Go build tags
+		// here to use os.LookupEnv for >=go1.5
+		value, ok := lookupEnv(info.Key)
+		if !ok && info.Alt != "" {
+			value, ok = lookupEnv(info.Alt)
+		}
+
+		def := info.Tags.Get("default")
+		if def != "" && !ok {
+			value = def
+		}
+
+		req := info.Tags.Get("required")
+		if !ok && def == "" {
+			if req == "true" {
+				return fmt.Errorf("required key %s missing value", info.Key)
+			}
+			continue
+		}
+
+		err := processField(value, info.Field)
+		if err != nil {
+			return &ParseError{
+				KeyName:   info.Key,
+				FieldName: info.Name,
+				TypeName:  info.Field.Type().String(),
+				Value:     value,
+				Err:       err,
+			}
+		}
+	}
+
+	return err
+}
+
+// MustProcess is the same as Process but panics if an error occurs
+func MustProcess(prefix string, spec interface{}) {
+	if err := Process(prefix, spec); err != nil {
+		panic(err)
+	}
+}
+
+func processField(value string, field reflect.Value) error {
+	typ := field.Type()
+
+	decoder := decoderFrom(field)
+	if decoder != nil {
+		return decoder.Decode(value)
+	}
+	// look for Set method if Decode not defined
+	setter := setterFrom(field)
+	if setter != nil {
+		return setter.Set(value)
+	}
+
+	if t := textUnmarshaler(field); t != nil {
+		return t.UnmarshalText([]byte(value))
+	}
+
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		if field.IsNil() {
+			field.Set(reflect.New(typ))
+		}
+		field = field.Elem()
+	}
+
+	switch typ.Kind() {
+	case reflect.String:
+		field.SetString(value)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		var (
+			val int64
+			err error
+		)
+		if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" {
+			var d time.Duration
+			d, err = time.ParseDuration(value)
+			val = int64(d)
+		} else {
+			val, err = strconv.ParseInt(value, 0, typ.Bits())
+		}
+		if err != nil {
+			return err
+		}
+
+		field.SetInt(val)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		val, err := strconv.ParseUint(value, 0, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetUint(val)
+	case reflect.Bool:
+		val, err := strconv.ParseBool(value)
+		if err != nil {
+			return err
+		}
+		field.SetBool(val)
+	case reflect.Float32, reflect.Float64:
+		val, err := strconv.ParseFloat(value, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetFloat(val)
+	case reflect.Slice:
+		vals := strings.Split(value, ",")
+		sl := reflect.MakeSlice(typ, len(vals), len(vals))
+		for i, val := range vals {
+			err := processField(val, sl.Index(i))
+			if err != nil {
+				return err
+			}
+		}
+		field.Set(sl)
+	}
+
+	return nil
+}
+
+func interfaceFrom(field reflect.Value, fn func(interface{}, *bool)) {
+	// it may be impossible for a struct field to fail this check
+	if !field.CanInterface() {
+		return
+	}
+	var ok bool
+	fn(field.Interface(), &ok)
+	if !ok && field.CanAddr() {
+		fn(field.Addr().Interface(), &ok)
+	}
+}
+
+func decoderFrom(field reflect.Value) (d Decoder) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { d, *ok = v.(Decoder) })
+	return d
+}
+
+func setterFrom(field reflect.Value) (s Setter) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { s, *ok = v.(Setter) })
+	return s
+}
+
+func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) })
+	return t
+}
diff --git a/switchq/vendor/github.com/kelseyhightower/envconfig/usage.go b/switchq/vendor/github.com/kelseyhightower/envconfig/usage.go
new file mode 100644
index 0000000..4870237
--- /dev/null
+++ b/switchq/vendor/github.com/kelseyhightower/envconfig/usage.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2016 Kelsey Hightower and others. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"text/template"
+)
+
+const (
+	// DefaultListFormat constant to use to display usage in a list format
+	DefaultListFormat = `This application is configured via the environment. The following environment
+variables can be used:
+{{range .}}
+{{usage_key .}}
+  [description] {{usage_description .}}
+  [type]        {{usage_type .}}
+  [default]     {{usage_default .}}
+  [required]    {{usage_required .}}{{end}}
+`
+	// DefaultTableFormat constant to use to display usage in a tabular format
+	DefaultTableFormat = `This application is configured via the environment. The following environment
+variables can be used:
+
+KEY	TYPE	DEFAULT	REQUIRED	DESCRIPTION
+{{range .}}{{usage_key .}}	{{usage_type .}}	{{usage_default .}}	{{usage_required .}}	{{usage_description .}}
+{{end}}`
+)
+
+var (
+	decoderType     = reflect.TypeOf((*Decoder)(nil)).Elem()
+	setterType      = reflect.TypeOf((*Setter)(nil)).Elem()
+	unmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+func implementsInterface(t reflect.Type) bool {
+	return t.Implements(decoderType) ||
+		reflect.PtrTo(t).Implements(decoderType) ||
+		t.Implements(setterType) ||
+		reflect.PtrTo(t).Implements(setterType) ||
+		t.Implements(unmarshalerType) ||
+		reflect.PtrTo(t).Implements(unmarshalerType)
+}
+
+// toTypeDescription converts Go types into a human readable description
+func toTypeDescription(t reflect.Type) string {
+	switch t.Kind() {
+	case reflect.Array, reflect.Slice:
+		return fmt.Sprintf("Comma-separated list of %s", toTypeDescription(t.Elem()))
+	case reflect.Ptr:
+		return toTypeDescription(t.Elem())
+	case reflect.Struct:
+		if implementsInterface(t) && t.Name() != "" {
+			return t.Name()
+		}
+		return ""
+	case reflect.String:
+		name := t.Name()
+		if name != "" && name != "string" {
+			return name
+		}
+		return "String"
+	case reflect.Bool:
+		name := t.Name()
+		if name != "" && name != "bool" {
+			return name
+		}
+		return "True or False"
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "int") {
+			return name
+		}
+		return "Integer"
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "uint") {
+			return name
+		}
+		return "Unsigned Integer"
+	case reflect.Float32, reflect.Float64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "float") {
+			return name
+		}
+		return "Float"
+	}
+	return fmt.Sprintf("%+v", t)
+}
+
+// Usage writes usage information to stdout using the default header and table format
+func Usage(prefix string, spec interface{}) error {
+	// The default is to output the usage information as a table
+	// Create tabwriter instance to support table output
+	tabs := tabwriter.NewWriter(os.Stdout, 1, 0, 4, ' ', 0)
+
+	err := Usagef(prefix, spec, tabs, DefaultTableFormat)
+	tabs.Flush()
+	return err
+}
+
+// Usagef writes usage information to the specified io.Writer using the specified template specification
+func Usagef(prefix string, spec interface{}, out io.Writer, format string) error {
+
+	// Specify the default usage template functions
+	functions := template.FuncMap{
+		"usage_key":         func(v varInfo) string { return v.Key },
+		"usage_description": func(v varInfo) string { return v.Tags.Get("desc") },
+		"usage_type":        func(v varInfo) string { return toTypeDescription(v.Field.Type()) },
+		"usage_default":     func(v varInfo) string { return v.Tags.Get("default") },
+		"usage_required": func(v varInfo) (string, error) {
+			req := v.Tags.Get("required")
+			if req != "" {
+				reqB, err := strconv.ParseBool(req)
+				if err != nil {
+					return "", err
+				}
+				if reqB {
+					req = "true"
+				}
+			}
+			return req, nil
+		},
+	}
+
+	tmpl, err := template.New("envconfig").Funcs(functions).Parse(format)
+	if err != nil {
+		return err
+	}
+
+	return Usaget(prefix, spec, out, tmpl)
+}
+
+// Usaget writes usage information to the specified io.Writer using the specified template
+func Usaget(prefix string, spec interface{}, out io.Writer, tmpl *template.Template) error {
+	// gather first
+	infos, err := gatherInfo(prefix, spec)
+	if err != nil {
+		return err
+	}
+
+	return tmpl.Execute(out, infos)
+}
diff --git a/switchq/vendor/github.com/lunixbochs/vtclean/LICENSE b/switchq/vendor/github.com/lunixbochs/vtclean/LICENSE
new file mode 100644
index 0000000..42e8263
--- /dev/null
+++ b/switchq/vendor/github.com/lunixbochs/vtclean/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Ryan Hileman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/switchq/vendor/github.com/lunixbochs/vtclean/README.md b/switchq/vendor/github.com/lunixbochs/vtclean/README.md
new file mode 100644
index 0000000..ddd1372
--- /dev/null
+++ b/switchq/vendor/github.com/lunixbochs/vtclean/README.md
@@ -0,0 +1,44 @@
+vtclean
+----
+
+Clean up raw terminal output by stripping escape sequences, optionally preserving color.
+
+Get it: `go get github.com/lunixbochs/vtclean/vtclean`
+
+API:
+
+    import "github.com/lunixbochs/vtclean"
+    vtclean.Clean(line string, color bool) string
+
+Command line example:
+
+    $ echo -e '\x1b[1;32mcolor example
+    color forced to stop at end of line
+    backspace is ba\b\bgood
+    no beeps!\x07\x07' | ./vtclean -color
+
+    color example
+    color forced to stop at end of line
+    backspace is good
+    no beeps!
+
+Go example:
+
+    package main
+
+    import (
+        "fmt"
+        "github.com/lunixbochs/vtclean"
+    )
+
+    func main() {
+        line := vtclean.Clean(
+            "\033[1;32mcolor, " +
+            "curs\033[Aor, " +
+            "backspace\b\b\b\b\b\b\b\b\b\b\b\033[K", false)
+        fmt.Println(line)
+    }
+
+Output:
+
+    color, cursor
diff --git a/switchq/vendor/github.com/lunixbochs/vtclean/io.go b/switchq/vendor/github.com/lunixbochs/vtclean/io.go
new file mode 100644
index 0000000..31be007
--- /dev/null
+++ b/switchq/vendor/github.com/lunixbochs/vtclean/io.go
@@ -0,0 +1,93 @@
+package vtclean
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+)
+
+type reader struct {
+	io.Reader
+	scanner *bufio.Scanner
+	buf     []byte
+
+	color bool
+}
+
+func NewReader(r io.Reader, color bool) io.Reader {
+	return &reader{Reader: r, color: color}
+}
+
+func (r *reader) scan() bool {
+	if r.scanner == nil {
+		r.scanner = bufio.NewScanner(r.Reader)
+	}
+	if len(r.buf) > 0 {
+		return true
+	}
+	if r.scanner.Scan() {
+		r.buf = []byte(Clean(r.scanner.Text(), r.color) + "\n")
+		return true
+	}
+	return false
+}
+
+func (r *reader) fill(p []byte) int {
+	n := len(r.buf)
+	copy(p, r.buf)
+	if len(p) < len(r.buf) {
+		r.buf = r.buf[len(p):]
+		n = len(p)
+	} else {
+		r.buf = nil
+	}
+	return n
+}
+
+func (r *reader) Read(p []byte) (int, error) {
+	n := r.fill(p)
+	if n < len(p) {
+		if !r.scan() {
+			if n == 0 {
+				return 0, io.EOF
+			}
+			return n, nil
+		}
+		n += r.fill(p[n:])
+	}
+	return n, nil
+}
+
+type writer struct {
+	io.Writer
+	buf   []byte
+	color bool
+}
+
+func NewWriter(w io.Writer, color bool) io.WriteCloser {
+	return &writer{Writer: w, color: color}
+}
+
+func (w *writer) Write(p []byte) (int, error) {
+	buf := append(w.buf, p...)
+	lines := bytes.Split(buf, []byte("\n"))
+	if len(lines) > 0 {
+		last := len(lines) - 1
+		w.buf = lines[last]
+		count := 0
+		for _, line := range lines[:last] {
+			n, err := w.Writer.Write([]byte(Clean(string(line), w.color) + "\n"))
+			count += n
+			if err != nil {
+				return count, err
+			}
+		}
+	}
+	return len(p), nil
+}
+
+func (w *writer) Close() error {
+	cl := Clean(string(w.buf), w.color)
+	_, err := w.Writer.Write([]byte(cl))
+	return err
+}
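+
+// Example (sketch): wrap a writer so that everything written through it is
+// cleaned line by line, using only the exported API above. noisyTerminalOutput
+// is a hypothetical io.Reader.
+//
+//	w := vtclean.NewWriter(os.Stdout, true)
+//	io.Copy(w, noisyTerminalOutput)
+//	w.Close() // flush any trailing partial line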
diff --git a/switchq/vendor/github.com/lunixbochs/vtclean/line.go b/switchq/vendor/github.com/lunixbochs/vtclean/line.go
new file mode 100644
index 0000000..7272d91
--- /dev/null
+++ b/switchq/vendor/github.com/lunixbochs/vtclean/line.go
@@ -0,0 +1,107 @@
+package vtclean
+
+type char struct {
+	char  byte
+	vt100 []byte
+}
+
+func chars(p []byte) []char {
+	tmp := make([]char, len(p))
+	for i, v := range p {
+		tmp[i].char = v
+	}
+	return tmp
+}
+
+type lineEdit struct {
+	buf       []char
+	pos, size int
+	vt100     []byte
+}
+
+func newLineEdit(length int) *lineEdit {
+	return &lineEdit{buf: make([]char, length)}
+}
+
+func (l *lineEdit) Vt100(p []byte) {
+	l.vt100 = p
+}
+
+func (l *lineEdit) Move(x int) {
+	if x < 0 && l.pos <= -x {
+		l.pos = 0
+	} else if x > 0 && l.pos+x > l.size {
+		l.pos = l.size
+	} else {
+		l.pos += x
+	}
+}
+
+func (l *lineEdit) Write(p []byte) {
+	c := chars(p)
+	if len(c) > 0 {
+		c[0].vt100 = l.vt100
+		l.vt100 = nil
+	}
+	if len(l.buf)-l.pos < len(c) {
+		l.buf = append(l.buf[:l.pos], c...)
+	} else {
+		copy(l.buf[l.pos:], c)
+	}
+	l.pos += len(c)
+	if l.pos > l.size {
+		l.size = l.pos
+	}
+}
+
+func (l *lineEdit) Insert(p []byte) {
+	c := chars(p)
+	if len(c) > 0 {
+		c[0].vt100 = l.vt100
+		l.vt100 = nil
+	}
+	l.size += len(c)
+	c = append(c, l.buf[l.pos:]...)
+	l.buf = append(l.buf[:l.pos], c...)
+}
+
+func (l *lineEdit) Delete(n int) {
+	most := l.size - l.pos
+	if n > most {
+		n = most
+	}
+	copy(l.buf[l.pos:], l.buf[l.pos+n:])
+	l.size -= n
+}
+
+func (l *lineEdit) Clear() {
+	for i := 0; i < len(l.buf); i++ {
+		l.buf[i].char = ' '
+	}
+}
+func (l *lineEdit) ClearLeft() {
+	for i := 0; i < l.pos+1; i++ {
+		l.buf[i].char = ' '
+	}
+}
+func (l *lineEdit) ClearRight() {
+	l.size = l.pos
+}
+
+func (l *lineEdit) Bytes() []byte {
+	length := 0
+	buf := l.buf[:l.size]
+	for _, v := range buf {
+		length += 1 + len(v.vt100)
+	}
+	tmp := make([]byte, 0, length)
+	for _, v := range buf {
+		tmp = append(tmp, v.vt100...)
+		tmp = append(tmp, v.char)
+	}
+	return tmp
+}
+
+func (l *lineEdit) String() string {
+	return string(l.Bytes())
+}
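+
+// Example (sketch): lineEdit models in-place terminal edits, so a cursor move
+// followed by new characters overwrites the buffer rather than appending.
+//
+//	l := newLineEdit(4)
+//	l.Write([]byte("bad"))
+//	l.Move(-2)
+//	l.Write([]byte("ee"))
+//	// l.String() == "bee"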
diff --git a/switchq/vendor/github.com/lunixbochs/vtclean/regex.txt b/switchq/vendor/github.com/lunixbochs/vtclean/regex.txt
new file mode 100644
index 0000000..e55e7f2
--- /dev/null
+++ b/switchq/vendor/github.com/lunixbochs/vtclean/regex.txt
@@ -0,0 +1,14 @@
+these are the source definitions for the scary escape code regex
+
+# from tests in Terminal.app, this regex should cover all basic \e[ and \e] cases
+^([\[\]]([\d\?]+)?(;[\d\?]+)*)?.
+
+# this catches any case the above does not
+# make sure to not include any special characters the main regex finds (like ?)
+\[[^a-zA-Z0-9@\?]+.
+
+# esc + paren + any single char
+[\(\)].
+
+# didn't re-check this one (not included)
+[\[K]\d+;\d+
diff --git a/switchq/vendor/github.com/lunixbochs/vtclean/vtclean.go b/switchq/vendor/github.com/lunixbochs/vtclean/vtclean.go
new file mode 100644
index 0000000..1e74237
--- /dev/null
+++ b/switchq/vendor/github.com/lunixbochs/vtclean/vtclean.go
@@ -0,0 +1,81 @@
+package vtclean
+
+import (
+	"bytes"
+	"regexp"
+	"strconv"
+)
+
+// see regex.txt for a slightly separated version of this regex
+var vt100re = regexp.MustCompile(`^\033([\[\]]([\d\?]+)?(;[\d\?]+)*)?(.)`)
+var vt100exc = regexp.MustCompile(`^\033(\[[^a-zA-Z0-9@\?]+|[\(\)]).`)
+
+func Clean(line string, color bool) string {
+	var edit = newLineEdit(len(line))
+	lineb := []byte(line)
+
+	hadColor := false
+	for i := 0; i < len(lineb); {
+		c := lineb[i]
+		switch c {
+		case '\b':
+			edit.Move(-1)
+		case '\033':
+			// set terminal title
+			if bytes.HasPrefix(lineb[i:], []byte("\x1b]0;")) {
+				pos := bytes.Index(lineb[i:], []byte("\a"))
+				if pos != -1 {
+					i += pos + 1
+					continue
+				}
+			}
+			if m := vt100exc.Find(lineb[i:]); m != nil {
+				i += len(m)
+			} else if m := vt100re.FindSubmatch(lineb[i:]); m != nil {
+				i += len(m[0])
+				num := string(m[2])
+				n, err := strconv.Atoi(num)
+				if err != nil || n > 10000 {
+					n = 1
+				}
+				switch m[4][0] {
+				case 'm':
+					if color {
+						hadColor = true
+						edit.Vt100(m[0])
+					}
+				case '@':
+					edit.Insert(bytes.Repeat([]byte{' '}, n))
+				case 'C':
+					edit.Move(n)
+				case 'D':
+					edit.Move(-n)
+				case 'P':
+					edit.Delete(n)
+				case 'K':
+					switch num {
+					case "", "0":
+						edit.ClearRight()
+					case "1":
+						edit.ClearLeft()
+					case "2":
+						edit.Clear()
+					}
+				}
+			} else {
+				i += 1
+			}
+			continue
+		default:
+			if c == '\n' || c >= ' ' {
+				edit.Write([]byte{c})
+			}
+		}
+		i += 1
+	}
+	out := edit.Bytes()
+	if hadColor {
+		out = append(out, []byte("\033[0m")...)
+	}
+	return string(out)
+}
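+
+// Example (sketch): Clean("ab\bc", false) returns "ac", because the backspace
+// moves the cursor left one cell and 'c' then overwrites 'b'.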
diff --git a/switchq/vendor/github.com/mattn/go-colorable/LICENSE b/switchq/vendor/github.com/mattn/go-colorable/LICENSE
new file mode 100644
index 0000000..91b5cef
--- /dev/null
+++ b/switchq/vendor/github.com/mattn/go-colorable/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/switchq/vendor/github.com/mattn/go-colorable/README.md b/switchq/vendor/github.com/mattn/go-colorable/README.md
new file mode 100644
index 0000000..e84226a
--- /dev/null
+++ b/switchq/vendor/github.com/mattn/go-colorable/README.md
@@ -0,0 +1,43 @@
+# go-colorable
+
+Colorable writer for Windows.
+
+Most logger packages don't show colors on Windows. (ansicon can work around this, but it requires an external tool.)
+This package makes it possible to handle ANSI color escape sequences on Windows.
+
+## Too Bad!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
+
+
+## So Good!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
+
+## Usage
+
+```go
+logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
+logrus.SetOutput(colorable.NewColorableStdout())
+
+logrus.Info("succeeded")
+logrus.Warn("not correct")
+logrus.Error("something error")
+logrus.Fatal("panic")
+```
+
+The code above also compiles on non-Windows OSes.
+
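+To strip escape sequences instead of translating them (for example when
+writing logs to a file), the package also provides `NewNonColorable`; a
+minimal sketch:
+
+```go
+var buf bytes.Buffer
+w := colorable.NewNonColorable(&buf)
+w.Write([]byte("\x1b[31mred\x1b[0m text"))
+// buf now holds "red text" with the escape sequences removed
+```
+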
+## Installation
+
+```
+$ go get github.com/mattn/go-colorable
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/switchq/vendor/github.com/mattn/go-colorable/colorable_others.go b/switchq/vendor/github.com/mattn/go-colorable/colorable_others.go
new file mode 100644
index 0000000..a7fe19a
--- /dev/null
+++ b/switchq/vendor/github.com/mattn/go-colorable/colorable_others.go
@@ -0,0 +1,27 @@
+// +build !windows
+
+package colorable
+
+import (
+	"io"
+	"os"
+)
+
+// NewColorable returns a new instance of Writer which handles escape sequences.
+func NewColorable(file *os.File) io.Writer {
+	if file == nil {
+		panic("nil passed instead of *os.File to NewColorable()")
+	}
+
+	return file
+}
+
+// NewColorableStdout returns a new instance of Writer which handles escape sequences for stdout.
+func NewColorableStdout() io.Writer {
+	return os.Stdout
+}
+
+// NewColorableStderr returns a new instance of Writer which handles escape sequences for stderr.
+func NewColorableStderr() io.Writer {
+	return os.Stderr
+}
diff --git a/switchq/vendor/github.com/mattn/go-colorable/colorable_windows.go b/switchq/vendor/github.com/mattn/go-colorable/colorable_windows.go
new file mode 100644
index 0000000..628ad90
--- /dev/null
+++ b/switchq/vendor/github.com/mattn/go-colorable/colorable_windows.go
@@ -0,0 +1,820 @@
+package colorable
+
+import (
+	"bytes"
+	"io"
+	"math"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"github.com/mattn/go-isatty"
+)
+
+const (
+	foregroundBlue      = 0x1
+	foregroundGreen     = 0x2
+	foregroundRed       = 0x4
+	foregroundIntensity = 0x8
+	foregroundMask      = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
+	backgroundBlue      = 0x10
+	backgroundGreen     = 0x20
+	backgroundRed       = 0x40
+	backgroundIntensity = 0x80
+	backgroundMask      = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
+)
+
+type wchar uint16
+type short int16
+type dword uint32
+type word uint16
+
+type coord struct {
+	x short
+	y short
+}
+
+type smallRect struct {
+	left   short
+	top    short
+	right  short
+	bottom short
+}
+
+type consoleScreenBufferInfo struct {
+	size              coord
+	cursorPosition    coord
+	attributes        word
+	window            smallRect
+	maximumWindowSize coord
+}
+
+type consoleCursorInfo struct {
+	size    dword
+	visible int32
+}
+
+var (
+	kernel32                       = syscall.NewLazyDLL("kernel32.dll")
+	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+	procSetConsoleTextAttribute    = kernel32.NewProc("SetConsoleTextAttribute")
+	procSetConsoleCursorPosition   = kernel32.NewProc("SetConsoleCursorPosition")
+	procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+	procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
+	procGetConsoleCursorInfo       = kernel32.NewProc("GetConsoleCursorInfo")
+	procSetConsoleCursorInfo       = kernel32.NewProc("SetConsoleCursorInfo")
+)
+
+type Writer struct {
+	out     io.Writer
+	handle  syscall.Handle
+	lastbuf bytes.Buffer
+	oldattr word
+	oldpos  coord
+}
+
+// NewColorable returns a new instance of Writer which handles escape sequences from the given file.
+func NewColorable(file *os.File) io.Writer {
+	if file == nil {
+		panic("nil passed instead of *os.File to NewColorable()")
+	}
+
+	if isatty.IsTerminal(file.Fd()) {
+		var csbi consoleScreenBufferInfo
+		handle := syscall.Handle(file.Fd())
+		procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+		return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
+	} else {
+		return file
+	}
+}
+
+// NewColorableStdout returns a new instance of Writer which handles escape sequences for stdout.
+func NewColorableStdout() io.Writer {
+	return NewColorable(os.Stdout)
+}
+
+// NewColorableStderr returns a new instance of Writer which handles escape sequences for stderr.
+func NewColorableStderr() io.Writer {
+	return NewColorable(os.Stderr)
+}
+
+var color256 = map[int]int{
+	0:   0x000000,
+	1:   0x800000,
+	2:   0x008000,
+	3:   0x808000,
+	4:   0x000080,
+	5:   0x800080,
+	6:   0x008080,
+	7:   0xc0c0c0,
+	8:   0x808080,
+	9:   0xff0000,
+	10:  0x00ff00,
+	11:  0xffff00,
+	12:  0x0000ff,
+	13:  0xff00ff,
+	14:  0x00ffff,
+	15:  0xffffff,
+	16:  0x000000,
+	17:  0x00005f,
+	18:  0x000087,
+	19:  0x0000af,
+	20:  0x0000d7,
+	21:  0x0000ff,
+	22:  0x005f00,
+	23:  0x005f5f,
+	24:  0x005f87,
+	25:  0x005faf,
+	26:  0x005fd7,
+	27:  0x005fff,
+	28:  0x008700,
+	29:  0x00875f,
+	30:  0x008787,
+	31:  0x0087af,
+	32:  0x0087d7,
+	33:  0x0087ff,
+	34:  0x00af00,
+	35:  0x00af5f,
+	36:  0x00af87,
+	37:  0x00afaf,
+	38:  0x00afd7,
+	39:  0x00afff,
+	40:  0x00d700,
+	41:  0x00d75f,
+	42:  0x00d787,
+	43:  0x00d7af,
+	44:  0x00d7d7,
+	45:  0x00d7ff,
+	46:  0x00ff00,
+	47:  0x00ff5f,
+	48:  0x00ff87,
+	49:  0x00ffaf,
+	50:  0x00ffd7,
+	51:  0x00ffff,
+	52:  0x5f0000,
+	53:  0x5f005f,
+	54:  0x5f0087,
+	55:  0x5f00af,
+	56:  0x5f00d7,
+	57:  0x5f00ff,
+	58:  0x5f5f00,
+	59:  0x5f5f5f,
+	60:  0x5f5f87,
+	61:  0x5f5faf,
+	62:  0x5f5fd7,
+	63:  0x5f5fff,
+	64:  0x5f8700,
+	65:  0x5f875f,
+	66:  0x5f8787,
+	67:  0x5f87af,
+	68:  0x5f87d7,
+	69:  0x5f87ff,
+	70:  0x5faf00,
+	71:  0x5faf5f,
+	72:  0x5faf87,
+	73:  0x5fafaf,
+	74:  0x5fafd7,
+	75:  0x5fafff,
+	76:  0x5fd700,
+	77:  0x5fd75f,
+	78:  0x5fd787,
+	79:  0x5fd7af,
+	80:  0x5fd7d7,
+	81:  0x5fd7ff,
+	82:  0x5fff00,
+	83:  0x5fff5f,
+	84:  0x5fff87,
+	85:  0x5fffaf,
+	86:  0x5fffd7,
+	87:  0x5fffff,
+	88:  0x870000,
+	89:  0x87005f,
+	90:  0x870087,
+	91:  0x8700af,
+	92:  0x8700d7,
+	93:  0x8700ff,
+	94:  0x875f00,
+	95:  0x875f5f,
+	96:  0x875f87,
+	97:  0x875faf,
+	98:  0x875fd7,
+	99:  0x875fff,
+	100: 0x878700,
+	101: 0x87875f,
+	102: 0x878787,
+	103: 0x8787af,
+	104: 0x8787d7,
+	105: 0x8787ff,
+	106: 0x87af00,
+	107: 0x87af5f,
+	108: 0x87af87,
+	109: 0x87afaf,
+	110: 0x87afd7,
+	111: 0x87afff,
+	112: 0x87d700,
+	113: 0x87d75f,
+	114: 0x87d787,
+	115: 0x87d7af,
+	116: 0x87d7d7,
+	117: 0x87d7ff,
+	118: 0x87ff00,
+	119: 0x87ff5f,
+	120: 0x87ff87,
+	121: 0x87ffaf,
+	122: 0x87ffd7,
+	123: 0x87ffff,
+	124: 0xaf0000,
+	125: 0xaf005f,
+	126: 0xaf0087,
+	127: 0xaf00af,
+	128: 0xaf00d7,
+	129: 0xaf00ff,
+	130: 0xaf5f00,
+	131: 0xaf5f5f,
+	132: 0xaf5f87,
+	133: 0xaf5faf,
+	134: 0xaf5fd7,
+	135: 0xaf5fff,
+	136: 0xaf8700,
+	137: 0xaf875f,
+	138: 0xaf8787,
+	139: 0xaf87af,
+	140: 0xaf87d7,
+	141: 0xaf87ff,
+	142: 0xafaf00,
+	143: 0xafaf5f,
+	144: 0xafaf87,
+	145: 0xafafaf,
+	146: 0xafafd7,
+	147: 0xafafff,
+	148: 0xafd700,
+	149: 0xafd75f,
+	150: 0xafd787,
+	151: 0xafd7af,
+	152: 0xafd7d7,
+	153: 0xafd7ff,
+	154: 0xafff00,
+	155: 0xafff5f,
+	156: 0xafff87,
+	157: 0xafffaf,
+	158: 0xafffd7,
+	159: 0xafffff,
+	160: 0xd70000,
+	161: 0xd7005f,
+	162: 0xd70087,
+	163: 0xd700af,
+	164: 0xd700d7,
+	165: 0xd700ff,
+	166: 0xd75f00,
+	167: 0xd75f5f,
+	168: 0xd75f87,
+	169: 0xd75faf,
+	170: 0xd75fd7,
+	171: 0xd75fff,
+	172: 0xd78700,
+	173: 0xd7875f,
+	174: 0xd78787,
+	175: 0xd787af,
+	176: 0xd787d7,
+	177: 0xd787ff,
+	178: 0xd7af00,
+	179: 0xd7af5f,
+	180: 0xd7af87,
+	181: 0xd7afaf,
+	182: 0xd7afd7,
+	183: 0xd7afff,
+	184: 0xd7d700,
+	185: 0xd7d75f,
+	186: 0xd7d787,
+	187: 0xd7d7af,
+	188: 0xd7d7d7,
+	189: 0xd7d7ff,
+	190: 0xd7ff00,
+	191: 0xd7ff5f,
+	192: 0xd7ff87,
+	193: 0xd7ffaf,
+	194: 0xd7ffd7,
+	195: 0xd7ffff,
+	196: 0xff0000,
+	197: 0xff005f,
+	198: 0xff0087,
+	199: 0xff00af,
+	200: 0xff00d7,
+	201: 0xff00ff,
+	202: 0xff5f00,
+	203: 0xff5f5f,
+	204: 0xff5f87,
+	205: 0xff5faf,
+	206: 0xff5fd7,
+	207: 0xff5fff,
+	208: 0xff8700,
+	209: 0xff875f,
+	210: 0xff8787,
+	211: 0xff87af,
+	212: 0xff87d7,
+	213: 0xff87ff,
+	214: 0xffaf00,
+	215: 0xffaf5f,
+	216: 0xffaf87,
+	217: 0xffafaf,
+	218: 0xffafd7,
+	219: 0xffafff,
+	220: 0xffd700,
+	221: 0xffd75f,
+	222: 0xffd787,
+	223: 0xffd7af,
+	224: 0xffd7d7,
+	225: 0xffd7ff,
+	226: 0xffff00,
+	227: 0xffff5f,
+	228: 0xffff87,
+	229: 0xffffaf,
+	230: 0xffffd7,
+	231: 0xffffff,
+	232: 0x080808,
+	233: 0x121212,
+	234: 0x1c1c1c,
+	235: 0x262626,
+	236: 0x303030,
+	237: 0x3a3a3a,
+	238: 0x444444,
+	239: 0x4e4e4e,
+	240: 0x585858,
+	241: 0x626262,
+	242: 0x6c6c6c,
+	243: 0x767676,
+	244: 0x808080,
+	245: 0x8a8a8a,
+	246: 0x949494,
+	247: 0x9e9e9e,
+	248: 0xa8a8a8,
+	249: 0xb2b2b2,
+	250: 0xbcbcbc,
+	251: 0xc6c6c6,
+	252: 0xd0d0d0,
+	253: 0xdadada,
+	254: 0xe4e4e4,
+	255: 0xeeeeee,
+}
+
+// Write writes data to the console, translating ANSI escape sequences into Windows console API calls.
+func (w *Writer) Write(data []byte) (n int, err error) {
+	var csbi consoleScreenBufferInfo
+	procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+
+	er := bytes.NewReader(data)
+	var bw [1]byte
+loop:
+	for {
+		r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+		if r1 == 0 {
+			break loop
+		}
+
+		c1, err := er.ReadByte()
+		if err != nil {
+			break loop
+		}
+		if c1 != 0x1b {
+			bw[0] = c1
+			w.out.Write(bw[:])
+			continue
+		}
+		c2, err := er.ReadByte()
+		if err != nil {
+			w.lastbuf.WriteByte(c1)
+			break loop
+		}
+		if c2 != 0x5b {
+			w.lastbuf.WriteByte(c1)
+			w.lastbuf.WriteByte(c2)
+			continue
+		}
+
+		var buf bytes.Buffer
+		var m byte
+		for {
+			c, err := er.ReadByte()
+			if err != nil {
+				w.lastbuf.WriteByte(c1)
+				w.lastbuf.WriteByte(c2)
+				w.lastbuf.Write(buf.Bytes())
+				break loop
+			}
+			if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+				m = c
+				break
+			}
+			buf.Write([]byte(string(c)))
+		}
+
+		var csbi consoleScreenBufferInfo
+		switch m {
+		case 'A':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.y -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'B':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.y += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'C':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x += short(n) // ESC[nC moves the cursor forward (right)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'D':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x -= short(n) // ESC[nD moves the cursor back (left)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'E':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = 0
+			csbi.cursorPosition.y += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'F':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = 0
+			csbi.cursorPosition.y -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'G':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = short(n - 1)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'H':
+			token := strings.Split(buf.String(), ";")
+			if len(token) != 2 {
+				continue
+			}
+			n1, err := strconv.Atoi(token[0])
+			if err != nil {
+				continue
+			}
+			n2, err := strconv.Atoi(token[1])
+			if err != nil {
+				continue
+			}
+			csbi.cursorPosition.x = short(n2 - 1)
+			csbi.cursorPosition.y = short(n1 - 1)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'J':
+			n, err := strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			var cursor coord
+			switch n {
+			case 0:
+				cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+			case 1:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top}
+			case 2:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top}
+			}
+			var count, written dword
+			count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
+			procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+		case 'K':
+			n, err := strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			var cursor coord
+			switch n {
+			case 0:
+				cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+			case 1:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+			case 2:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+			}
+			var count, written dword
+			count = dword(csbi.size.x - csbi.cursorPosition.x)
+			procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+		case 'm':
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			attr := csbi.attributes
+			cs := buf.String()
+			if cs == "" {
+				procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
+				continue
+			}
+			token := strings.Split(cs, ";")
+			for i := 0; i < len(token); i++ {
+				ns := token[i]
+				if n, err = strconv.Atoi(ns); err == nil {
+					switch {
+					case n == 0 || n == 100:
+						attr = w.oldattr
+					case 1 <= n && n <= 5:
+						attr |= foregroundIntensity
+					case n == 7:
+						attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+					case n == 22 || n == 25:
+						attr |= foregroundIntensity
+					case n == 27:
+						attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+					case 30 <= n && n <= 37:
+						attr &= backgroundMask
+						if (n-30)&1 != 0 {
+							attr |= foregroundRed
+						}
+						if (n-30)&2 != 0 {
+							attr |= foregroundGreen
+						}
+						if (n-30)&4 != 0 {
+							attr |= foregroundBlue
+						}
+					case n == 38: // set foreground color.
+						if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
+							if n256, err := strconv.Atoi(token[i+2]); err == nil {
+								if n256foreAttr == nil {
+									n256setup()
+								}
+								attr &= backgroundMask
+								attr |= n256foreAttr[n256]
+								i += 2
+							}
+						} else {
+							attr = attr & (w.oldattr & backgroundMask)
+						}
+					case n == 39: // reset foreground color.
+						attr &= backgroundMask
+						attr |= w.oldattr & foregroundMask
+					case 40 <= n && n <= 47:
+						attr &= foregroundMask
+						if (n-40)&1 != 0 {
+							attr |= backgroundRed
+						}
+						if (n-40)&2 != 0 {
+							attr |= backgroundGreen
+						}
+						if (n-40)&4 != 0 {
+							attr |= backgroundBlue
+						}
+					case n == 48: // set background color.
+						if i < len(token)-2 && token[i+1] == "5" {
+							if n256, err := strconv.Atoi(token[i+2]); err == nil {
+								if n256backAttr == nil {
+									n256setup()
+								}
+								attr &= foregroundMask
+								attr |= n256backAttr[n256]
+								i += 2
+							}
+						} else {
+							attr = attr & (w.oldattr & foregroundMask)
+						}
+					case n == 49: // reset background color.
+						attr &= foregroundMask
+						attr |= w.oldattr & backgroundMask
+					case 90 <= n && n <= 97:
+						attr = (attr & backgroundMask)
+						attr |= foregroundIntensity
+						if (n-90)&1 != 0 {
+							attr |= foregroundRed
+						}
+						if (n-90)&2 != 0 {
+							attr |= foregroundGreen
+						}
+						if (n-90)&4 != 0 {
+							attr |= foregroundBlue
+						}
+					case 100 <= n && n <= 107:
+						attr = (attr & foregroundMask)
+						attr |= backgroundIntensity
+						if (n-100)&1 != 0 {
+							attr |= backgroundRed
+						}
+						if (n-100)&2 != 0 {
+							attr |= backgroundGreen
+						}
+						if (n-100)&4 != 0 {
+							attr |= backgroundBlue
+						}
+					}
+					procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
+				}
+			}
+		case 'h':
+			cs := buf.String()
+			if cs == "?25" {
+				var ci consoleCursorInfo
+				procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+				ci.visible = 1
+				procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+			}
+		case 'l':
+			cs := buf.String()
+			if cs == "?25" {
+				var ci consoleCursorInfo
+				procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+				ci.visible = 0
+				procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+			}
+		case 's':
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			w.oldpos = csbi.cursorPosition
+		case 'u':
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos)))
+		}
+	}
+	return len(data) - w.lastbuf.Len(), nil
+}
+
+type consoleColor struct {
+	rgb       int
+	red       bool
+	green     bool
+	blue      bool
+	intensity bool
+}
+
+func (c consoleColor) foregroundAttr() (attr word) {
+	if c.red {
+		attr |= foregroundRed
+	}
+	if c.green {
+		attr |= foregroundGreen
+	}
+	if c.blue {
+		attr |= foregroundBlue
+	}
+	if c.intensity {
+		attr |= foregroundIntensity
+	}
+	return
+}
+
+func (c consoleColor) backgroundAttr() (attr word) {
+	if c.red {
+		attr |= backgroundRed
+	}
+	if c.green {
+		attr |= backgroundGreen
+	}
+	if c.blue {
+		attr |= backgroundBlue
+	}
+	if c.intensity {
+		attr |= backgroundIntensity
+	}
+	return
+}
+
+var color16 = []consoleColor{
+	consoleColor{0x000000, false, false, false, false},
+	consoleColor{0x000080, false, false, true, false},
+	consoleColor{0x008000, false, true, false, false},
+	consoleColor{0x008080, false, true, true, false},
+	consoleColor{0x800000, true, false, false, false},
+	consoleColor{0x800080, true, false, true, false},
+	consoleColor{0x808000, true, true, false, false},
+	consoleColor{0xc0c0c0, true, true, true, false},
+	consoleColor{0x808080, false, false, false, true},
+	consoleColor{0x0000ff, false, false, true, true},
+	consoleColor{0x00ff00, false, true, false, true},
+	consoleColor{0x00ffff, false, true, true, true},
+	consoleColor{0xff0000, true, false, false, true},
+	consoleColor{0xff00ff, true, false, true, true},
+	consoleColor{0xffff00, true, true, false, true},
+	consoleColor{0xffffff, true, true, true, true},
+}
+
+type hsv struct {
+	h, s, v float32
+}
+
+func (a hsv) dist(b hsv) float32 {
+	dh := a.h - b.h
+	switch {
+	case dh > 0.5:
+		dh = 1 - dh
+	case dh < -0.5:
+		dh = -1 - dh
+	}
+	ds := a.s - b.s
+	dv := a.v - b.v
+	return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
+}
+
+func toHSV(rgb int) hsv {
+	r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
+		float32((rgb&0x00FF00)>>8)/256.0,
+		float32(rgb&0x0000FF)/256.0
+	min, max := minmax3f(r, g, b)
+	h := max - min
+	if h > 0 {
+		if max == r {
+			h = (g - b) / h
+			if h < 0 {
+				h += 6
+			}
+		} else if max == g {
+			h = 2 + (b-r)/h
+		} else {
+			h = 4 + (r-g)/h
+		}
+	}
+	h /= 6.0
+	s := max - min
+	if max != 0 {
+		s /= max
+	}
+	v := max
+	return hsv{h: h, s: s, v: v}
+}
+
+type hsvTable []hsv
+
+func toHSVTable(rgbTable []consoleColor) hsvTable {
+	t := make(hsvTable, len(rgbTable))
+	for i, c := range rgbTable {
+		t[i] = toHSV(c.rgb)
+	}
+	return t
+}
+
+func (t hsvTable) find(rgb int) consoleColor {
+	hsv := toHSV(rgb)
+	n := 7
+	l := float32(5.0)
+	for i, p := range t {
+		d := hsv.dist(p)
+		if d < l {
+			l, n = d, i
+		}
+	}
+	return color16[n]
+}
+
+func minmax3f(a, b, c float32) (min, max float32) {
+	if a < b {
+		if b < c {
+			return a, c
+		} else if a < c {
+			return a, b
+		} else {
+			return c, b
+		}
+	} else {
+		if a < c {
+			return b, c
+		} else if b < c {
+			return b, a
+		} else {
+			return c, a
+		}
+	}
+}
+
+var n256foreAttr []word
+var n256backAttr []word
+
+func n256setup() {
+	n256foreAttr = make([]word, 256)
+	n256backAttr = make([]word, 256)
+	t := toHSVTable(color16)
+	for i, rgb := range color256 {
+		c := t.find(rgb)
+		n256foreAttr[i] = c.foregroundAttr()
+		n256backAttr[i] = c.backgroundAttr()
+	}
+}
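+
+// Example (sketch): after n256setup(), the 256-color escape "\x1b[38;5;196m"
+// (xterm color 196, rgb 0xff0000) is approximated by n256foreAttr[196], i.e.
+// foregroundRed|foregroundIntensity.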
diff --git a/switchq/vendor/github.com/mattn/go-colorable/noncolorable.go b/switchq/vendor/github.com/mattn/go-colorable/noncolorable.go
new file mode 100644
index 0000000..ca588c7
--- /dev/null
+++ b/switchq/vendor/github.com/mattn/go-colorable/noncolorable.go
@@ -0,0 +1,61 @@
+package colorable
+
+import (
+	"bytes"
+	"io"
+)
+
+// NonColorable holds a writer and removes escape sequences from what is written.
+type NonColorable struct {
+	out     io.Writer
+	lastbuf bytes.Buffer
+}
+
+// NewNonColorable returns a new instance of Writer which removes escape sequences from the given Writer.
+func NewNonColorable(w io.Writer) io.Writer {
+	return &NonColorable{out: w}
+}
+
+// Write writes data to the underlying writer, stripping ANSI escape sequences.
+func (w *NonColorable) Write(data []byte) (n int, err error) {
+	er := bytes.NewReader(data)
+	var bw [1]byte
+loop:
+	for {
+		c1, err := er.ReadByte()
+		if err != nil {
+			break loop
+		}
+		if c1 != 0x1b {
+			bw[0] = c1
+			w.out.Write(bw[:])
+			continue
+		}
+		c2, err := er.ReadByte()
+		if err != nil {
+			w.lastbuf.WriteByte(c1)
+			break loop
+		}
+		if c2 != 0x5b {
+			w.lastbuf.WriteByte(c1)
+			w.lastbuf.WriteByte(c2)
+			continue
+		}
+
+		var buf bytes.Buffer
+		for {
+			c, err := er.ReadByte()
+			if err != nil {
+				w.lastbuf.WriteByte(c1)
+				w.lastbuf.WriteByte(c2)
+				w.lastbuf.Write(buf.Bytes())
+				break loop
+			}
+			if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+				break
+			}
+			buf.Write([]byte(string(c)))
+		}
+	}
+	return len(data) - w.lastbuf.Len(), nil
+}
diff --git a/switchq/vendor/github.com/mattn/go-isatty/LICENSE b/switchq/vendor/github.com/mattn/go-isatty/LICENSE
new file mode 100644
index 0000000..65dc692
--- /dev/null
+++ b/switchq/vendor/github.com/mattn/go-isatty/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
+
+MIT License (Expat)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/switchq/vendor/github.com/mattn/go-isatty/README.md b/switchq/vendor/github.com/mattn/go-isatty/README.md
new file mode 100644
index 0000000..74845de
--- /dev/null
+++ b/switchq/vendor/github.com/mattn/go-isatty/README.md
@@ -0,0 +1,37 @@
+# go-isatty
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/mattn/go-isatty"
+	"os"
+)
+
+func main() {
+	if isatty.IsTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Terminal")
+	} else {
+		fmt.Println("Is Not Terminal")
+	}
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/switchq/vendor/github.com/mattn/go-isatty/doc.go b/switchq/vendor/github.com/mattn/go-isatty/doc.go
new file mode 100644
index 0000000..17d4f90
--- /dev/null
+++ b/switchq/vendor/github.com/mattn/go-isatty/doc.go
@@ -0,0 +1,2 @@
+// Package isatty implements an interface to isatty.
+package isatty
diff --git a/switchq/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/switchq/vendor/github.com/mattn/go-isatty/isatty_appengine.go
new file mode 100644
index 0000000..83c5887
--- /dev/null
+++ b/switchq/vendor/github.com/mattn/go-isatty/isatty_appengine.go
@@ -0,0 +1,9 @@
+// +build appengine
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is a terminal, which
+// is always false on App Engine classic, a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/switchq/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/switchq/vendor/github.com/mattn/go-isatty/isatty_bsd.go
new file mode 100644
index 0000000..42f2514
--- /dev/null
+++ b/switchq/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -0,0 +1,18 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/switchq/vendor/github.com/mattn/go-isatty/isatty_linux.go b/switchq/vendor/github.com/mattn/go-isatty/isatty_linux.go
new file mode 100644
index 0000000..9d24bac
--- /dev/null
+++ b/switchq/vendor/github.com/mattn/go-isatty/isatty_linux.go
@@ -0,0 +1,18 @@
+// +build linux
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/switchq/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/switchq/vendor/github.com/mattn/go-isatty/isatty_solaris.go
new file mode 100644
index 0000000..1f0c6bf
--- /dev/null
+++ b/switchq/vendor/github.com/mattn/go-isatty/isatty_solaris.go
@@ -0,0 +1,16 @@
+// +build solaris
+// +build !appengine
+
+package isatty
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
+func IsTerminal(fd uintptr) bool {
+	var termio unix.Termio
+	err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
+	return err == nil
+}
diff --git a/switchq/vendor/github.com/mattn/go-isatty/isatty_windows.go b/switchq/vendor/github.com/mattn/go-isatty/isatty_windows.go
new file mode 100644
index 0000000..83c398b
--- /dev/null
+++ b/switchq/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -0,0 +1,19 @@
+// +build windows
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+
+// IsTerminal returns true if the file descriptor is a terminal.
+func IsTerminal(fd uintptr) bool {
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
diff --git a/switchq/vendor/golang.org/x/crypto/LICENSE b/switchq/vendor/golang.org/x/crypto/LICENSE
new file mode 100644
index 0000000..6a66aea
--- /dev/null
+++ b/switchq/vendor/golang.org/x/crypto/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/switchq/vendor/golang.org/x/crypto/PATENTS b/switchq/vendor/golang.org/x/crypto/PATENTS
new file mode 100644
index 0000000..7330990
--- /dev/null
+++ b/switchq/vendor/golang.org/x/crypto/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the Go project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of Go, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of Go.  This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation.  If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of Go or any code incorporated within this
+implementation of Go constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of Go
+shall terminate as of the date such litigation is filed.
diff --git a/switchq/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/switchq/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
new file mode 100644
index 0000000..593f653
--- /dev/null
+++ b/switchq/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
@@ -0,0 +1,77 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC
+2898 / PKCS #5 v2.0.
+
+A key derivation function is useful when encrypting data based on a password
+or any other not-fully-random data. It uses a pseudorandom function to derive
+a secure encryption key based on the password.
+
+While v2.0 of the standard defines only one pseudorandom function to use,
+HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved
+Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
+choose, you can pass the `New` functions from the different SHA packages to
+pbkdf2.Key.
+*/
+package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
+
+import (
+	"crypto/hmac"
+	"hash"
+)
+
+// Key derives a key from the password, salt and iteration count, returning a
+// []byte of length keyLen that can be used as a cryptographic key. The key is
+// derived based on the method described as PBKDF2 with the HMAC variant using
+// the supplied hash function.
+//
+// For example, to use an HMAC-SHA-1 based PBKDF2 key derivation function, you
+// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by
+// doing:
+//
+// 	dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New)
+//
+// Remember to get a good random salt. At least 8 bytes is recommended by the
+// RFC.
+//
+// Using a higher iteration count will increase the cost of an exhaustive
+// search but will also make derivation proportionally slower.
+func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte {
+	prf := hmac.New(h, password)
+	hashLen := prf.Size()
+	numBlocks := (keyLen + hashLen - 1) / hashLen
+
+	var buf [4]byte
+	dk := make([]byte, 0, numBlocks*hashLen)
+	U := make([]byte, hashLen)
+	for block := 1; block <= numBlocks; block++ {
+		// N.B.: || means concatenation, ^ means XOR
+		// for each block T_i = U_1 ^ U_2 ^ ... ^ U_iter
+		// U_1 = PRF(password, salt || uint(i))
+		prf.Reset()
+		prf.Write(salt)
+		buf[0] = byte(block >> 24)
+		buf[1] = byte(block >> 16)
+		buf[2] = byte(block >> 8)
+		buf[3] = byte(block)
+		prf.Write(buf[:4])
+		dk = prf.Sum(dk)
+		T := dk[len(dk)-hashLen:]
+		copy(U, T)
+
+		// U_n = PRF(password, U_(n-1))
+		for n := 2; n <= iter; n++ {
+			prf.Reset()
+			prf.Write(U)
+			U = U[:0]
+			U = prf.Sum(U)
+			for x := range U {
+				T[x] ^= U[x]
+			}
+		}
+	}
+	return dk[:keyLen]
+}
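+
+// Example (sketch, standard library only): derive a 32-byte key suitable for
+// AES-256, with a random salt.
+//
+//	salt := make([]byte, 8)
+//	rand.Read(salt) // crypto/rand
+//	dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha256.New)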
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/LICENSE b/switchq/vendor/gopkg.in/juju/names.v2/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/README.md b/switchq/vendor/gopkg.in/juju/names.v2/README.md
new file mode 100644
index 0000000..ecacdc7
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/README.md
@@ -0,0 +1,4 @@
+juju/names
+============
+
+This package provides helpers for handling Juju entity names.
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/action.go b/switchq/vendor/gopkg.in/juju/names.v2/action.go
new file mode 100644
index 0000000..3211881
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/action.go
@@ -0,0 +1,79 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+
+	"github.com/juju/errors"
+	"github.com/juju/utils"
+)
+
+const ActionTagKind = "action"
+
+type ActionTag struct {
+	// Tags that are serialized need to have fields exported.
+	ID utils.UUID
+}
+
+// NewActionTag returns the tag of an action with the given id (UUID).
+func NewActionTag(id string) ActionTag {
+	uuid, err := utils.UUIDFromString(id)
+	if err != nil {
+		panic(err)
+	}
+	return ActionTag{ID: uuid}
+}
+
+// ParseActionTag parses an action tag string.
+func ParseActionTag(actionTag string) (ActionTag, error) {
+	tag, err := ParseTag(actionTag)
+	if err != nil {
+		return ActionTag{}, err
+	}
+	at, ok := tag.(ActionTag)
+	if !ok {
+		return ActionTag{}, invalidTagError(actionTag, ActionTagKind)
+	}
+	return at, nil
+}
+
+func (t ActionTag) String() string { return t.Kind() + "-" + t.Id() }
+func (t ActionTag) Kind() string   { return ActionTagKind }
+func (t ActionTag) Id() string     { return t.ID.String() }
+
+// IsValidAction returns whether id is a valid action id (UUID).
+func IsValidAction(id string) bool {
+	return utils.IsValidUUIDString(id)
+}
+
+// ActionReceiverTag returns an ActionReceiver Tag from a
+// machine or unit name.
+func ActionReceiverTag(name string) (Tag, error) {
+	if IsValidUnit(name) {
+		return NewUnitTag(name), nil
+	}
+	if IsValidApplication(name) {
+		// TODO(jcw4) enable when leader elections complete
+		//return NewApplicationTag(name), nil
+	}
+	if IsValidMachine(name) {
+		return NewMachineTag(name), nil
+	}
+	return nil, fmt.Errorf("invalid actionreceiver name %q", name)
+}
+
+// ActionReceiverFromTag returns an ActionReceiver tag from
+// a machine or unit tag.
+func ActionReceiverFromTag(tag string) (Tag, error) {
+	unitTag, err := ParseUnitTag(tag)
+	if err == nil {
+		return unitTag, nil
+	}
+	machineTag, err := ParseMachineTag(tag)
+	if err == nil {
+		return machineTag, nil
+	}
+	return nil, errors.Errorf("invalid actionreceiver tag %q", tag)
+}
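
A minimal usage sketch of the action helpers above (not part of the diff; it assumes the vendored package is importable at gopkg.in/juju/names.v2):

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        // Action ids are UUIDs; the tag form is "action-" + UUID.
        id := "11111111-2222-4333-8444-555555555555"
        if names.IsValidAction(id) {
            tag := names.NewActionTag(id)
            fmt.Println(tag) // action-11111111-2222-4333-8444-555555555555
        }
        // Receivers may be units or machines; application receivers are
        // still disabled (see the TODO above).
        recv, err := names.ActionReceiverTag("mysql/0")
        fmt.Println(recv, err) // unit-mysql-0 <nil>
    }
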
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/application.go b/switchq/vendor/gopkg.in/juju/names.v2/application.go
new file mode 100644
index 0000000..aa8985e
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/application.go
@@ -0,0 +1,48 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"regexp"
+)
+
+const ApplicationTagKind = "application"
+
+const (
+	ApplicationSnippet = "(?:[a-z][a-z0-9]*(?:-[a-z0-9]*[a-z][a-z0-9]*)*)"
+	NumberSnippet      = "(?:0|[1-9][0-9]*)"
+)
+
+var validApplication = regexp.MustCompile("^" + ApplicationSnippet + "$")
+
+// IsValidApplication returns whether name is a valid application name.
+func IsValidApplication(name string) bool {
+	return validApplication.MatchString(name)
+}
+
+type ApplicationTag struct {
+	Name string
+}
+
+func (t ApplicationTag) String() string { return t.Kind() + "-" + t.Id() }
+func (t ApplicationTag) Kind() string   { return ApplicationTagKind }
+func (t ApplicationTag) Id() string     { return t.Name }
+
+// NewApplicationTag returns the tag for the application with the given name.
+func NewApplicationTag(applicationName string) ApplicationTag {
+	return ApplicationTag{Name: applicationName}
+}
+
+// ParseApplicationTag parses an application tag string.
+func ParseApplicationTag(applicationTag string) (ApplicationTag, error) {
+	tag, err := ParseTag(applicationTag)
+	if err != nil {
+		return ApplicationTag{}, err
+	}
+	st, ok := tag.(ApplicationTag)
+	if !ok {
+		return ApplicationTag{}, invalidTagError(applicationTag, ApplicationTagKind)
+	}
+	return st, nil
+}
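
The subtle part of ApplicationSnippet is that any hyphen-separated run must contain a letter. A quick sketch (not part of the diff):

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        // Lowercase alphanumerics; each hyphenated run needs a letter.
        for _, name := range []string{"wordpress", "rabbitmq-server", "mysql-2", "Nginx"} {
            fmt.Println(name, names.IsValidApplication(name))
        }
        // Output: true, true, false (digit-only "-2" run), false (uppercase)
    }
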
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/charm.go b/switchq/vendor/gopkg.in/juju/names.v2/charm.go
new file mode 100644
index 0000000..3984426
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/charm.go
@@ -0,0 +1,105 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+)
+
+// CharmTagKind specifies charm tag kind
+const CharmTagKind = "charm"
+
+// A valid charm URL can be in either V1 or V3 format. (V2 is a
+// charmstore web URL like https://jujucharms.com/postgresql/105, but
+// that's not valid as a tag.)
+//
+// V1 is of the form:
+// schema:~user/series/name-revision
+// where
+//     schema    is optional and can be either "local" or "cs".
+//               When not supplied, "cs" is implied.
+//     user      is optional and is only applicable for "cs" schema
+//     series    is optional and is a valid series name
+//     name      is mandatory and is the name of the charm
+//     revision  is optional and can be -1 if revision is unset
+//
+// V3 is of the form
+// schema:user/name/series/revision
+// with the same fields and constraints as the V1 format.
+
+var (
+	// SeriesSnippet is a regular expression representing series
+	SeriesSnippet = "[a-z]+([a-z0-9]+)?"
+
+	// CharmNameSnippet is a regular expression representing charm name
+	CharmNameSnippet = "[a-z][a-z0-9]*(-[a-z0-9]*[a-z][a-z0-9]*)*"
+
+	localSchemaSnippet        = "local:"
+	v1CharmStoreSchemaSnippet = "cs:(~" + validUserNameSnippet + "/)?"
+	revisionSnippet           = "(-1|0|[1-9][0-9]*)"
+
+	validV1CharmRegEx = regexp.MustCompile("^(" +
+		localSchemaSnippet + "|" +
+		v1CharmStoreSchemaSnippet + ")?(" +
+		SeriesSnippet + "/)?" +
+		CharmNameSnippet + "(-" +
+		revisionSnippet + ")?$")
+
+	v3CharmStoreSchemaSnippet = "(cs:)?(" + validUserNameSnippet + "/)?"
+
+	validV3CharmRegEx = regexp.MustCompile("^(" +
+		localSchemaSnippet + "|" +
+		v3CharmStoreSchemaSnippet + ")" +
+		CharmNameSnippet + "(/" +
+		SeriesSnippet + ")?(/" +
+		revisionSnippet + ")?$")
+)
+
+// CharmTag represents tag for charm
+// using charm's URL
+type CharmTag struct {
+	url string
+}
+
+// String satisfies Tag interface.
+// Produces string representation of charm tag.
+func (t CharmTag) String() string { return t.Kind() + "-" + t.Id() }
+
+// Kind satisfies Tag interface.
+// Returns Charm tag kind.
+func (t CharmTag) Kind() string { return CharmTagKind }
+
+// Id satisfies Tag interface.
+// Returns charm URL.
+func (t CharmTag) Id() string { return t.url }
+
+// NewCharmTag returns the tag for the charm with the given url.
+// It will panic if the given charm url is not valid.
+func NewCharmTag(charmURL string) CharmTag {
+	if !IsValidCharm(charmURL) {
+		panic(fmt.Sprintf("%q is not a valid charm name", charmURL))
+	}
+	return CharmTag{url: charmURL}
+}
+
+var emptyTag = CharmTag{}
+
+// ParseCharmTag parses a charm tag string.
+func ParseCharmTag(charmTag string) (CharmTag, error) {
+	tag, err := ParseTag(charmTag)
+	if err != nil {
+		return emptyTag, err
+	}
+	ct, ok := tag.(CharmTag)
+	if !ok {
+		return emptyTag, invalidTagError(charmTag, CharmTagKind)
+	}
+	return ct, nil
+}
+
+// IsValidCharm returns whether name is a valid charm url.
+func IsValidCharm(url string) bool {
+	return validV1CharmRegEx.MatchString(url) || validV3CharmRegEx.MatchString(url)
+}
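
To make the V1/V3 comment above concrete, a sketch of which URLs IsValidCharm accepts (not part of the diff):

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        urls := []string{
            "cs:~user/trusty/mysql-38",              // V1: schema, user, series, revision
            "local:trusty/mysql",                    // V1: local schema, no revision
            "cs:user/mysql/trusty/38",               // V3: name before series and revision
            "https://jujucharms.com/postgresql/105", // V2 web URL: rejected
        }
        for _, u := range urls {
            fmt.Println(u, names.IsValidCharm(u)) // true, true, true, false
        }
    }
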
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/cloud.go b/switchq/vendor/gopkg.in/juju/names.v2/cloud.go
new file mode 100644
index 0000000..78a3c8b
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/cloud.go
@@ -0,0 +1,51 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+)
+
+const CloudTagKind = "cloud"
+
+var (
+	cloudSnippet = "[a-zA-Z0-9][a-zA-Z0-9.-]*"
+	validCloud   = regexp.MustCompile("^" + cloudSnippet + "$")
+)
+
+type CloudTag struct {
+	id string
+}
+
+func (t CloudTag) String() string { return t.Kind() + "-" + t.id }
+func (t CloudTag) Kind() string   { return CloudTagKind }
+func (t CloudTag) Id() string     { return t.id }
+
+// NewCloudTag returns the tag for the cloud with the given ID.
+// It will panic if the given cloud ID is not valid.
+func NewCloudTag(id string) CloudTag {
+	if !IsValidCloud(id) {
+		panic(fmt.Sprintf("%q is not a valid cloud ID", id))
+	}
+	return CloudTag{id}
+}
+
+// ParseCloudTag parses a cloud tag string.
+func ParseCloudTag(cloudTag string) (CloudTag, error) {
+	tag, err := ParseTag(cloudTag)
+	if err != nil {
+		return CloudTag{}, err
+	}
+	dt, ok := tag.(CloudTag)
+	if !ok {
+		return CloudTag{}, invalidTagError(cloudTag, CloudTagKind)
+	}
+	return dt, nil
+}
+
+// IsValidCloud returns whether id is a valid cloud ID.
+func IsValidCloud(id string) bool {
+	return validCloud.MatchString(id)
+}
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/cloudcredential.go b/switchq/vendor/gopkg.in/juju/names.v2/cloudcredential.go
new file mode 100644
index 0000000..1d7252d
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/cloudcredential.go
@@ -0,0 +1,120 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"net/url"
+	"regexp"
+	"strings"
+)
+
+const CloudCredentialTagKind = "cloudcred"
+
+var (
+	cloudCredentialNameSnippet = "[a-zA-Z][a-zA-Z0-9.@_-]*"
+	validCloudCredentialName   = regexp.MustCompile("^" + cloudCredentialNameSnippet + "$")
+	validCloudCredential       = regexp.MustCompile(
+		"^" +
+			"(" + cloudSnippet + ")" +
+			"/(" + validUserSnippet + ")" + // credential owner
+			"/(" + cloudCredentialNameSnippet + ")" +
+			"$",
+	)
+)
+
+type CloudCredentialTag struct {
+	cloud CloudTag
+	owner UserTag
+	name  string
+}
+
+// IsZero reports whether t is zero.
+func (t CloudCredentialTag) IsZero() bool {
+	return t == CloudCredentialTag{}
+}
+
+// Kind is part of the Tag interface.
+func (t CloudCredentialTag) Kind() string { return CloudCredentialTagKind }
+
+// Id implements Tag.Id. It returns the empty string
+// if t is zero.
+func (t CloudCredentialTag) Id() string {
+	if t.IsZero() {
+		return ""
+	}
+	return fmt.Sprintf("%s/%s/%s", t.cloud.Id(), t.owner.Id(), t.name)
+}
+
+func quoteCredentialSeparator(in string) string {
+	return strings.Replace(in, "_", `%5f`, -1)
+}
+
+// String implements Tag.String. It returns the empty
+// string if t is zero.
+func (t CloudCredentialTag) String() string {
+	if t.IsZero() {
+		return ""
+	}
+	return fmt.Sprintf("%s-%s_%s_%s", t.Kind(),
+		quoteCredentialSeparator(t.cloud.Id()),
+		quoteCredentialSeparator(t.owner.Id()),
+		quoteCredentialSeparator(t.name))
+}
+
+// Cloud returns the tag of the cloud to which the credential pertains.
+func (t CloudCredentialTag) Cloud() CloudTag {
+	return t.cloud
+}
+
+// Owner returns the tag of the user that owns the credential.
+func (t CloudCredentialTag) Owner() UserTag {
+	return t.owner
+}
+
+// Name returns the cloud credential name, excluding the
+// cloud and owner qualifiers.
+func (t CloudCredentialTag) Name() string {
+	return t.name
+}
+
+// NewCloudCredentialTag returns the tag for the cloud credential with the
+// given ID. It will panic if the given cloud credential ID is not valid.
+func NewCloudCredentialTag(id string) CloudCredentialTag {
+	parts := validCloudCredential.FindStringSubmatch(id)
+	if len(parts) != 4 {
+		panic(fmt.Sprintf("%q is not a valid cloud credential ID", id))
+	}
+	cloud := NewCloudTag(parts[1])
+	owner := NewUserTag(parts[2])
+	return CloudCredentialTag{cloud, owner, parts[3]}
+}
+
+// ParseCloudCredentialTag parses a cloud credential tag string.
+func ParseCloudCredentialTag(s string) (CloudCredentialTag, error) {
+	tag, err := ParseTag(s)
+	if err != nil {
+		return CloudCredentialTag{}, err
+	}
+	dt, ok := tag.(CloudCredentialTag)
+	if !ok {
+		return CloudCredentialTag{}, invalidTagError(s, CloudCredentialTagKind)
+	}
+	return dt, nil
+}
+
+// IsValidCloudCredential returns whether id is a valid cloud credential ID.
+func IsValidCloudCredential(id string) bool {
+	return validCloudCredential.MatchString(id)
+}
+
+// IsValidCloudCredentialName returns whether name is a valid cloud credential name.
+func IsValidCloudCredentialName(name string) bool {
+	return validCloudCredentialName.MatchString(name)
+}
+
+func cloudCredentialTagSuffixToId(s string) (string, error) {
+	s = strings.Replace(s, "_", "/", -1)
+	return url.QueryUnescape(s)
+}
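
The underscore/%5f quoting above is easiest to see round-tripped; a sketch (not part of the diff):

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        // IDs are cloud/owner/name; "_" separates the parts in the tag
        // string, so underscores inside a part are quoted as %5f.
        t := names.NewCloudCredentialTag("aws/bob/my_cred")
        fmt.Println(t.String()) // cloudcred-aws_bob_my%5fcred

        parsed, err := names.ParseCloudCredentialTag(t.String())
        fmt.Println(parsed.Name(), err) // my_cred <nil>
    }
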
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/controller.go b/switchq/vendor/gopkg.in/juju/names.v2/controller.go
new file mode 100644
index 0000000..c48c326
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/controller.go
@@ -0,0 +1,56 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"regexp"
+)
+
+// ControllerTagKind indicates that a tag belongs to a controller.
+const ControllerTagKind = "controller"
+
+// ControllerTag represents a tag used to describe a controller.
+type ControllerTag struct {
+	uuid string
+}
+
+// Lowercase letters, digits and (non-leading) hyphens.
+var validControllerName = regexp.MustCompile(`^[a-z0-9]+[a-z0-9-]*$`)
+
+// NewControllerTag returns the tag of a controller with the given controller UUID.
+func NewControllerTag(uuid string) ControllerTag {
+	return ControllerTag{uuid: uuid}
+}
+
+// ParseControllerTag parses a controller tag string.
+func ParseControllerTag(controllerTag string) (ControllerTag, error) {
+	tag, err := ParseTag(controllerTag)
+	if err != nil {
+		return ControllerTag{}, err
+	}
+	et, ok := tag.(ControllerTag)
+	if !ok {
+		return ControllerTag{}, invalidTagError(controllerTag, ControllerTagKind)
+	}
+	return et, nil
+}
+
+// String implements Tag.
+func (t ControllerTag) String() string { return t.Kind() + "-" + t.Id() }
+
+// Kind implements Tag.
+func (t ControllerTag) Kind() string { return ControllerTagKind }
+
+// Id implements Tag.
+func (t ControllerTag) Id() string { return t.uuid }
+
+// IsValidController returns whether id is a valid controller UUID.
+func IsValidController(id string) bool {
+	return validUUID.MatchString(id)
+}
+
+// IsValidControllerName returns whether name is a valid string safe for a controller name.
+func IsValidControllerName(name string) bool {
+	return validControllerName.MatchString(name)
+}
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/environ.go b/switchq/vendor/gopkg.in/juju/names.v2/environ.go
new file mode 100644
index 0000000..db02d00
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/environ.go
@@ -0,0 +1,38 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+// EnvironTagKind is DEPRECATED: model tags are used instead.
+const EnvironTagKind = "environment"
+
+type EnvironTag struct {
+	uuid string
+}
+
+// NewEnvironTag returns the tag of an environment with the given environment UUID.
+func NewEnvironTag(uuid string) EnvironTag {
+	return EnvironTag{uuid: uuid}
+}
+
+// ParseEnvironTag parses an environ tag string.
+func ParseEnvironTag(environTag string) (EnvironTag, error) {
+	tag, err := ParseTag(environTag)
+	if err != nil {
+		return EnvironTag{}, err
+	}
+	et, ok := tag.(EnvironTag)
+	if !ok {
+		return EnvironTag{}, invalidTagError(environTag, EnvironTagKind)
+	}
+	return et, nil
+}
+
+func (t EnvironTag) String() string { return t.Kind() + "-" + t.Id() }
+func (t EnvironTag) Kind() string   { return EnvironTagKind }
+func (t EnvironTag) Id() string     { return t.uuid }
+
+// IsValidEnvironment returns whether id is a valid environment UUID.
+func IsValidEnvironment(id string) bool {
+	return validUUID.MatchString(id)
+}
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/filesystem.go b/switchq/vendor/gopkg.in/juju/names.v2/filesystem.go
new file mode 100644
index 0000000..44ecc96
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/filesystem.go
@@ -0,0 +1,76 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const FilesystemTagKind = "filesystem"
+
+// Filesystems may be bound to a machine, meaning that the filesystem cannot
+// exist without that machine. We encode this in the tag.
+var validFilesystem = regexp.MustCompile("^(" + MachineSnippet + "/)?" + NumberSnippet + "$")
+
+type FilesystemTag struct {
+	id string
+}
+
+func (t FilesystemTag) String() string { return t.Kind() + "-" + t.id }
+func (t FilesystemTag) Kind() string   { return FilesystemTagKind }
+func (t FilesystemTag) Id() string     { return filesystemTagSuffixToId(t.id) }
+
+// NewFilesystemTag returns the tag for the filesystem with the given id.
+// It will panic if the given filesystem id is not valid.
+func NewFilesystemTag(id string) FilesystemTag {
+	tag, ok := tagFromFilesystemId(id)
+	if !ok {
+		panic(fmt.Sprintf("%q is not a valid filesystem id", id))
+	}
+	return tag
+}
+
+// ParseFilesystemTag parses a filesystem tag string.
+func ParseFilesystemTag(filesystemTag string) (FilesystemTag, error) {
+	tag, err := ParseTag(filesystemTag)
+	if err != nil {
+		return FilesystemTag{}, err
+	}
+	fstag, ok := tag.(FilesystemTag)
+	if !ok {
+		return FilesystemTag{}, invalidTagError(filesystemTag, FilesystemTagKind)
+	}
+	return fstag, nil
+}
+
+// IsValidFilesystem returns whether id is a valid filesystem id.
+func IsValidFilesystem(id string) bool {
+	return validFilesystem.MatchString(id)
+}
+
+// FilesystemMachine returns the machine component of the filesystem
+// tag, and a boolean indicating whether or not there is a
+// machine component.
+func FilesystemMachine(tag FilesystemTag) (MachineTag, bool) {
+	id := tag.Id()
+	pos := strings.LastIndex(id, "/")
+	if pos == -1 {
+		return MachineTag{}, false
+	}
+	return NewMachineTag(id[:pos]), true
+}
+
+func tagFromFilesystemId(id string) (FilesystemTag, bool) {
+	if !IsValidFilesystem(id) {
+		return FilesystemTag{}, false
+	}
+	id = strings.Replace(id, "/", "-", -1)
+	return FilesystemTag{id}, true
+}
+
+func filesystemTagSuffixToId(s string) string {
+	return strings.Replace(s, "-", "/", -1)
+}
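
Filesystem ids embed the bound machine, and every "/" is flattened to "-" in the tag (volume.go below uses the same scheme). A sketch (not part of the diff):

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        // A filesystem bound to container machine 0/lxd/1, sequence 5.
        t := names.NewFilesystemTag("0/lxd/1/5")
        fmt.Println(t.String()) // filesystem-0-lxd-1-5
        fmt.Println(t.Id())     // 0/lxd/1/5

        if m, ok := names.FilesystemMachine(t); ok {
            fmt.Println(m.Id()) // 0/lxd/1
        }
    }
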
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/ipaddress.go b/switchq/vendor/gopkg.in/juju/names.v2/ipaddress.go
new file mode 100644
index 0000000..b92a389
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/ipaddress.go
@@ -0,0 +1,46 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"github.com/juju/utils"
+)
+
+const IPAddressTagKind = "ipaddress"
+
+// IsValidIPAddress returns whether id is a valid IP address ID.
+// Currently this simply checks that id is a valid UUID.
+func IsValidIPAddress(id string) bool {
+	return utils.IsValidUUIDString(id)
+}
+
+type IPAddressTag struct {
+	id utils.UUID
+}
+
+func (t IPAddressTag) String() string { return t.Kind() + "-" + t.id.String() }
+func (t IPAddressTag) Kind() string   { return IPAddressTagKind }
+func (t IPAddressTag) Id() string     { return t.id.String() }
+
+// NewIPAddressTag returns the tag for the IP address with the given ID (UUID).
+func NewIPAddressTag(id string) IPAddressTag {
+	uuid, err := utils.UUIDFromString(id)
+	if err != nil {
+		panic(err)
+	}
+	return IPAddressTag{id: uuid}
+}
+
+// ParseIPAddressTag parses an IP address tag string.
+func ParseIPAddressTag(ipAddressTag string) (IPAddressTag, error) {
+	tag, err := ParseTag(ipAddressTag)
+	if err != nil {
+		return IPAddressTag{}, err
+	}
+	ipat, ok := tag.(IPAddressTag)
+	if !ok {
+		return IPAddressTag{}, invalidTagError(ipAddressTag, IPAddressTagKind)
+	}
+	return ipat, nil
+}
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/machine.go b/switchq/vendor/gopkg.in/juju/names.v2/machine.go
new file mode 100644
index 0000000..867aa72
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/machine.go
@@ -0,0 +1,60 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"regexp"
+	"strings"
+)
+
+const MachineTagKind = "machine"
+
+const (
+	ContainerTypeSnippet = "[a-z]+"
+	ContainerSnippet     = "/" + ContainerTypeSnippet + "/" + NumberSnippet + ""
+	MachineSnippet       = NumberSnippet + "(?:" + ContainerSnippet + ")*"
+)
+
+var validMachine = regexp.MustCompile("^" + MachineSnippet + "$")
+
+// IsValidMachine returns whether id is a valid machine id.
+func IsValidMachine(id string) bool {
+	return validMachine.MatchString(id)
+}
+
+// IsContainerMachine returns whether id is a valid container machine id.
+func IsContainerMachine(id string) bool {
+	return validMachine.MatchString(id) && strings.Contains(id, "/")
+}
+
+type MachineTag struct {
+	id string
+}
+
+func (t MachineTag) String() string { return t.Kind() + "-" + t.id }
+func (t MachineTag) Kind() string   { return MachineTagKind }
+func (t MachineTag) Id() string     { return machineTagSuffixToId(t.id) }
+
+// NewMachineTag returns the tag for the machine with the given id.
+func NewMachineTag(id string) MachineTag {
+	id = strings.Replace(id, "/", "-", -1)
+	return MachineTag{id: id}
+}
+
+// ParseMachineTag parses a machine tag string.
+func ParseMachineTag(machineTag string) (MachineTag, error) {
+	tag, err := ParseTag(machineTag)
+	if err != nil {
+		return MachineTag{}, err
+	}
+	mt, ok := tag.(MachineTag)
+	if !ok {
+		return MachineTag{}, invalidTagError(machineTag, MachineTagKind)
+	}
+	return mt, nil
+}
+
+func machineTagSuffixToId(s string) string {
+	return strings.Replace(s, "-", "/", -1)
+}
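
Container ids nest as number(/type/number)*; a sketch of the id/tag mapping (not part of the diff):

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        // lxd container 0 on machine 1.
        fmt.Println(names.IsContainerMachine("1/lxd/0")) // true
        fmt.Println(names.IsContainerMachine("1"))       // false

        t := names.NewMachineTag("1/lxd/0")
        fmt.Println(t.String()) // machine-1-lxd-0
        fmt.Println(t.Id())     // 1/lxd/0
    }
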
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/model.go b/switchq/vendor/gopkg.in/juju/names.v2/model.go
new file mode 100644
index 0000000..60e0db2
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/model.go
@@ -0,0 +1,52 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"regexp"
+)
+
+const ModelTagKind = "model"
+
+// ModelTag represents a tag used to describe a model.
+type ModelTag struct {
+	uuid string
+}
+
+var validUUID = regexp.MustCompile(`[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}`)
+
+// Lowercase letters, digits and (non-leading) hyphens, as per LP:1568944 #5.
+var validModelName = regexp.MustCompile(`^[a-z0-9]+[a-z0-9-]*$`)
+
+// NewModelTag returns the tag of a model with the given model UUID.
+func NewModelTag(uuid string) ModelTag {
+	return ModelTag{uuid: uuid}
+}
+
+// ParseModelTag parses a model tag string.
+func ParseModelTag(modelTag string) (ModelTag, error) {
+	tag, err := ParseTag(modelTag)
+	if err != nil {
+		return ModelTag{}, err
+	}
+	et, ok := tag.(ModelTag)
+	if !ok {
+		return ModelTag{}, invalidTagError(modelTag, ModelTagKind)
+	}
+	return et, nil
+}
+
+func (t ModelTag) String() string { return t.Kind() + "-" + t.Id() }
+func (t ModelTag) Kind() string   { return ModelTagKind }
+func (t ModelTag) Id() string     { return t.uuid }
+
+// IsValidModel returns whether id is a valid model UUID.
+func IsValidModel(id string) bool {
+	return validUUID.MatchString(id)
+}
+
+// IsValidModelName returns whether name is a valid string safe for a model name.
+func IsValidModelName(name string) bool {
+	return validModelName.MatchString(name)
+}
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/payload.go b/switchq/vendor/gopkg.in/juju/names.v2/payload.go
new file mode 100644
index 0000000..c911ffd
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/payload.go
@@ -0,0 +1,74 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"regexp"
+
+	"github.com/juju/utils"
+)
+
+const (
+	// PayloadTagKind is used as the prefix for the string
+	// representation of payload tags.
+	PayloadTagKind = "payload"
+
+	// This can be expanded later, as needed.
+	payloadClass = "([a-zA-Z](?:[a-zA-Z0-9-]*[a-zA-Z0-9])?)"
+)
+
+var validPayload = regexp.MustCompile("^" + payloadClass + "$")
+
+// IsValidPayload returns whether id is a valid Juju ID for
+// a charm payload. The ID must start with a letter and contain only
+// alphanumerics and interior hyphens.
+func IsValidPayload(id string) bool {
+	return validPayload.MatchString(id)
+}
+
+// For compatibility with Juju 1.25, UUIDs are also supported.
+func isValidPayload(id string) bool {
+	return IsValidPayload(id) || utils.IsValidUUIDString(id)
+}
+
+// PayloadTag represents a charm payload.
+type PayloadTag struct {
+	id string
+}
+
+// NewPayloadTag returns the tag for a charm's payload with the given id.
+func NewPayloadTag(id string) PayloadTag {
+	return PayloadTag{
+		id: id,
+	}
+}
+
+// ParsePayloadTag parses a payload tag string.
+// So ParsePayloadTag(tag.String()) === tag.
+func ParsePayloadTag(tag string) (PayloadTag, error) {
+	t, err := ParseTag(tag)
+	if err != nil {
+		return PayloadTag{}, err
+	}
+	pt, ok := t.(PayloadTag)
+	if !ok {
+		return PayloadTag{}, invalidTagError(tag, PayloadTagKind)
+	}
+	return pt, nil
+}
+
+// Kind implements Tag.
+func (t PayloadTag) Kind() string {
+	return PayloadTagKind
+}
+
+// Id implements Tag.Id. It always returns the same ID with which
+// it was created. So NewPayloadTag(x).Id() == x for all valid x.
+func (t PayloadTag) Id() string {
+	return t.id
+}
+
+// String implements Tag.
+func (t PayloadTag) String() string {
+	return tagString(t)
+}
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/relation.go b/switchq/vendor/gopkg.in/juju/names.v2/relation.go
new file mode 100644
index 0000000..b61696c
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/relation.go
@@ -0,0 +1,67 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const RelationTagKind = "relation"
+
+const RelationSnippet = "[a-z][a-z0-9]*(?:[_-][a-z0-9]+)*"
+
+// Relation keys have the format "application1:relName1 application2:relName2",
+// except for peer relations, which have the format "application:relName".
+// Relation tags have the format "relation-application1.rel1#application2.rel2";
+// for peer relations, the format is "relation-application.rel".
+
+var (
+	validRelation     = regexp.MustCompile("^" + ApplicationSnippet + ":" + RelationSnippet + " " + ApplicationSnippet + ":" + RelationSnippet + "$")
+	validPeerRelation = regexp.MustCompile("^" + ApplicationSnippet + ":" + RelationSnippet + "$")
+)
+
+// IsValidRelation returns whether key is a valid relation key.
+func IsValidRelation(key string) bool {
+	return validRelation.MatchString(key) || validPeerRelation.MatchString(key)
+}
+
+type RelationTag struct {
+	key string
+}
+
+func (t RelationTag) String() string { return t.Kind() + "-" + t.key }
+func (t RelationTag) Kind() string   { return RelationTagKind }
+func (t RelationTag) Id() string     { return relationTagSuffixToKey(t.key) }
+
+// NewRelationTag returns the tag for the relation with the given key.
+func NewRelationTag(relationKey string) RelationTag {
+	if !IsValidRelation(relationKey) {
+		panic(fmt.Sprintf("%q is not a valid relation key", relationKey))
+	}
+	// Replace both ":" with "." and the " " with "#".
+	relationKey = strings.Replace(relationKey, ":", ".", 2)
+	relationKey = strings.Replace(relationKey, " ", "#", 1)
+	return RelationTag{key: relationKey}
+}
+
+// ParseRelationTag parses a relation tag string.
+func ParseRelationTag(relationTag string) (RelationTag, error) {
+	tag, err := ParseTag(relationTag)
+	if err != nil {
+		return RelationTag{}, err
+	}
+	rt, ok := tag.(RelationTag)
+	if !ok {
+		return RelationTag{}, invalidTagError(relationTag, RelationTagKind)
+	}
+	return rt, nil
+}
+
+func relationTagSuffixToKey(s string) string {
+	// Replace both "." with ":" and the "#" with " ".
+	s = strings.Replace(s, ".", ":", 2)
+	return strings.Replace(s, "#", " ", 1)
+}
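
The key-to-tag rewriting described in the comment above, sketched (not part of the diff):

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        // ":" becomes "." and the separating space becomes "#".
        t := names.NewRelationTag("wordpress:db mysql:server")
        fmt.Println(t.String()) // relation-wordpress.db#mysql.server

        // Peer relations carry a single application:endpoint pair.
        peer := names.NewRelationTag("riak:ring")
        fmt.Println(peer.Id()) // riak:ring
    }
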
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/space.go b/switchq/vendor/gopkg.in/juju/names.v2/space.go
new file mode 100644
index 0000000..049e1c7
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/space.go
@@ -0,0 +1,50 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+)
+
+const (
+	SpaceTagKind = "space"
+	SpaceSnippet = "(?:[a-z0-9]+(?:-[a-z0-9]+)*)"
+)
+
+var validSpace = regexp.MustCompile("^" + SpaceSnippet + "$")
+
+// IsValidSpace reports whether name is a valid space name.
+func IsValidSpace(name string) bool {
+	return validSpace.MatchString(name)
+}
+
+type SpaceTag struct {
+	name string
+}
+
+func (t SpaceTag) String() string { return t.Kind() + "-" + t.Id() }
+func (t SpaceTag) Kind() string   { return SpaceTagKind }
+func (t SpaceTag) Id() string     { return t.name }
+
+// NewSpaceTag returns the tag of a space with the given name.
+func NewSpaceTag(name string) SpaceTag {
+	if !IsValidSpace(name) {
+		panic(fmt.Sprintf("%q is not a valid space name", name))
+	}
+	return SpaceTag{name: name}
+}
+
+// ParseSpaceTag parses a space tag string.
+func ParseSpaceTag(spaceTag string) (SpaceTag, error) {
+	tag, err := ParseTag(spaceTag)
+	if err != nil {
+		return SpaceTag{}, err
+	}
+	nt, ok := tag.(SpaceTag)
+	if !ok {
+		return SpaceTag{}, invalidTagError(spaceTag, SpaceTagKind)
+	}
+	return nt, nil
+}
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/storage.go b/switchq/vendor/gopkg.in/juju/names.v2/storage.go
new file mode 100644
index 0000000..2314f87
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/storage.go
@@ -0,0 +1,86 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const (
+	StorageTagKind = "storage"
+
+	// StorageNameSnippet is the regular expression that describes valid
+	// storage names (without the storage instance sequence number).
+	StorageNameSnippet = "(?:[a-z][a-z0-9]*(?:-[a-z0-9]*[a-z][a-z0-9]*)*)"
+)
+
+var validStorage = regexp.MustCompile("^(" + StorageNameSnippet + ")/" + NumberSnippet + "$")
+
+type StorageTag struct {
+	id string
+}
+
+func (t StorageTag) String() string { return t.Kind() + "-" + t.id }
+func (t StorageTag) Kind() string   { return StorageTagKind }
+func (t StorageTag) Id() string     { return storageTagSuffixToId(t.id) }
+
+// NewStorageTag returns the tag for the storage instance with the given ID.
+// It will panic if the given string is not a valid storage instance Id.
+func NewStorageTag(id string) StorageTag {
+	tag, ok := tagFromStorageId(id)
+	if !ok {
+		panic(fmt.Sprintf("%q is not a valid storage instance ID", id))
+	}
+	return tag
+}
+
+// ParseStorageTag parses a storage tag string.
+func ParseStorageTag(s string) (StorageTag, error) {
+	tag, err := ParseTag(s)
+	if err != nil {
+		return StorageTag{}, err
+	}
+	st, ok := tag.(StorageTag)
+	if !ok {
+		return StorageTag{}, invalidTagError(s, StorageTagKind)
+	}
+	return st, nil
+}
+
+// IsValidStorage returns whether id is a valid storage instance ID.
+func IsValidStorage(id string) bool {
+	return validStorage.MatchString(id)
+}
+
+// StorageName returns the storage name from a storage instance ID.
+// StorageName returns an error if "id" is not a valid storage
+// instance ID.
+func StorageName(id string) (string, error) {
+	s := validStorage.FindStringSubmatch(id)
+	if s == nil {
+		return "", fmt.Errorf("%q is not a valid storage instance ID", id)
+	}
+	return s[1], nil
+}
+
+func tagFromStorageId(id string) (StorageTag, bool) {
+	// replace only the last "/" with "-".
+	i := strings.LastIndex(id, "/")
+	if i <= 0 || !IsValidStorage(id) {
+		return StorageTag{}, false
+	}
+	id = id[:i] + "-" + id[i+1:]
+	return StorageTag{id}, true
+}
+
+func storageTagSuffixToId(s string) string {
+	// Replace only the last "-" with "/", as it is valid for storage
+	// names to contain hyphens.
+	if i := strings.LastIndex(s, "-"); i > 0 {
+		s = s[:i] + "/" + s[i+1:]
+	}
+	return s
+}
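
Only the last "/" is rewritten because storage names may themselves contain hyphens; a sketch (not part of the diff):

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        t := names.NewStorageTag("shared-fs/0")
        fmt.Println(t.String()) // storage-shared-fs-0
        fmt.Println(t.Id())     // shared-fs/0

        name, _ := names.StorageName("shared-fs/0")
        fmt.Println(name) // shared-fs
    }
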
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/subnet.go b/switchq/vendor/gopkg.in/juju/names.v2/subnet.go
new file mode 100644
index 0000000..281eb0f
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/subnet.go
@@ -0,0 +1,49 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"net"
+)
+
+const SubnetTagKind = "subnet"
+
+// IsValidSubnet returns whether cidr is a valid subnet CIDR.
+func IsValidSubnet(cidr string) bool {
+	_, ipNet, err := net.ParseCIDR(cidr)
+	if err == nil && ipNet.String() == cidr {
+		return true
+	}
+	return false
+}
+
+type SubnetTag struct {
+	cidr string
+}
+
+func (t SubnetTag) String() string { return t.Kind() + "-" + t.cidr }
+func (t SubnetTag) Kind() string   { return SubnetTagKind }
+func (t SubnetTag) Id() string     { return t.cidr }
+
+// NewSubnetTag returns the tag for the subnet with the given CIDR.
+func NewSubnetTag(cidr string) SubnetTag {
+	if !IsValidSubnet(cidr) {
+		panic(fmt.Sprintf("%s is not a valid subnet CIDR", cidr))
+	}
+	return SubnetTag{cidr: cidr}
+}
+
+// ParseSubnetTag parses a subnet tag string.
+func ParseSubnetTag(subnetTag string) (SubnetTag, error) {
+	tag, err := ParseTag(subnetTag)
+	if err != nil {
+		return SubnetTag{}, err
+	}
+	subt, ok := tag.(SubnetTag)
+	if !ok {
+		return SubnetTag{}, invalidTagError(subnetTag, SubnetTagKind)
+	}
+	return subt, nil
+}
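
Note that IsValidSubnet demands the canonical CIDR form: net.ParseCIDR succeeds on an address with host bits set, but the round-trip comparison then fails. A sketch (not part of the diff):

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        fmt.Println(names.IsValidSubnet("10.0.0.0/24")) // true
        fmt.Println(names.IsValidSubnet("10.0.0.1/24")) // false: host bits set
    }
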
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/tag.go b/switchq/vendor/gopkg.in/juju/names.v2/tag.go
new file mode 100644
index 0000000..9feae00
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/tag.go
@@ -0,0 +1,216 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/utils"
+)
+
+// A Tag tags things that are taggable. Its purpose is to uniquely
+// identify some resource and provide a consistent representation of
+// that identity in both a human-readable and a machine-friendly format.
+// The latter benefits use of the tag in over-the-wire transmission
+// (e.g. in HTTP RPC calls) and in filename paths. The human-readable
+// tag "name" is available through the Id method. The machine-friendly
+// representation is provided by the String method.
+//
+// The ParseTag function may be used to build a tag from the machine-
+// formatted string. In addition, each kind of tag has its own Parse* method.
+// Each kind also has a New* method (e.g. NewMachineTag) which produces
+// a tag from the human-readable tag "ID".
+//
+// In the context of juju, the API *must* use tags to represent the
+// various juju entities. This contrasts with user-facing code, where
+// tags *must not* be used. Internal to juju the use of tags is a
+// judgement call based on the situation.
+type Tag interface {
+	// Kind returns the kind of the tag.
+	// This method is for legacy compatibility; callers should
+	// use equality or type assertions to verify the Kind or type
+	// of a Tag.
+	Kind() string
+
+	// Id returns an identifier for this Tag.
+	// The contents and format of the identifier are specific
+	// to the implementer of the Tag.
+	Id() string
+
+	fmt.Stringer // all Tags should be able to print themselves
+}
+
+// tagString returns the canonical string representation of a tag.
+// It round-trips with splitTag().
+func tagString(tag Tag) string {
+	return tag.Kind() + "-" + tag.Id()
+}
+
+// TagKind returns one of the *TagKind constants for the given tag, or
+// an error if none matches.
+func TagKind(tag string) (string, error) {
+	i := strings.Index(tag, "-")
+	if i <= 0 || !validKinds(tag[:i]) {
+		return "", fmt.Errorf("%q is not a valid tag", tag)
+	}
+	return tag[:i], nil
+}
+
+func validKinds(kind string) bool {
+	switch kind {
+	case UnitTagKind, MachineTagKind, ApplicationTagKind, EnvironTagKind, UserTagKind,
+		RelationTagKind, ActionTagKind, VolumeTagKind, CharmTagKind, StorageTagKind,
+		FilesystemTagKind, IPAddressTagKind, SpaceTagKind, SubnetTagKind,
+		PayloadTagKind, ModelTagKind, ControllerTagKind, CloudTagKind, CloudCredentialTagKind:
+		return true
+	}
+	return false
+}
+
+func splitTag(tag string) (string, string, error) {
+	kind, err := TagKind(tag)
+	if err != nil {
+		return "", "", err
+	}
+	return kind, tag[len(kind)+1:], nil
+}
+
+// ParseTag parses a string representation into a Tag.
+func ParseTag(tag string) (Tag, error) {
+	kind, id, err := splitTag(tag)
+	if err != nil {
+		return nil, invalidTagError(tag, "")
+	}
+	switch kind {
+	case UnitTagKind:
+		id = unitTagSuffixToId(id)
+		if !IsValidUnit(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewUnitTag(id), nil
+	case MachineTagKind:
+		id = machineTagSuffixToId(id)
+		if !IsValidMachine(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewMachineTag(id), nil
+	case ApplicationTagKind:
+		if !IsValidApplication(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewApplicationTag(id), nil
+	case UserTagKind:
+		if !IsValidUser(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewUserTag(id), nil
+	case EnvironTagKind:
+		if !IsValidEnvironment(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewEnvironTag(id), nil
+	case ModelTagKind:
+		if !IsValidModel(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewModelTag(id), nil
+	case ControllerTagKind:
+		if !IsValidController(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewControllerTag(id), nil
+
+	case RelationTagKind:
+		id = relationTagSuffixToKey(id)
+		if !IsValidRelation(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewRelationTag(id), nil
+	case ActionTagKind:
+		if !IsValidAction(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewActionTag(id), nil
+	case VolumeTagKind:
+		id = volumeTagSuffixToId(id)
+		if !IsValidVolume(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewVolumeTag(id), nil
+	case CharmTagKind:
+		if !IsValidCharm(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewCharmTag(id), nil
+	case StorageTagKind:
+		id = storageTagSuffixToId(id)
+		if !IsValidStorage(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewStorageTag(id), nil
+	case FilesystemTagKind:
+		id = filesystemTagSuffixToId(id)
+		if !IsValidFilesystem(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewFilesystemTag(id), nil
+	case IPAddressTagKind:
+		uuid, err := utils.UUIDFromString(id)
+		if err != nil {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewIPAddressTag(uuid.String()), nil
+	case SubnetTagKind:
+		if !IsValidSubnet(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewSubnetTag(id), nil
+	case SpaceTagKind:
+		if !IsValidSpace(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewSpaceTag(id), nil
+	case PayloadTagKind:
+		if !isValidPayload(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewPayloadTag(id), nil
+	case CloudTagKind:
+		if !IsValidCloud(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewCloudTag(id), nil
+	case CloudCredentialTagKind:
+		id, err = cloudCredentialTagSuffixToId(id)
+		if err != nil {
+			return nil, errors.Wrap(err, invalidTagError(tag, kind))
+		}
+		if !IsValidCloudCredential(id) {
+			return nil, invalidTagError(tag, kind)
+		}
+		return NewCloudCredentialTag(id), nil
+	default:
+		return nil, invalidTagError(tag, "")
+	}
+}
+
+func invalidTagError(tag, kind string) error {
+	if kind != "" {
+		return fmt.Errorf("%q is not a valid %s tag", tag, kind)
+	}
+	return fmt.Errorf("%q is not a valid tag", tag)
+}
+
+// ReadableString returns a human-readable string from the tag passed in.
+// It currently supports unit and machine tags. Support for additional types
+// can be added in as needed.
+func ReadableString(tag Tag) string {
+	if tag == nil {
+		return ""
+	}
+
+	return tag.Kind() + " " + tag.Id()
+}
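
ParseTag is the single entry point that dispatches on the kind prefix; callers type-assert the result back to the concrete tag type. A sketch (not part of the diff):

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        tag, err := names.ParseTag("unit-mysql-0")
        if err != nil {
            panic(err)
        }
        if unit, ok := tag.(names.UnitTag); ok {
            fmt.Println(unit.Id()) // mysql/0
        }
        fmt.Println(names.ReadableString(tag)) // unit mysql/0
    }
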
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/unit.go b/switchq/vendor/gopkg.in/juju/names.v2/unit.go
new file mode 100644
index 0000000..3f3a026
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/unit.go
@@ -0,0 +1,79 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const UnitTagKind = "unit"
+
+var validUnit = regexp.MustCompile("^(" + ApplicationSnippet + ")/" + NumberSnippet + "$")
+
+type UnitTag struct {
+	name string
+}
+
+func (t UnitTag) String() string { return t.Kind() + "-" + t.name }
+func (t UnitTag) Kind() string   { return UnitTagKind }
+func (t UnitTag) Id() string     { return unitTagSuffixToId(t.name) }
+
+// NewUnitTag returns the tag for the unit with the given name.
+// It will panic if the given unit name is not valid.
+func NewUnitTag(unitName string) UnitTag {
+	tag, ok := tagFromUnitName(unitName)
+	if !ok {
+		panic(fmt.Sprintf("%q is not a valid unit name", unitName))
+	}
+	return tag
+}
+
+// ParseUnitTag parses a unit tag string.
+func ParseUnitTag(unitTag string) (UnitTag, error) {
+	tag, err := ParseTag(unitTag)
+	if err != nil {
+		return UnitTag{}, err
+	}
+	ut, ok := tag.(UnitTag)
+	if !ok {
+		return UnitTag{}, invalidTagError(unitTag, UnitTagKind)
+	}
+	return ut, nil
+}
+
+// IsValidUnit returns whether name is a valid unit name.
+func IsValidUnit(name string) bool {
+	return validUnit.MatchString(name)
+}
+
+// UnitApplication returns the name of the application that the unit is
+// associated with. It returns an error if unitName is not a valid unit name.
+func UnitApplication(unitName string) (string, error) {
+	s := validUnit.FindStringSubmatch(unitName)
+	if s == nil {
+		return "", fmt.Errorf("%q is not a valid unit name", unitName)
+	}
+	return s[1], nil
+}
+
+func tagFromUnitName(unitName string) (UnitTag, bool) {
+	// Replace only the last "/" with "-".
+	i := strings.LastIndex(unitName, "/")
+	if i <= 0 || !IsValidUnit(unitName) {
+		return UnitTag{}, false
+	}
+	unitName = unitName[:i] + "-" + unitName[i+1:]
+	return UnitTag{name: unitName}, true
+}
+
+func unitTagSuffixToId(s string) string {
+	// Replace only the last "-" with "/", as it is valid for application
+	// names to contain hyphens.
+	if i := strings.LastIndex(s, "-"); i > 0 {
+		s = s[:i] + "/" + s[i+1:]
+	}
+	return s
+}
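
As with storage, only the final "/" is rewritten because application names may contain hyphens; a sketch (not part of the diff):

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        t := names.NewUnitTag("mysql-cluster/0")
        fmt.Println(t.String()) // unit-mysql-cluster-0
        fmt.Println(t.Id())     // mysql-cluster/0

        app, _ := names.UnitApplication("mysql-cluster/0")
        fmt.Println(app) // mysql-cluster
    }
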
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/user.go b/switchq/vendor/gopkg.in/juju/names.v2/user.go
new file mode 100644
index 0000000..332ee27
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/user.go
@@ -0,0 +1,130 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+)
+
+const (
+	UserTagKind     = "user"
+	LocalUserDomain = "local"
+)
+
+var (
+	// TODO this does not allow single character usernames or
+	// domains. Is that deliberate?
+	// https://github.com/juju/names/issues/54
+	validUserNameSnippet = "[a-zA-Z0-9][a-zA-Z0-9.+-]*[a-zA-Z0-9]"
+	validUserSnippet     = fmt.Sprintf("(?:%s(?:@%s)?)", validUserNameSnippet, validUserNameSnippet)
+	validName            = regexp.MustCompile(fmt.Sprintf("^(?P<name>%s)(?:@(?P<domain>%s))?$", validUserNameSnippet, validUserNameSnippet))
+	validUserName        = regexp.MustCompile("^" + validUserNameSnippet + "$")
+)
+
+// IsValidUser returns whether id is a valid user id.
+// Valid users may or may not be qualified with an
+// @domain suffix. Examples of valid users include
+// bob, bob@local, bob@somewhere-else, 0-a-f@123.
+func IsValidUser(id string) bool {
+	return validName.MatchString(id)
+}
+
+// IsValidUserName returns whether the given
+// name is a valid name part of a user. That is,
+// usernames with a domain suffix will return
+// false.
+func IsValidUserName(name string) bool {
+	return validUserName.MatchString(name)
+}
+
+// IsValidUserDomain returns whether the given user
+// domain is valid.
+func IsValidUserDomain(domain string) bool {
+	return validUserName.MatchString(domain)
+}
+
+// UserTag represents a user that may be stored locally
+// or associated with some external domain.
+type UserTag struct {
+	name   string
+	domain string
+}
+
+func (t UserTag) Kind() string   { return UserTagKind }
+func (t UserTag) String() string { return UserTagKind + "-" + t.Id() }
+
+// Id implements Tag.Id. Local users will always have
+// an Id value without any domain.
+func (t UserTag) Id() string {
+	if t.domain == "" || t.domain == LocalUserDomain {
+		return t.name
+	}
+	return t.name + "@" + t.domain
+}
+
+// Name returns the name part of the user name
+// without its associated domain.
+func (t UserTag) Name() string { return t.name }
+
+// IsLocal returns true if the tag represents a local user.
+// Users without an explicit domain are considered local.
+func (t UserTag) IsLocal() bool {
+	return t.Domain() == LocalUserDomain || t.Domain() == ""
+}
+
+// Domain returns the user domain. Users in the local database
+// are from the LocalUserDomain. Other users are considered 'remote' users.
+func (t UserTag) Domain() string {
+	return t.domain
+}
+
+// WithDomain returns a copy of the user tag with the
+// domain changed to the given argument.
+// The domain must satisfy IsValidUserDomain
+// or this function will panic.
+func (t UserTag) WithDomain(domain string) UserTag {
+	if !IsValidUserDomain(domain) {
+		panic(fmt.Sprintf("invalid user domain %q", domain))
+	}
+	return UserTag{
+		name:   t.name,
+		domain: domain,
+	}
+}
+
+// NewUserTag returns the tag for the user with the given name.
+// It panics if the user name does not satisfy IsValidUser.
+func NewUserTag(userName string) UserTag {
+	parts := validName.FindStringSubmatch(userName)
+	if len(parts) != 3 {
+		panic(fmt.Sprintf("invalid user tag %q", userName))
+	}
+	domain := parts[2]
+	if domain == LocalUserDomain {
+		domain = ""
+	}
+	return UserTag{name: parts[1], domain: domain}
+}
+
+// NewLocalUserTag returns the tag for a local user with the given name.
+func NewLocalUserTag(name string) UserTag {
+	if !IsValidUserName(name) {
+		panic(fmt.Sprintf("invalid user name %q", name))
+	}
+	return UserTag{name: name}
+}
+
+// ParseUserTag parses a user tag string.
+func ParseUserTag(tag string) (UserTag, error) {
+	t, err := ParseTag(tag)
+	if err != nil {
+		return UserTag{}, err
+	}
+	ut, ok := t.(UserTag)
+	if !ok {
+		return UserTag{}, invalidTagError(tag, UserTagKind)
+	}
+	return ut, nil
+}
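
The "local" domain is normalised away when constructing a tag, so bob and bob@local name the same user; a sketch (not part of the diff):

    package main

    import (
        "fmt"

        "gopkg.in/juju/names.v2"
    )

    func main() {
        local := names.NewUserTag("bob@local")
        fmt.Println(local.Id(), local.IsLocal()) // bob true

        remote := names.NewUserTag("bob@remote-site")
        fmt.Println(remote.Id(), remote.Domain()) // bob@remote-site remote-site
    }
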
diff --git a/switchq/vendor/gopkg.in/juju/names.v2/volume.go b/switchq/vendor/gopkg.in/juju/names.v2/volume.go
new file mode 100644
index 0000000..2c1fd1b
--- /dev/null
+++ b/switchq/vendor/gopkg.in/juju/names.v2/volume.go
@@ -0,0 +1,76 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package names
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+const VolumeTagKind = "volume"
+
+// Volumes may be bound to a machine, meaning that the volume cannot
+// exist without that machine. We encode this in the tag.
+var validVolume = regexp.MustCompile("^(" + MachineSnippet + "/)?" + NumberSnippet + "$")
+
+type VolumeTag struct {
+	id string
+}
+
+func (t VolumeTag) String() string { return t.Kind() + "-" + t.id }
+func (t VolumeTag) Kind() string   { return VolumeTagKind }
+func (t VolumeTag) Id() string     { return volumeTagSuffixToId(t.id) }
+
+// NewVolumeTag returns the tag for the volume with the given ID.
+// It will panic if the given volume ID is not valid.
+func NewVolumeTag(id string) VolumeTag {
+	tag, ok := tagFromVolumeId(id)
+	if !ok {
+		panic(fmt.Sprintf("%q is not a valid volume ID", id))
+	}
+	return tag
+}
+
+// ParseVolumeTag parses a volume tag string.
+func ParseVolumeTag(volumeTag string) (VolumeTag, error) {
+	tag, err := ParseTag(volumeTag)
+	if err != nil {
+		return VolumeTag{}, err
+	}
+	dt, ok := tag.(VolumeTag)
+	if !ok {
+		return VolumeTag{}, invalidTagError(volumeTag, VolumeTagKind)
+	}
+	return dt, nil
+}
+
+// IsValidVolume returns whether id is a valid volume ID.
+func IsValidVolume(id string) bool {
+	return validVolume.MatchString(id)
+}
+
+// VolumeMachine returns the machine component of the volume
+// tag, and a boolean indicating whether or not there is a
+// machine component.
+func VolumeMachine(tag VolumeTag) (MachineTag, bool) {
+	id := tag.Id()
+	pos := strings.LastIndex(id, "/")
+	if pos == -1 {
+		return MachineTag{}, false
+	}
+	return NewMachineTag(id[:pos]), true
+}
+
+func tagFromVolumeId(id string) (VolumeTag, bool) {
+	if !IsValidVolume(id) {
+		return VolumeTag{}, false
+	}
+	id = strings.Replace(id, "/", "-", -1)
+	return VolumeTag{id}, true
+}
+
+func volumeTagSuffixToId(s string) string {
+	return strings.Replace(s, "-", "/", -1)
+}
diff --git a/switchq/vendor/gopkg.in/mgo.v2/LICENSE b/switchq/vendor/gopkg.in/mgo.v2/LICENSE
new file mode 100644
index 0000000..770c767
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/LICENSE
@@ -0,0 +1,25 @@
+mgo - MongoDB driver for Go
+
+Copyright (c) 2010-2013 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/switchq/vendor/gopkg.in/mgo.v2/bson/LICENSE b/switchq/vendor/gopkg.in/mgo.v2/bson/LICENSE
new file mode 100644
index 0000000..8903260
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/bson/LICENSE
@@ -0,0 +1,25 @@
+BSON library for Go
+
+Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met: 
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer. 
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution. 
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/switchq/vendor/gopkg.in/mgo.v2/bson/bson.go b/switchq/vendor/gopkg.in/mgo.v2/bson/bson.go
new file mode 100644
index 0000000..7fb7f8c
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/bson/bson.go
@@ -0,0 +1,738 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package bson is an implementation of the BSON specification for Go:
+//
+//     http://bsonspec.org
+//
+// It was created as part of the mgo MongoDB driver for Go, but is standalone
+// and may be used on its own without the driver.
+package bson
+
+import (
+	"bytes"
+	"crypto/md5"
+	"crypto/rand"
+	"encoding/binary"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// --------------------------------------------------------------------------
+// The public API.
+
+// A value implementing the bson.Getter interface will have its GetBSON
+// method called when the given value has to be marshalled, and the result
+// of this method will be marshaled in place of the actual object.
+//
+// If GetBSON returns a non-nil error, the marshalling procedure
+// will stop and error out with the provided value.
+type Getter interface {
+	GetBSON() (interface{}, error)
+}
+
+// A value implementing the bson.Setter interface will receive the BSON
+// value via the SetBSON method during unmarshaling, and the object
+// itself will not be changed as usual.
+//
+// If setting the value works, the method should return nil or alternatively
+// bson.SetZero to set the respective field to its zero value (nil for
+// pointer types). If SetBSON returns a value of type bson.TypeError, the
+// BSON value will be omitted from a map or slice being decoded and the
+// unmarshalling will continue. If it returns any other non-nil error, the
+// unmarshalling procedure will stop and error out with the provided value.
+//
+// This interface is generally useful in pointer receivers, since the method
+// will want to change the receiver. A type field that implements the Setter
+// interface doesn't have to be a pointer, though.
+//
+// Unlike the usual behavior, unmarshalling onto a value that implements a
+// Setter interface will NOT reset the value to its zero state. This allows
+// the value to decide by itself how to be unmarshalled.
+//
+// For example:
+//
+//     type MyString string
+//
+//     func (s *MyString) SetBSON(raw bson.Raw) error {
+//         return raw.Unmarshal(s)
+//     }
+//
+type Setter interface {
+	SetBSON(raw Raw) error
+}
+
+// SetZero may be returned from a SetBSON method to have the value set to
+// its respective zero value. When used in pointer values, this will set the
+// field to nil rather than to the pre-allocated value.
+var SetZero = errors.New("set to zero")
+
+// M is a convenient alias for a map[string]interface{}, useful for
+// dealing with BSON in a native way.  For instance:
+//
+//     bson.M{"a": 1, "b": true}
+//
+// There's no special handling for this type in addition to what's done anyway
+// for an equivalent map type.  Elements in the map will be dumped in an
+// undefined order. See also the bson.D type for an ordered alternative.
+type M map[string]interface{}
+
+// D represents a BSON document containing ordered elements. For example:
+//
+//     bson.D{{"a", 1}, {"b", true}}
+//
+// In some situations, such as when creating indexes for MongoDB, the order in
+// which the elements are defined is important.  If the order is not important,
+// using a map is generally more comfortable. See bson.M and bson.RawD.
+type D []DocElem
+
+// DocElem is an element of the bson.D document representation.
+type DocElem struct {
+	Name  string
+	Value interface{}
+}
+
+// Map returns a map out of the ordered element name/value pairs in d.
+func (d D) Map() (m M) {
+	m = make(M, len(d))
+	for _, item := range d {
+		m[item.Name] = item.Value
+	}
+	return m
+}
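
A short sketch of the D/M trade-off (not part of the diff; it assumes the vendored import path gopkg.in/mgo.v2/bson):

    package main

    import (
        "fmt"

        "gopkg.in/mgo.v2/bson"
    )

    func main() {
        // D preserves element order (needed e.g. for index specs);
        // Map() flattens it to an unordered M.
        doc := bson.D{{"a", 1}, {"b", true}}
        fmt.Println(doc)       // [{a 1} {b true}]
        fmt.Println(doc.Map()) // map[a:1 b:true]
    }
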
+
+// The Raw type represents raw unprocessed BSON documents and elements.
+// Kind is the kind of element as defined per the BSON specification, and
+// Data is the raw unprocessed data for the respective element.
+// Using this type it is possible to unmarshal or marshal values partially.
+//
+// Relevant documentation:
+//
+//     http://bsonspec.org/#/specification
+//
+type Raw struct {
+	Kind byte
+	Data []byte
+}
+
+// RawD represents a BSON document containing raw unprocessed elements.
+// This low-level representation may be useful when lazily processing
+// documents of uncertain content, or when manipulating the raw content
+// documents in general.
+type RawD []RawDocElem
+
+// See the RawD type.
+type RawDocElem struct {
+	Name  string
+	Value Raw
+}
+
+// ObjectId is a unique ID identifying a BSON value. It must be exactly 12 bytes
+// long. MongoDB objects by default have such a value set in their "_id"
+// property.
+//
+// http://www.mongodb.org/display/DOCS/Object+IDs
+type ObjectId string
+
+// ObjectIdHex returns an ObjectId from the provided hex representation.
+// Calling this function with an invalid hex representation will
+// cause a runtime panic. See the IsObjectIdHex function.
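+//
+// For example:
+//
+//     id := bson.ObjectIdHex("4d88e15b60f486e428412dc9")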
+func ObjectIdHex(s string) ObjectId {
+	d, err := hex.DecodeString(s)
+	if err != nil || len(d) != 12 {
+		panic(fmt.Sprintf("invalid input to ObjectIdHex: %q", s))
+	}
+	return ObjectId(d)
+}
+
+// IsObjectIdHex returns whether s is a valid hex representation of
+// an ObjectId. See the ObjectIdHex function.
+func IsObjectIdHex(s string) bool {
+	if len(s) != 24 {
+		return false
+	}
+	_, err := hex.DecodeString(s)
+	return err == nil
+}
+
+// objectIdCounter is atomically incremented when generating a new ObjectId
+// with the NewObjectId function. It's used as the counter part of an id.
+var objectIdCounter uint32 = readRandomUint32()
+
+// readRandomUint32 returns a random uint32 used to seed objectIdCounter.
+func readRandomUint32() uint32 {
+	var b [4]byte
+	_, err := io.ReadFull(rand.Reader, b[:])
+	if err != nil {
+		panic(fmt.Errorf("cannot read random object id: %v", err))
+	}
+	return uint32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24))
+}
+
+// machineId stores machine id generated once and used in subsequent calls
+// to NewObjectId function.
+var machineId = readMachineId()
+var processId = os.Getpid()
+
+// readMachineId generates and returns a machine id based on the hostname.
+// If the hostname cannot be read it falls back to random bytes, and it
+// panics only if that also fails.
+func readMachineId() []byte {
+	var sum [3]byte
+	id := sum[:]
+	hostname, err1 := os.Hostname()
+	if err1 != nil {
+		_, err2 := io.ReadFull(rand.Reader, id)
+		if err2 != nil {
+			panic(fmt.Errorf("cannot get hostname: %v; %v", err1, err2))
+		}
+		return id
+	}
+	hw := md5.New()
+	hw.Write([]byte(hostname))
+	copy(id, hw.Sum(nil))
+	return id
+}
+
+// NewObjectId returns a new unique ObjectId.
+func NewObjectId() ObjectId {
+	var b [12]byte
+	// Timestamp, 4 bytes, big endian
+	binary.BigEndian.PutUint32(b[:], uint32(time.Now().Unix()))
+	// Machine, first 3 bytes of md5(hostname)
+	b[4] = machineId[0]
+	b[5] = machineId[1]
+	b[6] = machineId[2]
+	// Pid, 2 bytes, specs don't specify endianness, but we use big endian.
+	b[7] = byte(processId >> 8)
+	b[8] = byte(processId)
+	// Increment, 3 bytes, big endian
+	i := atomic.AddUint32(&objectIdCounter, 1)
+	b[9] = byte(i >> 16)
+	b[10] = byte(i >> 8)
+	b[11] = byte(i)
+	return ObjectId(b[:])
+}
+
+// NewObjectIdWithTime returns a dummy ObjectId with the timestamp part filled
+// with the provided number of seconds from epoch UTC, and all other parts
+// filled with zeroes. It's not safe to insert a document with an id generated
+// by this method; it is useful only for queries to find documents with ids
+// generated before or after the specified timestamp.
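+//
+// For example, a query selecting ids generated before time t might look
+// like this (a sketch; this ordering holds only for ids from NewObjectId):
+//
+//     bson.M{"_id": bson.M{"$lt": bson.NewObjectIdWithTime(t)}}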
+func NewObjectIdWithTime(t time.Time) ObjectId {
+	var b [12]byte
+	binary.BigEndian.PutUint32(b[:4], uint32(t.Unix()))
+	return ObjectId(string(b[:]))
+}
+
+// String returns a hex string representation of the id.
+// Example: ObjectIdHex("4d88e15b60f486e428412dc9").
+func (id ObjectId) String() string {
+	return fmt.Sprintf(`ObjectIdHex("%x")`, string(id))
+}
+
+// Hex returns a hex representation of the ObjectId.
+func (id ObjectId) Hex() string {
+	return hex.EncodeToString([]byte(id))
+}
+
+// MarshalJSON makes ObjectId implement the json.Marshaler interface.
+func (id ObjectId) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf(`"%x"`, string(id))), nil
+}
+
+var nullBytes = []byte("null")
+
+// UnmarshalJSON makes *ObjectId implement the json.Unmarshaler interface.
+func (id *ObjectId) UnmarshalJSON(data []byte) error {
+	if len(data) > 0 && (data[0] == '{' || data[0] == 'O') {
+		var v struct {
+			Id json.RawMessage `json:"$oid"`
+			Func struct {
+				Id json.RawMessage
+			} `json:"$oidFunc"`
+		}
+		err := jdec(data, &v)
+		if err == nil {
+			if len(v.Id) > 0 {
+				data = []byte(v.Id)
+			} else {
+				data = []byte(v.Func.Id)
+			}
+		}
+	}
+	if len(data) == 2 && data[0] == '"' && data[1] == '"' || bytes.Equal(data, nullBytes) {
+		*id = ""
+		return nil
+	}
+	if len(data) != 26 || data[0] != '"' || data[25] != '"' {
+		return fmt.Errorf("invalid ObjectId in JSON: %s", string(data))
+	}
+	var buf [12]byte
+	_, err := hex.Decode(buf[:], data[1:25])
+	if err != nil {
+		return fmt.Errorf("invalid ObjectId in JSON: %s (%s)", string(data), err)
+	}
+	*id = ObjectId(string(buf[:]))
+	return nil
+}
+
+// MarshalText makes ObjectId implement the encoding.TextMarshaler interface.
+func (id ObjectId) MarshalText() ([]byte, error) {
+	return []byte(fmt.Sprintf("%x", string(id))), nil
+}
+
+// UnmarshalText makes *ObjectId implement the encoding.TextUnmarshaler interface.
+func (id *ObjectId) UnmarshalText(data []byte) error {
+	if len(data) == 1 && data[0] == ' ' || len(data) == 0 {
+		*id = ""
+		return nil
+	}
+	if len(data) != 24 {
+		return fmt.Errorf("invalid ObjectId: %s", data)
+	}
+	var buf [12]byte
+	_, err := hex.Decode(buf[:], data[:])
+	if err != nil {
+		return fmt.Errorf("invalid ObjectId: %s (%s)", data, err)
+	}
+	*id = ObjectId(string(buf[:]))
+	return nil
+}
+
+// Valid returns true if id is valid. A valid id must contain exactly 12 bytes.
+func (id ObjectId) Valid() bool {
+	return len(id) == 12
+}
+
+// byteSlice returns the byte slice of id from start to end.
+// Calling this function with an invalid id will cause a runtime panic.
+func (id ObjectId) byteSlice(start, end int) []byte {
+	if len(id) != 12 {
+		panic(fmt.Sprintf("invalid ObjectId: %q", string(id)))
+	}
+	return []byte(string(id)[start:end])
+}
+
+// Time returns the timestamp part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Time() time.Time {
+	// First 4 bytes of ObjectId is 32-bit big-endian seconds from epoch.
+	secs := int64(binary.BigEndian.Uint32(id.byteSlice(0, 4)))
+	return time.Unix(secs, 0)
+}
+
+// Machine returns the 3-byte machine id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Machine() []byte {
+	return id.byteSlice(4, 7)
+}
+
+// Pid returns the process id part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Pid() uint16 {
+	return binary.BigEndian.Uint16(id.byteSlice(7, 9))
+}
+
+// Counter returns the incrementing value part of the id.
+// It's a runtime error to call this method with an invalid id.
+func (id ObjectId) Counter() int32 {
+	b := id.byteSlice(9, 12)
+	// Counter is stored as big-endian 3-byte value
+	return int32(uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2]))
+}
+
+// The Symbol type is similar to a string and is used in languages with a
+// distinct symbol type.
+type Symbol string
+
+// Now returns the current time with millisecond precision. MongoDB stores
+// timestamps with the same precision, so a Time returned from this method
+// will not change after a roundtrip to the database. That's the only reason
+// why this function exists. Using the time.Now function also works fine
+// otherwise.
+func Now() time.Time {
+	return time.Unix(0, time.Now().UnixNano()/1e6*1e6)
+}
+
+// MongoTimestamp is a special internal type used by MongoDB that for some
+// strange reason has its own datatype defined in BSON.
+type MongoTimestamp int64
+
+type orderKey int64
+
+// MaxKey is a special value that compares higher than all other possible BSON
+// values in a MongoDB database.
+var MaxKey = orderKey(1<<63 - 1)
+
+// MinKey is a special value that compares lower than all other possible BSON
+// values in a MongoDB database.
+var MinKey = orderKey(-1 << 63)
+
+type undefined struct{}
+
+// Undefined represents the undefined BSON value.
+var Undefined undefined
+
+// Binary is a representation for non-standard binary values.  Any kind should
+// work, but the following are known as of this writing:
+//
+//   0x00 - Generic. This is decoded as []byte(data), not Binary{0x00, data}.
+//   0x01 - Function (!?)
+//   0x02 - Obsolete generic.
+//   0x03 - UUID
+//   0x05 - MD5
+//   0x80 - User defined.
+//
+type Binary struct {
+	Kind byte
+	Data []byte
+}
+
+// RegEx represents a regular expression.  The Options field may contain
+// individual characters defining the way in which the pattern should be
+// applied, and must be sorted. Valid options as of this writing are 'i' for
+// case insensitive matching, 'm' for multi-line matching, 'x' for verbose
+// mode, 'l' to make \w, \W, and similar be locale-dependent, 's' for dot-all
+// mode (a '.' matches everything), and 'u' to make \w, \W, and similar match
+// unicode. The value of the Options parameter is not verified before being
+// marshaled into the BSON format.
+type RegEx struct {
+	Pattern string
+	Options string
+}
+
+// JavaScript is a type that holds JavaScript code. If Scope is non-nil, it
+// will be marshaled as a mapping from identifiers to values that may be
+// used when evaluating the provided Code.
+type JavaScript struct {
+	Code  string
+	Scope interface{}
+}
+
+// DBPointer refers to a document id in a namespace.
+//
+// This type is deprecated in the BSON specification and should not be used
+// except for backwards compatibility with ancient applications.
+type DBPointer struct {
+	Namespace string
+	Id        ObjectId
+}
+
+const initialBufferSize = 64
+
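+// handleErr recovers a panic raised during marshalling or unmarshalling
+// and stores it in *err, re-panicking on runtime errors and external
+// panics so programming mistakes still crash loudly.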
+func handleErr(err *error) {
+	if r := recover(); r != nil {
+		if _, ok := r.(runtime.Error); ok {
+			panic(r)
+		} else if _, ok := r.(externalPanic); ok {
+			panic(r)
+		} else if s, ok := r.(string); ok {
+			*err = errors.New(s)
+		} else if e, ok := r.(error); ok {
+			*err = e
+		} else {
+			panic(r)
+		}
+	}
+}
+
+// Marshal serializes the in value, which may be a map or a struct value.
+// In the case of struct values, only exported fields will be serialized,
+// and the order of serialized fields will match that of the struct itself.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the marshalling behavior for
+// the field. The tag formats accepted are:
+//
+//     "[<key>][,<flag1>[,<flag2>]]"
+//
+//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty  Only include the field if it's not set to the zero
+//                value for the type or to empty slices or maps.
+//
+//     minsize    Marshal an int64 value as an int32, if that's feasible
+//                while preserving the numeric value.
+//
+//     inline     Inline the field, which must be a struct or a map,
+//                causing all of its fields or keys to be processed as if
+//                they were part of the outer struct. For maps, keys must
+//                not conflict with the bson keys of other struct fields.
+//
+// Some examples:
+//
+//     type T struct {
+//         A bool
+//         B int    "myb"
+//         C string "myc,omitempty"
+//         D string `bson:",omitempty" json:"jsonkey"`
+//         E int64  ",minsize"
+//         F int64  "myf,omitempty,minsize"
+//     }
+//
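+// A minimal usage sketch:
+//
+//     data, err := bson.Marshal(bson.M{"hello": "world"})
+//     if err != nil {
+//         panic(err)
+//     }
+//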
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := &encoder{make([]byte, 0, initialBufferSize)}
+	e.addDoc(reflect.ValueOf(in))
+	return e.out, nil
+}
+
+// Unmarshal deserializes data from in into the out value.  The out value
+// must be a map, a pointer to a struct, or a pointer to a bson.D value.
+// In the case of struct values, only exported fields will be deserialized.
+// The lowercased field name is used as the key for each exported field,
+// but this behavior may be changed using the respective field tag.
+// The tag may also contain flags to tweak the unmarshalling behavior for
+// the field. The tag formats accepted are:
+//
+//     "[<key>][,<flag1>[,<flag2>]]"
+//
+//     `(...) bson:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported during unmarshal (see the
+// Marshal method for other flags):
+//
+//     inline     Inline the field, which must be a struct or a map.
+//                Inlined structs are handled as if its fields were part
+//                of the outer struct. An inlined map causes keys that do
+//                not match any other struct field to be inserted in the
+//                map rather than being discarded as usual.
+//
+// The target field or element types of out may not necessarily match
+// the BSON values of the provided data.  The following conversions are
+// made automatically:
+//
+// - Numeric types are converted if at least the integer part of the
+//   value would be preserved correctly
+// - Bools are converted to numeric types as 1 or 0
+// - Numeric types are converted to bools as true if not 0 or false otherwise
+// - Binary and string BSON data is converted to a string, array or byte slice
+//
+// If the value would not fit the type and cannot be converted, it's
+// silently skipped.
+//
+// Pointer values are initialized when necessary.
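+//
+// A minimal usage sketch:
+//
+//     var doc struct {
+//         Hello string `bson:"hello"`
+//     }
+//     err := bson.Unmarshal(data, &doc)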
+func Unmarshal(in []byte, out interface{}) (err error) {
+	if raw, ok := out.(*Raw); ok {
+		raw.Kind = 3
+		raw.Data = in
+		return nil
+	}
+	defer handleErr(&err)
+	v := reflect.ValueOf(out)
+	switch v.Kind() {
+	case reflect.Ptr:
+		fallthrough
+	case reflect.Map:
+		d := newDecoder(in)
+		d.readDocTo(v)
+	case reflect.Struct:
+		return errors.New("Unmarshal can't deal with struct values. Use a pointer.")
+	default:
+		return errors.New("Unmarshal needs a map or a pointer to a struct.")
+	}
+	return nil
+}
+
+// Unmarshal deserializes raw into the out value.  If the out value type
+// is not compatible with raw, a *bson.TypeError is returned.
+//
+// See the Unmarshal function documentation for more details on the
+// unmarshalling process.
+func (raw Raw) Unmarshal(out interface{}) (err error) {
+	defer handleErr(&err)
+	v := reflect.ValueOf(out)
+	switch v.Kind() {
+	case reflect.Ptr:
+		v = v.Elem()
+		fallthrough
+	case reflect.Map:
+		d := newDecoder(raw.Data)
+		good := d.readElemTo(v, raw.Kind)
+		if !good {
+			return &TypeError{v.Type(), raw.Kind}
+		}
+	case reflect.Struct:
+		return errors.New("Raw Unmarshal can't deal with struct values. Use a pointer.")
+	default:
+		return errors.New("Raw Unmarshal needs a map or a valid pointer.")
+	}
+	return nil
+}
+
+type TypeError struct {
+	Type reflect.Type
+	Kind byte
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("BSON kind 0x%02x isn't compatible with type %s", e.Kind, e.Type.String())
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+	InlineMap  int
+	Zero       reflect.Value
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	MinSize   bool
+	Inline    []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var structMapMutex sync.RWMutex
+
+type externalPanic string
+
+func (e externalPanic) String() string {
+	return string(e)
+}
+
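+// getStructInfo parses the bson field mapping for struct type st and
+// caches it in structMap, so the reflection and tag parsing work happens
+// only once per type.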
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	structMapMutex.RLock()
+	sinfo, found := structMap[st]
+	structMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("bson")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "minsize":
+					info.MinSize = true
+				case "inline":
+					inline = true
+				default:
+					msg := fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+					panic(externalPanic(msg))
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct:
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+					}
+					fieldsMap[finfo.Key] = finfo
+					fieldsList = append(fieldsList, finfo)
+				}
+			default:
+				panic("Option ,inline needs a struct value or map field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+	sinfo = &structInfo{
+		fieldsMap,
+		fieldsList,
+		inlineMap,
+		reflect.New(st).Elem(),
+	}
+	structMapMutex.Lock()
+	structMap[st] = sinfo
+	structMapMutex.Unlock()
+	return sinfo, nil
+}
diff --git a/switchq/vendor/gopkg.in/mgo.v2/bson/decimal.go b/switchq/vendor/gopkg.in/mgo.v2/bson/decimal.go
new file mode 100644
index 0000000..3d2f700
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/bson/decimal.go
@@ -0,0 +1,310 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package bson
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+// Decimal128 holds decimal128 BSON values.
+type Decimal128 struct {
+	h, l uint64
+}
+
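+// String returns the canonical string form of the decimal128 value,
+// including "NaN", "-Inf"/"Inf", and scientific notation where needed.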
+func (d Decimal128) String() string {
+	var pos int     // positive sign
+	var e int       // exponent
+	var h, l uint64 // significand high/low
+
+	if d.h>>63&1 == 0 {
+		pos = 1
+	}
+
+	switch d.h >> 58 & (1<<5 - 1) {
+	case 0x1F:
+		return "NaN"
+	case 0x1E:
+		return "-Inf"[pos:]
+	}
+
+	l = d.l
+	if d.h>>61&3 == 3 {
+		// Bits: 1*sign 2*ignored 14*exponent 111*significand.
+		// Implicit 0b100 prefix in significand.
+		e = int(d.h>>47&(1<<14-1)) - 6176
+		//h = 4<<47 | d.h&(1<<47-1)
+		// Spec says all of these values are out of range.
+		h, l = 0, 0
+	} else {
+		// Bits: 1*sign 14*exponent 113*significand
+		e = int(d.h>>49&(1<<14-1)) - 6176
+		h = d.h & (1<<49 - 1)
+	}
+
+	// Would be handled by the logic below, but that's trivial and common.
+	if h == 0 && l == 0 && e == 0 {
+		return "-0"[pos:]
+	}
+
+	var repr [48]byte // Loop 5 times over 9 digits plus dot, negative sign, and leading zero.
+	var last = len(repr)
+	var i = len(repr)
+	var dot = len(repr) + e
+	var rem uint32
+Loop:
+	for d9 := 0; d9 < 5; d9++ {
+		h, l, rem = divmod(h, l, 1e9)
+		for d1 := 0; d1 < 9; d1++ {
+			// Handle "-0.0", "0.00123400", "-1.00E-6", "1.050E+3", etc.
+			if i < len(repr) && (dot == i || l == 0 && h == 0 && rem > 0 && rem < 10 && (dot < i-6 || e > 0)) {
+				e += len(repr) - i
+				i--
+				repr[i] = '.'
+				last = i - 1
+				dot = len(repr) // Unmark.
+			}
+			c := '0' + byte(rem%10)
+			rem /= 10
+			i--
+			repr[i] = c
+			// Handle "0E+3", "1E+3", etc.
+			if l == 0 && h == 0 && rem == 0 && i == len(repr)-1 && (dot < i-5 || e > 0) {
+				last = i
+				break Loop
+			}
+			if c != '0' {
+				last = i
+			}
+			// Break early. Correct without it, but this avoids needless iterations.
+			if dot > i && l == 0 && h == 0 && rem == 0 {
+				break Loop
+			}
+		}
+	}
+	repr[last-1] = '-'
+	last--
+
+	if e > 0 {
+		return string(repr[last+pos:]) + "E+" + strconv.Itoa(e)
+	}
+	if e < 0 {
+		return string(repr[last+pos:]) + "E" + strconv.Itoa(e)
+	}
+	return string(repr[last+pos:])
+}
+
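+// divmod divides the 128-bit unsigned value (h,l) by div, returning the
+// 128-bit quotient (qh,ql) and the 32-bit remainder. It works in four
+// 32-bit long-division steps from the most significant word down.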
+func divmod(h, l uint64, div uint32) (qh, ql uint64, rem uint32) {
+	div64 := uint64(div)
+	a := h >> 32
+	aq := a / div64
+	ar := a % div64
+	b := ar<<32 + h&(1<<32-1)
+	bq := b / div64
+	br := b % div64
+	c := br<<32 + l>>32
+	cq := c / div64
+	cr := c % div64
+	d := cr<<32 + l&(1<<32-1)
+	dq := d / div64
+	dr := d % div64
+	return (aq<<32 | bq), (cq<<32 | dq), uint32(dr)
+}
+
+var dNaN = Decimal128{0x1F << 58, 0}
+var dPosInf = Decimal128{0x1E << 58, 0}
+var dNegInf = Decimal128{0x3E << 58, 0}
+
+func dErr(s string) (Decimal128, error) {
+	return dNaN, fmt.Errorf("cannot parse %q as a decimal128", s)
+}
+
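+// ParseDecimal128 parses s as a decimal128 value. It accepts an optional
+// sign, "NaN" and "Inf"/"Infinity" in any letter case, and scientific
+// notation with an E exponent; on failure it returns NaN and an error.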
+func ParseDecimal128(s string) (Decimal128, error) {
+	orig := s
+	if s == "" {
+		return dErr(orig)
+	}
+	neg := s[0] == '-'
+	if neg || s[0] == '+' {
+		s = s[1:]
+	}
+
+	if (len(s) == 3 || len(s) == 8) && (s[0] == 'N' || s[0] == 'n' || s[0] == 'I' || s[0] == 'i') {
+		if s == "NaN" || s == "nan" || strings.EqualFold(s, "nan") {
+			return dNaN, nil
+		}
+		if s == "Inf" || s == "inf" || strings.EqualFold(s, "inf") || strings.EqualFold(s, "infinity") {
+			if neg {
+				return dNegInf, nil
+			}
+			return dPosInf, nil
+		}
+		return dErr(orig)
+	}
+
+	var h, l uint64
+	var e int
+
+	var add, ovr uint32
+	var mul uint32 = 1
+	var dot = -1
+	var digits = 0
+	var i = 0
+	for i < len(s) {
+		c := s[i]
+		if mul == 1e9 {
+			h, l, ovr = muladd(h, l, mul, add)
+			mul, add = 1, 0
+			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+				return dErr(orig)
+			}
+		}
+		if c >= '0' && c <= '9' {
+			i++
+			if c > '0' || digits > 0 {
+				digits++
+			}
+			if digits > 34 {
+				if c == '0' {
+					// Exact rounding.
+					e++
+					continue
+				}
+				return dErr(orig)
+			}
+			mul *= 10
+			add *= 10
+			add += uint32(c - '0')
+			continue
+		}
+		if c == '.' {
+			i++
+			if dot >= 0 || i == 1 && len(s) == 1 {
+				return dErr(orig)
+			}
+			if i == len(s) {
+				break
+			}
+			if s[i] < '0' || s[i] > '9' || e > 0 {
+				return dErr(orig)
+			}
+			dot = i
+			continue
+		}
+		break
+	}
+	if i == 0 {
+		return dErr(orig)
+	}
+	if mul > 1 {
+		h, l, ovr = muladd(h, l, mul, add)
+		if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+			return dErr(orig)
+		}
+	}
+	if dot >= 0 {
+		e += dot - i
+	}
+	if i+1 < len(s) && (s[i] == 'E' || s[i] == 'e') {
+		i++
+		eneg := s[i] == '-'
+		if eneg || s[i] == '+' {
+			i++
+			if i == len(s) {
+				return dErr(orig)
+			}
+		}
+		n := 0
+		for i < len(s) && n < 1e4 {
+			c := s[i]
+			i++
+			if c < '0' || c > '9' {
+				return dErr(orig)
+			}
+			n *= 10
+			n += int(c - '0')
+		}
+		if eneg {
+			n = -n
+		}
+		e += n
+		for e < -6176 {
+			// Subnormal.
+			var div uint32 = 1
+			for div < 1e9 && e < -6176 {
+				div *= 10
+				e++
+			}
+			var rem uint32
+			h, l, rem = divmod(h, l, div)
+			if rem > 0 {
+				return dErr(orig)
+			}
+		}
+		for e > 6111 {
+			// Clamped.
+			var mul uint32 = 1
+			for mul < 1e9 && e > 6111 {
+				mul *= 10
+				e--
+			}
+			h, l, ovr = muladd(h, l, mul, 0)
+			if ovr > 0 || h&((1<<15-1)<<49) > 0 {
+				return dErr(orig)
+			}
+		}
+		if e < -6176 || e > 6111 {
+			return dErr(orig)
+		}
+	}
+
+	if i < len(s) {
+		return dErr(orig)
+	}
+
+	h |= uint64(e+6176) & uint64(1<<14-1) << 49
+	if neg {
+		h |= 1 << 63
+	}
+	return Decimal128{h, l}, nil
+}
+
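+// muladd multiplies the 128-bit value (h,l) by mul and adds add,
+// returning the 128-bit result and any overflow past 128 bits.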
+func muladd(h, l uint64, mul uint32, add uint32) (resh, resl uint64, overflow uint32) {
+	mul64 := uint64(mul)
+	a := mul64 * (l & (1<<32 - 1))
+	b := a>>32 + mul64*(l>>32)
+	c := b>>32 + mul64*(h&(1<<32-1))
+	d := c>>32 + mul64*(h>>32)
+
+	a = a&(1<<32-1) + uint64(add)
+	b = b&(1<<32-1) + a>>32
+	c = c&(1<<32-1) + b>>32
+	d = d&(1<<32-1) + c>>32
+
+	return (d<<32 | c&(1<<32-1)), (b<<32 | a&(1<<32-1)), uint32(d >> 32)
+}
diff --git a/switchq/vendor/gopkg.in/mgo.v2/bson/decode.go b/switchq/vendor/gopkg.in/mgo.v2/bson/decode.go
new file mode 100644
index 0000000..7c2d841
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/bson/decode.go
@@ -0,0 +1,849 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+)
+
+type decoder struct {
+	in      []byte
+	i       int
+	docType reflect.Type
+}
+
+var typeM = reflect.TypeOf(M{})
+
+func newDecoder(in []byte) *decoder {
+	return &decoder{in, 0, typeM}
+}
+
+// --------------------------------------------------------------------------
+// Some helper functions.
+
+func corrupted() {
+	panic("Document is corrupted")
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of documents.
+
+const (
+	setterUnknown = iota
+	setterNone
+	setterType
+	setterAddr
+)
+
+var setterStyles map[reflect.Type]int
+var setterIface reflect.Type
+var setterMutex sync.RWMutex
+
+func init() {
+	var iface Setter
+	setterIface = reflect.TypeOf(&iface).Elem()
+	setterStyles = make(map[reflect.Type]int)
+}
+
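+// setterStyle reports whether outt implements Setter directly, through
+// its address, or not at all, caching the answer in setterStyles.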
+func setterStyle(outt reflect.Type) int {
+	setterMutex.RLock()
+	style := setterStyles[outt]
+	setterMutex.RUnlock()
+	if style == setterUnknown {
+		setterMutex.Lock()
+		defer setterMutex.Unlock()
+		if outt.Implements(setterIface) {
+			setterStyles[outt] = setterType
+		} else if reflect.PtrTo(outt).Implements(setterIface) {
+			setterStyles[outt] = setterAddr
+		} else {
+			setterStyles[outt] = setterNone
+		}
+		style = setterStyles[outt]
+	}
+	return style
+}
+
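+// getSetter returns out as a Setter when its type supports the
+// interface, allocating a pointed-to value if needed; otherwise nil.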
+func getSetter(outt reflect.Type, out reflect.Value) Setter {
+	style := setterStyle(outt)
+	if style == setterNone {
+		return nil
+	}
+	if style == setterAddr {
+		if !out.CanAddr() {
+			return nil
+		}
+		out = out.Addr()
+	} else if outt.Kind() == reflect.Ptr && out.IsNil() {
+		out.Set(reflect.New(outt.Elem()))
+	}
+	return out.Interface().(Setter)
+}
+
+func clearMap(m reflect.Value) {
+	var none reflect.Value
+	for _, k := range m.MapKeys() {
+		m.SetMapIndex(k, none)
+	}
+}
+
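+// readDocTo decodes a single BSON document from the input into out,
+// which may be a map, a struct, an interface, a bson.D/bson.RawD slice,
+// or a pointer to any of those.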
+func (d *decoder) readDocTo(out reflect.Value) {
+	var elemType reflect.Type
+	outt := out.Type()
+	outk := outt.Kind()
+
+	for {
+		if outk == reflect.Ptr && out.IsNil() {
+			out.Set(reflect.New(outt.Elem()))
+		}
+		if setter := getSetter(outt, out); setter != nil {
+			var raw Raw
+			d.readDocTo(reflect.ValueOf(&raw))
+			err := setter.SetBSON(raw)
+			if _, ok := err.(*TypeError); err != nil && !ok {
+				panic(err)
+			}
+			return
+		}
+		if outk == reflect.Ptr {
+			out = out.Elem()
+			outt = out.Type()
+			outk = out.Kind()
+			continue
+		}
+		break
+	}
+
+	var fieldsMap map[string]fieldInfo
+	var inlineMap reflect.Value
+	start := d.i
+
+	origout := out
+	if outk == reflect.Interface {
+		if d.docType.Kind() == reflect.Map {
+			mv := reflect.MakeMap(d.docType)
+			out.Set(mv)
+			out = mv
+		} else {
+			dv := reflect.New(d.docType).Elem()
+			out.Set(dv)
+			out = dv
+		}
+		outt = out.Type()
+		outk = outt.Kind()
+	}
+
+	docType := d.docType
+	keyType := typeString
+	convertKey := false
+	switch outk {
+	case reflect.Map:
+		keyType = outt.Key()
+		if keyType.Kind() != reflect.String {
+			panic("BSON map must have string keys. Got: " + outt.String())
+		}
+		if keyType != typeString {
+			convertKey = true
+		}
+		elemType = outt.Elem()
+		if elemType == typeIface {
+			d.docType = outt
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(out.Type()))
+		} else if out.Len() > 0 {
+			clearMap(out)
+		}
+	case reflect.Struct:
+		if outt != typeRaw {
+			sinfo, err := getStructInfo(out.Type())
+			if err != nil {
+				panic(err)
+			}
+			fieldsMap = sinfo.FieldsMap
+			out.Set(sinfo.Zero)
+			if sinfo.InlineMap != -1 {
+				inlineMap = out.Field(sinfo.InlineMap)
+				if !inlineMap.IsNil() && inlineMap.Len() > 0 {
+					clearMap(inlineMap)
+				}
+				elemType = inlineMap.Type().Elem()
+				if elemType == typeIface {
+					d.docType = inlineMap.Type()
+				}
+			}
+		}
+	case reflect.Slice:
+		switch outt.Elem() {
+		case typeDocElem:
+			origout.Set(d.readDocElems(outt))
+			return
+		case typeRawDocElem:
+			origout.Set(d.readRawDocElems(outt))
+			return
+		}
+		fallthrough
+	default:
+		panic("Unsupported document type for unmarshalling: " + out.Type().String())
+	}
+
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		name := d.readCStr()
+		if d.i >= end {
+			corrupted()
+		}
+
+		switch outk {
+		case reflect.Map:
+			e := reflect.New(elemType).Elem()
+			if d.readElemTo(e, kind) {
+				k := reflect.ValueOf(name)
+				if convertKey {
+					k = k.Convert(keyType)
+				}
+				out.SetMapIndex(k, e)
+			}
+		case reflect.Struct:
+			if outt == typeRaw {
+				d.dropElem(kind)
+			} else {
+				if info, ok := fieldsMap[name]; ok {
+					if info.Inline == nil {
+						d.readElemTo(out.Field(info.Num), kind)
+					} else {
+						d.readElemTo(out.FieldByIndex(info.Inline), kind)
+					}
+				} else if inlineMap.IsValid() {
+					if inlineMap.IsNil() {
+						inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+					}
+					e := reflect.New(elemType).Elem()
+					if d.readElemTo(e, kind) {
+						inlineMap.SetMapIndex(reflect.ValueOf(name), e)
+					}
+				} else {
+					d.dropElem(kind)
+				}
+			}
+		case reflect.Slice:
+		}
+
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+	d.docType = docType
+
+	if outt == typeRaw {
+		out.Set(reflect.ValueOf(Raw{0x03, d.in[start:d.i]}))
+	}
+}
+
+func (d *decoder) readArrayDocTo(out reflect.Value) {
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	i := 0
+	l := out.Len()
+	for d.in[d.i] != '\x00' {
+		if i >= l {
+			panic("Length mismatch on array field")
+		}
+		kind := d.readByte()
+		for d.i < end && d.in[d.i] != '\x00' {
+			d.i++
+		}
+		if d.i >= end {
+			corrupted()
+		}
+		d.i++
+		d.readElemTo(out.Index(i), kind)
+		if d.i >= end {
+			corrupted()
+		}
+		i++
+	}
+	if i != l {
+		panic("Length mismatch on array field")
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+}
+
+func (d *decoder) readSliceDoc(t reflect.Type) interface{} {
+	tmp := make([]reflect.Value, 0, 8)
+	elemType := t.Elem()
+	if elemType == typeRawDocElem {
+		d.dropElem(0x04)
+		return reflect.Zero(t).Interface()
+	}
+
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		for d.i < end && d.in[d.i] != '\x00' {
+			d.i++
+		}
+		if d.i >= end {
+			corrupted()
+		}
+		d.i++
+		e := reflect.New(elemType).Elem()
+		if d.readElemTo(e, kind) {
+			tmp = append(tmp, e)
+		}
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+
+	n := len(tmp)
+	slice := reflect.MakeSlice(t, n, n)
+	for i := 0; i != n; i++ {
+		slice.Index(i).Set(tmp[i])
+	}
+	return slice.Interface()
+}
+
+var typeSlice = reflect.TypeOf([]interface{}{})
+var typeIface = typeSlice.Elem()
+
+func (d *decoder) readDocElems(typ reflect.Type) reflect.Value {
+	docType := d.docType
+	d.docType = typ
+	slice := make([]DocElem, 0, 8)
+	d.readDocWith(func(kind byte, name string) {
+		e := DocElem{Name: name}
+		v := reflect.ValueOf(&e.Value)
+		if d.readElemTo(v.Elem(), kind) {
+			slice = append(slice, e)
+		}
+	})
+	slicev := reflect.New(typ).Elem()
+	slicev.Set(reflect.ValueOf(slice))
+	d.docType = docType
+	return slicev
+}
+
+func (d *decoder) readRawDocElems(typ reflect.Type) reflect.Value {
+	docType := d.docType
+	d.docType = typ
+	slice := make([]RawDocElem, 0, 8)
+	d.readDocWith(func(kind byte, name string) {
+		e := RawDocElem{Name: name}
+		v := reflect.ValueOf(&e.Value)
+		if d.readElemTo(v.Elem(), kind) {
+			slice = append(slice, e)
+		}
+	})
+	slicev := reflect.New(typ).Elem()
+	slicev.Set(reflect.ValueOf(slice))
+	d.docType = docType
+	return slicev
+}
+
+func (d *decoder) readDocWith(f func(kind byte, name string)) {
+	end := int(d.readInt32())
+	end += d.i - 4
+	if end <= d.i || end > len(d.in) || d.in[end-1] != '\x00' {
+		corrupted()
+	}
+	for d.in[d.i] != '\x00' {
+		kind := d.readByte()
+		name := d.readCStr()
+		if d.i >= end {
+			corrupted()
+		}
+		f(kind, name)
+		if d.i >= end {
+			corrupted()
+		}
+	}
+	d.i++ // '\x00'
+	if d.i != end {
+		corrupted()
+	}
+}
+
+// --------------------------------------------------------------------------
+// Unmarshaling of individual elements within a document.
+
+var blackHole = settableValueOf(struct{}{})
+
+func (d *decoder) dropElem(kind byte) {
+	d.readElemTo(blackHole, kind)
+}
+
+// Attempt to decode an element from the document and put it into out.
+// If the types are not compatible, the returned ok value will be
+// false and out will be unchanged.
+func (d *decoder) readElemTo(out reflect.Value, kind byte) (good bool) {
+
+	start := d.i
+
+	if kind == 0x03 {
+		// Delegate unmarshaling of documents.
+		outt := out.Type()
+		outk := out.Kind()
+		switch outk {
+		case reflect.Interface, reflect.Ptr, reflect.Struct, reflect.Map:
+			d.readDocTo(out)
+			return true
+		}
+		if setterStyle(outt) != setterNone {
+			d.readDocTo(out)
+			return true
+		}
+		if outk == reflect.Slice {
+			switch outt.Elem() {
+			case typeDocElem:
+				out.Set(d.readDocElems(outt))
+			case typeRawDocElem:
+				out.Set(d.readRawDocElems(outt))
+			default:
+				d.readDocTo(blackHole)
+			}
+			return true
+		}
+		d.readDocTo(blackHole)
+		return true
+	}
+
+	var in interface{}
+
+	switch kind {
+	case 0x01: // Float64
+		in = d.readFloat64()
+	case 0x02: // UTF-8 string
+		in = d.readStr()
+	case 0x03: // Document
+		panic("Can't happen. Handled above.")
+	case 0x04: // Array
+		outt := out.Type()
+		if setterStyle(outt) != setterNone {
+			// Skip the value so its data is handed to the setter below.
+			d.dropElem(kind)
+			break
+		}
+		for outt.Kind() == reflect.Ptr {
+			outt = outt.Elem()
+		}
+		switch outt.Kind() {
+		case reflect.Array:
+			d.readArrayDocTo(out)
+			return true
+		case reflect.Slice:
+			in = d.readSliceDoc(outt)
+		default:
+			in = d.readSliceDoc(typeSlice)
+		}
+	case 0x05: // Binary
+		b := d.readBinary()
+		if b.Kind == 0x00 || b.Kind == 0x02 {
+			in = b.Data
+		} else {
+			in = b
+		}
+	case 0x06: // Undefined (obsolete, but still seen in the wild)
+		in = Undefined
+	case 0x07: // ObjectId
+		in = ObjectId(d.readBytes(12))
+	case 0x08: // Bool
+		in = d.readBool()
+	case 0x09: // UTC datetime
+		// MongoDB stores datetimes as milliseconds since the Unix epoch.
+		i := d.readInt64()
+		if i == -62135596800000 {
+			in = time.Time{} // In UTC for convenience.
+		} else {
+			in = time.Unix(i/1e3, i%1e3*1e6)
+		}
+	case 0x0A: // Nil
+		in = nil
+	case 0x0B: // RegEx
+		in = d.readRegEx()
+	case 0x0C:
+		in = DBPointer{Namespace: d.readStr(), Id: ObjectId(d.readBytes(12))}
+	case 0x0D: // JavaScript without scope
+		in = JavaScript{Code: d.readStr()}
+	case 0x0E: // Symbol
+		in = Symbol(d.readStr())
+	case 0x0F: // JavaScript with scope
+		d.i += 4 // Skip length
+		js := JavaScript{d.readStr(), make(M)}
+		d.readDocTo(reflect.ValueOf(js.Scope))
+		in = js
+	case 0x10: // Int32
+		in = int(d.readInt32())
+	case 0x11: // Mongo-specific timestamp
+		in = MongoTimestamp(d.readInt64())
+	case 0x12: // Int64
+		in = d.readInt64()
+	case 0x13: // Decimal128
+		in = Decimal128{
+			l: uint64(d.readInt64()),
+			h: uint64(d.readInt64()),
+		}
+	case 0x7F: // Max key
+		in = MaxKey
+	case 0xFF: // Min key
+		in = MinKey
+	default:
+		panic(fmt.Sprintf("Unknown element kind (0x%02X)", kind))
+	}
+
+	outt := out.Type()
+
+	if outt == typeRaw {
+		out.Set(reflect.ValueOf(Raw{kind, d.in[start:d.i]}))
+		return true
+	}
+
+	if setter := getSetter(outt, out); setter != nil {
+		err := setter.SetBSON(Raw{kind, d.in[start:d.i]})
+		if err == SetZero {
+			out.Set(reflect.Zero(outt))
+			return true
+		}
+		if err == nil {
+			return true
+		}
+		if _, ok := err.(*TypeError); !ok {
+			panic(err)
+		}
+		return false
+	}
+
+	if in == nil {
+		out.Set(reflect.Zero(outt))
+		return true
+	}
+
+	outk := outt.Kind()
+
+	// Dereference and initialize pointer if necessary.
+	first := true
+	for outk == reflect.Ptr {
+		if !out.IsNil() {
+			out = out.Elem()
+		} else {
+			elem := reflect.New(outt.Elem())
+			if first {
+				// Only set if value is compatible.
+				first = false
+				defer func(out, elem reflect.Value) {
+					if good {
+						out.Set(elem)
+					}
+				}(out, elem)
+			} else {
+				out.Set(elem)
+			}
+			out = elem
+		}
+		outt = out.Type()
+		outk = outt.Kind()
+	}
+
+	inv := reflect.ValueOf(in)
+	if outt == inv.Type() {
+		out.Set(inv)
+		return true
+	}
+
+	switch outk {
+	case reflect.Interface:
+		out.Set(inv)
+		return true
+	case reflect.String:
+		switch inv.Kind() {
+		case reflect.String:
+			out.SetString(inv.String())
+			return true
+		case reflect.Slice:
+			if b, ok := in.([]byte); ok {
+				out.SetString(string(b))
+				return true
+			}
+		case reflect.Int, reflect.Int64:
+			if outt == typeJSONNumber {
+				out.SetString(strconv.FormatInt(inv.Int(), 10))
+				return true
+			}
+		case reflect.Float64:
+			if outt == typeJSONNumber {
+				out.SetString(strconv.FormatFloat(inv.Float(), 'f', -1, 64))
+				return true
+			}
+		}
+	case reflect.Slice, reflect.Array:
+		// Remember, array (0x04) slices are built with the correct
+		// element type.  If we are here, this must be a cross BSON kind
+		// conversion (e.g. a BSON string unmarshalled onto a []byte).
+		if outt.Elem().Kind() != reflect.Uint8 {
+			break
+		}
+		switch inv.Kind() {
+		case reflect.String:
+			slice := []byte(inv.String())
+			out.Set(reflect.ValueOf(slice))
+			return true
+		case reflect.Slice:
+			switch outt.Kind() {
+			case reflect.Array:
+				reflect.Copy(out, inv)
+			case reflect.Slice:
+				out.SetBytes(inv.Bytes())
+			}
+			return true
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch inv.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetInt(inv.Int())
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetInt(int64(inv.Float()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetInt(1)
+			} else {
+				out.SetInt(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("can't happen: no uint types in BSON (!?)")
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch inv.Kind() {
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetUint(uint64(inv.Int()))
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetUint(uint64(inv.Float()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetUint(1)
+			} else {
+				out.SetUint(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON.")
+		}
+	case reflect.Float32, reflect.Float64:
+		switch inv.Kind() {
+		case reflect.Float32, reflect.Float64:
+			out.SetFloat(inv.Float())
+			return true
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetFloat(float64(inv.Int()))
+			return true
+		case reflect.Bool:
+			if inv.Bool() {
+				out.SetFloat(1)
+			} else {
+				out.SetFloat(0)
+			}
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON?")
+		}
+	case reflect.Bool:
+		switch inv.Kind() {
+		case reflect.Bool:
+			out.SetBool(inv.Bool())
+			return true
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			out.SetBool(inv.Int() != 0)
+			return true
+		case reflect.Float32, reflect.Float64:
+			out.SetBool(inv.Float() != 0)
+			return true
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			panic("Can't happen. No uint types in BSON?")
+		}
+	case reflect.Struct:
+		if outt == typeURL && inv.Kind() == reflect.String {
+			u, err := url.Parse(inv.String())
+			if err != nil {
+				panic(err)
+			}
+			out.Set(reflect.ValueOf(u).Elem())
+			return true
+		}
+		if outt == typeBinary {
+			if b, ok := in.([]byte); ok {
+				out.Set(reflect.ValueOf(Binary{Data: b}))
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// --------------------------------------------------------------------------
+// Parsers of basic types.
+
+func (d *decoder) readRegEx() RegEx {
+	re := RegEx{}
+	re.Pattern = d.readCStr()
+	re.Options = d.readCStr()
+	return re
+}
+
+func (d *decoder) readBinary() Binary {
+	l := d.readInt32()
+	b := Binary{}
+	b.Kind = d.readByte()
+	b.Data = d.readBytes(l)
+	if b.Kind == 0x02 && len(b.Data) >= 4 {
+		// Weird obsolete format with redundant length.
+		b.Data = b.Data[4:]
+	}
+	return b
+}
+
+func (d *decoder) readStr() string {
+	l := d.readInt32()
+	b := d.readBytes(l - 1)
+	if d.readByte() != '\x00' {
+		corrupted()
+	}
+	return string(b)
+}
+
+func (d *decoder) readCStr() string {
+	start := d.i
+	end := start
+	l := len(d.in)
+	for ; end != l; end++ {
+		if d.in[end] == '\x00' {
+			break
+		}
+	}
+	d.i = end + 1
+	if d.i > l {
+		corrupted()
+	}
+	return string(d.in[start:end])
+}
+
+func (d *decoder) readBool() bool {
+	b := d.readByte()
+	if b == 0 {
+		return false
+	}
+	if b == 1 {
+		return true
+	}
+	panic(fmt.Sprintf("encoded boolean must be 1 or 0, found %d", b))
+}
+
+func (d *decoder) readFloat64() float64 {
+	return math.Float64frombits(uint64(d.readInt64()))
+}
+
+func (d *decoder) readInt32() int32 {
+	b := d.readBytes(4)
+	return int32((uint32(b[0]) << 0) |
+		(uint32(b[1]) << 8) |
+		(uint32(b[2]) << 16) |
+		(uint32(b[3]) << 24))
+}
+
+func (d *decoder) readInt64() int64 {
+	b := d.readBytes(8)
+	return int64((uint64(b[0]) << 0) |
+		(uint64(b[1]) << 8) |
+		(uint64(b[2]) << 16) |
+		(uint64(b[3]) << 24) |
+		(uint64(b[4]) << 32) |
+		(uint64(b[5]) << 40) |
+		(uint64(b[6]) << 48) |
+		(uint64(b[7]) << 56))
+}
+
+func (d *decoder) readByte() byte {
+	i := d.i
+	d.i++
+	if d.i > len(d.in) {
+		corrupted()
+	}
+	return d.in[i]
+}
+
+func (d *decoder) readBytes(length int32) []byte {
+	if length < 0 {
+		corrupted()
+	}
+	start := d.i
+	d.i += int(length)
+	if d.i < start || d.i > len(d.in) {
+		corrupted()
+	}
+	return d.in[start : start+int(length)]
+}
diff --git a/switchq/vendor/gopkg.in/mgo.v2/bson/encode.go b/switchq/vendor/gopkg.in/mgo.v2/bson/encode.go
new file mode 100644
index 0000000..add39e8
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/bson/encode.go
@@ -0,0 +1,514 @@
+// BSON library for Go
+//
+// Copyright (c) 2010-2012 - Gustavo Niemeyer <gustavo@niemeyer.net>
+//
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice, this
+//    list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright notice,
+//    this list of conditions and the following disclaimer in the documentation
+//    and/or other materials provided with the distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// gobson - BSON library for Go.
+
+package bson
+
+import (
+	"encoding/json"
+	"fmt"
+	"math"
+	"net/url"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+// --------------------------------------------------------------------------
+// Some internal infrastructure.
+
+var (
+	typeBinary         = reflect.TypeOf(Binary{})
+	typeObjectId       = reflect.TypeOf(ObjectId(""))
+	typeDBPointer      = reflect.TypeOf(DBPointer{"", ObjectId("")})
+	typeSymbol         = reflect.TypeOf(Symbol(""))
+	typeMongoTimestamp = reflect.TypeOf(MongoTimestamp(0))
+	typeOrderKey       = reflect.TypeOf(MinKey)
+	typeDocElem        = reflect.TypeOf(DocElem{})
+	typeRawDocElem     = reflect.TypeOf(RawDocElem{})
+	typeRaw            = reflect.TypeOf(Raw{})
+	typeURL            = reflect.TypeOf(url.URL{})
+	typeTime           = reflect.TypeOf(time.Time{})
+	typeString         = reflect.TypeOf("")
+	typeJSONNumber     = reflect.TypeOf(json.Number(""))
+)
+
+const itoaCacheSize = 32
+
+var itoaCache []string
+
+func init() {
+	itoaCache = make([]string, itoaCacheSize)
+	for i := 0; i != itoaCacheSize; i++ {
+		itoaCache[i] = strconv.Itoa(i)
+	}
+}
+
+func itoa(i int) string {
+	if i < itoaCacheSize {
+		return itoaCache[i]
+	}
+	return strconv.Itoa(i)
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of the document value itself.
+
+type encoder struct {
+	out []byte
+}
+
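+// addDoc marshals v as a complete BSON document, resolving Getter
+// values and pointers first, then writing the length prefix, the
+// elements, and the trailing \x00 terminator.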
+func (e *encoder) addDoc(v reflect.Value) {
+	for {
+		if vi, ok := v.Interface().(Getter); ok {
+			getv, err := vi.GetBSON()
+			if err != nil {
+				panic(err)
+			}
+			v = reflect.ValueOf(getv)
+			continue
+		}
+		if v.Kind() == reflect.Ptr {
+			v = v.Elem()
+			continue
+		}
+		break
+	}
+
+	if v.Type() == typeRaw {
+		raw := v.Interface().(Raw)
+		if raw.Kind != 0x03 && raw.Kind != 0x00 {
+			panic("Attempted to marshal Raw kind " + strconv.Itoa(int(raw.Kind)) + " as a document")
+		}
+		if len(raw.Data) == 0 {
+			panic("Attempted to marshal empty Raw document")
+		}
+		e.addBytes(raw.Data...)
+		return
+	}
+
+	start := e.reserveInt32()
+
+	switch v.Kind() {
+	case reflect.Map:
+		e.addMap(v)
+	case reflect.Struct:
+		e.addStruct(v)
+	case reflect.Array, reflect.Slice:
+		e.addSlice(v)
+	default:
+		panic("Can't marshal " + v.Type().String() + " as a BSON document")
+	}
+
+	e.addBytes(0)
+	e.setInt32(start, int32(len(e.out)-start))
+}
+
+func (e *encoder) addMap(v reflect.Value) {
+	for _, k := range v.MapKeys() {
+		e.addElem(k.String(), v.MapIndex(k), false)
+	}
+}
+
+func (e *encoder) addStruct(v reflect.Value) {
+	sinfo, err := getStructInfo(v.Type())
+	if err != nil {
+		panic(err)
+	}
+	var value reflect.Value
+	if sinfo.InlineMap >= 0 {
+		m := v.Field(sinfo.InlineMap)
+		if m.Len() > 0 {
+			for _, k := range m.MapKeys() {
+				ks := k.String()
+				if _, found := sinfo.FieldsMap[ks]; found {
+					panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", ks))
+				}
+				e.addElem(ks, m.MapIndex(k), false)
+			}
+		}
+	}
+	for _, info := range sinfo.FieldsList {
+		if info.Inline == nil {
+			value = v.Field(info.Num)
+		} else {
+			value = v.FieldByIndex(info.Inline)
+		}
+		if info.OmitEmpty && isZero(value) {
+			continue
+		}
+		e.addElem(info.Key, value, info.MinSize)
+	}
+}
+
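+// isZero reports whether v holds its type's zero value; for the
+// omitempty flag it also treats empty slices and maps, the zero
+// time.Time, and all-zero structs as empty.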
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Ptr, reflect.Interface:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		if vt == typeTime {
+			return v.Interface().(time.Time).IsZero()
+		}
+		for i := 0; i < v.NumField(); i++ {
+			if vt.Field(i).PkgPath != "" && !vt.Field(i).Anonymous {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+func (e *encoder) addSlice(v reflect.Value) {
+	vi := v.Interface()
+	if d, ok := vi.(D); ok {
+		for _, elem := range d {
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	if d, ok := vi.(RawD); ok {
+		for _, elem := range d {
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	l := v.Len()
+	et := v.Type().Elem()
+	if et == typeDocElem {
+		for i := 0; i < l; i++ {
+			elem := v.Index(i).Interface().(DocElem)
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	if et == typeRawDocElem {
+		for i := 0; i < l; i++ {
+			elem := v.Index(i).Interface().(RawDocElem)
+			e.addElem(elem.Name, reflect.ValueOf(elem.Value), false)
+		}
+		return
+	}
+	for i := 0; i < l; i++ {
+		e.addElem(itoa(i), v.Index(i), false)
+	}
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of elements in a document.
+
+func (e *encoder) addElemName(kind byte, name string) {
+	e.addBytes(kind)
+	e.addBytes([]byte(name)...)
+	e.addBytes(0)
+}
+
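+// addElem marshals one document element: the kind byte and name via
+// addElemName, then the value encoded according to its Go type.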
+func (e *encoder) addElem(name string, v reflect.Value, minSize bool) {
+
+	if !v.IsValid() {
+		e.addElemName(0x0A, name)
+		return
+	}
+
+	if getter, ok := v.Interface().(Getter); ok {
+		getv, err := getter.GetBSON()
+		if err != nil {
+			panic(err)
+		}
+		e.addElem(name, reflect.ValueOf(getv), minSize)
+		return
+	}
+
+	switch v.Kind() {
+
+	case reflect.Interface:
+		e.addElem(name, v.Elem(), minSize)
+
+	case reflect.Ptr:
+		e.addElem(name, v.Elem(), minSize)
+
+	case reflect.String:
+		s := v.String()
+		switch v.Type() {
+		case typeObjectId:
+			if len(s) != 12 {
+				panic("ObjectIDs must be exactly 12 bytes long (got " +
+					strconv.Itoa(len(s)) + ")")
+			}
+			e.addElemName(0x07, name)
+			e.addBytes([]byte(s)...)
+		case typeSymbol:
+			e.addElemName(0x0E, name)
+			e.addStr(s)
+		case typeJSONNumber:
+			n := v.Interface().(json.Number)
+			if i, err := n.Int64(); err == nil {
+				e.addElemName(0x12, name)
+				e.addInt64(i)
+			} else if f, err := n.Float64(); err == nil {
+				e.addElemName(0x01, name)
+				e.addFloat64(f)
+			} else {
+				panic("failed to convert json.Number to a number: " + s)
+			}
+		default:
+			e.addElemName(0x02, name)
+			e.addStr(s)
+		}
+
+	case reflect.Float32, reflect.Float64:
+		e.addElemName(0x01, name)
+		e.addFloat64(v.Float())
+
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		u := v.Uint()
+		if int64(u) < 0 {
+			panic("BSON has no uint64 type, and value is too large to fit correctly in an int64")
+		} else if u <= math.MaxInt32 && (minSize || v.Kind() <= reflect.Uint32) {
+			e.addElemName(0x10, name)
+			e.addInt32(int32(u))
+		} else {
+			e.addElemName(0x12, name)
+			e.addInt64(int64(u))
+		}
+
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch v.Type() {
+		case typeMongoTimestamp:
+			e.addElemName(0x11, name)
+			e.addInt64(v.Int())
+
+		case typeOrderKey:
+			if v.Int() == int64(MaxKey) {
+				e.addElemName(0x7F, name)
+			} else {
+				e.addElemName(0xFF, name)
+			}
+
+		default:
+			i := v.Int()
+			if (minSize || v.Type().Kind() != reflect.Int64) && i >= math.MinInt32 && i <= math.MaxInt32 {
+				// It fits into an int32, encode as such.
+				e.addElemName(0x10, name)
+				e.addInt32(int32(i))
+			} else {
+				e.addElemName(0x12, name)
+				e.addInt64(i)
+			}
+		}
+
+	case reflect.Bool:
+		e.addElemName(0x08, name)
+		if v.Bool() {
+			e.addBytes(1)
+		} else {
+			e.addBytes(0)
+		}
+
+	case reflect.Map:
+		e.addElemName(0x03, name)
+		e.addDoc(v)
+
+	case reflect.Slice:
+		vt := v.Type()
+		et := vt.Elem()
+		if et.Kind() == reflect.Uint8 {
+			e.addElemName(0x05, name)
+			e.addBinary(0x00, v.Bytes())
+		} else if et == typeDocElem || et == typeRawDocElem {
+			e.addElemName(0x03, name)
+			e.addDoc(v)
+		} else {
+			e.addElemName(0x04, name)
+			e.addDoc(v)
+		}
+
+	case reflect.Array:
+		et := v.Type().Elem()
+		if et.Kind() == reflect.Uint8 {
+			e.addElemName(0x05, name)
+			if v.CanAddr() {
+				e.addBinary(0x00, v.Slice(0, v.Len()).Interface().([]byte))
+			} else {
+				n := v.Len()
+				e.addInt32(int32(n))
+				e.addBytes(0x00)
+				for i := 0; i < n; i++ {
+					el := v.Index(i)
+					e.addBytes(byte(el.Uint()))
+				}
+			}
+		} else {
+			e.addElemName(0x04, name)
+			e.addDoc(v)
+		}
+
+	case reflect.Struct:
+		switch s := v.Interface().(type) {
+
+		case Raw:
+			kind := s.Kind
+			if kind == 0x00 {
+				kind = 0x03
+			}
+			if len(s.Data) == 0 && kind != 0x06 && kind != 0x0A && kind != 0xFF && kind != 0x7F {
+				panic("Attempted to marshal empty Raw document")
+			}
+			e.addElemName(kind, name)
+			e.addBytes(s.Data...)
+
+		case Binary:
+			e.addElemName(0x05, name)
+			e.addBinary(s.Kind, s.Data)
+
+		case Decimal128:
+			e.addElemName(0x13, name)
+			e.addInt64(int64(s.l))
+			e.addInt64(int64(s.h))
+
+		case DBPointer:
+			e.addElemName(0x0C, name)
+			e.addStr(s.Namespace)
+			if len(s.Id) != 12 {
+				panic("ObjectIDs must be exactly 12 bytes long (got " +
+					strconv.Itoa(len(s.Id)) + ")")
+			}
+			e.addBytes([]byte(s.Id)...)
+
+		case RegEx:
+			e.addElemName(0x0B, name)
+			e.addCStr(s.Pattern)
+			e.addCStr(s.Options)
+
+		case JavaScript:
+			if s.Scope == nil {
+				e.addElemName(0x0D, name)
+				e.addStr(s.Code)
+			} else {
+				e.addElemName(0x0F, name)
+				start := e.reserveInt32()
+				e.addStr(s.Code)
+				e.addDoc(reflect.ValueOf(s.Scope))
+				e.setInt32(start, int32(len(e.out)-start))
+			}
+
+		case time.Time:
+			// MongoDB handles timestamps as milliseconds.
+			e.addElemName(0x09, name)
+			e.addInt64(s.Unix()*1000 + int64(s.Nanosecond()/1e6))
+
+		case url.URL:
+			e.addElemName(0x02, name)
+			e.addStr(s.String())
+
+		case undefined:
+			e.addElemName(0x06, name)
+
+		default:
+			e.addElemName(0x03, name)
+			e.addDoc(v)
+		}
+
+	default:
+		panic("Can't marshal " + v.Type().String() + " in a BSON document")
+	}
+}
+
+// --------------------------------------------------------------------------
+// Marshaling of base types.
+
+func (e *encoder) addBinary(subtype byte, v []byte) {
+	if subtype == 0x02 {
+		// Subtype 0x02 is the obsolete "old binary" format, which wraps the
+		// payload in a redundant inner length prefix. Kept for compatibility.
+		e.addInt32(int32(len(v) + 4))
+		e.addBytes(subtype)
+		e.addInt32(int32(len(v)))
+	} else {
+		e.addInt32(int32(len(v)))
+		e.addBytes(subtype)
+	}
+	e.addBytes(v...)
+}
+
+func (e *encoder) addStr(v string) {
+	e.addInt32(int32(len(v) + 1))
+	e.addCStr(v)
+}
+
+func (e *encoder) addCStr(v string) {
+	e.addBytes([]byte(v)...)
+	e.addBytes(0)
+}
+
+func (e *encoder) reserveInt32() (pos int) {
+	pos = len(e.out)
+	e.addBytes(0, 0, 0, 0)
+	return pos
+}
+
+func (e *encoder) setInt32(pos int, v int32) {
+	e.out[pos+0] = byte(v)
+	e.out[pos+1] = byte(v >> 8)
+	e.out[pos+2] = byte(v >> 16)
+	e.out[pos+3] = byte(v >> 24)
+}
+
+func (e *encoder) addInt32(v int32) {
+	u := uint32(v)
+	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24))
+}
+
+func (e *encoder) addInt64(v int64) {
+	u := uint64(v)
+	e.addBytes(byte(u), byte(u>>8), byte(u>>16), byte(u>>24),
+		byte(u>>32), byte(u>>40), byte(u>>48), byte(u>>56))
+}
+
+func (e *encoder) addFloat64(v float64) {
+	e.addInt64(int64(math.Float64bits(v)))
+}
+
+func (e *encoder) addBytes(v ...byte) {
+	e.out = append(e.out, v...)
+}
diff --git a/switchq/vendor/gopkg.in/mgo.v2/bson/json.go b/switchq/vendor/gopkg.in/mgo.v2/bson/json.go
new file mode 100644
index 0000000..09df826
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/bson/json.go
@@ -0,0 +1,380 @@
+package bson
+
+import (
+	"bytes"
+	"encoding/base64"
+	"fmt"
+	"gopkg.in/mgo.v2/internal/json"
+	"strconv"
+	"time"
+)
+
+// UnmarshalJSON unmarshals a JSON value that may hold non-standard
+// syntax as defined in BSON's extended JSON specification.
+func UnmarshalJSON(data []byte, value interface{}) error {
+	d := json.NewDecoder(bytes.NewBuffer(data))
+	d.Extend(&jsonExt)
+	return d.Decode(value)
+}
+
+// MarshalJSON marshals a JSON value that may hold non-standard
+// syntax as defined in BSON's extended JSON specification.
+func MarshalJSON(value interface{}) ([]byte, error) {
+	var buf bytes.Buffer
+	e := json.NewEncoder(&buf)
+	e.Extend(&jsonExt)
+	err := e.Encode(value)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// jdec is used internally by the JSON decoding functions
+// so they may unmarshal functions without getting into endless
+// recursion due to keyed objects.
+func jdec(data []byte, value interface{}) error {
+	d := json.NewDecoder(bytes.NewBuffer(data))
+	d.Extend(&funcExt)
+	return d.Decode(value)
+}
+
+var jsonExt json.Extension
+var funcExt json.Extension
+
+// TODO
+// - Shell regular expressions ("/regexp/opts")
+
+func init() {
+	jsonExt.DecodeUnquotedKeys(true)
+	jsonExt.DecodeTrailingCommas(true)
+
+	funcExt.DecodeFunc("BinData", "$binaryFunc", "$type", "$binary")
+	jsonExt.DecodeKeyed("$binary", jdecBinary)
+	jsonExt.DecodeKeyed("$binaryFunc", jdecBinary)
+	jsonExt.EncodeType([]byte(nil), jencBinarySlice)
+	jsonExt.EncodeType(Binary{}, jencBinaryType)
+
+	funcExt.DecodeFunc("ISODate", "$dateFunc", "S")
+	funcExt.DecodeFunc("new Date", "$dateFunc", "S")
+	jsonExt.DecodeKeyed("$date", jdecDate)
+	jsonExt.DecodeKeyed("$dateFunc", jdecDate)
+	jsonExt.EncodeType(time.Time{}, jencDate)
+
+	funcExt.DecodeFunc("Timestamp", "$timestamp", "t", "i")
+	jsonExt.DecodeKeyed("$timestamp", jdecTimestamp)
+	jsonExt.EncodeType(MongoTimestamp(0), jencTimestamp)
+
+	funcExt.DecodeConst("undefined", Undefined)
+
+	jsonExt.DecodeKeyed("$regex", jdecRegEx)
+	jsonExt.EncodeType(RegEx{}, jencRegEx)
+
+	funcExt.DecodeFunc("ObjectId", "$oidFunc", "Id")
+	jsonExt.DecodeKeyed("$oid", jdecObjectId)
+	jsonExt.DecodeKeyed("$oidFunc", jdecObjectId)
+	jsonExt.EncodeType(ObjectId(""), jencObjectId)
+
+	funcExt.DecodeFunc("DBRef", "$dbrefFunc", "$ref", "$id")
+	jsonExt.DecodeKeyed("$dbrefFunc", jdecDBRef)
+
+	funcExt.DecodeFunc("NumberLong", "$numberLongFunc", "N")
+	jsonExt.DecodeKeyed("$numberLong", jdecNumberLong)
+	jsonExt.DecodeKeyed("$numberLongFunc", jdecNumberLong)
+	jsonExt.EncodeType(int64(0), jencNumberLong)
+	jsonExt.EncodeType(int(0), jencInt)
+
+	funcExt.DecodeConst("MinKey", MinKey)
+	funcExt.DecodeConst("MaxKey", MaxKey)
+	jsonExt.DecodeKeyed("$minKey", jdecMinKey)
+	jsonExt.DecodeKeyed("$maxKey", jdecMaxKey)
+	jsonExt.EncodeType(orderKey(0), jencMinMaxKey)
+
+	jsonExt.DecodeKeyed("$undefined", jdecUndefined)
+	jsonExt.EncodeType(Undefined, jencUndefined)
+
+	jsonExt.Extend(&funcExt)
+}
+
+func fbytes(format string, args ...interface{}) []byte {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, format, args...)
+	return buf.Bytes()
+}
+
+func jdecBinary(data []byte) (interface{}, error) {
+	var v struct {
+		Binary []byte `json:"$binary"`
+		Type   string `json:"$type"`
+		Func   struct {
+			Binary []byte `json:"$binary"`
+			Type   int64  `json:"$type"`
+		} `json:"$binaryFunc"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+
+	var binData []byte
+	var binKind int64
+	if v.Type == "" && v.Binary == nil {
+		binData = v.Func.Binary
+		binKind = v.Func.Type
+	} else if v.Type == "" {
+		return v.Binary, nil
+	} else {
+		binData = v.Binary
+		binKind, err = strconv.ParseInt(v.Type, 0, 64)
+		if err != nil {
+			binKind = -1
+		}
+	}
+
+	if binKind == 0 {
+		return binData, nil
+	}
+	if binKind < 0 || binKind > 255 {
+		return nil, fmt.Errorf("invalid type in binary object: %s", data)
+	}
+
+	return Binary{Kind: byte(binKind), Data: binData}, nil
+}
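// Illustrative inputs for jdecBinary (examples assumed, not from the source):
//
//	{"$binary":"AQID","$type":"0x80"}  => Binary{Kind: 0x80, Data: []byte{1, 2, 3}}
//	BinData(0x80,"AQID")               => the same, via the $binaryFunc rewrite
//	{"$binary":"AQID"}                 => raw []byte{1, 2, 3} (no $type given)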
+
+func jencBinarySlice(v interface{}) ([]byte, error) {
+	in := v.([]byte)
+	out := make([]byte, base64.StdEncoding.EncodedLen(len(in)))
+	base64.StdEncoding.Encode(out, in)
+	return fbytes(`{"$binary":"%s","$type":"0x0"}`, out), nil
+}
+
+func jencBinaryType(v interface{}) ([]byte, error) {
+	in := v.(Binary)
+	out := make([]byte, base64.StdEncoding.EncodedLen(len(in.Data)))
+	base64.StdEncoding.Encode(out, in.Data)
+	return fbytes(`{"$binary":"%s","$type":"0x%x"}`, out, in.Kind), nil
+}
+
+const jdateFormat = "2006-01-02T15:04:05.999Z"
+
+func jdecDate(data []byte) (interface{}, error) {
+	var v struct {
+		S    string `json:"$date"`
+		Func struct {
+			S string
+		} `json:"$dateFunc"`
+	}
+	_ = jdec(data, &v)
+	if v.S == "" {
+		v.S = v.Func.S
+	}
+	if v.S != "" {
+		for _, format := range []string{jdateFormat, "2006-01-02"} {
+			t, err := time.Parse(format, v.S)
+			if err == nil {
+				return t, nil
+			}
+		}
+		return nil, fmt.Errorf("cannot parse date: %q", v.S)
+	}
+
+	var vn struct {
+		Date struct {
+			N int64 `json:"$numberLong,string"`
+		} `json:"$date"`
+		Func struct {
+			S int64
+		} `json:"$dateFunc"`
+	}
+	err := jdec(data, &vn)
+	if err != nil {
+		return nil, fmt.Errorf("cannot parse date: %q", data)
+	}
+	n := vn.Date.N
+	if n == 0 {
+		n = vn.Func.S
+	}
+	return time.Unix(n/1000, n%1000*1e6).UTC(), nil
+}
+
+func jencDate(v interface{}) ([]byte, error) {
+	t := v.(time.Time)
+	return fbytes(`{"$date":%q}`, t.Format(jdateFormat)), nil
+}
+
+func jdecTimestamp(data []byte) (interface{}, error) {
+	var v struct {
+		Func struct {
+			T int32 `json:"t"`
+			I int32 `json:"i"`
+		} `json:"$timestamp"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	return MongoTimestamp(uint64(v.Func.T)<<32 | uint64(uint32(v.Func.I))), nil
+}
+
+func jencTimestamp(v interface{}) ([]byte, error) {
+	ts := uint64(v.(MongoTimestamp))
+	return fbytes(`{"$timestamp":{"t":%d,"i":%d}}`, ts>>32, uint32(ts)), nil
+}
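// Worked example (values invented): MongoTimestamp packs the seconds and
// ordinal halves as t<<32|i, so {"$timestamp":{"t":100,"i":2}} round-trips
// through MongoTimestamp(100<<32 | 2) == MongoTimestamp(429496729602).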
+
+func jdecRegEx(data []byte) (interface{}, error) {
+	var v struct {
+		Regex   string `json:"$regex"`
+		Options string `json:"$options"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	return RegEx{v.Regex, v.Options}, nil
+}
+
+func jencRegEx(v interface{}) ([]byte, error) {
+	re := v.(RegEx)
+	type regex struct {
+		Regex   string `json:"$regex"`
+		Options string `json:"$options"`
+	}
+	return json.Marshal(regex{re.Pattern, re.Options})
+}
+
+func jdecObjectId(data []byte) (interface{}, error) {
+	var v struct {
+		Id   string `json:"$oid"`
+		Func struct {
+			Id string
+		} `json:"$oidFunc"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if v.Id == "" {
+		v.Id = v.Func.Id
+	}
+	return ObjectIdHex(v.Id), nil
+}
+
+func jencObjectId(v interface{}) ([]byte, error) {
+	return fbytes(`{"$oid":"%s"}`, v.(ObjectId).Hex()), nil
+}
+
+func jdecDBRef(data []byte) (interface{}, error) {
+	// TODO Support unmarshaling $ref and $id into the input value.
+	var v struct {
+		Obj map[string]interface{} `json:"$dbrefFunc"`
+	}
+	// TODO Fix this. Must not be required.
+	v.Obj = make(map[string]interface{})
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	return v.Obj, nil
+}
+
+func jdecNumberLong(data []byte) (interface{}, error) {
+	var v struct {
+		N    int64 `json:"$numberLong,string"`
+		Func struct {
+			N int64 `json:",string"`
+		} `json:"$numberLongFunc"`
+	}
+	var vn struct {
+		N    int64 `json:"$numberLong"`
+		Func struct {
+			N int64
+		} `json:"$numberLongFunc"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		err = jdec(data, &vn)
+		v.N = vn.N
+		v.Func.N = vn.Func.N
+	}
+	if err != nil {
+		return nil, err
+	}
+	if v.N != 0 {
+		return v.N, nil
+	}
+	return v.Func.N, nil
+}
+
+func jencNumberLong(v interface{}) ([]byte, error) {
+	n := v.(int64)
+	f := `{"$numberLong":"%d"}`
+	// Quote values whose magnitude exceeds 2^53, the largest integer a
+	// float64-based JSON parser can represent exactly.
+	if -1<<53 <= n && n <= 1<<53 {
+		f = `{"$numberLong":%d}`
+	}
+	return fbytes(f, n), nil
+}
+
+func jencInt(v interface{}) ([]byte, error) {
+	n := v.(int)
+	f := `{"$numberLong":"%d"}`
+	if -1<<53 <= int64(n) && int64(n) <= 1<<53 {
+		f = `%d`
+	}
+	return fbytes(f, n), nil
+}
+
+func jdecMinKey(data []byte) (interface{}, error) {
+	var v struct {
+		N int64 `json:"$minKey"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if v.N != 1 {
+		return nil, fmt.Errorf("invalid $minKey object: %s", data)
+	}
+	return MinKey, nil
+}
+
+func jdecMaxKey(data []byte) (interface{}, error) {
+	var v struct {
+		N int64 `json:"$maxKey"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if v.N != 1 {
+		return nil, fmt.Errorf("invalid $maxKey object: %s", data)
+	}
+	return MaxKey, nil
+}
+
+func jencMinMaxKey(v interface{}) ([]byte, error) {
+	switch v.(orderKey) {
+	case MinKey:
+		return []byte(`{"$minKey":1}`), nil
+	case MaxKey:
+		return []byte(`{"$maxKey":1}`), nil
+	}
+	panic(fmt.Sprintf("invalid $minKey/$maxKey value: %d", v))
+}
+
+func jdecUndefined(data []byte) (interface{}, error) {
+	var v struct {
+		B bool `json:"$undefined"`
+	}
+	err := jdec(data, &v)
+	if err != nil {
+		return nil, err
+	}
+	if !v.B {
+		return nil, fmt.Errorf("invalid $undefined object: %s", data)
+	}
+	return Undefined, nil
+}
+
+func jencUndefined(v interface{}) ([]byte, error) {
+	return []byte(`{"$undefined":true}`), nil
+}
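A hedged usage sketch of the two public helpers in this file; the import path points at the upstream package rather than this vendored copy, and the literal values are invented for illustration:

package main

import (
	"fmt"

	"gopkg.in/mgo.v2/bson"
)

func main() {
	// Unquoted keys and function-call forms such as ObjectId(...) parse
	// because of the extensions registered in init above.
	in := []byte(`{_id: ObjectId("579212c5fb02e2b84d0be368"), when: ISODate("2017-01-02")}`)
	var doc bson.M
	if err := bson.UnmarshalJSON(in, &doc); err != nil {
		panic(err)
	}
	out, err := bson.MarshalJSON(doc)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", out) // canonical {"$oid":...} and {"$date":...} forms
}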
diff --git a/switchq/vendor/gopkg.in/mgo.v2/internal/json/LICENSE b/switchq/vendor/gopkg.in/mgo.v2/internal/json/LICENSE
new file mode 100644
index 0000000..7448756
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/internal/json/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/switchq/vendor/gopkg.in/mgo.v2/internal/json/decode.go b/switchq/vendor/gopkg.in/mgo.v2/internal/json/decode.go
new file mode 100644
index 0000000..ce7c7d2
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/internal/json/decode.go
@@ -0,0 +1,1685 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Represents JSON data structure using native Go types: booleans, floats,
+// strings, arrays, and maps.
+
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"reflect"
+	"runtime"
+	"strconv"
+	"unicode"
+	"unicode/utf16"
+	"unicode/utf8"
+)
+
+// Unmarshal parses the JSON-encoded data and stores the result
+// in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the encodings that
+// Marshal uses, allocating maps, slices, and pointers as necessary,
+// with the following additional rules:
+//
+// To unmarshal JSON into a pointer, Unmarshal first handles the case of
+// the JSON being the JSON literal null. In that case, Unmarshal sets
+// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into
+// the value pointed at by the pointer. If the pointer is nil, Unmarshal
+// allocates a new value for it to point to.
+//
+// To unmarshal JSON into a struct, Unmarshal matches incoming object
+// keys to the keys used by Marshal (either the struct field name or its tag),
+// preferring an exact match but also accepting a case-insensitive match.
+// Unmarshal will only set exported fields of the struct.
+//
+// To unmarshal JSON into an interface value,
+// Unmarshal stores one of these in the interface value:
+//
+//	bool, for JSON booleans
+//	float64, for JSON numbers
+//	string, for JSON strings
+//	[]interface{}, for JSON arrays
+//	map[string]interface{}, for JSON objects
+//	nil for JSON null
+//
+// To unmarshal a JSON array into a slice, Unmarshal resets the slice length
+// to zero and then appends each element to the slice.
+// As a special case, to unmarshal an empty JSON array into a slice,
+// Unmarshal replaces the slice with a new empty slice.
+//
+// To unmarshal a JSON array into a Go array, Unmarshal decodes
+// JSON array elements into corresponding Go array elements.
+// If the Go array is smaller than the JSON array,
+// the additional JSON array elements are discarded.
+// If the JSON array is smaller than the Go array,
+// the additional Go array elements are set to zero values.
+//
+// To unmarshal a JSON object into a map, Unmarshal first establishes a map to
+// use. If the map is nil, Unmarshal allocates a new map. Otherwise Unmarshal
+// reuses the existing map, keeping existing entries. Unmarshal then stores key-
+// value pairs from the JSON object into the map.  The map's key type must
+// either be a string or implement encoding.TextUnmarshaler.
+//
+// If a JSON value is not appropriate for a given target type,
+// or if a JSON number overflows the target type, Unmarshal
+// skips that field and completes the unmarshaling as best it can.
+// If no more serious errors are encountered, Unmarshal returns
+// an UnmarshalTypeError describing the earliest such error.
+//
+// The JSON null value unmarshals into an interface, map, pointer, or slice
+// by setting that Go value to nil. Because null is often used in JSON to mean
+// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
+// on the value and produces no error.
+//
+// When unmarshaling quoted strings, invalid UTF-8 or
+// invalid UTF-16 surrogate pairs are not treated as an error.
+// Instead, they are replaced by the Unicode replacement
+// character U+FFFD.
+//
+func Unmarshal(data []byte, v interface{}) error {
+	// Check for well-formedness.
+	// Avoids filling out half a data structure
+	// before discovering a JSON syntax error.
+	var d decodeState
+	err := checkValid(data, &d.scan)
+	if err != nil {
+		return err
+	}
+
+	d.init(data)
+	return d.unmarshal(v)
+}
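// Illustrative call of this fork's entry point (assumed; mirrors the
// standard library's encoding/json behavior):
//
//	type point struct {
//		X int `json:"x"`
//	}
//	var p point
//	err := Unmarshal([]byte(`{"x": 1}`), &p) // p.X == 1, err == nil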
+
+// Unmarshaler is the interface implemented by types
+// that can unmarshal a JSON description of themselves.
+// The input can be assumed to be a valid encoding of
+// a JSON value. UnmarshalJSON must copy the JSON data
+// if it wishes to retain the data after returning.
+type Unmarshaler interface {
+	UnmarshalJSON([]byte) error
+}
+
+// An UnmarshalTypeError describes a JSON value that was
+// not appropriate for a value of a specific Go type.
+type UnmarshalTypeError struct {
+	Value  string       // description of JSON value - "bool", "array", "number -5"
+	Type   reflect.Type // type of Go value it could not be assigned to
+	Offset int64        // error occurred after reading Offset bytes
+}
+
+func (e *UnmarshalTypeError) Error() string {
+	return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String()
+}
+
+// An UnmarshalFieldError describes a JSON object key that
+// led to an unexported (and therefore unwritable) struct field.
+// (No longer used; kept for compatibility.)
+type UnmarshalFieldError struct {
+	Key   string
+	Type  reflect.Type
+	Field reflect.StructField
+}
+
+func (e *UnmarshalFieldError) Error() string {
+	return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String()
+}
+
+// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal.
+// (The argument to Unmarshal must be a non-nil pointer.)
+type InvalidUnmarshalError struct {
+	Type reflect.Type
+}
+
+func (e *InvalidUnmarshalError) Error() string {
+	if e.Type == nil {
+		return "json: Unmarshal(nil)"
+	}
+
+	if e.Type.Kind() != reflect.Ptr {
+		return "json: Unmarshal(non-pointer " + e.Type.String() + ")"
+	}
+	return "json: Unmarshal(nil " + e.Type.String() + ")"
+}
+
+func (d *decodeState) unmarshal(v interface{}) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if _, ok := r.(runtime.Error); ok {
+				panic(r)
+			}
+			err = r.(error)
+		}
+	}()
+
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr || rv.IsNil() {
+		return &InvalidUnmarshalError{reflect.TypeOf(v)}
+	}
+
+	d.scan.reset()
+	// We decode rv not rv.Elem because the Unmarshaler interface
+	// test must be applied at the top level of the value.
+	d.value(rv)
+	return d.savedError
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+	return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+	return strconv.ParseInt(string(n), 10, 64)
+}
+
+// isValidNumber reports whether s is a valid JSON number literal.
+func isValidNumber(s string) bool {
+	// This function implements the JSON numbers grammar.
+	// See https://tools.ietf.org/html/rfc7159#section-6
+	// and http://json.org/number.gif
+
+	if s == "" {
+		return false
+	}
+
+	// Optional -
+	if s[0] == '-' {
+		s = s[1:]
+		if s == "" {
+			return false
+		}
+	}
+
+	// Digits
+	switch {
+	default:
+		return false
+
+	case s[0] == '0':
+		s = s[1:]
+
+	case '1' <= s[0] && s[0] <= '9':
+		s = s[1:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+		}
+	}
+
+	// . followed by 1 or more digits.
+	if len(s) >= 2 && s[0] == '.' && '0' <= s[1] && s[1] <= '9' {
+		s = s[2:]
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+		}
+	}
+
+	// e or E followed by an optional - or + and
+	// 1 or more digits.
+	if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') {
+		s = s[1:]
+		if s[0] == '+' || s[0] == '-' {
+			s = s[1:]
+			if s == "" {
+				return false
+			}
+		}
+		for len(s) > 0 && '0' <= s[0] && s[0] <= '9' {
+			s = s[1:]
+		}
+	}
+
+	// Make sure we are at the end.
+	return s == ""
+}
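// Spot checks against the grammar above (cases assumed, not from the source):
//
//	isValidNumber("0")      == true
//	isValidNumber("-1.2e3") == true
//	isValidNumber("01")     == false // a leading zero must stand alone
//	isValidNumber(".5")     == false // the fraction needs a leading digit
//	isValidNumber("1.")     == false // '.' must be followed by a digit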
+
+// decodeState represents the state while decoding a JSON value.
+type decodeState struct {
+	data       []byte
+	off        int // read offset in data
+	scan       scanner
+	nextscan   scanner // for calls to nextValue
+	savedError error
+	useNumber  bool
+	ext        Extension
+}
+
+// errPhase is used for errors that should not happen unless
+// there is a bug in the JSON decoder or something is editing
+// the data slice while the decoder executes.
+var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?")
+
+func (d *decodeState) init(data []byte) *decodeState {
+	d.data = data
+	d.off = 0
+	d.savedError = nil
+	return d
+}
+
+// error aborts the decoding by panicking with err.
+func (d *decodeState) error(err error) {
+	panic(err)
+}
+
+// saveError saves the first err it is called with,
+// for reporting at the end of the unmarshal.
+func (d *decodeState) saveError(err error) {
+	if d.savedError == nil {
+		d.savedError = err
+	}
+}
+
+// next cuts off and returns the next full JSON value in d.data[d.off:].
+// The next value is known to be an object or array, not a literal.
+func (d *decodeState) next() []byte {
+	c := d.data[d.off]
+	item, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+	if err != nil {
+		d.error(err)
+	}
+	d.off = len(d.data) - len(rest)
+
+	// Our scanner has seen the opening brace/bracket
+	// and thinks we're still in the middle of the object.
+	// Invent a closing brace/bracket to get it out.
+	if c == '{' {
+		d.scan.step(&d.scan, '}')
+	} else if c == '[' {
+		d.scan.step(&d.scan, ']')
+	} else {
+		// Was inside a function name. Get out of it.
+		d.scan.step(&d.scan, '(')
+		d.scan.step(&d.scan, ')')
+	}
+
+	return item
+}
+
+// scanWhile processes bytes in d.data[d.off:] until it
+// receives a scan code not equal to op.
+// It updates d.off and returns the new scan code.
+func (d *decodeState) scanWhile(op int) int {
+	var newOp int
+	for {
+		if d.off >= len(d.data) {
+			newOp = d.scan.eof()
+			d.off = len(d.data) + 1 // mark processed EOF with len+1
+		} else {
+			c := d.data[d.off]
+			d.off++
+			newOp = d.scan.step(&d.scan, c)
+		}
+		if newOp != op {
+			break
+		}
+	}
+	return newOp
+}
+
+// value decodes a JSON value from d.data[d.off:] into the value.
+// it updates d.off to point past the decoded value.
+func (d *decodeState) value(v reflect.Value) {
+	if !v.IsValid() {
+		_, rest, err := nextValue(d.data[d.off:], &d.nextscan)
+		if err != nil {
+			d.error(err)
+		}
+		d.off = len(d.data) - len(rest)
+
+		// d.scan thinks we're still at the beginning of the item.
+		// Feed in an empty string - the shortest, simplest value -
+		// so that it knows we got to the end of the value.
+		if d.scan.redo {
+			// rewind.
+			d.scan.redo = false
+			d.scan.step = stateBeginValue
+		}
+		d.scan.step(&d.scan, '"')
+		d.scan.step(&d.scan, '"')
+
+		n := len(d.scan.parseState)
+		if n > 0 && d.scan.parseState[n-1] == parseObjectKey {
+			// d.scan thinks we just read an object key; finish the object
+			d.scan.step(&d.scan, ':')
+			d.scan.step(&d.scan, '"')
+			d.scan.step(&d.scan, '"')
+			d.scan.step(&d.scan, '}')
+		}
+
+		return
+	}
+
+	switch op := d.scanWhile(scanSkipSpace); op {
+	default:
+		d.error(errPhase)
+
+	case scanBeginArray:
+		d.array(v)
+
+	case scanBeginObject:
+		d.object(v)
+
+	case scanBeginLiteral:
+		d.literal(v)
+
+	case scanBeginName:
+		d.name(v)
+	}
+}
+
+type unquotedValue struct{}
+
+// valueQuoted is like value but decodes a
+// quoted string literal or literal null into an interface value.
+// If it finds anything other than a quoted string literal or null,
+// valueQuoted returns unquotedValue{}.
+func (d *decodeState) valueQuoted() interface{} {
+	switch op := d.scanWhile(scanSkipSpace); op {
+	default:
+		d.error(errPhase)
+
+	case scanBeginArray:
+		d.array(reflect.Value{})
+
+	case scanBeginObject:
+		d.object(reflect.Value{})
+
+	case scanBeginName:
+		switch v := d.nameInterface().(type) {
+		case nil, string:
+			return v
+		}
+
+	case scanBeginLiteral:
+		switch v := d.literalInterface().(type) {
+		case nil, string:
+			return v
+		}
+	}
+	return unquotedValue{}
+}
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+	// If v is a named type and is addressable,
+	// start with its address, so that if the type has pointer methods,
+	// we find them.
+	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+		v = v.Addr()
+	}
+	for {
+		// Load value from interface, but only if the result will be
+		// usefully addressable.
+		if v.Kind() == reflect.Interface && !v.IsNil() {
+			e := v.Elem()
+			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+				v = e
+				continue
+			}
+		}
+
+		if v.Kind() != reflect.Ptr {
+			break
+		}
+
+		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+			break
+		}
+		if v.IsNil() {
+			v.Set(reflect.New(v.Type().Elem()))
+		}
+		if v.Type().NumMethod() > 0 {
+			if u, ok := v.Interface().(Unmarshaler); ok {
+				return u, nil, v
+			}
+			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+				return nil, u, v
+			}
+		}
+		v = v.Elem()
+	}
+	return nil, nil, v
+}
+
+// array consumes an array from d.data[d.off-1:], decoding into the value v.
+// the first byte of the array ('[') has been read already.
+func (d *decodeState) array(v reflect.Value) {
+	// Check for unmarshaler.
+	u, ut, pv := d.indirect(v, false)
+	if u != nil {
+		d.off--
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+		d.off--
+		d.next()
+		return
+	}
+
+	v = pv
+
+	// Check type of target.
+	switch v.Kind() {
+	case reflect.Interface:
+		if v.NumMethod() == 0 {
+			// Decoding into nil interface?  Switch to non-reflect code.
+			v.Set(reflect.ValueOf(d.arrayInterface()))
+			return
+		}
+		// Otherwise it's invalid.
+		fallthrough
+	default:
+		d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)})
+		d.off--
+		d.next()
+		return
+	case reflect.Array:
+	case reflect.Slice:
+		break
+	}
+
+	i := 0
+	for {
+		// Look ahead for ] - can only happen on first iteration.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		// Get element of array, growing if necessary.
+		if v.Kind() == reflect.Slice {
+			// Grow slice if necessary
+			if i >= v.Cap() {
+				newcap := v.Cap() + v.Cap()/2
+				if newcap < 4 {
+					newcap = 4
+				}
+				newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+				reflect.Copy(newv, v)
+				v.Set(newv)
+			}
+			if i >= v.Len() {
+				v.SetLen(i + 1)
+			}
+		}
+
+		if i < v.Len() {
+			// Decode into element.
+			d.value(v.Index(i))
+		} else {
+			// Ran out of fixed array: skip.
+			d.value(reflect.Value{})
+		}
+		i++
+
+		// Next token must be , or ].
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			break
+		}
+		if op != scanArrayValue {
+			d.error(errPhase)
+		}
+	}
+
+	if i < v.Len() {
+		if v.Kind() == reflect.Array {
+			// Array. Zero the rest.
+			z := reflect.Zero(v.Type().Elem())
+			for ; i < v.Len(); i++ {
+				v.Index(i).Set(z)
+			}
+		} else {
+			v.SetLen(i)
+		}
+	}
+	if i == 0 && v.Kind() == reflect.Slice {
+		v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+	}
+}
+
+var nullLiteral = []byte("null")
+var textUnmarshalerType = reflect.TypeOf(new(encoding.TextUnmarshaler)).Elem()
+
+// object consumes an object from d.data[d.off-1:], decoding into the value v.
+// the first byte ('{') of the object has been read already.
+func (d *decodeState) object(v reflect.Value) {
+	// Check for unmarshaler.
+	u, ut, pv := d.indirect(v, false)
+	if d.storeKeyed(pv) {
+		return
+	}
+	if u != nil {
+		d.off--
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+		d.off--
+		d.next() // skip over { } in input
+		return
+	}
+	v = pv
+
+	// Decoding into nil interface?  Switch to non-reflect code.
+	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+		v.Set(reflect.ValueOf(d.objectInterface()))
+		return
+	}
+
+	// Check type of target:
+	//   struct or
+	//   map[string]T or map[encoding.TextUnmarshaler]T
+	switch v.Kind() {
+	case reflect.Map:
+		// Map key must either have string kind or be an encoding.TextUnmarshaler.
+		t := v.Type()
+		if t.Key().Kind() != reflect.String &&
+			!reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
+			d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+			d.off--
+			d.next() // skip over { } in input
+			return
+		}
+		if v.IsNil() {
+			v.Set(reflect.MakeMap(t))
+		}
+	case reflect.Struct:
+
+	default:
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+		d.off--
+		d.next() // skip over { } in input
+		return
+	}
+
+	var mapElem reflect.Value
+
+	empty := true
+	for {
+		// Read opening " of string key or closing }.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			if !empty && !d.ext.trailingCommas {
+				d.syntaxError("beginning of object key string")
+			}
+			break
+		}
+		empty = false
+		if op == scanBeginName {
+			if !d.ext.unquotedKeys {
+				d.syntaxError("beginning of object key string")
+			}
+		} else if op != scanBeginLiteral {
+			d.error(errPhase)
+		}
+		unquotedKey := op == scanBeginName
+
+		// Read key.
+		start := d.off - 1
+		op = d.scanWhile(scanContinue)
+		item := d.data[start : d.off-1]
+		var key []byte
+		if unquotedKey {
+			key = item
+			// TODO Fix code below to quote item when necessary.
+		} else {
+			var ok bool
+			key, ok = unquoteBytes(item)
+			if !ok {
+				d.error(errPhase)
+			}
+		}
+
+		// Figure out field corresponding to key.
+		var subv reflect.Value
+		destring := false // whether the value is wrapped in a string to be decoded first
+
+		if v.Kind() == reflect.Map {
+			elemType := v.Type().Elem()
+			if !mapElem.IsValid() {
+				mapElem = reflect.New(elemType).Elem()
+			} else {
+				mapElem.Set(reflect.Zero(elemType))
+			}
+			subv = mapElem
+		} else {
+			var f *field
+			fields := cachedTypeFields(v.Type())
+			for i := range fields {
+				ff := &fields[i]
+				if bytes.Equal(ff.nameBytes, key) {
+					f = ff
+					break
+				}
+				if f == nil && ff.equalFold(ff.nameBytes, key) {
+					f = ff
+				}
+			}
+			if f != nil {
+				subv = v
+				destring = f.quoted
+				for _, i := range f.index {
+					if subv.Kind() == reflect.Ptr {
+						if subv.IsNil() {
+							subv.Set(reflect.New(subv.Type().Elem()))
+						}
+						subv = subv.Elem()
+					}
+					subv = subv.Field(i)
+				}
+			}
+		}
+
+		// Read : before value.
+		if op == scanSkipSpace {
+			op = d.scanWhile(scanSkipSpace)
+		}
+		if op != scanObjectKey {
+			d.error(errPhase)
+		}
+
+		// Read value.
+		if destring {
+			switch qv := d.valueQuoted().(type) {
+			case nil:
+				d.literalStore(nullLiteral, subv, false)
+			case string:
+				d.literalStore([]byte(qv), subv, true)
+			default:
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+			}
+		} else {
+			d.value(subv)
+		}
+
+		// Write value back to map;
+		// if using struct, subv points into struct already.
+		if v.Kind() == reflect.Map {
+			kt := v.Type().Key()
+			var kv reflect.Value
+			switch {
+			case kt.Kind() == reflect.String:
+				kv = reflect.ValueOf(key).Convert(v.Type().Key())
+			case reflect.PtrTo(kt).Implements(textUnmarshalerType):
+				kv = reflect.New(v.Type().Key())
+				d.literalStore(item, kv, true)
+				kv = kv.Elem()
+			default:
+				panic("json: Unexpected key type") // should never occur
+			}
+			v.SetMapIndex(kv, subv)
+		}
+
+		// Next token must be , or }.
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			break
+		}
+		if op != scanObjectValue {
+			d.error(errPhase)
+		}
+	}
+}
+
+// isNull returns whether there's a null literal at the provided offset.
+func (d *decodeState) isNull(off int) bool {
+	if off+4 >= len(d.data) || d.data[off] != 'n' || d.data[off+1] != 'u' || d.data[off+2] != 'l' || d.data[off+3] != 'l' {
+		return false
+	}
+	d.nextscan.reset()
+	for i, c := range d.data[off:] {
+		if i > 4 {
+			return false
+		}
+		switch d.nextscan.step(&d.nextscan, c) {
+		case scanContinue, scanBeginName:
+			continue
+		}
+		break
+	}
+	return true
+}
+
+// name consumes a const or function from d.data[d.off-1:], decoding into the value v.
+// the first byte of the function name has been read already.
+func (d *decodeState) name(v reflect.Value) {
+	if d.isNull(d.off-1) {
+		d.literal(v)
+		return
+	}
+
+	// Check for unmarshaler.
+	u, ut, pv := d.indirect(v, false)
+	if d.storeKeyed(pv) {
+		return
+	}
+	if u != nil {
+		d.off--
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+		d.off--
+		d.next() // skip over function in input
+		return
+	}
+	v = pv
+
+	// Decoding into nil interface?  Switch to non-reflect code.
+	if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+		out := d.nameInterface()
+		if out == nil {
+			v.Set(reflect.Zero(v.Type()))
+		} else {
+			v.Set(reflect.ValueOf(out))
+		}
+		return
+	}
+
+	nameStart := d.off - 1
+
+	op := d.scanWhile(scanContinue)
+
+	name := d.data[nameStart : d.off-1]
+	if op != scanParam {
+		// Back up so the byte just read is consumed next.
+		d.off--
+		d.scan.undo(op)
+		if l, ok := d.convertLiteral(name); ok {
+			d.storeValue(v, l)
+			return
+		}
+		d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)})
+	}
+
+	funcName := string(name)
+	funcData := d.ext.funcs[funcName]
+	if funcData.key == "" {
+		d.error(fmt.Errorf("json: unknown function %q", funcName))
+	}
+
+	// Check type of target:
+	//   struct or
+	//   map[string]T or map[encoding.TextUnmarshaler]T
+	switch v.Kind() {
+	case reflect.Map:
+		// Map key must either have string kind or be an encoding.TextUnmarshaler.
+		t := v.Type()
+		if t.Key().Kind() != reflect.String &&
+			!reflect.PtrTo(t.Key()).Implements(textUnmarshalerType) {
+			d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+			d.off--
+			d.next() // skip over { } in input
+			return
+		}
+		if v.IsNil() {
+			v.Set(reflect.MakeMap(t))
+		}
+	case reflect.Struct:
+
+	default:
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+		d.off--
+		d.next() // skip over { } in input
+		return
+	}
+
+	// TODO Fix case of func field as map.
+	//topv := v
+
+	// Figure out field corresponding to function.
+	key := []byte(funcData.key)
+	if v.Kind() == reflect.Map {
+		elemType := v.Type().Elem()
+		v = reflect.New(elemType).Elem()
+	} else {
+		var f *field
+		fields := cachedTypeFields(v.Type())
+		for i := range fields {
+			ff := &fields[i]
+			if bytes.Equal(ff.nameBytes, key) {
+				f = ff
+				break
+			}
+			if f == nil && ff.equalFold(ff.nameBytes, key) {
+				f = ff
+			}
+		}
+		if f != nil {
+			for _, i := range f.index {
+				if v.Kind() == reflect.Ptr {
+					if v.IsNil() {
+						v.Set(reflect.New(v.Type().Elem()))
+					}
+					v = v.Elem()
+				}
+				v = v.Field(i)
+			}
+			if v.Kind() == reflect.Ptr {
+				if v.IsNil() {
+					v.Set(reflect.New(v.Type().Elem()))
+				}
+				v = v.Elem()
+			}
+		}
+	}
+
+	// Check for unmarshaler on func field itself.
+	u, ut, pv = d.indirect(v, false)
+	if u != nil {
+		d.off = nameStart
+		err := u.UnmarshalJSON(d.next())
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+
+	var mapElem reflect.Value
+
+	// Parse function arguments.
+	for i := 0; ; i++ {
+		// closing ) - can only happen on first iteration.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndParams {
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		if i >= len(funcData.args) {
+			d.error(fmt.Errorf("json: too many arguments for function %s", funcName))
+		}
+		key := []byte(funcData.args[i])
+
+		// Figure out field corresponding to key.
+		var subv reflect.Value
+		destring := false // whether the value is wrapped in a string to be decoded first
+
+		if v.Kind() == reflect.Map {
+			elemType := v.Type().Elem()
+			if !mapElem.IsValid() {
+				mapElem = reflect.New(elemType).Elem()
+			} else {
+				mapElem.Set(reflect.Zero(elemType))
+			}
+			subv = mapElem
+		} else {
+			var f *field
+			fields := cachedTypeFields(v.Type())
+			for i := range fields {
+				ff := &fields[i]
+				if bytes.Equal(ff.nameBytes, key) {
+					f = ff
+					break
+				}
+				if f == nil && ff.equalFold(ff.nameBytes, key) {
+					f = ff
+				}
+			}
+			if f != nil {
+				subv = v
+				destring = f.quoted
+				for _, i := range f.index {
+					if subv.Kind() == reflect.Ptr {
+						if subv.IsNil() {
+							subv.Set(reflect.New(subv.Type().Elem()))
+						}
+						subv = subv.Elem()
+					}
+					subv = subv.Field(i)
+				}
+			}
+		}
+
+		// Read value.
+		if destring {
+			switch qv := d.valueQuoted().(type) {
+			case nil:
+				d.literalStore(nullLiteral, subv, false)
+			case string:
+				d.literalStore([]byte(qv), subv, true)
+			default:
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type()))
+			}
+		} else {
+			d.value(subv)
+		}
+
+		// Write value back to map;
+		// if using struct, subv points into struct already.
+		if v.Kind() == reflect.Map {
+			kt := v.Type().Key()
+			var kv reflect.Value
+			switch {
+			case kt.Kind() == reflect.String:
+				kv = reflect.ValueOf(key).Convert(v.Type().Key())
+			case reflect.PtrTo(kt).Implements(textUnmarshalerType):
+				kv = reflect.New(v.Type().Key())
+				d.literalStore(key, kv, true)
+				kv = kv.Elem()
+			default:
+				panic("json: Unexpected key type") // should never occur
+			}
+			v.SetMapIndex(kv, subv)
+		}
+
+		// Next token must be , or ).
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndParams {
+			break
+		}
+		if op != scanParam {
+			d.error(errPhase)
+		}
+	}
+}
+
+// keyed attempts to decode an object or function using a keyed doc extension,
+// and returns the value and true on success, or nil and false otherwise.
+func (d *decodeState) keyed() (interface{}, bool) {
+	if len(d.ext.keyed) == 0 {
+		return nil, false
+	}
+
+	unquote := false
+
+	// Look-ahead first key to check for a keyed document extension.
+	d.nextscan.reset()
+	var start, end int
+	for i, c := range d.data[d.off-1:] {
+		switch op := d.nextscan.step(&d.nextscan, c); op {
+		case scanSkipSpace, scanContinue, scanBeginObject:
+			continue
+		case scanBeginLiteral, scanBeginName:
+			unquote = op == scanBeginLiteral
+			start = i
+			continue
+		}
+		end = i
+		break
+	}
+
+	name := d.data[d.off-1+start : d.off-1+end]
+
+	var key []byte
+	var ok bool
+	if unquote {
+		key, ok = unquoteBytes(name)
+		if !ok {
+			d.error(errPhase)
+		}
+	} else {
+		funcData, ok := d.ext.funcs[string(name)]
+		if !ok {
+			return nil, false
+		}
+		key = []byte(funcData.key)
+	}
+
+	decode, ok := d.ext.keyed[string(key)]
+	if !ok {
+		return nil, false
+	}
+
+	d.off--
+	out, err := decode(d.next())
+	if err != nil {
+		d.error(err)
+	}
+	return out, true
+}
+
+func (d *decodeState) storeKeyed(v reflect.Value) bool {
+	keyed, ok := d.keyed()
+	if !ok {
+		return false
+	}
+	d.storeValue(v, keyed)
+	return true
+}
+
+var (
+	trueBytes  = []byte("true")
+	falseBytes = []byte("false")
+	nullBytes  = []byte("null")
+)
+
+func (d *decodeState) storeValue(v reflect.Value, from interface{}) {
+	switch from {
+	case nil:
+		d.literalStore(nullBytes, v, false)
+		return
+	case true:
+		d.literalStore(trueBytes, v, false)
+		return
+	case false:
+		d.literalStore(falseBytes, v, false)
+		return
+	}
+	fromv := reflect.ValueOf(from)
+	for fromv.Kind() == reflect.Ptr && !fromv.IsNil() {
+		fromv = fromv.Elem()
+	}
+	fromt := fromv.Type()
+	for v.Kind() == reflect.Ptr && !v.IsNil() {
+		v = v.Elem()
+	}
+	vt := v.Type()
+	if fromt.AssignableTo(vt) {
+		v.Set(fromv)
+	} else if fromt.ConvertibleTo(vt) {
+		v.Set(fromv.Convert(vt))
+	} else {
+		d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)})
+	}
+}
+
+func (d *decodeState) convertLiteral(name []byte) (interface{}, bool) {
+	if len(name) == 0 {
+		return nil, false
+	}
+	switch name[0] {
+	case 't':
+		if bytes.Equal(name, trueBytes) {
+			return true, true
+		}
+	case 'f':
+		if bytes.Equal(name, falseBytes) {
+			return false, true
+		}
+	case 'n':
+		if bytes.Equal(name, nullBytes) {
+			return nil, true
+		}
+	}
+	if l, ok := d.ext.consts[string(name)]; ok {
+		return l, true
+	}
+	return nil, false
+}
+
+// literal consumes a literal from d.data[d.off-1:], decoding into the value v.
+// The first byte of the literal has been read already
+// (that's how the caller knows it's a literal).
+func (d *decodeState) literal(v reflect.Value) {
+	// All bytes inside literal return scanContinue op code.
+	start := d.off - 1
+	op := d.scanWhile(scanContinue)
+
+	// Scan read one byte too far; back up.
+	d.off--
+	d.scan.undo(op)
+
+	d.literalStore(d.data[start:d.off], v, false)
+}
+
+// convertNumber converts the number literal s to a float64 or a Number
+// depending on the setting of d.useNumber.
+func (d *decodeState) convertNumber(s string) (interface{}, error) {
+	if d.useNumber {
+		return Number(s), nil
+	}
+	f, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)}
+	}
+	return f, nil
+}
+
+var numberType = reflect.TypeOf(Number(""))
+
+// literalStore decodes a literal stored in item into v.
+//
+// fromQuoted indicates whether this literal came from unwrapping a
+// string from the ",string" struct tag option. this is used only to
+// produce more helpful error messages.
+func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) {
+	// Check for unmarshaler.
+	if len(item) == 0 {
+		// Empty string given.
+		d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+		return
+	}
+	wantptr := item[0] == 'n' // null
+	u, ut, pv := d.indirect(v, wantptr)
+	if u != nil {
+		err := u.UnmarshalJSON(item)
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+	if ut != nil {
+		if item[0] != '"' {
+			if fromQuoted {
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+			}
+			return
+		}
+		s, ok := unquoteBytes(item)
+		if !ok {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		err := ut.UnmarshalText(s)
+		if err != nil {
+			d.error(err)
+		}
+		return
+	}
+
+	v = pv
+
+	switch c := item[0]; c {
+	case 'n': // null
+		switch v.Kind() {
+		case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
+			v.Set(reflect.Zero(v.Type()))
+			// otherwise, ignore null for primitives/string
+		}
+	case 't', 'f': // true, false
+		value := c == 't'
+		switch v.Kind() {
+		default:
+			if fromQuoted {
+				d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+			}
+		case reflect.Bool:
+			v.SetBool(value)
+		case reflect.Interface:
+			if v.NumMethod() == 0 {
+				v.Set(reflect.ValueOf(value))
+			} else {
+				d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)})
+			}
+		}
+
+	case '"': // string
+		s, ok := unquoteBytes(item)
+		if !ok {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		switch v.Kind() {
+		default:
+			d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+		case reflect.Slice:
+			if v.Type().Elem().Kind() != reflect.Uint8 {
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+				break
+			}
+			b := make([]byte, base64.StdEncoding.DecodedLen(len(s)))
+			n, err := base64.StdEncoding.Decode(b, s)
+			if err != nil {
+				d.saveError(err)
+				break
+			}
+			v.SetBytes(b[:n])
+		case reflect.String:
+			v.SetString(string(s))
+		case reflect.Interface:
+			if v.NumMethod() == 0 {
+				v.Set(reflect.ValueOf(string(s)))
+			} else {
+				d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)})
+			}
+		}
+
+	default: // number
+		if c != '-' && (c < '0' || c > '9') {
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(errPhase)
+			}
+		}
+		s := string(item)
+		switch v.Kind() {
+		default:
+			if v.Kind() == reflect.String && v.Type() == numberType {
+				v.SetString(s)
+				if !isValidNumber(s) {
+					d.error(fmt.Errorf("json: invalid number literal, trying to unmarshal %q into Number", item))
+				}
+				break
+			}
+			if fromQuoted {
+				d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type()))
+			} else {
+				d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+			}
+		case reflect.Interface:
+			n, err := d.convertNumber(s)
+			if err != nil {
+				d.saveError(err)
+				break
+			}
+			if v.NumMethod() != 0 {
+				d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)})
+				break
+			}
+			v.Set(reflect.ValueOf(n))
+
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			n, err := strconv.ParseInt(s, 10, 64)
+			if err != nil || v.OverflowInt(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+				break
+			}
+			v.SetInt(n)
+
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+			n, err := strconv.ParseUint(s, 10, 64)
+			if err != nil || v.OverflowUint(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+				break
+			}
+			v.SetUint(n)
+
+		case reflect.Float32, reflect.Float64:
+			n, err := strconv.ParseFloat(s, v.Type().Bits())
+			if err != nil || v.OverflowFloat(n) {
+				d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)})
+				break
+			}
+			v.SetFloat(n)
+		}
+	}
+}
+
+// The xxxInterface routines build up a value to be stored
+// in an empty interface. They are not strictly necessary,
+// but they avoid the weight of reflection in this common case.
+
+// valueInterface is like value but returns interface{}
+func (d *decodeState) valueInterface() interface{} {
+	switch d.scanWhile(scanSkipSpace) {
+	default:
+		d.error(errPhase)
+		panic("unreachable")
+	case scanBeginArray:
+		return d.arrayInterface()
+	case scanBeginObject:
+		return d.objectInterface()
+	case scanBeginLiteral:
+		return d.literalInterface()
+	case scanBeginName:
+		return d.nameInterface()
+	}
+}
+
+func (d *decodeState) syntaxError(expected string) {
+	msg := fmt.Sprintf("invalid character '%c' looking for %s", d.data[d.off-1], expected)
+	d.error(&SyntaxError{msg, int64(d.off)})
+}
+
+// arrayInterface is like array but returns []interface{}.
+func (d *decodeState) arrayInterface() []interface{} {
+	var v = make([]interface{}, 0)
+	for {
+		// Look ahead for ] - can only happen on first iteration.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			if len(v) > 0 && !d.ext.trailingCommas {
+				d.syntaxError("beginning of value")
+			}
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		v = append(v, d.valueInterface())
+
+		// Next token must be , or ].
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndArray {
+			break
+		}
+		if op != scanArrayValue {
+			d.error(errPhase)
+		}
+	}
+	return v
+}
+
+// objectInterface is like object but returns map[string]interface{}.
+func (d *decodeState) objectInterface() interface{} {
+	v, ok := d.keyed()
+	if ok {
+		return v
+	}
+
+	m := make(map[string]interface{})
+	for {
+		// Read opening " of string key or closing }.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			if len(m) > 0 && !d.ext.trailingCommas {
+				d.syntaxError("beginning of object key string")
+			}
+			break
+		}
+		if op == scanBeginName {
+			if !d.ext.unquotedKeys {
+				d.syntaxError("beginning of object key string")
+			}
+		} else if op != scanBeginLiteral {
+			d.error(errPhase)
+		}
+		unquotedKey := op == scanBeginName
+
+		// Read string key.
+		start := d.off - 1
+		op = d.scanWhile(scanContinue)
+		item := d.data[start : d.off-1]
+		var key string
+		if unquotedKey {
+			key = string(item)
+		} else {
+			var ok bool
+			key, ok = unquote(item)
+			if !ok {
+				d.error(errPhase)
+			}
+		}
+
+		// Read : before value.
+		if op == scanSkipSpace {
+			op = d.scanWhile(scanSkipSpace)
+		}
+		if op != scanObjectKey {
+			d.error(errPhase)
+		}
+
+		// Read value.
+		m[key] = d.valueInterface()
+
+		// Next token must be , or }.
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndObject {
+			break
+		}
+		if op != scanObjectValue {
+			d.error(errPhase)
+		}
+	}
+	return m
+}
+
+// literalInterface is like literal but returns an interface value.
+func (d *decodeState) literalInterface() interface{} {
+	// All bytes inside literal return scanContinue op code.
+	start := d.off - 1
+	op := d.scanWhile(scanContinue)
+
+	// Scan read one byte too far; back up.
+	d.off--
+	d.scan.undo(op)
+	item := d.data[start:d.off]
+
+	switch c := item[0]; c {
+	case 'n': // null
+		return nil
+
+	case 't', 'f': // true, false
+		return c == 't'
+
+	case '"': // string
+		s, ok := unquote(item)
+		if !ok {
+			d.error(errPhase)
+		}
+		return s
+
+	default: // number
+		if c != '-' && (c < '0' || c > '9') {
+			d.error(errPhase)
+		}
+		n, err := d.convertNumber(string(item))
+		if err != nil {
+			d.saveError(err)
+		}
+		return n
+	}
+}
+
+// nameInterface is like name but returns map[string]interface{}.
+func (d *decodeState) nameInterface() interface{} {
+	v, ok := d.keyed()
+	if ok {
+		return v
+	}
+
+	nameStart := d.off - 1
+
+	op := d.scanWhile(scanContinue)
+
+	name := d.data[nameStart : d.off-1]
+	if op != scanParam {
+		// Back up so the byte just read is consumed next.
+		d.off--
+		d.scan.undo(op)
+		if l, ok := d.convertLiteral(name); ok {
+			return l
+		}
+		d.error(&SyntaxError{fmt.Sprintf("json: unknown constant %q", name), int64(d.off)})
+	}
+
+	funcName := string(name)
+	funcData := d.ext.funcs[funcName]
+	if funcData.key == "" {
+		d.error(fmt.Errorf("json: unknown function %q", funcName))
+	}
+
+	m := make(map[string]interface{})
+	for i := 0; ; i++ {
+		// Look ahead for ) - can only happen on first iteration.
+		op := d.scanWhile(scanSkipSpace)
+		if op == scanEndParams {
+			break
+		}
+
+		// Back up so d.value can have the byte we just read.
+		d.off--
+		d.scan.undo(op)
+
+		if i >= len(funcData.args) {
+			d.error(fmt.Errorf("json: too many arguments for function %s", funcName))
+		}
+		m[funcData.args[i]] = d.valueInterface()
+
+		// Next token must be , or ).
+		op = d.scanWhile(scanSkipSpace)
+		if op == scanEndParams {
+			break
+		}
+		if op != scanParam {
+			d.error(errPhase)
+		}
+	}
+	return map[string]interface{}{funcData.key: m}
+}
+
+// getu4 decodes \uXXXX from the beginning of s, returning the hex value,
+// or it returns -1.
+func getu4(s []byte) rune {
+	if len(s) < 6 || s[0] != '\\' || s[1] != 'u' {
+		return -1
+	}
+	r, err := strconv.ParseUint(string(s[2:6]), 16, 64)
+	if err != nil {
+		return -1
+	}
+	return rune(r)
+}
+
+// unquote converts a quoted JSON string literal s into an actual string t.
+// The rules are different from Go's, so we cannot use strconv.Unquote.
+func unquote(s []byte) (t string, ok bool) {
+	s, ok = unquoteBytes(s)
+	t = string(s)
+	return
+}
+
+func unquoteBytes(s []byte) (t []byte, ok bool) {
+	if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' {
+		return
+	}
+	s = s[1 : len(s)-1]
+
+	// Check for unusual characters. If there are none,
+	// then no unquoting is needed, so return a slice of the
+	// original bytes.
+	r := 0
+	for r < len(s) {
+		c := s[r]
+		if c == '\\' || c == '"' || c < ' ' {
+			break
+		}
+		if c < utf8.RuneSelf {
+			r++
+			continue
+		}
+		rr, size := utf8.DecodeRune(s[r:])
+		if rr == utf8.RuneError && size == 1 {
+			break
+		}
+		r += size
+	}
+	if r == len(s) {
+		return s, true
+	}
+
+	b := make([]byte, len(s)+2*utf8.UTFMax)
+	w := copy(b, s[0:r])
+	for r < len(s) {
+		// Out of room?  Can only happen if s is full of
+		// malformed UTF-8 and we're replacing each
+		// byte with RuneError.
+		if w >= len(b)-2*utf8.UTFMax {
+			nb := make([]byte, (len(b)+utf8.UTFMax)*2)
+			copy(nb, b[0:w])
+			b = nb
+		}
+		switch c := s[r]; {
+		case c == '\\':
+			r++
+			if r >= len(s) {
+				return
+			}
+			switch s[r] {
+			default:
+				return
+			case '"', '\\', '/', '\'':
+				b[w] = s[r]
+				r++
+				w++
+			case 'b':
+				b[w] = '\b'
+				r++
+				w++
+			case 'f':
+				b[w] = '\f'
+				r++
+				w++
+			case 'n':
+				b[w] = '\n'
+				r++
+				w++
+			case 'r':
+				b[w] = '\r'
+				r++
+				w++
+			case 't':
+				b[w] = '\t'
+				r++
+				w++
+			case 'u':
+				r--
+				rr := getu4(s[r:])
+				if rr < 0 {
+					return
+				}
+				r += 6
+				if utf16.IsSurrogate(rr) {
+					rr1 := getu4(s[r:])
+					if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar {
+						// A valid pair; consume.
+						r += 6
+						w += utf8.EncodeRune(b[w:], dec)
+						break
+					}
+					// Invalid surrogate; fall back to replacement rune.
+					rr = unicode.ReplacementChar
+				}
+				w += utf8.EncodeRune(b[w:], rr)
+			}
+
+		// Quote, control characters are invalid.
+		case c == '"', c < ' ':
+			return
+
+		// ASCII
+		case c < utf8.RuneSelf:
+			b[w] = c
+			r++
+			w++
+
+		// Coerce to well-formed UTF-8.
+		default:
+			rr, size := utf8.DecodeRune(s[r:])
+			r += size
+			w += utf8.EncodeRune(b[w:], rr)
+		}
+	}
+	return b[0:w], true
+}
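A hedged illustration of the escape handling above, including a UTF-16 surrogate pair being rebuilt into a single rune (input invented for the example):

s, ok := unquote([]byte(`"tab:\t emoji:\ud83d\ude00"`))
// ok == true; s contains a real tab plus U+1F600, decoded from the
// surrogate pair \ud83d\ude00.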
diff --git a/switchq/vendor/gopkg.in/mgo.v2/internal/json/encode.go b/switchq/vendor/gopkg.in/mgo.v2/internal/json/encode.go
new file mode 100644
index 0000000..67a0f00
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/internal/json/encode.go
@@ -0,0 +1,1256 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json implements encoding and decoding of JSON as defined in
+// RFC 4627. The mapping between JSON and Go values is described
+// in the documentation for the Marshal and Unmarshal functions.
+//
+// See "JSON and Go" for an introduction to this package:
+// https://golang.org/doc/articles/json_and_go.html
+package json
+
+import (
+	"bytes"
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"reflect"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+// Marshal returns the JSON encoding of v.
+//
+// Marshal traverses the value v recursively.
+// If an encountered value implements the Marshaler interface
+// and is not a nil pointer, Marshal calls its MarshalJSON method
+// to produce JSON. If no MarshalJSON method is present but the
+// value implements encoding.TextMarshaler instead, Marshal calls
+// its MarshalText method.
+// The nil pointer exception is not strictly necessary
+// but mimics a similar, necessary exception in the behavior of
+// UnmarshalJSON.
+//
+// Otherwise, Marshal uses the following type-dependent default encodings:
+//
+// Boolean values encode as JSON booleans.
+//
+// Floating point, integer, and Number values encode as JSON numbers.
+//
+// String values encode as JSON strings coerced to valid UTF-8,
+// replacing invalid bytes with the Unicode replacement rune.
+// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
+// to keep some browsers from misinterpreting JSON output as HTML.
+// Ampersand "&" is also escaped to "\u0026" for the same reason.
+// This escaping can be disabled using an Encoder with DisableHTMLEscaping.
+//
+// Array and slice values encode as JSON arrays, except that
+// []byte encodes as a base64-encoded string, and a nil slice
+// encodes as the null JSON value.
+//
+// Struct values encode as JSON objects. Each exported struct field
+// becomes a member of the object unless
+//   - the field's tag is "-", or
+//   - the field is empty and its tag specifies the "omitempty" option.
+// The empty values are false, 0, any
+// nil pointer or interface value, and any array, slice, map, or string of
+// length zero. The object's default key string is the struct field name
+// but can be specified in the struct field's tag value. The "json" key in
+// the struct field's tag value is the key name, followed by an optional comma
+// and options. Examples:
+//
+//   // Field is ignored by this package.
+//   Field int `json:"-"`
+//
+//   // Field appears in JSON as key "myName".
+//   Field int `json:"myName"`
+//
+//   // Field appears in JSON as key "myName" and
+//   // the field is omitted from the object if its value is empty,
+//   // as defined above.
+//   Field int `json:"myName,omitempty"`
+//
+//   // Field appears in JSON as key "Field" (the default), but
+//   // the field is skipped if empty.
+//   // Note the leading comma.
+//   Field int `json:",omitempty"`
+//
+// The "string" option signals that a field is stored as JSON inside a
+// JSON-encoded string. It applies only to fields of string, floating point,
+// integer, or boolean types. This extra level of encoding is sometimes used
+// when communicating with JavaScript programs:
+//
+//    Int64String int64 `json:",string"`
+//
+// The key name will be used if it's a non-empty string consisting of
+// only Unicode letters, digits, dollar signs, percent signs, hyphens,
+// underscores and slashes.
+//
+// Anonymous struct fields are usually marshaled as if their inner exported fields
+// were fields in the outer struct, subject to the usual Go visibility rules amended
+// as described in the next paragraph.
+// An anonymous struct field with a name given in its JSON tag is treated as
+// having that name, rather than being anonymous.
+// An anonymous struct field of interface type is treated the same as having
+// that type as its name, rather than being anonymous.
+//
+// The Go visibility rules for struct fields are amended for JSON when
+// deciding which field to marshal or unmarshal. If there are
+// multiple fields at the same level, and that level is the least
+// nested (and would therefore be the nesting level selected by the
+// usual Go rules), the following extra rules apply:
+//
+// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
+// even if there are multiple untagged fields that would otherwise conflict.
+// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
+// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
+//
+// Handling of anonymous struct fields is new in Go 1.1.
+// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
+// an anonymous struct field in both current and earlier versions, give the field
+// a JSON tag of "-".
+//
+// Map values encode as JSON objects. The map's key type must either be a string
+// or implement encoding.TextMarshaler.  The map keys are used as JSON object
+// keys, subject to the UTF-8 coercion described for string values above.
+//
+// Pointer values encode as the value pointed to.
+// A nil pointer encodes as the null JSON value.
+//
+// Interface values encode as the value contained in the interface.
+// A nil interface value encodes as the null JSON value.
+//
+// Channel, complex, and function values cannot be encoded in JSON.
+// Attempting to encode such a value causes Marshal to return
+// an UnsupportedTypeError.
+//
+// JSON cannot represent cyclic data structures and Marshal does not
+// handle them. Passing cyclic structures to Marshal will result in
+// an infinite recursion.
+//
+func Marshal(v interface{}) ([]byte, error) {
+	e := &encodeState{}
+	err := e.marshal(v, encOpts{escapeHTML: true})
+	if err != nil {
+		return nil, err
+	}
+	return e.Bytes(), nil
+}
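
A minimal sketch of the tag rules documented above; this vendored fork mirrors the standard encoding/json here, so the example uses the standard import:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type User struct {
	Name   string `json:"name"`
	Email  string `json:"email,omitempty"`
	ID     int64  `json:",string"`
	Secret string `json:"-"`
}

func main() {
	out, _ := json.Marshal(User{Name: "ann", ID: 7})
	fmt.Println(string(out)) // {"name":"ann","ID":"7"}
	// Email is dropped (empty + omitempty), ID is quoted by the
	// "string" option, and Secret never appears.
}
```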
+
+// MarshalIndent is like Marshal but applies Indent to format the output.
+func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
+	b, err := Marshal(v)
+	if err != nil {
+		return nil, err
+	}
+	var buf bytes.Buffer
+	err = Indent(&buf, b, prefix, indent)
+	if err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
+// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
+// so that the JSON will be safe to embed inside HTML <script> tags.
+// For historical reasons, web browsers don't honor standard HTML
+// escaping within <script> tags, so an alternative JSON encoding must
+// be used.
+func HTMLEscape(dst *bytes.Buffer, src []byte) {
+	// The characters can only appear in string literals,
+	// so just scan the string one byte at a time.
+	start := 0
+	for i, c := range src {
+		if c == '<' || c == '>' || c == '&' {
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			dst.WriteString(`\u00`)
+			dst.WriteByte(hex[c>>4])
+			dst.WriteByte(hex[c&0xF])
+			start = i + 1
+		}
+		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			dst.WriteString(`\u202`)
+			dst.WriteByte(hex[src[i+2]&0xF])
+			start = i + 3
+		}
+	}
+	if start < len(src) {
+		dst.Write(src[start:])
+	}
+}
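
Usage sketch, using the standard library's identically-behaved json.HTMLEscape:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	json.HTMLEscape(&buf, []byte(`{"msg":"<b>hi</b> & bye"}`))
	fmt.Println(buf.String())
	// {"msg":"\u003cb\u003ehi\u003c/b\u003e \u0026 bye"}
}
```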
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid JSON.
+type Marshaler interface {
+	MarshalJSON() ([]byte, error)
+}
+
+// An UnsupportedTypeError is returned by Marshal when attempting
+// to encode an unsupported value type.
+type UnsupportedTypeError struct {
+	Type reflect.Type
+}
+
+func (e *UnsupportedTypeError) Error() string {
+	return "json: unsupported type: " + e.Type.String()
+}
+
+type UnsupportedValueError struct {
+	Value reflect.Value
+	Str   string
+}
+
+func (e *UnsupportedValueError) Error() string {
+	return "json: unsupported value: " + e.Str
+}
+
+// Before Go 1.2, an InvalidUTF8Error was returned by Marshal when
+// attempting to encode a string value with invalid UTF-8 sequences.
+// As of Go 1.2, Marshal instead coerces the string to valid UTF-8 by
+// replacing invalid bytes with the Unicode replacement rune U+FFFD.
+// This error is no longer generated but is kept for backwards compatibility
+// with programs that might mention it.
+type InvalidUTF8Error struct {
+	S string // the whole string value that caused the error
+}
+
+func (e *InvalidUTF8Error) Error() string {
+	return "json: invalid UTF-8 in string: " + strconv.Quote(e.S)
+}
+
+type MarshalerError struct {
+	Type reflect.Type
+	Err  error
+}
+
+func (e *MarshalerError) Error() string {
+	return "json: error calling MarshalJSON for type " + e.Type.String() + ": " + e.Err.Error()
+}
+
+var hex = "0123456789abcdef"
+
+// An encodeState encodes JSON into a bytes.Buffer.
+type encodeState struct {
+	bytes.Buffer // accumulated output
+	scratch      [64]byte
+	ext          Extension
+}
+
+var encodeStatePool sync.Pool
+
+func newEncodeState() *encodeState {
+	if v := encodeStatePool.Get(); v != nil {
+		e := v.(*encodeState)
+		e.Reset()
+		return e
+	}
+	return new(encodeState)
+}
+
+func (e *encodeState) marshal(v interface{}, opts encOpts) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if _, ok := r.(runtime.Error); ok {
+				panic(r)
+			}
+			if s, ok := r.(string); ok {
+				panic(s)
+			}
+			err = r.(error)
+		}
+	}()
+	e.reflectValue(reflect.ValueOf(v), opts)
+	return nil
+}
+
+func (e *encodeState) error(err error) {
+	panic(err)
+}
+
+func isEmptyValue(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+func (e *encodeState) reflectValue(v reflect.Value, opts encOpts) {
+	valueEncoder(v)(e, v, opts)
+}
+
+type encOpts struct {
+	// quoted causes primitive fields to be encoded inside JSON strings.
+	quoted bool
+	// escapeHTML causes '<', '>', and '&' to be escaped in JSON strings.
+	escapeHTML bool
+}
+
+type encoderFunc func(e *encodeState, v reflect.Value, opts encOpts)
+
+var encoderCache struct {
+	sync.RWMutex
+	m map[reflect.Type]encoderFunc
+}
+
+func valueEncoder(v reflect.Value) encoderFunc {
+	if !v.IsValid() {
+		return invalidValueEncoder
+	}
+	return typeEncoder(v.Type())
+}
+
+func typeEncoder(t reflect.Type) encoderFunc {
+	encoderCache.RLock()
+	f := encoderCache.m[t]
+	encoderCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// To deal with recursive types, populate the map with an
+	// indirect func before we build the real one. The indirect func
+	// waits for the real func (f) to be ready and then calls it;
+	// it is only used for recursive types.
+	encoderCache.Lock()
+	if encoderCache.m == nil {
+		encoderCache.m = make(map[reflect.Type]encoderFunc)
+	}
+	var wg sync.WaitGroup
+	wg.Add(1)
+	encoderCache.m[t] = func(e *encodeState, v reflect.Value, opts encOpts) {
+		wg.Wait()
+		f(e, v, opts)
+	}
+	encoderCache.Unlock()
+
+	// Compute the real encoder without holding the lock.
+	// Might duplicate effort but won't hold other computations back.
+	innerf := newTypeEncoder(t, true)
+	f = func(e *encodeState, v reflect.Value, opts encOpts) {
+		encode, ok := e.ext.encode[v.Type()]
+		if !ok {
+			innerf(e, v, opts)
+			return
+		}
+
+		b, err := encode(v.Interface())
+		if err == nil {
+			// copy JSON into buffer, checking validity.
+			err = compact(&e.Buffer, b, opts.escapeHTML)
+		}
+		if err != nil {
+			e.error(&MarshalerError{v.Type(), err})
+		}
+	}
+	wg.Done()
+	encoderCache.Lock()
+	encoderCache.m[t] = f
+	encoderCache.Unlock()
+	return f
+}
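
The WaitGroup placeholder above is what makes recursive types encodable: building the encoder for Node below requires the still-pending encoder for its own *Node field. The standard encoding/json uses the same mechanism, so the sketch uses the standard import:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Node refers to itself, so its encoder is built recursively.
type Node struct {
	Val  int   `json:"val"`
	Next *Node `json:"next,omitempty"`
}

func main() {
	out, _ := json.Marshal(Node{Val: 1, Next: &Node{Val: 2}})
	fmt.Println(string(out)) // {"val":1,"next":{"val":2}}
}
```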
+
+var (
+	marshalerType     = reflect.TypeOf(new(Marshaler)).Elem()
+	textMarshalerType = reflect.TypeOf(new(encoding.TextMarshaler)).Elem()
+)
+
+// newTypeEncoder constructs an encoderFunc for a type.
+// The returned encoder only checks CanAddr when allowAddr is true.
+func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc {
+	if t.Implements(marshalerType) {
+		return marshalerEncoder
+	}
+	if t.Kind() != reflect.Ptr && allowAddr {
+		if reflect.PtrTo(t).Implements(marshalerType) {
+			return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false))
+		}
+	}
+
+	if t.Implements(textMarshalerType) {
+		return textMarshalerEncoder
+	}
+	if t.Kind() != reflect.Ptr && allowAddr {
+		if reflect.PtrTo(t).Implements(textMarshalerType) {
+			return newCondAddrEncoder(addrTextMarshalerEncoder, newTypeEncoder(t, false))
+		}
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		return boolEncoder
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return intEncoder
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return uintEncoder
+	case reflect.Float32:
+		return float32Encoder
+	case reflect.Float64:
+		return float64Encoder
+	case reflect.String:
+		return stringEncoder
+	case reflect.Interface:
+		return interfaceEncoder
+	case reflect.Struct:
+		return newStructEncoder(t)
+	case reflect.Map:
+		return newMapEncoder(t)
+	case reflect.Slice:
+		return newSliceEncoder(t)
+	case reflect.Array:
+		return newArrayEncoder(t)
+	case reflect.Ptr:
+		return newPtrEncoder(t)
+	default:
+		return unsupportedTypeEncoder
+	}
+}
+
+func invalidValueEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+	e.WriteString("null")
+}
+
+func marshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.Kind() == reflect.Ptr && v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := v.Interface().(Marshaler)
+	b, err := m.MarshalJSON()
+	if err == nil {
+		// copy JSON into buffer, checking validity.
+		err = compact(&e.Buffer, b, opts.escapeHTML)
+	}
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+}
+
+func addrMarshalerEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+	va := v.Addr()
+	if va.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := va.Interface().(Marshaler)
+	b, err := m.MarshalJSON()
+	if err == nil {
+		// copy JSON into buffer, checking validity.
+		err = compact(&e.Buffer, b, true)
+	}
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+}
+
+func textMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.Kind() == reflect.Ptr && v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := v.Interface().(encoding.TextMarshaler)
+	b, err := m.MarshalText()
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+	e.stringBytes(b, opts.escapeHTML)
+}
+
+func addrTextMarshalerEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	va := v.Addr()
+	if va.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	m := va.Interface().(encoding.TextMarshaler)
+	b, err := m.MarshalText()
+	if err != nil {
+		e.error(&MarshalerError{v.Type(), err})
+	}
+	e.stringBytes(b, opts.escapeHTML)
+}
+
+func boolEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+	if v.Bool() {
+		e.WriteString("true")
+	} else {
+		e.WriteString("false")
+	}
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+}
+
+func intEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	b := strconv.AppendInt(e.scratch[:0], v.Int(), 10)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+	e.Write(b)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+}
+
+func uintEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	b := strconv.AppendUint(e.scratch[:0], v.Uint(), 10)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+	e.Write(b)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+}
+
+type floatEncoder int // number of bits
+
+func (bits floatEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	f := v.Float()
+	if math.IsInf(f, 0) || math.IsNaN(f) {
+		e.error(&UnsupportedValueError{v, strconv.FormatFloat(f, 'g', -1, int(bits))})
+	}
+	b := strconv.AppendFloat(e.scratch[:0], f, 'g', -1, int(bits))
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+	e.Write(b)
+	if opts.quoted {
+		e.WriteByte('"')
+	}
+}
+
+var (
+	float32Encoder = (floatEncoder(32)).encode
+	float64Encoder = (floatEncoder(64)).encode
+)
+
+func stringEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.Type() == numberType {
+		numStr := v.String()
+		// In Go 1.5 the empty string encoded to "0". That is not a valid
+		// number literal, but we keep compatibility: substitute it, then
+		// check validity below.
+		if numStr == "" {
+			numStr = "0" // Number's zero-val
+		}
+		if !isValidNumber(numStr) {
+			e.error(fmt.Errorf("json: invalid number literal %q", numStr))
+		}
+		e.WriteString(numStr)
+		return
+	}
+	if opts.quoted {
+		sb, err := Marshal(v.String())
+		if err != nil {
+			e.error(err)
+		}
+		e.string(string(sb), opts.escapeHTML)
+	} else {
+		e.string(v.String(), opts.escapeHTML)
+	}
+}
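
A sketch of the Number path above, assuming the same behavior as the standard encoding/json (which this code matches): a valid literal passes through verbatim, the zero value becomes 0, and an invalid literal is rejected:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Price struct {
	Amount json.Number `json:"amount"`
}

func main() {
	a, _ := json.Marshal(Price{Amount: "19.99"})
	b, _ := json.Marshal(Price{}) // empty Number
	_, err := json.Marshal(Price{Amount: "1x"})
	fmt.Println(string(a), string(b), err != nil)
	// {"amount":19.99} {"amount":0} true
}
```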
+
+func interfaceEncoder(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	e.reflectValue(v.Elem(), opts)
+}
+
+func unsupportedTypeEncoder(e *encodeState, v reflect.Value, _ encOpts) {
+	e.error(&UnsupportedTypeError{v.Type()})
+}
+
+type structEncoder struct {
+	fields    []field
+	fieldEncs []encoderFunc
+}
+
+func (se *structEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	e.WriteByte('{')
+	first := true
+	for i, f := range se.fields {
+		fv := fieldByIndex(v, f.index)
+		if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
+			continue
+		}
+		if first {
+			first = false
+		} else {
+			e.WriteByte(',')
+		}
+		e.string(f.name, opts.escapeHTML)
+		e.WriteByte(':')
+		opts.quoted = f.quoted
+		se.fieldEncs[i](e, fv, opts)
+	}
+	e.WriteByte('}')
+}
+
+func newStructEncoder(t reflect.Type) encoderFunc {
+	fields := cachedTypeFields(t)
+	se := &structEncoder{
+		fields:    fields,
+		fieldEncs: make([]encoderFunc, len(fields)),
+	}
+	for i, f := range fields {
+		se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index))
+	}
+	return se.encode
+}
+
+type mapEncoder struct {
+	elemEnc encoderFunc
+}
+
+func (me *mapEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	e.WriteByte('{')
+
+	// Extract and sort the keys.
+	keys := v.MapKeys()
+	sv := make([]reflectWithString, len(keys))
+	for i, v := range keys {
+		sv[i].v = v
+		if err := sv[i].resolve(); err != nil {
+			e.error(&MarshalerError{v.Type(), err})
+		}
+	}
+	sort.Sort(byString(sv))
+
+	for i, kv := range sv {
+		if i > 0 {
+			e.WriteByte(',')
+		}
+		e.string(kv.s, opts.escapeHTML)
+		e.WriteByte(':')
+		me.elemEnc(e, v.MapIndex(kv.v), opts)
+	}
+	e.WriteByte('}')
+}
+
+func newMapEncoder(t reflect.Type) encoderFunc {
+	if t.Key().Kind() != reflect.String && !t.Key().Implements(textMarshalerType) {
+		return unsupportedTypeEncoder
+	}
+	me := &mapEncoder{typeEncoder(t.Elem())}
+	return me.encode
+}
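
Because the keys are sorted, map output is deterministic; the standard package behaves the same way:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	out, _ := json.Marshal(map[string]int{"b": 2, "c": 3, "a": 1})
	fmt.Println(string(out)) // {"a":1,"b":2,"c":3}, always in this order
}
```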
+
+func encodeByteSlice(e *encodeState, v reflect.Value, _ encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	s := v.Bytes()
+	e.WriteByte('"')
+	if len(s) < 1024 {
+		// for small buffers, using Encode directly is much faster.
+		dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+		base64.StdEncoding.Encode(dst, s)
+		e.Write(dst)
+	} else {
+		// for large buffers, avoid unnecessary extra temporary
+		// buffer space.
+		enc := base64.NewEncoder(base64.StdEncoding, e)
+		enc.Write(s)
+		enc.Close()
+	}
+	e.WriteByte('"')
+}
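
Sketch of the []byte special case (same behavior in the standard package): byte slices become base64 strings, and nil slices become null:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	a, _ := json.Marshal(map[string][]byte{"k": []byte("hi")})
	b, _ := json.Marshal(map[string][]byte{"k": nil})
	fmt.Println(string(a), string(b)) // {"k":"aGk="} {"k":null}
}
```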
+
+// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil.
+type sliceEncoder struct {
+	arrayEnc encoderFunc
+}
+
+func (se *sliceEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	se.arrayEnc(e, v, opts)
+}
+
+func newSliceEncoder(t reflect.Type) encoderFunc {
+	// Byte slices get special treatment; arrays don't.
+	if t.Elem().Kind() == reflect.Uint8 &&
+		!t.Elem().Implements(marshalerType) &&
+		!t.Elem().Implements(textMarshalerType) {
+		return encodeByteSlice
+	}
+	enc := &sliceEncoder{newArrayEncoder(t)}
+	return enc.encode
+}
+
+type arrayEncoder struct {
+	elemEnc encoderFunc
+}
+
+func (ae *arrayEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	e.WriteByte('[')
+	n := v.Len()
+	for i := 0; i < n; i++ {
+		if i > 0 {
+			e.WriteByte(',')
+		}
+		ae.elemEnc(e, v.Index(i), opts)
+	}
+	e.WriteByte(']')
+}
+
+func newArrayEncoder(t reflect.Type) encoderFunc {
+	enc := &arrayEncoder{typeEncoder(t.Elem())}
+	return enc.encode
+}
+
+type ptrEncoder struct {
+	elemEnc encoderFunc
+}
+
+func (pe *ptrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.IsNil() {
+		e.WriteString("null")
+		return
+	}
+	pe.elemEnc(e, v.Elem(), opts)
+}
+
+func newPtrEncoder(t reflect.Type) encoderFunc {
+	enc := &ptrEncoder{typeEncoder(t.Elem())}
+	return enc.encode
+}
+
+type condAddrEncoder struct {
+	canAddrEnc, elseEnc encoderFunc
+}
+
+func (ce *condAddrEncoder) encode(e *encodeState, v reflect.Value, opts encOpts) {
+	if v.CanAddr() {
+		ce.canAddrEnc(e, v, opts)
+	} else {
+		ce.elseEnc(e, v, opts)
+	}
+}
+
+// newCondAddrEncoder returns an encoder that checks whether its value
+// CanAddr and delegates to canAddrEnc if so, else to elseEnc.
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc {
+	enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc}
+	return enc.encode
+}
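
The CanAddr split matters when Marshaler is implemented on a pointer receiver: whether the custom method runs depends on the addressability of the value being encoded. A sketch with the standard package, which applies the same rule:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Celsius implements Marshaler only on its pointer receiver.
type Celsius float64

func (c *Celsius) MarshalJSON() ([]byte, error) {
	return []byte(fmt.Sprintf("%q", fmt.Sprintf("%.1fC", float64(*c)))), nil
}

type Reading struct{ Temp Celsius }

func main() {
	a, _ := json.Marshal(&Reading{Temp: 21.5}) // field addressable via pointer
	b, _ := json.Marshal(Reading{Temp: 21.5})  // unaddressable copy
	fmt.Println(string(a)) // {"Temp":"21.5C"}
	fmt.Println(string(b)) // {"Temp":21.5}
}
```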
+
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func fieldByIndex(v reflect.Value, index []int) reflect.Value {
+	for _, i := range index {
+		if v.Kind() == reflect.Ptr {
+			if v.IsNil() {
+				return reflect.Value{}
+			}
+			v = v.Elem()
+		}
+		v = v.Field(i)
+	}
+	return v
+}
+
+func typeByIndex(t reflect.Type, index []int) reflect.Type {
+	for _, i := range index {
+		if t.Kind() == reflect.Ptr {
+			t = t.Elem()
+		}
+		t = t.Field(i).Type
+	}
+	return t
+}
+
+type reflectWithString struct {
+	v reflect.Value
+	s string
+}
+
+func (w *reflectWithString) resolve() error {
+	if w.v.Kind() == reflect.String {
+		w.s = w.v.String()
+		return nil
+	}
+	buf, err := w.v.Interface().(encoding.TextMarshaler).MarshalText()
+	w.s = string(buf)
+	return err
+}
+
+// byString is a slice of reflectWithString where the reflect.Value is either
+// a string or an encoding.TextMarshaler.
+// It implements the methods to sort by string.
+type byString []reflectWithString
+
+func (sv byString) Len() int           { return len(sv) }
+func (sv byString) Swap(i, j int)      { sv[i], sv[j] = sv[j], sv[i] }
+func (sv byString) Less(i, j int) bool { return sv[i].s < sv[j].s }
+
+// NOTE: keep in sync with stringBytes below.
+func (e *encodeState) string(s string, escapeHTML bool) int {
+	len0 := e.Len()
+	e.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' &&
+				(!escapeHTML || b != '<' && b != '>' && b != '&') {
+				i++
+				continue
+			}
+			if start < i {
+				e.WriteString(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				e.WriteByte('\\')
+				e.WriteByte(b)
+			case '\n':
+				e.WriteByte('\\')
+				e.WriteByte('n')
+			case '\r':
+				e.WriteByte('\\')
+				e.WriteByte('r')
+			case '\t':
+				e.WriteByte('\\')
+				e.WriteByte('t')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				e.WriteString(`\u00`)
+				e.WriteByte(hex[b>>4])
+				e.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRuneInString(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				e.WriteString(s[start:i])
+			}
+			e.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				e.WriteString(s[start:i])
+			}
+			e.WriteString(`\u202`)
+			e.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		e.WriteString(s[start:])
+	}
+	e.WriteByte('"')
+	return e.Len() - len0
+}
+
+// NOTE: keep in sync with string above.
+func (e *encodeState) stringBytes(s []byte, escapeHTML bool) int {
+	len0 := e.Len()
+	e.WriteByte('"')
+	start := 0
+	for i := 0; i < len(s); {
+		if b := s[i]; b < utf8.RuneSelf {
+			if 0x20 <= b && b != '\\' && b != '"' &&
+				(!escapeHTML || b != '<' && b != '>' && b != '&') {
+				i++
+				continue
+			}
+			if start < i {
+				e.Write(s[start:i])
+			}
+			switch b {
+			case '\\', '"':
+				e.WriteByte('\\')
+				e.WriteByte(b)
+			case '\n':
+				e.WriteByte('\\')
+				e.WriteByte('n')
+			case '\r':
+				e.WriteByte('\\')
+				e.WriteByte('r')
+			case '\t':
+				e.WriteByte('\\')
+				e.WriteByte('t')
+			default:
+				// This encodes bytes < 0x20 except for \t, \n and \r.
+				// If escapeHTML is set, it also escapes <, >, and &
+				// because they can lead to security holes when
+				// user-controlled strings are rendered into JSON
+				// and served to some browsers.
+				e.WriteString(`\u00`)
+				e.WriteByte(hex[b>>4])
+				e.WriteByte(hex[b&0xF])
+			}
+			i++
+			start = i
+			continue
+		}
+		c, size := utf8.DecodeRune(s[i:])
+		if c == utf8.RuneError && size == 1 {
+			if start < i {
+				e.Write(s[start:i])
+			}
+			e.WriteString(`\ufffd`)
+			i += size
+			start = i
+			continue
+		}
+		// U+2028 is LINE SEPARATOR.
+		// U+2029 is PARAGRAPH SEPARATOR.
+		// They are both technically valid characters in JSON strings,
+		// but don't work in JSONP, which has to be evaluated as JavaScript,
+		// and can lead to security holes there. It is valid JSON to
+		// escape them, so we do so unconditionally.
+		// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
+		if c == '\u2028' || c == '\u2029' {
+			if start < i {
+				e.Write(s[start:i])
+			}
+			e.WriteString(`\u202`)
+			e.WriteByte(hex[c&0xF])
+			i += size
+			start = i
+			continue
+		}
+		i += size
+	}
+	if start < len(s) {
+		e.Write(s[start:])
+	}
+	e.WriteByte('"')
+	return e.Len() - len0
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+	name      string
+	nameBytes []byte                 // []byte(name)
+	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+	tag       bool
+	index     []int
+	typ       reflect.Type
+	omitEmpty bool
+	quoted    bool
+}
+
+func fillField(f field) field {
+	f.nameBytes = []byte(f.name)
+	f.equalFold = foldFunc(f.nameBytes)
+	return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+	// Anonymous fields to explore at the current level and the next.
+	current := []field{}
+	next := []field{{typ: t}}
+
+	// Count of queued names for current level and the next.
+	count := map[reflect.Type]int{}
+	nextCount := map[reflect.Type]int{}
+
+	// Types already visited at an earlier level.
+	visited := map[reflect.Type]bool{}
+
+	// Fields found.
+	var fields []field
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if visited[f.typ] {
+				continue
+			}
+			visited[f.typ] = true
+
+			// Scan f.typ for fields to include.
+			for i := 0; i < f.typ.NumField(); i++ {
+				sf := f.typ.Field(i)
+				if sf.PkgPath != "" && !sf.Anonymous { // unexported
+					continue
+				}
+				tag := sf.Tag.Get("json")
+				if tag == "-" {
+					continue
+				}
+				name, opts := parseTag(tag)
+				if !isValidTag(name) {
+					name = ""
+				}
+				index := make([]int, len(f.index)+1)
+				copy(index, f.index)
+				index[len(f.index)] = i
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					// Follow pointer.
+					ft = ft.Elem()
+				}
+
+				// Only strings, floats, integers, and booleans can be quoted.
+				quoted := false
+				if opts.Contains("string") {
+					switch ft.Kind() {
+					case reflect.Bool,
+						reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+						reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+						reflect.Float32, reflect.Float64,
+						reflect.String:
+						quoted = true
+					}
+				}
+
+				// Record found field and index sequence.
+				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+					tagged := name != ""
+					if name == "" {
+						name = sf.Name
+					}
+					fields = append(fields, fillField(field{
+						name:      name,
+						tag:       tagged,
+						index:     index,
+						typ:       ft,
+						omitEmpty: opts.Contains("omitempty"),
+						quoted:    quoted,
+					}))
+					if count[f.typ] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, fields[len(fields)-1])
+					}
+					continue
+				}
+
+				// Record new anonymous struct to explore in next round.
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+				}
+			}
+		}
+	}
+
+	sort.Sort(byName(fields))
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with JSON tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
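
A sketch of the annihilation rule above (identical in the standard package): two untagged fields with the same name at the same depth cancel each other silently, and a JSON tag would break the tie:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type A struct{ X int }
type B struct{ X int }

type C struct {
	A // both embed an untagged X at depth 1
	B
}

func main() {
	out, _ := json.Marshal(C{A{1}, B{2}})
	fmt.Println(string(out)) // {} with no error; both X fields are dropped
}
```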
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
diff --git a/switchq/vendor/gopkg.in/mgo.v2/internal/json/extension.go b/switchq/vendor/gopkg.in/mgo.v2/internal/json/extension.go
new file mode 100644
index 0000000..1c8fd45
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/internal/json/extension.go
@@ -0,0 +1,95 @@
+package json
+
+import (
+	"reflect"
+)
+
+// Extension holds a set of additional rules to be used when unmarshaling
+// strict JSON or JSON-like content.
+type Extension struct {
+	funcs  map[string]funcExt
+	consts map[string]interface{}
+	keyed  map[string]func([]byte) (interface{}, error)
+	encode map[reflect.Type]func(v interface{}) ([]byte, error)
+
+	unquotedKeys   bool
+	trailingCommas bool
+}
+
+type funcExt struct {
+	key  string
+	args []string
+}
+
+// Extend changes the decoder behavior to consider the provided extension.
+func (dec *Decoder) Extend(ext *Extension) { dec.d.ext = *ext }
+
+// Extend changes the encoder behavior to consider the provided extension.
+func (enc *Encoder) Extend(ext *Extension) { enc.ext = *ext }
+
+// Extend includes in e the extensions defined in ext.
+func (e *Extension) Extend(ext *Extension) {
+	for name, fext := range ext.funcs {
+		e.DecodeFunc(name, fext.key, fext.args...)
+	}
+	for name, value := range ext.consts {
+		e.DecodeConst(name, value)
+	}
+	for key, decode := range ext.keyed {
+		e.DecodeKeyed(key, decode)
+	}
+	for typ, encode := range ext.encode {
+		if e.encode == nil {
+			e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
+		}
+		e.encode[typ] = encode
+	}
+}
+
+// DecodeFunc defines a function call that may be observed inside JSON content.
+// A function with the provided name will be unmarshaled as the document
+// {key: {args[0]: ..., args[N]: ...}}.
+func (e *Extension) DecodeFunc(name string, key string, args ...string) {
+	if e.funcs == nil {
+		e.funcs = make(map[string]funcExt)
+	}
+	e.funcs[name] = funcExt{key, args}
+}
+
+// DecodeConst defines a constant name that may be observed inside JSON content
+// and will be decoded with the provided value.
+func (e *Extension) DecodeConst(name string, value interface{}) {
+	if e.consts == nil {
+		e.consts = make(map[string]interface{})
+	}
+	e.consts[name] = value
+}
+
+// DecodeKeyed defines a key that when observed as the first element inside a
+// JSON document triggers the decoding of that document via the provided
+// decode function.
+func (e *Extension) DecodeKeyed(key string, decode func(data []byte) (interface{}, error)) {
+	if e.keyed == nil {
+		e.keyed = make(map[string]func([]byte) (interface{}, error))
+	}
+	e.keyed[key] = decode
+}
+
+// DecodeUnquotedKeys defines whether to accept map keys that are unquoted strings.
+func (e *Extension) DecodeUnquotedKeys(accept bool) {
+	e.unquotedKeys = accept
+}
+
+// DecodeTrailingCommas defines whether to accept trailing commas in maps and arrays.
+func (e *Extension) DecodeTrailingCommas(accept bool) {
+	e.trailingCommas = accept
+}
+
+// EncodeType registers a function to encode values with the same type of the
+// provided sample.
+func (e *Extension) EncodeType(sample interface{}, encode func(v interface{}) ([]byte, error)) {
+	if e.encode == nil {
+		e.encode = make(map[reflect.Type]func(v interface{}) ([]byte, error))
+	}
+	e.encode[reflect.TypeOf(sample)] = encode
+}
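
Extension is the mgo-specific addition in this vendoring; it lets the decoder accept MongoDB-style extended JSON. A hedged sketch of the API defined above: the names ObjectId and $oidFunc are illustrative, the decoded shape follows the DecodeFunc doc comment, and this internal package is importable only from within gopkg.in/mgo.v2 itself:

```go
// Hypothetical usage from inside gopkg.in/mgo.v2, importing this
// package as "json".
var ext json.Extension
ext.DecodeFunc("ObjectId", "$oidFunc", "Id") // ObjectId("x") => {"$oidFunc": {"Id": "x"}}
ext.DecodeConst("undefined", nil)            // a bare undefined decodes as nil
ext.DecodeUnquotedKeys(true)                 // accept {_id: ...}
ext.DecodeTrailingCommas(true)               // accept [1, 2,]

dec := json.NewDecoder(strings.NewReader(`{_id: ObjectId("abc"),}`))
dec.Extend(&ext)
var doc map[string]interface{}
if err := dec.Decode(&doc); err != nil {
	// handle malformed input
}
```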
diff --git a/switchq/vendor/gopkg.in/mgo.v2/internal/json/fold.go b/switchq/vendor/gopkg.in/mgo.v2/internal/json/fold.go
new file mode 100644
index 0000000..9e17012
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/internal/json/fold.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"unicode/utf8"
+)
+
+const (
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
+	kelvin       = '\u212a'
+	smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+//  * S maps to s and to U+017F 'ſ' Latin small letter long s
+//  * k maps to K and to U+212A 'K' Kelvin sign
+// See https://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
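
Why 'K' and 'S' force the slower path: their Unicode case folds include non-ASCII runes, which a byte-wise ASCII comparison cannot match. A quick check with the standard library:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	fmt.Println(bytes.EqualFold([]byte("K"), []byte("\u212a"))) // true (Kelvin sign)
	fmt.Println(bytes.EqualFold([]byte("s"), []byte("\u017f"))) // true (long s)
}
```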
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			if sb != tb {
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+		tr, size := utf8.DecodeRune(t)
+		switch sb {
+		case 's', 'S':
+			if tr != smallLongEss {
+				return false
+			}
+		case 'k', 'K':
+			if tr != kelvin {
+				return false
+			}
+		default:
+			return false
+		}
+		t = t[size:]
+
+	}
+	if len(t) > 0 {
+		return false
+	}
+	return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, sb := range s {
+		tb := t[i]
+		if sb == tb {
+			continue
+		}
+		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+			if sb&caseMask != tb&caseMask {
+				return false
+			}
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, b := range s {
+		if b&caseMask != t[i]&caseMask {
+			return false
+		}
+	}
+	return true
+}
diff --git a/switchq/vendor/gopkg.in/mgo.v2/internal/json/indent.go b/switchq/vendor/gopkg.in/mgo.v2/internal/json/indent.go
new file mode 100644
index 0000000..fba1954
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/internal/json/indent.go
@@ -0,0 +1,141 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import "bytes"
+
+// Compact appends to dst the JSON-encoded src with
+// insignificant space characters elided.
+func Compact(dst *bytes.Buffer, src []byte) error {
+	return compact(dst, src, false)
+}
+
+func compact(dst *bytes.Buffer, src []byte, escape bool) error {
+	origLen := dst.Len()
+	var scan scanner
+	scan.reset()
+	start := 0
+	for i, c := range src {
+		if escape && (c == '<' || c == '>' || c == '&') {
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			dst.WriteString(`\u00`)
+			dst.WriteByte(hex[c>>4])
+			dst.WriteByte(hex[c&0xF])
+			start = i + 1
+		}
+		// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
+		if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			dst.WriteString(`\u202`)
+			dst.WriteByte(hex[src[i+2]&0xF])
+			start = i + 3
+		}
+		v := scan.step(&scan, c)
+		if v >= scanSkipSpace {
+			if v == scanError {
+				break
+			}
+			if start < i {
+				dst.Write(src[start:i])
+			}
+			start = i + 1
+		}
+	}
+	if scan.eof() == scanError {
+		dst.Truncate(origLen)
+		return scan.err
+	}
+	if start < len(src) {
+		dst.Write(src[start:])
+	}
+	return nil
+}
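
Usage sketch of the exported Compact, matching the standard library's:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	if err := json.Compact(&buf, []byte("{ \"a\" : [ 1, 2 ] }")); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // {"a":[1,2]}
}
```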
+
+func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
+	dst.WriteByte('\n')
+	dst.WriteString(prefix)
+	for i := 0; i < depth; i++ {
+		dst.WriteString(indent)
+	}
+}
+
+// Indent appends to dst an indented form of the JSON-encoded src.
+// Each element in a JSON object or array begins on a new,
+// indented line beginning with prefix followed by one or more
+// copies of indent according to the indentation nesting.
+// The data appended to dst does not begin with the prefix nor
+// any indentation, to make it easier to embed inside other formatted JSON data.
+// Although leading space characters (space, tab, carriage return, newline)
+// at the beginning of src are dropped, trailing space characters
+// at the end of src are preserved and copied to dst.
+// For example, if src has no trailing spaces, neither will dst;
+// if src ends in a trailing newline, so will dst.
+func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
+	origLen := dst.Len()
+	var scan scanner
+	scan.reset()
+	needIndent := false
+	depth := 0
+	for _, c := range src {
+		scan.bytes++
+		v := scan.step(&scan, c)
+		if v == scanSkipSpace {
+			continue
+		}
+		if v == scanError {
+			break
+		}
+		if needIndent && v != scanEndObject && v != scanEndArray {
+			needIndent = false
+			depth++
+			newline(dst, prefix, indent, depth)
+		}
+
+		// Emit semantically uninteresting bytes
+		// (in particular, punctuation in strings) unmodified.
+		if v == scanContinue {
+			dst.WriteByte(c)
+			continue
+		}
+
+		// Add spacing around real punctuation.
+		switch c {
+		case '{', '[':
+			// delay indent so that empty object and array are formatted as {} and [].
+			needIndent = true
+			dst.WriteByte(c)
+
+		case ',':
+			dst.WriteByte(c)
+			newline(dst, prefix, indent, depth)
+
+		case ':':
+			dst.WriteByte(c)
+			dst.WriteByte(' ')
+
+		case '}', ']':
+			if needIndent {
+				// suppress indent in empty object/array
+				needIndent = false
+			} else {
+				depth--
+				newline(dst, prefix, indent, depth)
+			}
+			dst.WriteByte(c)
+
+		default:
+			dst.WriteByte(c)
+		}
+	}
+	if scan.eof() == scanError {
+		dst.Truncate(origLen)
+		return scan.err
+	}
+	return nil
+}
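
Usage sketch of Indent, matching the standard library's:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	if err := json.Indent(&buf, []byte(`{"a":1,"b":[2,3]}`), "", "  "); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
	// {
	//   "a": 1,
	//   "b": [
	//     2,
	//     3
	//   ]
	// }
}
```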
diff --git a/switchq/vendor/gopkg.in/mgo.v2/internal/json/scanner.go b/switchq/vendor/gopkg.in/mgo.v2/internal/json/scanner.go
new file mode 100644
index 0000000..9708043
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/internal/json/scanner.go
@@ -0,0 +1,697 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+// JSON value parser state machine.
+// Just about at the limit of what is reasonable to write by hand.
+// Some parts are a bit tedious, but overall it nicely factors out the
+// otherwise common code from the multiple scanning functions
+// in this package (Compact, Indent, checkValid, nextValue, etc).
+//
+// This file starts with two simple examples using the scanner
+// before diving into the scanner itself.
+
+import "strconv"
+
+// checkValid verifies that data is valid JSON-encoded data.
+// scan is passed in for use by checkValid to avoid an allocation.
+func checkValid(data []byte, scan *scanner) error {
+	scan.reset()
+	for _, c := range data {
+		scan.bytes++
+		if scan.step(scan, c) == scanError {
+			return scan.err
+		}
+	}
+	if scan.eof() == scanError {
+		return scan.err
+	}
+	return nil
+}
+
+// nextValue splits data after the next whole JSON value,
+// returning that value and the bytes that follow it as separate slices.
+// scan is passed in for use by nextValue to avoid an allocation.
+func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
+	scan.reset()
+	for i, c := range data {
+		v := scan.step(scan, c)
+		if v >= scanEndObject {
+			switch v {
+			// probe the scanner with a space to determine whether we will
+			// get scanEnd on the next character. Otherwise, if the next character
+			// is not a space, scanEndTop allocates a needless error.
+			case scanEndObject, scanEndArray, scanEndParams:
+				if scan.step(scan, ' ') == scanEnd {
+					return data[:i+1], data[i+1:], nil
+				}
+			case scanError:
+				return nil, nil, scan.err
+			case scanEnd:
+				return data[:i], data[i:], nil
+			}
+		}
+	}
+	if scan.eof() == scanError {
+		return nil, nil, scan.err
+	}
+	return data, nil, nil
+}
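
checkValid above is the engine behind whole-document validation; the standard library later exported the same byte-at-a-time check as json.Valid (Go 1.9+):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	fmt.Println(json.Valid([]byte(`{"a": 1}`))) // true
	fmt.Println(json.Valid([]byte(`{"a": 1`)))  // false
}
```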
+
+// A SyntaxError is a description of a JSON syntax error.
+type SyntaxError struct {
+	msg    string // description of error
+	Offset int64  // error occurred after reading Offset bytes
+}
+
+func (e *SyntaxError) Error() string { return e.msg }
+
+// A scanner is a JSON scanning state machine.
+// Callers call scan.reset() and then pass bytes in one at a time
+// by calling scan.step(&scan, c) for each byte.
+// The return value, referred to as an opcode, tells the
+// caller about significant parsing events like beginning
+// and ending literals, objects, and arrays, so that the
+// caller can follow along if it wishes.
+// The return value scanEnd indicates that a single top-level
+// JSON value has been completed, *before* the byte that
+// just got passed in.  (The indication must be delayed in order
+// to recognize the end of numbers: is 123 a whole value or
+// the beginning of 12345e+6?).
+type scanner struct {
+	// The step is a func to be called to execute the next transition.
+	// Also tried using an integer constant and a single func
+	// with a switch, but using the func directly was 10% faster
+	// on a 64-bit Mac Mini, and it's nicer to read.
+	step func(*scanner, byte) int
+
+	// Reached end of top-level value.
+	endTop bool
+
+	// Stack of what we're in the middle of - array values, object keys, object values.
+	parseState []int
+
+	// Error that happened, if any.
+	err error
+
+	// 1-byte redo (see undo method)
+	redo      bool
+	redoCode  int
+	redoState func(*scanner, byte) int
+
+	// total bytes consumed, updated by decoder.Decode
+	bytes int64
+}
+
+// These values are returned by the state transition functions
+// assigned to scanner.state and the method scanner.eof.
+// They give details about the current state of the scan that
+// callers might be interested to know about.
+// It is okay to ignore the return value of any particular
+// call to scanner.state: if one call returns scanError,
+// every subsequent call will return scanError too.
+const (
+	// Continue.
+	scanContinue     = iota // uninteresting byte
+	scanBeginLiteral        // end implied by next result != scanContinue
+	scanBeginObject         // begin object
+	scanObjectKey           // just finished object key (string)
+	scanObjectValue         // just finished non-last object value
+	scanEndObject           // end object (implies scanObjectValue if possible)
+	scanBeginArray          // begin array
+	scanArrayValue          // just finished array value
+	scanEndArray            // end array (implies scanArrayValue if possible)
+	scanBeginName           // begin function call
+	scanParam               // begin function argument
+	scanEndParams           // end function call
+	scanSkipSpace           // space byte; can skip; known to be last "continue" result
+
+	// Stop.
+	scanEnd   // top-level value ended *before* this byte; known to be first "stop" result
+	scanError // hit an error, scanner.err.
+)
+
+// These values are stored in the parseState stack.
+// They give the current state of a composite value
+// being scanned. If the parser is inside a nested value
+// the parseState describes the nested state, outermost at entry 0.
+const (
+	parseObjectKey   = iota // parsing object key (before colon)
+	parseObjectValue        // parsing object value (after colon)
+	parseArrayValue         // parsing array value
+	parseName               // parsing unquoted name
+	parseParam              // parsing function argument value
+)
+
+// reset prepares the scanner for use.
+// It must be called before calling s.step.
+func (s *scanner) reset() {
+	s.step = stateBeginValue
+	s.parseState = s.parseState[0:0]
+	s.err = nil
+	s.redo = false
+	s.endTop = false
+}
+
+// eof tells the scanner that the end of input has been reached.
+// It returns a scan status just as s.step does.
+func (s *scanner) eof() int {
+	if s.err != nil {
+		return scanError
+	}
+	if s.endTop {
+		return scanEnd
+	}
+	s.step(s, ' ')
+	if s.endTop {
+		return scanEnd
+	}
+	if s.err == nil {
+		s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
+	}
+	return scanError
+}
+
+// pushParseState pushes a new parse state p onto the parse stack.
+func (s *scanner) pushParseState(p int) {
+	s.parseState = append(s.parseState, p)
+}
+
+// popParseState pops a parse state (already obtained) off the stack
+// and updates s.step accordingly.
+func (s *scanner) popParseState() {
+	n := len(s.parseState) - 1
+	s.parseState = s.parseState[0:n]
+	s.redo = false
+	if n == 0 {
+		s.step = stateEndTop
+		s.endTop = true
+	} else {
+		s.step = stateEndValue
+	}
+}
+
+func isSpace(c byte) bool {
+	return c == ' ' || c == '\t' || c == '\r' || c == '\n'
+}
+
+// stateBeginValueOrEmpty is the state after reading `[`.
+func stateBeginValueOrEmpty(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == ']' {
+		return stateEndValue(s, c)
+	}
+	return stateBeginValue(s, c)
+}
+
+// stateBeginValue is the state at the beginning of the input.
+func stateBeginValue(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	switch c {
+	case '{':
+		s.step = stateBeginStringOrEmpty
+		s.pushParseState(parseObjectKey)
+		return scanBeginObject
+	case '[':
+		s.step = stateBeginValueOrEmpty
+		s.pushParseState(parseArrayValue)
+		return scanBeginArray
+	case '"':
+		s.step = stateInString
+		return scanBeginLiteral
+	case '-':
+		s.step = stateNeg
+		return scanBeginLiteral
+	case '0': // beginning of 0.123
+		s.step = state0
+		return scanBeginLiteral
+	case 'n':
+		s.step = stateNew0
+		return scanBeginName
+	}
+	if '1' <= c && c <= '9' { // beginning of 1234.5
+		s.step = state1
+		return scanBeginLiteral
+	}
+	if isName(c) {
+		s.step = stateName
+		return scanBeginName
+	}
+	return s.error(c, "looking for beginning of value")
+}
+
+func isName(c byte) bool {
+	return c == '$' || c == '_' || 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9'
+}
+
+// stateBeginStringOrEmpty is the state after reading `{`.
+func stateBeginStringOrEmpty(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == '}' {
+		n := len(s.parseState)
+		s.parseState[n-1] = parseObjectValue
+		return stateEndValue(s, c)
+	}
+	return stateBeginString(s, c)
+}
+
+// stateBeginString is the state after reading `{"key": value,`.
+func stateBeginString(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == '"' {
+		s.step = stateInString
+		return scanBeginLiteral
+	}
+	if isName(c) {
+		s.step = stateName
+		return scanBeginName
+	}
+	return s.error(c, "looking for beginning of object key string")
+}
+
+// stateEndValue is the state after completing a value,
+// such as after reading `{}` or `true` or `["x"`.
+func stateEndValue(s *scanner, c byte) int {
+	n := len(s.parseState)
+	if n == 0 {
+		// Completed top-level before the current byte.
+		s.step = stateEndTop
+		s.endTop = true
+		return stateEndTop(s, c)
+	}
+	if c <= ' ' && isSpace(c) {
+		s.step = stateEndValue
+		return scanSkipSpace
+	}
+	ps := s.parseState[n-1]
+	switch ps {
+	case parseObjectKey:
+		if c == ':' {
+			s.parseState[n-1] = parseObjectValue
+			s.step = stateBeginValue
+			return scanObjectKey
+		}
+		return s.error(c, "after object key")
+	case parseObjectValue:
+		if c == ',' {
+			s.parseState[n-1] = parseObjectKey
+			s.step = stateBeginStringOrEmpty
+			return scanObjectValue
+		}
+		if c == '}' {
+			s.popParseState()
+			return scanEndObject
+		}
+		return s.error(c, "after object key:value pair")
+	case parseArrayValue:
+		if c == ',' {
+			s.step = stateBeginValueOrEmpty
+			return scanArrayValue
+		}
+		if c == ']' {
+			s.popParseState()
+			return scanEndArray
+		}
+		return s.error(c, "after array element")
+	case parseParam:
+		if c == ',' {
+			s.step = stateBeginValue
+			return scanParam
+		}
+		if c == ')' {
+			s.popParseState()
+			return scanEndParams
+		}
+		return s.error(c, "after array element")
+	}
+	return s.error(c, "")
+}
+
+// stateEndTop is the state after finishing the top-level value,
+// such as after reading `{}` or `[1,2,3]`.
+// Only space characters should be seen now.
+func stateEndTop(s *scanner, c byte) int {
+	if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+		// Complain about non-space byte on next call.
+		s.error(c, "after top-level value")
+	}
+	return scanEnd
+}
+
+// stateInString is the state after reading `"`.
+func stateInString(s *scanner, c byte) int {
+	if c == '"' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	if c == '\\' {
+		s.step = stateInStringEsc
+		return scanContinue
+	}
+	if c < 0x20 {
+		return s.error(c, "in string literal")
+	}
+	return scanContinue
+}
+
+// stateInStringEsc is the state after reading `"\` during a quoted string.
+func stateInStringEsc(s *scanner, c byte) int {
+	switch c {
+	case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
+		s.step = stateInString
+		return scanContinue
+	case 'u':
+		s.step = stateInStringEscU
+		return scanContinue
+	}
+	return s.error(c, "in string escape code")
+}
+
+// stateInStringEscU is the state after reading `"\u` during a quoted string.
+func stateInStringEscU(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInStringEscU1
+		return scanContinue
+	}
+	// numbers
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
+func stateInStringEscU1(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInStringEscU12
+		return scanContinue
+	}
+	// numbers
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
+func stateInStringEscU12(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInStringEscU123
+		return scanContinue
+	}
+	// not a hex digit
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
+func stateInStringEscU123(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
+		s.step = stateInString
+		return scanContinue
+	}
+	// not a hex digit
+	return s.error(c, "in \\u hexadecimal character escape")
+}
+
+// stateNeg is the state after reading `-` during a number.
+func stateNeg(s *scanner, c byte) int {
+	if c == '0' {
+		s.step = state0
+		return scanContinue
+	}
+	if '1' <= c && c <= '9' {
+		s.step = state1
+		return scanContinue
+	}
+	return s.error(c, "in numeric literal")
+}
+
+// state1 is the state after reading a non-zero integer during a number,
+// such as after reading `1` or `100` but not `0`.
+func state1(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		s.step = state1
+		return scanContinue
+	}
+	return state0(s, c)
+}
+
+// state0 is the state after reading `0` during a number.
+func state0(s *scanner, c byte) int {
+	if c == '.' {
+		s.step = stateDot
+		return scanContinue
+	}
+	if c == 'e' || c == 'E' {
+		s.step = stateE
+		return scanContinue
+	}
+	return stateEndValue(s, c)
+}
+
+// stateDot is the state after reading the integer and decimal point in a number,
+// such as after reading `1.`.
+func stateDot(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		s.step = stateDot0
+		return scanContinue
+	}
+	return s.error(c, "after decimal point in numeric literal")
+}
+
+// stateDot0 is the state after reading the integer, decimal point, and subsequent
+// digits of a number, such as after reading `3.14`.
+func stateDot0(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		return scanContinue
+	}
+	if c == 'e' || c == 'E' {
+		s.step = stateE
+		return scanContinue
+	}
+	return stateEndValue(s, c)
+}
+
+// stateE is the state after reading the mantissa and e in a number,
+// such as after reading `314e` or `0.314e`.
+func stateE(s *scanner, c byte) int {
+	if c == '+' || c == '-' {
+		s.step = stateESign
+		return scanContinue
+	}
+	return stateESign(s, c)
+}
+
+// stateESign is the state after reading the mantissa, e, and sign in a number,
+// such as after reading `314e-` or `0.314e+`.
+func stateESign(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		s.step = stateE0
+		return scanContinue
+	}
+	return s.error(c, "in exponent of numeric literal")
+}
+
+// stateE0 is the state after reading the mantissa, e, optional sign,
+// and at least one digit of the exponent in a number,
+// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
+func stateE0(s *scanner, c byte) int {
+	if '0' <= c && c <= '9' {
+		return scanContinue
+	}
+	return stateEndValue(s, c)
+}
+
+// stateNew0 is the state after reading `n`, which may begin the keyword
+// `new` or an unquoted name.
+func stateNew0(s *scanner, c byte) int {
+	if c == 'e' {
+		s.step = stateNew1
+		return scanContinue
+	}
+	s.step = stateName
+	return stateName(s, c)
+}
+
+// stateNew1 is the state after reading `ne`.
+func stateNew1(s *scanner, c byte) int {
+	if c == 'w' {
+		s.step = stateNew2
+		return scanContinue
+	}
+	s.step = stateName
+	return stateName(s, c)
+}
+
+// stateNew2 is the state after reading `new`.
+func stateNew2(s *scanner, c byte) int {
+	s.step = stateName
+	if c == ' ' {
+		return scanContinue
+	}
+	return stateName(s, c)
+}
+
+// stateName is the state while reading an unquoted function name.
+func stateName(s *scanner, c byte) int {
+	if isName(c) {
+		return scanContinue
+	}
+	if c == '(' {
+		s.step = stateParamOrEmpty
+		s.pushParseState(parseParam)
+		return scanParam
+	}
+	return stateEndValue(s, c)
+}
+
+// stateParamOrEmpty is the state after reading `(`.
+func stateParamOrEmpty(s *scanner, c byte) int {
+	if c <= ' ' && isSpace(c) {
+		return scanSkipSpace
+	}
+	if c == ')' {
+		return stateEndValue(s, c)
+	}
+	return stateBeginValue(s, c)
+}
+
+// stateT is the state after reading `t`.
+func stateT(s *scanner, c byte) int {
+	if c == 'r' {
+		s.step = stateTr
+		return scanContinue
+	}
+	return s.error(c, "in literal true (expecting 'r')")
+}
+
+// stateTr is the state after reading `tr`.
+func stateTr(s *scanner, c byte) int {
+	if c == 'u' {
+		s.step = stateTru
+		return scanContinue
+	}
+	return s.error(c, "in literal true (expecting 'u')")
+}
+
+// stateTru is the state after reading `tru`.
+func stateTru(s *scanner, c byte) int {
+	if c == 'e' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	return s.error(c, "in literal true (expecting 'e')")
+}
+
+// stateF is the state after reading `f`.
+func stateF(s *scanner, c byte) int {
+	if c == 'a' {
+		s.step = stateFa
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 'a')")
+}
+
+// stateFa is the state after reading `fa`.
+func stateFa(s *scanner, c byte) int {
+	if c == 'l' {
+		s.step = stateFal
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 'l')")
+}
+
+// stateFal is the state after reading `fal`.
+func stateFal(s *scanner, c byte) int {
+	if c == 's' {
+		s.step = stateFals
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 's')")
+}
+
+// stateFals is the state after reading `fals`.
+func stateFals(s *scanner, c byte) int {
+	if c == 'e' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	return s.error(c, "in literal false (expecting 'e')")
+}
+
+// stateN is the state after reading `n`.
+func stateN(s *scanner, c byte) int {
+	if c == 'u' {
+		s.step = stateNu
+		return scanContinue
+	}
+	return s.error(c, "in literal null (expecting 'u')")
+}
+
+// stateNu is the state after reading `nu`.
+func stateNu(s *scanner, c byte) int {
+	if c == 'l' {
+		s.step = stateNul
+		return scanContinue
+	}
+	return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateNul is the state after reading `nul`.
+func stateNul(s *scanner, c byte) int {
+	if c == 'l' {
+		s.step = stateEndValue
+		return scanContinue
+	}
+	return s.error(c, "in literal null (expecting 'l')")
+}
+
+// stateError is the state after reaching a syntax error,
+// such as after reading `[1}` or `5.1.2`.
+func stateError(s *scanner, c byte) int {
+	return scanError
+}
+
+// error records an error and switches to the error state.
+func (s *scanner) error(c byte, context string) int {
+	s.step = stateError
+	s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
+	return scanError
+}
+
+// quoteChar formats c as a quoted character literal.
+func quoteChar(c byte) string {
+	// special cases - different from quoted strings
+	if c == '\'' {
+		return `'\''`
+	}
+	if c == '"' {
+		return `'"'`
+	}
+
+	// use quoted string with different quotation marks
+	s := strconv.Quote(string(c))
+	return "'" + s[1:len(s)-1] + "'"
+}
+
+// undo causes the scanner to return scanCode from the next state transition.
+// This gives callers a simple 1-byte undo mechanism.
+func (s *scanner) undo(scanCode int) {
+	if s.redo {
+		panic("json: invalid use of scanner")
+	}
+	s.redoCode = scanCode
+	s.redoState = s.step
+	s.step = stateRedo
+	s.redo = true
+}
+
+// stateRedo helps implement the scanner's 1-byte undo.
+func stateRedo(s *scanner, c byte) int {
+	s.redo = false
+	s.step = s.redoState
+	return s.redoCode
+}
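The scanner above is a function-pointer state machine: `s.step` always holds the function that will consume the next byte, each state installs its successor before returning, and the return value is an opcode such as `scanContinue` or `scanError`. A minimal standalone sketch of the same pattern (all names here are illustrative, not part of the vendored package):

```Go
package main

import "fmt"

// Opcodes returned by each step, mirroring scanContinue/scanEnd/scanError.
const (
	opContinue = iota
	opEnd
	opError
)

// mini is a one-byte-at-a-time state machine: step points at the
// function that will consume the next byte.
type mini struct {
	step func(*mini, byte) int
}

// stateStart expects the opening quote of a string.
func stateStart(m *mini, c byte) int {
	if c == '"' {
		m.step = stateInString // install the successor state
		return opContinue
	}
	return opError
}

// stateInString consumes string bytes until the closing quote.
func stateInString(m *mini, c byte) int {
	if c == '"' {
		m.step = stateDone
		return opEnd
	}
	return opContinue
}

// stateDone rejects any trailing bytes.
func stateDone(m *mini, c byte) int { return opError }

func main() {
	m := &mini{step: stateStart}
	for _, c := range []byte(`"hello"`) {
		if m.step(m, c) == opError {
			fmt.Println("invalid input")
			return
		}
	}
	fmt.Println("ok") // ok
}
```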
diff --git a/switchq/vendor/gopkg.in/mgo.v2/internal/json/stream.go b/switchq/vendor/gopkg.in/mgo.v2/internal/json/stream.go
new file mode 100644
index 0000000..e023702
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/internal/json/stream.go
@@ -0,0 +1,510 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"bytes"
+	"errors"
+	"io"
+)
+
+// A Decoder reads and decodes JSON values from an input stream.
+type Decoder struct {
+	r     io.Reader
+	buf   []byte
+	d     decodeState
+	scanp int // start of unread data in buf
+	scan  scanner
+	err   error
+
+	tokenState int
+	tokenStack []int
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may
+// read data from r beyond the JSON values requested.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r: r}
+}
+
+// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
+// Number instead of as a float64.
+func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
+
+// Decode reads the next JSON-encoded value from its
+// input and stores it in the value pointed to by v.
+//
+// See the documentation for Unmarshal for details about
+// the conversion of JSON into a Go value.
+func (dec *Decoder) Decode(v interface{}) error {
+	if dec.err != nil {
+		return dec.err
+	}
+
+	if err := dec.tokenPrepareForDecode(); err != nil {
+		return err
+	}
+
+	if !dec.tokenValueAllowed() {
+		return &SyntaxError{msg: "not at beginning of value"}
+	}
+
+	// Read whole value into buffer.
+	n, err := dec.readValue()
+	if err != nil {
+		return err
+	}
+	dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
+	dec.scanp += n
+
+	// Don't save err from unmarshal into dec.err:
+	// the connection is still usable since we read a complete JSON
+	// object from it before the error happened.
+	err = dec.d.unmarshal(v)
+
+	// fixup token streaming state
+	dec.tokenValueEnd()
+
+	return err
+}
+
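This vendored package tracks the standard library's `encoding/json`, so `Decode` behaves like its stdlib twin: each call consumes exactly one complete JSON value from the stream, which suits whitespace-separated value streams. A sketch using the stdlib equivalent:

```Go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Two JSON values back to back on one stream.
	r := strings.NewReader(`{"n":1} {"n":2}`)
	dec := json.NewDecoder(r)
	for {
		var v struct{ N int }
		if err := dec.Decode(&v); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(v.N) // 1, then 2
	}
}
```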
+// Buffered returns a reader of the data remaining in the Decoder's
+// buffer. The reader is valid until the next call to Decode.
+func (dec *Decoder) Buffered() io.Reader {
+	return bytes.NewReader(dec.buf[dec.scanp:])
+}
+
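Because `readValue` reads ahead of the value actually decoded, `Buffered` is the way to recover bytes that were consumed from the underlying reader but not yet used. Sketched with the stdlib equivalent:

```Go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"strings"
)

func main() {
	r := strings.NewReader(`{"ok":true}trailing bytes`)
	dec := json.NewDecoder(r)
	var v map[string]bool
	if err := dec.Decode(&v); err != nil {
		panic(err)
	}
	// Data the decoder read past the value is still available.
	rest, _ := ioutil.ReadAll(dec.Buffered())
	fmt.Printf("%s\n", rest) // trailing bytes
}
```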
+// readValue reads a JSON value into dec.buf.
+// It returns the length of the encoding.
+func (dec *Decoder) readValue() (int, error) {
+	dec.scan.reset()
+
+	scanp := dec.scanp
+	var err error
+Input:
+	for {
+		// Look in the buffer for a new value.
+		for i, c := range dec.buf[scanp:] {
+			dec.scan.bytes++
+			v := dec.scan.step(&dec.scan, c)
+			if v == scanEnd {
+				scanp += i
+				break Input
+			}
+			// scanEnd is delayed one byte.
+			// We might block trying to get that byte from src,
+			// so instead invent a space byte.
+			if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
+				scanp += i + 1
+				break Input
+			}
+			if v == scanError {
+				dec.err = dec.scan.err
+				return 0, dec.scan.err
+			}
+		}
+		scanp = len(dec.buf)
+
+		// Did the last read have an error?
+		// Delayed until now to allow buffer scan.
+		if err != nil {
+			if err == io.EOF {
+				if dec.scan.step(&dec.scan, ' ') == scanEnd {
+					break Input
+				}
+				if nonSpace(dec.buf) {
+					err = io.ErrUnexpectedEOF
+				}
+			}
+			dec.err = err
+			return 0, err
+		}
+
+		n := scanp - dec.scanp
+		err = dec.refill()
+		scanp = dec.scanp + n
+	}
+	return scanp - dec.scanp, nil
+}
+
+func (dec *Decoder) refill() error {
+	// Make room to read more into the buffer.
+	// First slide down data already consumed.
+	if dec.scanp > 0 {
+		n := copy(dec.buf, dec.buf[dec.scanp:])
+		dec.buf = dec.buf[:n]
+		dec.scanp = 0
+	}
+
+	// Grow buffer if not large enough.
+	const minRead = 512
+	if cap(dec.buf)-len(dec.buf) < minRead {
+		newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
+		copy(newBuf, dec.buf)
+		dec.buf = newBuf
+	}
+
+	// Read. Delay error for next iteration (after scan).
+	n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
+	dec.buf = dec.buf[0 : len(dec.buf)+n]
+
+	return err
+}
+
+func nonSpace(b []byte) bool {
+	for _, c := range b {
+		if !isSpace(c) {
+			return true
+		}
+	}
+	return false
+}
+
+// An Encoder writes JSON values to an output stream.
+type Encoder struct {
+	w          io.Writer
+	err        error
+	escapeHTML bool
+
+	indentBuf    *bytes.Buffer
+	indentPrefix string
+	indentValue  string
+
+	ext Extension
+}
+
+// NewEncoder returns a new encoder that writes to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{w: w, escapeHTML: true}
+}
+
+// Encode writes the JSON encoding of v to the stream,
+// followed by a newline character.
+//
+// See the documentation for Marshal for details about the
+// conversion of Go values to JSON.
+func (enc *Encoder) Encode(v interface{}) error {
+	if enc.err != nil {
+		return enc.err
+	}
+	e := newEncodeState()
+	e.ext = enc.ext
+	err := e.marshal(v, encOpts{escapeHTML: enc.escapeHTML})
+	if err != nil {
+		return err
+	}
+
+	// Terminate each value with a newline.
+	// This makes the output look a little nicer
+	// when debugging, and some kind of space
+	// is required if the encoded value was a number,
+	// so that the reader knows there aren't more
+	// digits coming.
+	e.WriteByte('\n')
+
+	b := e.Bytes()
+	if enc.indentBuf != nil {
+		enc.indentBuf.Reset()
+		err = Indent(enc.indentBuf, b, enc.indentPrefix, enc.indentValue)
+		if err != nil {
+			return err
+		}
+		b = enc.indentBuf.Bytes()
+	}
+	if _, err = enc.w.Write(b); err != nil {
+		enc.err = err
+	}
+	encodeStatePool.Put(e)
+	return err
+}
+
+// Indent sets the encoder to format each encoded value with Indent.
+func (enc *Encoder) Indent(prefix, indent string) {
+	enc.indentBuf = new(bytes.Buffer)
+	enc.indentPrefix = prefix
+	enc.indentValue = indent
+}
+
+// DisableHTMLEscaping causes the encoder not to escape angle brackets
+// ("<" and ">") or ampersands ("&") in JSON strings.
+func (enc *Encoder) DisableHTMLEscaping() {
+	enc.escapeHTML = false
+}
+
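`Indent` and `DisableHTMLEscaping` here correspond to `SetIndent` and `SetEscapeHTML(false)` in the standard library from Go 1.7 on; a stdlib sketch of the same encoder behavior:

```Go
package main

import (
	"encoding/json"
	"os"
)

func main() {
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")  // same role as Encoder.Indent above
	enc.SetEscapeHTML(false) // same role as DisableHTMLEscaping above
	enc.Encode(map[string]string{"html": "<b>&</b>"})
	// {
	//   "html": "<b>&</b>"
	// }
}
```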
+// RawMessage is a raw encoded JSON value.
+// It implements Marshaler and Unmarshaler and can
+// be used to delay JSON decoding or precompute a JSON encoding.
+type RawMessage []byte
+
+// MarshalJSON returns *m as the JSON encoding of m.
+func (m *RawMessage) MarshalJSON() ([]byte, error) {
+	return *m, nil
+}
+
+// UnmarshalJSON sets *m to a copy of data.
+func (m *RawMessage) UnmarshalJSON(data []byte) error {
+	if m == nil {
+		return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
+	}
+	*m = append((*m)[0:0], data...)
+	return nil
+}
+
+var _ Marshaler = (*RawMessage)(nil)
+var _ Unmarshaler = (*RawMessage)(nil)
+
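`RawMessage` defers decoding of a subtree: capture the raw bytes, inspect a discriminator field, then decode the payload with a concrete type. The stdlib type behaves identically:

```Go
package main

import (
	"encoding/json"
	"fmt"
)

// Envelope keeps the payload undecoded until its type is known.
type Envelope struct {
	Type    string          `json:"type"`
	Payload json.RawMessage `json:"payload"`
}

func main() {
	data := []byte(`{"type":"point","payload":{"x":1,"y":2}}`)
	var env Envelope
	if err := json.Unmarshal(data, &env); err != nil {
		panic(err)
	}
	if env.Type == "point" {
		var p struct{ X, Y int }
		json.Unmarshal(env.Payload, &p) // second pass, now typed
		fmt.Println(p.X, p.Y)           // 1 2
	}
}
```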
+// A Token holds a value of one of these types:
+//
+//	Delim, for the four JSON delimiters [ ] { }
+//	bool, for JSON booleans
+//	float64, for JSON numbers
+//	Number, for JSON numbers
+//	string, for JSON string literals
+//	nil, for JSON null
+//
+type Token interface{}
+
+const (
+	tokenTopValue = iota
+	tokenArrayStart
+	tokenArrayValue
+	tokenArrayComma
+	tokenObjectStart
+	tokenObjectKey
+	tokenObjectColon
+	tokenObjectValue
+	tokenObjectComma
+)
+
+// tokenPrepareForDecode advances the token state from a separator state to a value state.
+func (dec *Decoder) tokenPrepareForDecode() error {
+	// Note: Not calling peek before switch, to avoid
+	// putting peek into the standard Decode path.
+	// peek is only called when using the Token API.
+	switch dec.tokenState {
+	case tokenArrayComma:
+		c, err := dec.peek()
+		if err != nil {
+			return err
+		}
+		if c != ',' {
+			return &SyntaxError{"expected comma after array element", 0}
+		}
+		dec.scanp++
+		dec.tokenState = tokenArrayValue
+	case tokenObjectColon:
+		c, err := dec.peek()
+		if err != nil {
+			return err
+		}
+		if c != ':' {
+			return &SyntaxError{"expected colon after object key", 0}
+		}
+		dec.scanp++
+		dec.tokenState = tokenObjectValue
+	}
+	return nil
+}
+
+func (dec *Decoder) tokenValueAllowed() bool {
+	switch dec.tokenState {
+	case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
+		return true
+	}
+	return false
+}
+
+func (dec *Decoder) tokenValueEnd() {
+	switch dec.tokenState {
+	case tokenArrayStart, tokenArrayValue:
+		dec.tokenState = tokenArrayComma
+	case tokenObjectValue:
+		dec.tokenState = tokenObjectComma
+	}
+}
+
+// A Delim is a JSON array or object delimiter, one of [ ] { or }.
+type Delim rune
+
+func (d Delim) String() string {
+	return string(d)
+}
+
+// Token returns the next JSON token in the input stream.
+// At the end of the input stream, Token returns nil, io.EOF.
+//
+// Token guarantees that the delimiters [ ] { } it returns are
+// properly nested and matched: if Token encounters an unexpected
+// delimiter in the input, it will return an error.
+//
+// The input stream consists of basic JSON values—bool, string,
+// number, and null—along with delimiters [ ] { } of type Delim
+// to mark the start and end of arrays and objects.
+// Commas and colons are elided.
+func (dec *Decoder) Token() (Token, error) {
+	for {
+		c, err := dec.peek()
+		if err != nil {
+			return nil, err
+		}
+		switch c {
+		case '[':
+			if !dec.tokenValueAllowed() {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+			dec.tokenState = tokenArrayStart
+			return Delim('['), nil
+
+		case ']':
+			if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+			dec.tokenValueEnd()
+			return Delim(']'), nil
+
+		case '{':
+			if !dec.tokenValueAllowed() {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenStack = append(dec.tokenStack, dec.tokenState)
+			dec.tokenState = tokenObjectStart
+			return Delim('{'), nil
+
+		case '}':
+			if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
+			dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
+			dec.tokenValueEnd()
+			return Delim('}'), nil
+
+		case ':':
+			if dec.tokenState != tokenObjectColon {
+				return dec.tokenError(c)
+			}
+			dec.scanp++
+			dec.tokenState = tokenObjectValue
+			continue
+
+		case ',':
+			if dec.tokenState == tokenArrayComma {
+				dec.scanp++
+				dec.tokenState = tokenArrayValue
+				continue
+			}
+			if dec.tokenState == tokenObjectComma {
+				dec.scanp++
+				dec.tokenState = tokenObjectKey
+				continue
+			}
+			return dec.tokenError(c)
+
+		case '"':
+			if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
+				var x string
+				old := dec.tokenState
+				dec.tokenState = tokenTopValue
+				err := dec.Decode(&x)
+				dec.tokenState = old
+				if err != nil {
+					clearOffset(err)
+					return nil, err
+				}
+				dec.tokenState = tokenObjectColon
+				return x, nil
+			}
+			fallthrough
+
+		default:
+			if !dec.tokenValueAllowed() {
+				return dec.tokenError(c)
+			}
+			var x interface{}
+			if err := dec.Decode(&x); err != nil {
+				clearOffset(err)
+				return nil, err
+			}
+			return x, nil
+		}
+	}
+}
+
+func clearOffset(err error) {
+	if s, ok := err.(*SyntaxError); ok {
+		s.Offset = 0
+	}
+}
+
+func (dec *Decoder) tokenError(c byte) (Token, error) {
+	var context string
+	switch dec.tokenState {
+	case tokenTopValue:
+		context = " looking for beginning of value"
+	case tokenArrayStart, tokenArrayValue, tokenObjectValue:
+		context = " looking for beginning of value"
+	case tokenArrayComma:
+		context = " after array element"
+	case tokenObjectKey:
+		context = " looking for beginning of object key string"
+	case tokenObjectColon:
+		context = " after object key"
+	case tokenObjectComma:
+		context = " after object key:value pair"
+	}
+	return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
+}
+
+// More reports whether there is another element in the
+// current array or object being parsed.
+func (dec *Decoder) More() bool {
+	c, err := dec.peek()
+	return err == nil && c != ']' && c != '}'
+}
+
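`Token` and `More` together form a pull parser: delimiters come back as `Delim`, scalars as Go values, and commas and colons are consumed silently, so `Decode` can be interleaved to read one array element at a time. A stdlib sketch:

```Go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`[{"n":1},{"n":2}]`))
	if _, err := dec.Token(); err != nil { // consume the opening [
		panic(err)
	}
	for dec.More() { // true while elements remain
		var v struct{ N int }
		if err := dec.Decode(&v); err != nil {
			panic(err)
		}
		fmt.Println(v.N) // 1, then 2
	}
	dec.Token() // consume the closing ]
}
```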
+func (dec *Decoder) peek() (byte, error) {
+	var err error
+	for {
+		for i := dec.scanp; i < len(dec.buf); i++ {
+			c := dec.buf[i]
+			if isSpace(c) {
+				continue
+			}
+			dec.scanp = i
+			return c, nil
+		}
+		// buffer has been scanned, now report any error
+		if err != nil {
+			return 0, err
+		}
+		err = dec.refill()
+	}
+}
+
+/*
+TODO
+
+// EncodeToken writes the given JSON token to the stream.
+// It returns an error if the delimiters [ ] { } are not properly used.
+//
+// EncodeToken does not call Flush, because usually it is part of
+// a larger operation such as Encode, and those will call Flush when finished.
+// Callers that create an Encoder and then invoke EncodeToken directly,
+// without using Encode, need to call Flush when finished to ensure that
+// the JSON is written to the underlying writer.
+func (e *Encoder) EncodeToken(t Token) error  {
+	...
+}
+
+*/
diff --git a/switchq/vendor/gopkg.in/mgo.v2/internal/json/tags.go b/switchq/vendor/gopkg.in/mgo.v2/internal/json/tags.go
new file mode 100644
index 0000000..c38fd51
--- /dev/null
+++ b/switchq/vendor/gopkg.in/mgo.v2/internal/json/tags.go
@@ -0,0 +1,44 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package json
+
+import (
+	"strings"
+)
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx], tagOptions(tag[idx+1:])
+	}
+	return tag, tagOptions("")
+}
+
+// Contains reports whether the comma-separated list of options
+// contains the given option name. The name must be bounded by the
+// start or end of the list or by commas.
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}
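`parseTag` splits a tag such as `name,omitempty` at the first comma and `Contains` scans the remaining options. The helpers are unexported, but the raw material they consume is easy to show; the comma-splitting below mirrors `parseTag`:

```Go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

type User struct {
	Name string `json:"name,omitempty"`
}

func main() {
	tag := reflect.TypeOf(User{}).Field(0).Tag.Get("json")
	fmt.Println(tag) // name,omitempty

	// Equivalent to parseTag: split at the first comma.
	name, opts := tag, ""
	if i := strings.Index(tag, ","); i != -1 {
		name, opts = tag[:i], tag[i+1:]
	}
	fmt.Println(name, opts) // name omitempty
}
```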
diff --git a/switchq/vendor/gopkg.in/yaml.v2/LICENSE b/switchq/vendor/gopkg.in/yaml.v2/LICENSE
new file mode 100644
index 0000000..866d74a
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2011-2016 Canonical Ltd.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/switchq/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/switchq/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
new file mode 100644
index 0000000..8da58fb
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/LICENSE.libyaml
@@ -0,0 +1,31 @@
+The following files were ported to Go from C files of libyaml, and thus
+are still covered by their original copyright and license:
+
+    apic.go
+    emitterc.go
+    parserc.go
+    readerc.go
+    scannerc.go
+    writerc.go
+    yamlh.go
+    yamlprivateh.go
+
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/switchq/vendor/gopkg.in/yaml.v2/README.md b/switchq/vendor/gopkg.in/yaml.v2/README.md
new file mode 100644
index 0000000..1884de6
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/README.md
@@ -0,0 +1,131 @@
+# YAML support for the Go language
+
+Introduction
+------------
+
+The yaml package enables Go programs to comfortably encode and decode YAML
+values. It was developed within [Canonical](https://www.canonical.com) as
+part of the [juju](https://juju.ubuntu.com) project, and is based on a
+pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
+C library to parse and generate YAML data quickly and reliably.
+
+Compatibility
+-------------
+
+The yaml package supports most of YAML 1.1 and 1.2, including support for
+anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
+implemented, and base-60 floats from YAML 1.1 are purposefully not
+supported since they're a poor design and are gone in YAML 1.2.
+
+Installation and usage
+----------------------
+
+The import path for the package is *gopkg.in/yaml.v2*.
+
+To install it, run:
+
+    go get gopkg.in/yaml.v2
+
+API documentation
+-----------------
+
+If opened in a browser, the import path itself leads to the API documentation:
+
+  * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
+
+API stability
+-------------
+
+The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
+
+
+License
+-------
+
+The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details.
+
+
+Example
+-------
+
+```Go
+package main
+
+import (
+        "fmt"
+        "log"
+
+        "gopkg.in/yaml.v2"
+)
+
+var data = `
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+`
+
+type T struct {
+        A string
+        B struct {
+                RenamedC int   `yaml:"c"`
+                D        []int `yaml:",flow"`
+        }
+}
+
+func main() {
+        t := T{}
+    
+        err := yaml.Unmarshal([]byte(data), &t)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- t:\n%v\n\n", t)
+    
+        d, err := yaml.Marshal(&t)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- t dump:\n%s\n\n", string(d))
+    
+        m := make(map[interface{}]interface{})
+    
+        err = yaml.Unmarshal([]byte(data), &m)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- m:\n%v\n\n", m)
+    
+        d, err = yaml.Marshal(&m)
+        if err != nil {
+                log.Fatalf("error: %v", err)
+        }
+        fmt.Printf("--- m dump:\n%s\n\n", string(d))
+}
+```
+
+This example will generate the following output:
+
+```
+--- t:
+{Easy! {2 [3 4]}}
+
+--- t dump:
+a: Easy!
+b:
+  c: 2
+  d: [3, 4]
+
+
+--- m:
+map[a:Easy! b:map[c:2 d:[3 4]]]
+
+--- m dump:
+a: Easy!
+b:
+  c: 2
+  d:
+  - 3
+  - 4
+```
+
diff --git a/switchq/vendor/gopkg.in/yaml.v2/apic.go b/switchq/vendor/gopkg.in/yaml.v2/apic.go
new file mode 100644
index 0000000..95ec014
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/apic.go
@@ -0,0 +1,742 @@
+package yaml
+
+import (
+	"io"
+	"os"
+)
+
+func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
+	//fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
+
+	// Check if we can move the queue at the beginning of the buffer.
+	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
+		if parser.tokens_head != len(parser.tokens) {
+			copy(parser.tokens, parser.tokens[parser.tokens_head:])
+		}
+		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
+		parser.tokens_head = 0
+	}
+	parser.tokens = append(parser.tokens, *token)
+	if pos < 0 {
+		return
+	}
+	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
+	parser.tokens[parser.tokens_head+pos] = *token
+}
+
+// Create a new parser object.
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+	*parser = yaml_parser_t{
+		raw_buffer: make([]byte, 0, input_raw_buffer_size),
+		buffer:     make([]byte, 0, input_buffer_size),
+	}
+	return true
+}
+
+// Destroy a parser object.
+func yaml_parser_delete(parser *yaml_parser_t) {
+	*parser = yaml_parser_t{}
+}
+
+// String read handler.
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	if parser.input_pos == len(parser.input) {
+		return 0, io.EOF
+	}
+	n = copy(buffer, parser.input[parser.input_pos:])
+	parser.input_pos += n
+	return n, nil
+}
+
+// File read handler.
+func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+	return parser.input_file.Read(buffer)
+}
+
+// Set a string input.
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_string_read_handler
+	parser.input = input
+	parser.input_pos = 0
+}
+
+// Set a file input.
+func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
+	if parser.read_handler != nil {
+		panic("must set the input source only once")
+	}
+	parser.read_handler = yaml_file_read_handler
+	parser.input_file = file
+}
+
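The parser hides its input behind a read-handler function so string and file inputs share one code path. A standalone sketch of that pattern (the `readHandler` type and constructors below are illustrative, not the package's):

```Go
package main

import (
	"fmt"
	"io"
	"strings"
)

// readHandler mirrors the parser's read_handler: fill the buffer, report the count.
type readHandler func(buffer []byte) (n int, err error)

// fromReader adapts any io.Reader (a file, a network stream, ...) to a handler.
func fromReader(r io.Reader) readHandler {
	return func(buffer []byte) (int, error) { return r.Read(buffer) }
}

// fromString serves successive chunks of an in-memory input.
func fromString(s string) readHandler {
	pos := 0
	return func(buffer []byte) (int, error) {
		if pos == len(s) {
			return 0, io.EOF
		}
		n := copy(buffer, s[pos:])
		pos += n
		return n, nil
	}
}

func main() {
	handlers := []readHandler{fromString("a: 1"), fromReader(strings.NewReader("b: 2"))}
	for _, h := range handlers {
		buf := make([]byte, 16)
		n, _ := h(buf)
		fmt.Printf("%s\n", buf[:n]) // a: 1, then b: 2
	}
}
```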
+// Set the source encoding.
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+	if parser.encoding != yaml_ANY_ENCODING {
+		panic("must set the encoding only once")
+	}
+	parser.encoding = encoding
+}
+
+// Create a new emitter object.
+func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
+	*emitter = yaml_emitter_t{
+		buffer:     make([]byte, output_buffer_size),
+		raw_buffer: make([]byte, 0, output_raw_buffer_size),
+		states:     make([]yaml_emitter_state_t, 0, initial_stack_size),
+		events:     make([]yaml_event_t, 0, initial_queue_size),
+	}
+	return true
+}
+
+// Destroy an emitter object.
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+	*emitter = yaml_emitter_t{}
+}
+
+// String write handler.
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+	return nil
+}
+
+// File write handler.
+func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+	_, err := emitter.output_file.Write(buffer)
+	return err
+}
+
+// Set a string output.
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_string_write_handler
+	emitter.output_buffer = output_buffer
+}
+
+// Set a file output.
+func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
+	if emitter.write_handler != nil {
+		panic("must set the output target only once")
+	}
+	emitter.write_handler = yaml_file_write_handler
+	emitter.output_file = file
+}
+
+// Set the output encoding.
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+	if emitter.encoding != yaml_ANY_ENCODING {
+		panic("must set the output encoding only once")
+	}
+	emitter.encoding = encoding
+}
+
+// Set the canonical output style.
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+	emitter.canonical = canonical
+}
+
+// Set the indentation increment.
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+	if indent < 2 || indent > 9 {
+		indent = 2
+	}
+	emitter.best_indent = indent
+}
+
+// Set the preferred line width.
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+	if width < 0 {
+		width = -1
+	}
+	emitter.best_width = width
+}
+
+// Set if unescaped non-ASCII characters are allowed.
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+	emitter.unicode = unicode
+}
+
+// Set the preferred line break character.
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+	emitter.line_break = line_break
+}
+
+///*
+// * Destroy a token object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_token_delete(yaml_token_t *token)
+//{
+//    assert(token);  // Non-NULL token object expected.
+//
+//    switch (token.type)
+//    {
+//        case YAML_TAG_DIRECTIVE_TOKEN:
+//            yaml_free(token.data.tag_directive.handle);
+//            yaml_free(token.data.tag_directive.prefix);
+//            break;
+//
+//        case YAML_ALIAS_TOKEN:
+//            yaml_free(token.data.alias.value);
+//            break;
+//
+//        case YAML_ANCHOR_TOKEN:
+//            yaml_free(token.data.anchor.value);
+//            break;
+//
+//        case YAML_TAG_TOKEN:
+//            yaml_free(token.data.tag.handle);
+//            yaml_free(token.data.tag.suffix);
+//            break;
+//
+//        case YAML_SCALAR_TOKEN:
+//            yaml_free(token.data.scalar.value);
+//            break;
+//
+//        default:
+//            break;
+//    }
+//
+//    memset(token, 0, sizeof(yaml_token_t));
+//}
+//
+///*
+// * Check if a string is a valid UTF-8 sequence.
+// *
+// * Check 'reader.c' for more details on UTF-8 encoding.
+// */
+//
+//static int
+//yaml_check_utf8(yaml_char_t *start, size_t length)
+//{
+//    yaml_char_t *end = start+length;
+//    yaml_char_t *pointer = start;
+//
+//    while (pointer < end) {
+//        unsigned char octet;
+//        unsigned int width;
+//        unsigned int value;
+//        size_t k;
+//
+//        octet = pointer[0];
+//        width = (octet & 0x80) == 0x00 ? 1 :
+//                (octet & 0xE0) == 0xC0 ? 2 :
+//                (octet & 0xF0) == 0xE0 ? 3 :
+//                (octet & 0xF8) == 0xF0 ? 4 : 0;
+//        value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+//                (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+//                (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+//                (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+//        if (!width) return 0;
+//        if (pointer+width > end) return 0;
+//        for (k = 1; k < width; k ++) {
+//            octet = pointer[k];
+//            if ((octet & 0xC0) != 0x80) return 0;
+//            value = (value << 6) + (octet & 0x3F);
+//        }
+//        if (!((width == 1) ||
+//            (width == 2 && value >= 0x80) ||
+//            (width == 3 && value >= 0x800) ||
+//            (width == 4 && value >= 0x10000))) return 0;
+//
+//        pointer += width;
+//    }
+//
+//    return 1;
+//}
+//
+
+// Create STREAM-START.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
+	*event = yaml_event_t{
+		typ:      yaml_STREAM_START_EVENT,
+		encoding: encoding,
+	}
+	return true
+}
+
+// Create STREAM-END.
+func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
+	*event = yaml_event_t{
+		typ: yaml_STREAM_END_EVENT,
+	}
+	return true
+}
+
+// Create DOCUMENT-START.
+func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
+	tag_directives []yaml_tag_directive_t, implicit bool) bool {
+	*event = yaml_event_t{
+		typ:               yaml_DOCUMENT_START_EVENT,
+		version_directive: version_directive,
+		tag_directives:    tag_directives,
+		implicit:          implicit,
+	}
+	return true
+}
+
+// Create DOCUMENT-END.
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
+	*event = yaml_event_t{
+		typ:      yaml_DOCUMENT_END_EVENT,
+		implicit: implicit,
+	}
+	return true
+}
+
+///*
+// * Create ALIAS.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
+//{
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    anchor_copy *yaml_char_t = NULL
+//
+//    assert(event) // Non-NULL event object is expected.
+//    assert(anchor) // Non-NULL anchor is expected.
+//
+//    if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
+//
+//    anchor_copy = yaml_strdup(anchor)
+//    if (!anchor_copy)
+//        return 0
+//
+//    ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
+//
+//    return 1
+//}
+
+// Create SCALAR.
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
+	*event = yaml_event_t{
+		typ:             yaml_SCALAR_EVENT,
+		anchor:          anchor,
+		tag:             tag,
+		value:           value,
+		implicit:        plain_implicit,
+		quoted_implicit: quoted_implicit,
+		style:           yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-START.
+func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
+	*event = yaml_event_t{
+		typ:      yaml_SEQUENCE_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+	return true
+}
+
+// Create SEQUENCE-END.
+func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
+	*event = yaml_event_t{
+		typ: yaml_SEQUENCE_END_EVENT,
+	}
+	return true
+}
+
+// Create MAPPING-START.
+func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
+	*event = yaml_event_t{
+		typ:      yaml_MAPPING_START_EVENT,
+		anchor:   anchor,
+		tag:      tag,
+		implicit: implicit,
+		style:    yaml_style_t(style),
+	}
+	return true
+}
+
+// Create MAPPING-END.
+func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
+	*event = yaml_event_t{
+		typ: yaml_MAPPING_END_EVENT,
+	}
+	return true
+}
+
+// Destroy an event object.
+func yaml_event_delete(event *yaml_event_t) {
+	*event = yaml_event_t{}
+}
+
+///*
+// * Create a document object.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_initialize(document *yaml_document_t,
+//        version_directive *yaml_version_directive_t,
+//        tag_directives_start *yaml_tag_directive_t,
+//        tag_directives_end *yaml_tag_directive_t,
+//        start_implicit int, end_implicit int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    struct {
+//        start *yaml_node_t
+//        end *yaml_node_t
+//        top *yaml_node_t
+//    } nodes = { NULL, NULL, NULL }
+//    version_directive_copy *yaml_version_directive_t = NULL
+//    struct {
+//        start *yaml_tag_directive_t
+//        end *yaml_tag_directive_t
+//        top *yaml_tag_directive_t
+//    } tag_directives_copy = { NULL, NULL, NULL }
+//    value yaml_tag_directive_t = { NULL, NULL }
+//    mark yaml_mark_t = { 0, 0, 0 }
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert((tag_directives_start && tag_directives_end) ||
+//            (tag_directives_start == tag_directives_end))
+//                            // Valid tag directives are expected.
+//
+//    if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
+//
+//    if (version_directive) {
+//        version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
+//        if (!version_directive_copy) goto error
+//        version_directive_copy.major = version_directive.major
+//        version_directive_copy.minor = version_directive.minor
+//    }
+//
+//    if (tag_directives_start != tag_directives_end) {
+//        tag_directive *yaml_tag_directive_t
+//        if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+//            goto error
+//        for (tag_directive = tag_directives_start
+//                tag_directive != tag_directives_end; tag_directive ++) {
+//            assert(tag_directive.handle)
+//            assert(tag_directive.prefix)
+//            if (!yaml_check_utf8(tag_directive.handle,
+//                        strlen((char *)tag_directive.handle)))
+//                goto error
+//            if (!yaml_check_utf8(tag_directive.prefix,
+//                        strlen((char *)tag_directive.prefix)))
+//                goto error
+//            value.handle = yaml_strdup(tag_directive.handle)
+//            value.prefix = yaml_strdup(tag_directive.prefix)
+//            if (!value.handle || !value.prefix) goto error
+//            if (!PUSH(&context, tag_directives_copy, value))
+//                goto error
+//            value.handle = NULL
+//            value.prefix = NULL
+//        }
+//    }
+//
+//    DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+//            tag_directives_copy.start, tag_directives_copy.top,
+//            start_implicit, end_implicit, mark, mark)
+//
+//    return 1
+//
+//error:
+//    STACK_DEL(&context, nodes)
+//    yaml_free(version_directive_copy)
+//    while (!STACK_EMPTY(&context, tag_directives_copy)) {
+//        value yaml_tag_directive_t = POP(&context, tag_directives_copy)
+//        yaml_free(value.handle)
+//        yaml_free(value.prefix)
+//    }
+//    STACK_DEL(&context, tag_directives_copy)
+//    yaml_free(value.handle)
+//    yaml_free(value.prefix)
+//
+//    return 0
+//}
+//
+///*
+// * Destroy a document object.
+// */
+//
+//YAML_DECLARE(void)
+//yaml_document_delete(document *yaml_document_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    tag_directive *yaml_tag_directive_t
+//
+//    context.error = YAML_NO_ERROR // Eliminate a compiler warning.
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    while (!STACK_EMPTY(&context, document.nodes)) {
+//        node yaml_node_t = POP(&context, document.nodes)
+//        yaml_free(node.tag)
+//        switch (node.type) {
+//            case YAML_SCALAR_NODE:
+//                yaml_free(node.data.scalar.value)
+//                break
+//            case YAML_SEQUENCE_NODE:
+//                STACK_DEL(&context, node.data.sequence.items)
+//                break
+//            case YAML_MAPPING_NODE:
+//                STACK_DEL(&context, node.data.mapping.pairs)
+//                break
+//            default:
+//                assert(0) // Should not happen.
+//        }
+//    }
+//    STACK_DEL(&context, document.nodes)
+//
+//    yaml_free(document.version_directive)
+//    for (tag_directive = document.tag_directives.start
+//            tag_directive != document.tag_directives.end
+//            tag_directive++) {
+//        yaml_free(tag_directive.handle)
+//        yaml_free(tag_directive.prefix)
+//    }
+//    yaml_free(document.tag_directives.start)
+//
+//    memset(document, 0, sizeof(yaml_document_t))
+//}
+//
+///**
+// * Get a document node.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_node(document *yaml_document_t, index int)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+//        return document.nodes.start + index - 1
+//    }
+//    return NULL
+//}
+//
+///**
+// * Get the root object.
+// */
+//
+//YAML_DECLARE(yaml_node_t *)
+//yaml_document_get_root_node(document *yaml_document_t)
+//{
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (document.nodes.top != document.nodes.start) {
+//        return document.nodes.start
+//    }
+//    return NULL
+//}
+//
+///*
+// * Add a scalar node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_scalar(document *yaml_document_t,
+//        tag *yaml_char_t, value *yaml_char_t, length int,
+//        style yaml_scalar_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    value_copy *yaml_char_t = NULL
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//    assert(value) // Non-NULL value is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (length < 0) {
+//        length = strlen((char *)value)
+//    }
+//
+//    if (!yaml_check_utf8(value, length)) goto error
+//    value_copy = yaml_malloc(length+1)
+//    if (!value_copy) goto error
+//    memcpy(value_copy, value, length)
+//    value_copy[length] = '\0'
+//
+//    SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    yaml_free(tag_copy)
+//    yaml_free(value_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a sequence node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_sequence(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_sequence_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_item_t
+//        end *yaml_node_item_t
+//        top *yaml_node_item_t
+//    } items = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
+//
+//    SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, items)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Add a mapping node to a document.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_add_mapping(document *yaml_document_t,
+//        tag *yaml_char_t, style yaml_mapping_style_t)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//    mark yaml_mark_t = { 0, 0, 0 }
+//    tag_copy *yaml_char_t = NULL
+//    struct {
+//        start *yaml_node_pair_t
+//        end *yaml_node_pair_t
+//        top *yaml_node_pair_t
+//    } pairs = { NULL, NULL, NULL }
+//    node yaml_node_t
+//
+//    assert(document) // Non-NULL document object is expected.
+//
+//    if (!tag) {
+//        tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
+//    }
+//
+//    if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
+//    tag_copy = yaml_strdup(tag)
+//    if (!tag_copy) goto error
+//
+//    if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
+//
+//    MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+//            style, mark, mark)
+//    if (!PUSH(&context, document.nodes, node)) goto error
+//
+//    return document.nodes.top - document.nodes.start
+//
+//error:
+//    STACK_DEL(&context, pairs)
+//    yaml_free(tag_copy)
+//
+//    return 0
+//}
+//
+///*
+// * Append an item to a sequence node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_sequence_item(document *yaml_document_t,
+//        sequence int, item int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(sequence > 0
+//            && document.nodes.start + sequence <= document.nodes.top)
+//                            // Valid sequence id is required.
+//    assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
+//                            // A sequence node is required.
+//    assert(item > 0 && document.nodes.start + item <= document.nodes.top)
+//                            // Valid item id is required.
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[sequence-1].data.sequence.items, item))
+//        return 0
+//
+//    return 1
+//}
+//
+///*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+//YAML_DECLARE(int)
+//yaml_document_append_mapping_pair(document *yaml_document_t,
+//        mapping int, key int, value int)
+//{
+//    struct {
+//        error yaml_error_type_t
+//    } context
+//
+//    pair yaml_node_pair_t
+//
+//    assert(document) // Non-NULL document is required.
+//    assert(mapping > 0
+//            && document.nodes.start + mapping <= document.nodes.top)
+//                            // Valid mapping id is required.
+//    assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
+//                            // A mapping node is required.
+//    assert(key > 0 && document.nodes.start + key <= document.nodes.top)
+//                            // Valid key id is required.
+//    assert(value > 0 && document.nodes.start + value <= document.nodes.top)
+//                            // Valid value id is required.
+//
+//    pair.key = key
+//    pair.value = value
+//
+//    if (!PUSH(&context,
+//                document.nodes.start[mapping-1].data.mapping.pairs, pair))
+//        return 0
+//
+//    return 1
+//}
+//
+//
diff --git a/switchq/vendor/gopkg.in/yaml.v2/decode.go b/switchq/vendor/gopkg.in/yaml.v2/decode.go
new file mode 100644
index 0000000..b13ab9f
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/decode.go
@@ -0,0 +1,683 @@
+package yaml
+
+import (
+	"encoding"
+	"encoding/base64"
+	"fmt"
+	"math"
+	"reflect"
+	"strconv"
+	"time"
+)
+
+const (
+	documentNode = 1 << iota
+	mappingNode
+	sequenceNode
+	scalarNode
+	aliasNode
+)
+
+type node struct {
+	kind         int
+	line, column int
+	tag          string
+	value        string
+	implicit     bool
+	children     []*node
+	anchors      map[string]*node
+}
+
+// ----------------------------------------------------------------------------
+// Parser, produces a node tree out of a libyaml event stream.
+
+type parser struct {
+	parser yaml_parser_t
+	event  yaml_event_t
+	doc    *node
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+
+	yaml_parser_set_input_string(&p.parser, b)
+
+	p.skip()
+	if p.event.typ != yaml_STREAM_START_EVENT {
+		panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
+	}
+	p.skip()
+	return &p
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+func (p *parser) skip() {
+	if p.event.typ != yaml_NO_EVENT {
+		if p.event.typ == yaml_STREAM_END_EVENT {
+			failf("attempted to go past the end of stream; corrupted value?")
+		}
+		yaml_event_delete(&p.event)
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	switch p.event.typ {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+		return nil
+	default:
+		panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
+	}
+	panic("unreachable")
+}
+
+func (p *parser) node(kind int) *node {
+	return &node{
+		kind:   kind,
+		line:   p.event.start_mark.line,
+		column: p.event.start_mark.column,
+	}
+}
+
+func (p *parser) document() *node {
+	n := p.node(documentNode)
+	n.anchors = make(map[string]*node)
+	p.doc = n
+	p.skip()
+	n.children = append(n.children, p.parse())
+	if p.event.typ != yaml_DOCUMENT_END_EVENT {
+		panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
+	}
+	p.skip()
+	return n
+}
+
+func (p *parser) alias() *node {
+	n := p.node(aliasNode)
+	n.value = string(p.event.anchor)
+	p.skip()
+	return n
+}
+
+func (p *parser) scalar() *node {
+	n := p.node(scalarNode)
+	n.value = string(p.event.value)
+	n.tag = string(p.event.tag)
+	n.implicit = p.event.implicit
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	return n
+}
+
+func (p *parser) sequence() *node {
+	n := p.node(sequenceNode)
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	for p.event.typ != yaml_SEQUENCE_END_EVENT {
+		n.children = append(n.children, p.parse())
+	}
+	p.skip()
+	return n
+}
+
+func (p *parser) mapping() *node {
+	n := p.node(mappingNode)
+	p.anchor(n, p.event.anchor)
+	p.skip()
+	for p.event.typ != yaml_MAPPING_END_EVENT {
+		n.children = append(n.children, p.parse(), p.parse())
+	}
+	p.skip()
+	return n
+}
+
+// ----------------------------------------------------------------------------
+// Decoder, unmarshals a node into a provided value.
+
+type decoder struct {
+	doc     *node
+	aliases map[string]bool
+	mapType reflect.Type
+	terrors []string
+}
+
+var (
+	mapItemType    = reflect.TypeOf(MapItem{})
+	durationType   = reflect.TypeOf(time.Duration(0))
+	defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
+	ifaceType      = defaultMapType.Elem()
+)
+
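`defaultMapType` is why untyped unmarshalling yields `map[interface{}]interface{}` rather than `map[string]interface{}`: YAML keys are not restricted to strings. With the public yaml.v2 API (assuming the package is importable):

```Go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v interface{}
	if err := yaml.Unmarshal([]byte("a: [1, 2]\n3: ok"), &v); err != nil {
		panic(err)
	}
	m := v.(map[interface{}]interface{}) // note: not map[string]interface{}
	fmt.Println(m["a"], m[3])            // [1 2] ok
}
```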
+func newDecoder() *decoder {
+	d := &decoder{mapType: defaultMapType}
+	d.aliases = make(map[string]bool)
+	return d
+}
+
+func (d *decoder) terror(n *node, tag string, out reflect.Value) {
+	if n.tag != "" {
+		tag = n.tag
+	}
+	value := n.value
+	if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
+		if len(value) > 10 {
+			value = " `" + value[:7] + "...`"
+		} else {
+			value = " `" + value + "`"
+		}
+	}
+	d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
+}
+
+func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
+	terrlen := len(d.terrors)
+	err := u.UnmarshalYAML(func(v interface{}) (err error) {
+		defer handleErr(&err)
+		d.unmarshal(n, reflect.ValueOf(v))
+		if len(d.terrors) > terrlen {
+			issues := d.terrors[terrlen:]
+			d.terrors = d.terrors[:terrlen]
+			return &TypeError{issues}
+		}
+		return nil
+	})
+	if e, ok := err.(*TypeError); ok {
+		d.terrors = append(d.terrors, e.Errors...)
+		return false
+	}
+	if err != nil {
+		fail(err)
+	}
+	return true
+}
+
+// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
+// if a value is found to implement it.
+// It returns the initialized and dereferenced out value, whether
+// unmarshalling was already done by UnmarshalYAML, and if so whether
+// its types unmarshalled appropriately.
+//
+// If n holds a null value, prepare returns before doing anything.
+func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
+	if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) {
+		return out, false, false
+	}
+	again := true
+	for again {
+		again = false
+		if out.Kind() == reflect.Ptr {
+			if out.IsNil() {
+				out.Set(reflect.New(out.Type().Elem()))
+			}
+			out = out.Elem()
+			again = true
+		}
+		if out.CanAddr() {
+			if u, ok := out.Addr().Interface().(Unmarshaler); ok {
+				good = d.callUnmarshaler(n, u)
+				return out, true, good
+			}
+		}
+	}
+	return out, false, false
+}
+
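`prepare` and `callUnmarshaler` are what give user types the `UnmarshalYAML` hook: the type receives a closure that decodes the current node into whatever shape it requests. A public-API sketch:

```Go
package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

// CSV decodes a YAML scalar like "a,b,c" into a slice.
type CSV []string

// UnmarshalYAML implements the Unmarshaler interface handled above.
func (c *CSV) UnmarshalYAML(unmarshal func(interface{}) error) error {
	var raw string
	if err := unmarshal(&raw); err != nil { // ask for the node as a string
		return err
	}
	*c = strings.Split(raw, ",")
	return nil
}

func main() {
	var cfg struct{ Tags CSV }
	if err := yaml.Unmarshal([]byte("tags: a,b,c"), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Tags) // [a b c]
}
```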
+func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
+	switch n.kind {
+	case documentNode:
+		return d.document(n, out)
+	case aliasNode:
+		return d.alias(n, out)
+	}
+	out, unmarshaled, good := d.prepare(n, out)
+	if unmarshaled {
+		return good
+	}
+	switch n.kind {
+	case scalarNode:
+		good = d.scalar(n, out)
+	case mappingNode:
+		good = d.mapping(n, out)
+	case sequenceNode:
+		good = d.sequence(n, out)
+	default:
+		panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
+	}
+	return good
+}
+
+func (d *decoder) document(n *node, out reflect.Value) (good bool) {
+	if len(n.children) == 1 {
+		d.doc = n
+		d.unmarshal(n.children[0], out)
+		return true
+	}
+	return false
+}
+
+func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
+	an, ok := d.doc.anchors[n.value]
+	if !ok {
+		failf("unknown anchor '%s' referenced", n.value)
+	}
+	if d.aliases[n.value] {
+		failf("anchor '%s' value contains itself", n.value)
+	}
+	d.aliases[n.value] = true
+	good = d.unmarshal(an, out)
+	delete(d.aliases, n.value)
+	return good
+}
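+
+// Editor's note -- illustrative, not upstream code: the aliases
+// bookkeeping above resolves anchor references and rejects cycles, so a
+// document like the following decodes cleanly, while a self-referencing
+// alias such as "a: &x [*x]" fails with "value contains itself":
+//
+//	var v map[string]interface{}
+//	err := yaml.Unmarshal([]byte("base: &b 42\ncopy: *b"), &v)
+//	// on success, v["base"] == v["copy"] == 42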
+
+var zeroValue reflect.Value
+
+func resetMap(out reflect.Value) {
+	for _, k := range out.MapKeys() {
+		out.SetMapIndex(k, zeroValue)
+	}
+}
+
+func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
+	var tag string
+	var resolved interface{}
+	if n.tag == "" && !n.implicit {
+		tag = yaml_STR_TAG
+		resolved = n.value
+	} else {
+		tag, resolved = resolve(n.tag, n.value)
+		if tag == yaml_BINARY_TAG {
+			data, err := base64.StdEncoding.DecodeString(resolved.(string))
+			if err != nil {
+				failf("!!binary value contains invalid base64 data")
+			}
+			resolved = string(data)
+		}
+	}
+	if resolved == nil {
+		if out.Kind() == reflect.Map && !out.CanAddr() {
+			resetMap(out)
+		} else {
+			out.Set(reflect.Zero(out.Type()))
+		}
+		return true
+	}
+	if s, ok := resolved.(string); ok && out.CanAddr() {
+		if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
+			err := u.UnmarshalText([]byte(s))
+			if err != nil {
+				fail(err)
+			}
+			return true
+		}
+	}
+	switch out.Kind() {
+	case reflect.String:
+		if tag == yaml_BINARY_TAG {
+			out.SetString(resolved.(string))
+			good = true
+		} else if resolved != nil {
+			out.SetString(n.value)
+			good = true
+		}
+	case reflect.Interface:
+		if resolved == nil {
+			out.Set(reflect.Zero(out.Type()))
+		} else {
+			out.Set(reflect.ValueOf(resolved))
+		}
+		good = true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch resolved := resolved.(type) {
+		case int:
+			if !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case int64:
+			if !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				good = true
+			}
+		case uint64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case float64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				good = true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					good = true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case uint64:
+			if !out.OverflowUint(resolved) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				good = true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			good = true
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			good = true
+		case int64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			good = true
+		case float64:
+			out.SetFloat(resolved)
+			good = true
+		}
+	case reflect.Ptr:
+		if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+			elem := reflect.New(out.Type().Elem())
+			elem.Elem().Set(reflect.ValueOf(resolved))
+			out.Set(elem)
+			good = true
+		}
+	}
+	if !good {
+		d.terror(n, tag, out)
+	}
+	return good
+}
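+
+// Editor's note -- illustrative, not upstream code: the durationType
+// branch in the int case above is what lets a string scalar populate a
+// time.Duration field, e.g.:
+//
+//	var cfg struct {
+//		Timeout time.Duration `yaml:"timeout"`
+//	}
+//	err := yaml.Unmarshal([]byte("timeout: 1m30s"), &cfg)
+//	// on success, cfg.Timeout == 90*time.Second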
+
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+	l := len(n.children)
+
+	var iface reflect.Value
+	switch out.Kind() {
+	case reflect.Slice:
+		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Interface:
+		// No type hints. Will have to use a generic sequence.
+		iface = out
+		out = settableValueOf(make([]interface{}, l))
+	default:
+		d.terror(n, yaml_SEQ_TAG, out)
+		return false
+	}
+	et := out.Type().Elem()
+
+	j := 0
+	for i := 0; i < l; i++ {
+		e := reflect.New(et).Elem()
+		if ok := d.unmarshal(n.children[i], e); ok {
+			out.Index(j).Set(e)
+			j++
+		}
+	}
+	out.Set(out.Slice(0, j))
+	if iface.IsValid() {
+		iface.Set(out)
+	}
+	return true
+}
+
+func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
+	switch out.Kind() {
+	case reflect.Struct:
+		return d.mappingStruct(n, out)
+	case reflect.Slice:
+		return d.mappingSlice(n, out)
+	case reflect.Map:
+		// okay
+	case reflect.Interface:
+		if d.mapType.Kind() == reflect.Map {
+			iface := out
+			out = reflect.MakeMap(d.mapType)
+			iface.Set(out)
+		} else {
+			slicev := reflect.New(d.mapType).Elem()
+			if !d.mappingSlice(n, slicev) {
+				return false
+			}
+			out.Set(slicev)
+			return true
+		}
+	default:
+		d.terror(n, yaml_MAP_TAG, out)
+		return false
+	}
+	outt := out.Type()
+	kt := outt.Key()
+	et := outt.Elem()
+
+	mapType := d.mapType
+	if outt.Key() == ifaceType && outt.Elem() == ifaceType {
+		d.mapType = outt
+	}
+
+	if out.IsNil() {
+		out.Set(reflect.MakeMap(outt))
+	}
+	l := len(n.children)
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.children[i]) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		k := reflect.New(kt).Elem()
+		if d.unmarshal(n.children[i], k) {
+			kkind := k.Kind()
+			if kkind == reflect.Interface {
+				kkind = k.Elem().Kind()
+			}
+			if kkind == reflect.Map || kkind == reflect.Slice {
+				failf("invalid map key: %#v", k.Interface())
+			}
+			e := reflect.New(et).Elem()
+			if d.unmarshal(n.children[i+1], e) {
+				out.SetMapIndex(k, e)
+			}
+		}
+	}
+	d.mapType = mapType
+	return true
+}
+
+func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
+	outt := out.Type()
+	if outt.Elem() != mapItemType {
+		d.terror(n, yaml_MAP_TAG, out)
+		return false
+	}
+
+	mapType := d.mapType
+	d.mapType = outt
+
+	var slice []MapItem
+	var l = len(n.children)
+	for i := 0; i < l; i += 2 {
+		if isMerge(n.children[i]) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		item := MapItem{}
+		k := reflect.ValueOf(&item.Key).Elem()
+		if d.unmarshal(n.children[i], k) {
+			v := reflect.ValueOf(&item.Value).Elem()
+			if d.unmarshal(n.children[i+1], v) {
+				slice = append(slice, item)
+			}
+		}
+	}
+	out.Set(reflect.ValueOf(slice))
+	d.mapType = mapType
+	return true
+}
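+
+// Editor's note -- illustrative, not upstream code: decoding into
+// yaml.MapSlice goes through mappingSlice above and preserves the
+// document's key order, which a Go map cannot:
+//
+//	var items yaml.MapSlice
+//	err := yaml.Unmarshal([]byte("b: 2\na: 1"), &items)
+//	// on success, items[0].Key == "b" and items[1].Key == "a"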
+
+func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
+	sinfo, err := getStructInfo(out.Type())
+	if err != nil {
+		panic(err)
+	}
+	name := settableValueOf("")
+	l := len(n.children)
+
+	var inlineMap reflect.Value
+	var elemType reflect.Type
+	if sinfo.InlineMap != -1 {
+		inlineMap = out.Field(sinfo.InlineMap)
+		inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
+		elemType = inlineMap.Type().Elem()
+	}
+
+	for i := 0; i < l; i += 2 {
+		ni := n.children[i]
+		if isMerge(ni) {
+			d.merge(n.children[i+1], out)
+			continue
+		}
+		if !d.unmarshal(ni, name) {
+			continue
+		}
+		if info, ok := sinfo.FieldsMap[name.String()]; ok {
+			var field reflect.Value
+			if info.Inline == nil {
+				field = out.Field(info.Num)
+			} else {
+				field = out.FieldByIndex(info.Inline)
+			}
+			d.unmarshal(n.children[i+1], field)
+		} else if sinfo.InlineMap != -1 {
+			if inlineMap.IsNil() {
+				inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
+			}
+			value := reflect.New(elemType).Elem()
+			d.unmarshal(n.children[i+1], value)
+			inlineMap.SetMapIndex(name, value)
+		}
+	}
+	return true
+}
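+
+// Editor's note -- illustrative, not upstream code: mappingStruct above
+// routes known keys through sinfo.FieldsMap and spills unknown keys into
+// a map field tagged ",inline", assuming a caller-side type such as
+// (names invented):
+//
+//	type service struct {
+//		Name  string                 `yaml:"name"`
+//		Extra map[string]interface{} `yaml:",inline"`
+//	}
+//
+// Decoding "name: api\nreplicas: 3" then fills Name and leaves
+// Extra["replicas"] == 3.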
+
+func failWantMap() {
+	failf("map merge requires map or sequence of maps as the value")
+}
+
+func (d *decoder) merge(n *node, out reflect.Value) {
+	switch n.kind {
+	case mappingNode:
+		d.unmarshal(n, out)
+	case aliasNode:
+		an, ok := d.doc.anchors[n.value]
+		if ok && an.kind != mappingNode {
+			failWantMap()
+		}
+		d.unmarshal(n, out)
+	case sequenceNode:
+		// Step backwards as earlier nodes take precedence.
+		for i := len(n.children) - 1; i >= 0; i-- {
+			ni := n.children[i]
+			if ni.kind == aliasNode {
+				an, ok := d.doc.anchors[ni.value]
+				if ok && an.kind != mappingNode {
+					failWantMap()
+				}
+			} else if ni.kind != mappingNode {
+				failWantMap()
+			}
+			d.unmarshal(ni, out)
+		}
+	default:
+		failWantMap()
+	}
+}
+
+func isMerge(n *node) bool {
+	return n.kind == scalarNode && n.value == "<<" && (n.implicit || n.tag == yaml_MERGE_TAG)
+}
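+
+// Editor's note -- illustrative, not upstream code: isMerge and d.merge
+// together implement the YAML merge-key extension, so a document such as
+//
+//	defaults: &d
+//	  retries: 3
+//	prod:
+//	  <<: *d
+//	  host: example.com
+//
+// decodes with prod carrying both retries and host. When a sequence of
+// maps is merged, earlier entries take precedence, which is why merge
+// walks the sequence backwards.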
diff --git a/switchq/vendor/gopkg.in/yaml.v2/emitterc.go b/switchq/vendor/gopkg.in/yaml.v2/emitterc.go
new file mode 100644
index 0000000..2befd55
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/emitterc.go
@@ -0,0 +1,1685 @@
+package yaml
+
+import (
+	"bytes"
+)
+
+// Flush the buffer if needed.
+func flush(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) {
+		return yaml_emitter_flush(emitter)
+	}
+	return true
+}
+
+// Put a character into the output buffer.
+func put(emitter *yaml_emitter_t, value byte) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.buffer[emitter.buffer_pos] = value
+	emitter.buffer_pos++
+	emitter.column++
+	return true
+}
+
+// Put a line break into the output buffer.
+func put_break(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	switch emitter.line_break {
+	case yaml_CR_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\r'
+		emitter.buffer_pos += 1
+	case yaml_LN_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\n'
+		emitter.buffer_pos += 1
+	case yaml_CRLN_BREAK:
+		emitter.buffer[emitter.buffer_pos+0] = '\r'
+		emitter.buffer[emitter.buffer_pos+1] = '\n'
+		emitter.buffer_pos += 2
+	default:
+		panic("unknown line break setting")
+	}
+	emitter.column = 0
+	emitter.line++
+	return true
+}
+
+// Copy a character from a string into the buffer.
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
+		return false
+	}
+	p := emitter.buffer_pos
+	w := width(s[*i])
+	switch w {
+	case 4:
+		emitter.buffer[p+3] = s[*i+3]
+		fallthrough
+	case 3:
+		emitter.buffer[p+2] = s[*i+2]
+		fallthrough
+	case 2:
+		emitter.buffer[p+1] = s[*i+1]
+		fallthrough
+	case 1:
+		emitter.buffer[p+0] = s[*i+0]
+	default:
+		panic("unknown character width")
+	}
+	emitter.column++
+	emitter.buffer_pos += w
+	*i += w
+	return true
+}
+
+// Write a whole string into the buffer.
+func write_all(emitter *yaml_emitter_t, s []byte) bool {
+	for i := 0; i < len(s); {
+		if !write(emitter, s, &i) {
+			return false
+		}
+	}
+	return true
+}
+
+// Copy a line break character from a string into the buffer.
+func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
+	if s[*i] == '\n' {
+		if !put_break(emitter) {
+			return false
+		}
+		*i++
+	} else {
+		if !write(emitter, s, i) {
+			return false
+		}
+		emitter.column = 0
+		emitter.line++
+	}
+	return true
+}
+
+// Set an emitter error and return false.
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_EMITTER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Emit an event.
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.events = append(emitter.events, *event)
+	for !yaml_emitter_need_more_events(emitter) {
+		event := &emitter.events[emitter.events_head]
+		if !yaml_emitter_analyze_event(emitter, event) {
+			return false
+		}
+		if !yaml_emitter_state_machine(emitter, event) {
+			return false
+		}
+		yaml_event_delete(event)
+		emitter.events_head++
+	}
+	return true
+}
+
+// Check if we need to accumulate more events before emitting.
+//
+// We accumulate extra
+//  - 1 event for DOCUMENT-START
+//  - 2 events for SEQUENCE-START
+//  - 3 events for MAPPING-START
+//
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+	if emitter.events_head == len(emitter.events) {
+		return true
+	}
+	var accumulate int
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_DOCUMENT_START_EVENT:
+		accumulate = 1
+	case yaml_SEQUENCE_START_EVENT:
+		accumulate = 2
+	case yaml_MAPPING_START_EVENT:
+		accumulate = 3
+	default:
+		return false
+	}
+	if len(emitter.events)-emitter.events_head > accumulate {
+		return false
+	}
+	var level int
+	for i := emitter.events_head; i < len(emitter.events); i++ {
+		switch emitter.events[i].typ {
+		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+			level++
+		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+			level--
+		}
+		if level == 0 {
+			return false
+		}
+	}
+	return true
+}
+
+// Append a directive to the directives stack.
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+		}
+	}
+
+	// [Go] Do we actually need to copy this given garbage collection
+	// and the lack of deallocating destructors?
+	tag_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(tag_copy.handle, value.handle)
+	copy(tag_copy.prefix, value.prefix)
+	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+	return true
+}
+
+// Increase the indentation level.
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
+	emitter.indents = append(emitter.indents, emitter.indent)
+	if emitter.indent < 0 {
+		if flow {
+			emitter.indent = emitter.best_indent
+		} else {
+			emitter.indent = 0
+		}
+	} else if !indentless {
+		emitter.indent += emitter.best_indent
+	}
+	return true
+}
+
+// State dispatcher.
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	switch emitter.state {
+	case yaml_EMIT_STREAM_START_STATE:
+		return yaml_emitter_emit_stream_start(emitter, event)
+
+	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, true)
+
+	case yaml_EMIT_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, false)
+
+	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+		return yaml_emitter_emit_document_content(emitter, event)
+
+	case yaml_EMIT_DOCUMENT_END_STATE:
+		return yaml_emitter_emit_document_end(emitter, event)
+
+	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_END_STATE:
+		return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
+	}
+	panic("invalid emitter state")
+}
+
+// Expect STREAM-START.
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_STREAM_START_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
+	}
+	if emitter.encoding == yaml_ANY_ENCODING {
+		emitter.encoding = event.encoding
+		if emitter.encoding == yaml_ANY_ENCODING {
+			emitter.encoding = yaml_UTF8_ENCODING
+		}
+	}
+	if emitter.best_indent < 2 || emitter.best_indent > 9 {
+		emitter.best_indent = 2
+	}
+	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+		emitter.best_width = 80
+	}
+	if emitter.best_width < 0 {
+		emitter.best_width = 1<<31 - 1
+	}
+	if emitter.line_break == yaml_ANY_BREAK {
+		emitter.line_break = yaml_LN_BREAK
+	}
+
+	emitter.indent = -1
+	emitter.line = 0
+	emitter.column = 0
+	emitter.whitespace = true
+	emitter.indention = true
+
+	if emitter.encoding != yaml_UTF8_ENCODING {
+		if !yaml_emitter_write_bom(emitter) {
+			return false
+		}
+	}
+	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+	return true
+}
+
+// Expect DOCUMENT-START or STREAM-END.
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+
+	if event.typ == yaml_DOCUMENT_START_EVENT {
+
+		if event.version_directive != nil {
+			if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(event.tag_directives); i++ {
+			tag_directive := &event.tag_directives[i]
+			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+				return false
+			}
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+				return false
+			}
+		}
+
+		for i := 0; i < len(default_tag_directives); i++ {
+			tag_directive := &default_tag_directives[i]
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
+				return false
+			}
+		}
+
+		implicit := event.implicit
+		if !first || emitter.canonical {
+			implicit = false
+		}
+
+		if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if event.version_directive != nil {
+			implicit = false
+			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if len(event.tag_directives) > 0 {
+			implicit = false
+			for i := 0; i < len(event.tag_directives); i++ {
+				tag_directive := &event.tag_directives[i]
+				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+					return false
+				}
+				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+					return false
+				}
+				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+					return false
+				}
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		if yaml_emitter_check_empty_document(emitter) {
+			implicit = false
+		}
+		if !implicit {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+				return false
+			}
+			if emitter.canonical {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+		return true
+	}
+
+	if event.typ == yaml_STREAM_END_EVENT {
+		if emitter.open_ended {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_flush(emitter) {
+			return false
+		}
+		emitter.state = yaml_EMIT_END_STATE
+		return true
+	}
+
+	return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
+}
+
+// Expect the root node.
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+// Expect DOCUMENT-END.
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if event.typ != yaml_DOCUMENT_END_EVENT {
+		return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !event.implicit {
+		// [Go] Allocate the slice elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	if !yaml_emitter_flush(emitter) {
+		return false
+	}
+	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+	emitter.tag_directives = emitter.tag_directives[:0]
+	return true
+}
+
+// Expect a flow item node.
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a flow key node.
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.flow_level--
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
+			return false
+		}
+	}
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a flow value node.
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if emitter.canonical || emitter.column > emitter.best_width {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block item node.
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
+			return false
+		}
+	}
+	if event.typ == yaml_SEQUENCE_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+// Expect a block key node.
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+	if event.typ == yaml_MAPPING_END_EVENT {
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+		return true
+	}
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
+		return false
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a block value node.
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
+			return false
+		}
+	} else {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+// Expect a node.
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+	root bool, sequence bool, mapping bool, simple_key bool) bool {
+
+	emitter.root_context = root
+	emitter.sequence_context = sequence
+	emitter.mapping_context = mapping
+	emitter.simple_key_context = simple_key
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		return yaml_emitter_emit_alias(emitter, event)
+	case yaml_SCALAR_EVENT:
+		return yaml_emitter_emit_scalar(emitter, event)
+	case yaml_SEQUENCE_START_EVENT:
+		return yaml_emitter_emit_sequence_start(emitter, event)
+	case yaml_MAPPING_START_EVENT:
+		return yaml_emitter_emit_mapping_start(emitter, event)
+	default:
+		return yaml_emitter_set_emitter_error(emitter,
+			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+	}
+}
+
+// Expect ALIAS.
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SCALAR.
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_select_scalar_style(emitter, event) {
+		return false
+	}
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if !yaml_emitter_increase_indent(emitter, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_scalar(emitter) {
+		return false
+	}
+	emitter.indent = emitter.indents[len(emitter.indents)-1]
+	emitter.indents = emitter.indents[:len(emitter.indents)-1]
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+	return true
+}
+
+// Expect SEQUENCE-START.
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
+		yaml_emitter_check_empty_sequence(emitter) {
+		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+	}
+	return true
+}
+
+// Expect MAPPING-START.
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
+		yaml_emitter_check_empty_mapping(emitter) {
+		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+	}
+	return true
+}
+
+// Check if the document content is an empty scalar.
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+	return false // [Go] Huh?
+}
+
+// Check if the next events represent an empty sequence.
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
+}
+
+// Check if the next events represent an empty mapping.
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	if len(emitter.events)-emitter.events_head < 2 {
+		return false
+	}
+	return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
+		emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
+}
+
+// Check if the next node can be expressed as a simple key.
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+	switch emitter.events[emitter.events_head].typ {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+	case yaml_SCALAR_EVENT:
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+	case yaml_SEQUENCE_START_EVENT:
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	case yaml_MAPPING_START_EVENT:
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+	default:
+		return false
+	}
+	return length <= 128
+}
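+
+// Editor's note -- an illustrative remark, not upstream documentation:
+// the YAML spec caps simple keys at 1024 characters; this emitter is
+// stricter and falls back to the explicit key form beyond 128 bytes,
+// e.g.:
+//
+//	? an extremely long key that exceeds the limit ...
+//	: value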
+
+// Determine an acceptable scalar style.
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
+	}
+
+	style := event.scalar_style()
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
+			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte{'!'}
+	}
+	emitter.scalar_data.style = style
+	return true
+}
+
+// Write an anchor.
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+	c := []byte{'&'}
+	if emitter.anchor_data.alias {
+		c[0] = '*'
+	}
+	if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
+		return false
+	}
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+// Write a tag.
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+		return true
+	}
+	if len(emitter.tag_data.handle) > 0 {
+		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+			return false
+		}
+		if len(emitter.tag_data.suffix) > 0 {
+			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+				return false
+			}
+		}
+	} else {
+		// [Go] Allocate these slices elsewhere.
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
+			return false
+		}
+	}
+	return true
+}
+
+// Write a scalar.
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
+	}
+	panic("unknown scalar style")
+}
+
+// Check if a %YAML directive is valid.
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
+	if version_directive.major != 1 || version_directive.minor != 1 {
+		return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
+	}
+	return true
+}
+
+// Check if a %TAG directive is valid.
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
+	handle := tag_directive.handle
+	prefix := tag_directive.prefix
+	if len(handle) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
+	}
+	if handle[0] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
+	}
+	if handle[len(handle)-1] != '!' {
+		return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
+	}
+	for i := 1; i < len(handle)-1; i += width(handle[i]) {
+		if !is_alpha(handle, i) {
+			return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
+		}
+	}
+	if len(prefix) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
+	}
+	return true
+}
+
+// Check if an anchor is valid.
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
+	if len(anchor) == 0 {
+		problem := "anchor value must not be empty"
+		if alias {
+			problem = "alias value must not be empty"
+		}
+		return yaml_emitter_set_emitter_error(emitter, problem)
+	}
+	for i := 0; i < len(anchor); i += width(anchor[i]) {
+		if !is_alpha(anchor, i) {
+			problem := "anchor value must contain alphanumerical characters only"
+			if alias {
+				problem = "alias value must contain alphanumerical characters only"
+			}
+			return yaml_emitter_set_emitter_error(emitter, problem)
+		}
+	}
+	emitter.anchor_data.anchor = anchor
+	emitter.anchor_data.alias = alias
+	return true
+}
+
+// Check if a tag is valid.
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+	if len(tag) == 0 {
+		return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
+	}
+	for i := 0; i < len(emitter.tag_directives); i++ {
+		tag_directive := &emitter.tag_directives[i]
+		if bytes.HasPrefix(tag, tag_directive.prefix) {
+			emitter.tag_data.handle = tag_directive.handle
+			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+			return true
+		}
+	}
+	emitter.tag_data.suffix = tag
+	return true
+}
+
+// Check if a scalar is valid.
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	var (
+		block_indicators   = false
+		flow_indicators    = false
+		line_breaks        = false
+		special_characters = false
+
+		leading_space  = false
+		leading_break  = false
+		trailing_space = false
+		trailing_break = false
+		break_space    = false
+		space_break    = false
+
+		preceded_by_whitespace = false
+		followed_by_whitespace  = false
+		previous_space          = false
+		previous_break          = false
+	)
+
+	emitter.scalar_data.value = value
+
+	if len(value) == 0 {
+		emitter.scalar_data.multiline = false
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = true
+		emitter.scalar_data.single_quoted_allowed = true
+		emitter.scalar_data.block_allowed = false
+		return true
+	}
+
+	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+		block_indicators = true
+		flow_indicators = true
+	}
+
+	preceded_by_whitespace = true
+	for i, w := 0, 0; i < len(value); i += w {
+		w = width(value[i])
+		followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
+
+		if i == 0 {
+			switch value[i] {
+			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+				flow_indicators = true
+				block_indicators = true
+			case '?', ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '-':
+				if followed_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		} else {
+			switch value[i] {
+			case ',', '?', '[', ']', '{', '}':
+				flow_indicators = true
+			case ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '#':
+			if preceded_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		}
+
+		if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
+			special_characters = true
+		}
+		if is_space(value, i) {
+			if i == 0 {
+				leading_space = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_space = true
+			}
+			if previous_break {
+				break_space = true
+			}
+			previous_space = true
+			previous_break = false
+		} else if is_break(value, i) {
+			line_breaks = true
+			if i == 0 {
+				leading_break = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_break = true
+			}
+			if previous_space {
+				space_break = true
+			}
+			previous_space = false
+			previous_break = true
+		} else {
+			previous_space = false
+			previous_break = false
+		}
+
+		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
+		preceded_by_whitespace = is_blankz(value, i)
+	}
+
+	emitter.scalar_data.multiline = line_breaks
+	emitter.scalar_data.flow_plain_allowed = true
+	emitter.scalar_data.block_plain_allowed = true
+	emitter.scalar_data.single_quoted_allowed = true
+	emitter.scalar_data.block_allowed = true
+
+	if leading_space || leading_break || trailing_space || trailing_break {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if trailing_space {
+		emitter.scalar_data.block_allowed = false
+	}
+	if break_space {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+	}
+	if space_break || special_characters {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+		emitter.scalar_data.block_allowed = false
+	}
+	if line_breaks {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	if flow_indicators {
+		emitter.scalar_data.flow_plain_allowed = false
+	}
+	if block_indicators {
+		emitter.scalar_data.block_plain_allowed = false
+	}
+	return true
+}
+
+// Check if the event data is valid.
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	emitter.anchor_data.anchor = nil
+	emitter.tag_data.handle = nil
+	emitter.tag_data.suffix = nil
+	emitter.scalar_data.value = nil
+
+	switch event.typ {
+	case yaml_ALIAS_EVENT:
+		if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
+			return false
+		}
+
+	case yaml_SCALAR_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+		if !yaml_emitter_analyze_scalar(emitter, event.value) {
+			return false
+		}
+
+	case yaml_SEQUENCE_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+
+	case yaml_MAPPING_START_EVENT:
+		if len(event.anchor) > 0 {
+			if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
+				return false
+			}
+		}
+		if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
+			if !yaml_emitter_analyze_tag(emitter, event.tag) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// Write the BOM character.
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+	if !flush(emitter) {
+		return false
+	}
+	pos := emitter.buffer_pos
+	emitter.buffer[pos+0] = '\xEF'
+	emitter.buffer[pos+1] = '\xBB'
+	emitter.buffer[pos+2] = '\xBF'
+	emitter.buffer_pos += 3
+	return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+	indent := emitter.indent
+	if indent < 0 {
+		indent = 0
+	}
+	if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
+		if !put_break(emitter) {
+			return false
+		}
+	}
+	for emitter.column < indent {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	emitter.whitespace = true
+	emitter.indention = true
+	return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, indicator) {
+		return false
+	}
+	emitter.whitespace = is_whitespace
+	emitter.indention = (emitter.indention && is_indention)
+	emitter.open_ended = false
+	return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	if !write_all(emitter, value) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
+	if need_whitespace && !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+	for i := 0; i < len(value); {
+		var must_write bool
+		switch value[i] {
+		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
+			must_write = true
+		default:
+			must_write = is_alpha(value, i)
+		}
+		if must_write {
+			if !write(emitter, value, &i) {
+				return false
+			}
+		} else {
+			w := width(value[i])
+			for k := 0; k < w; k++ {
+				octet := value[i]
+				i++
+				if !put(emitter, '%') {
+					return false
+				}
+
+				c := octet >> 4
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+
+				c = octet & 0x0f
+				if c < 10 {
+					c += '0'
+				} else {
+					c += 'A' - 10
+				}
+				if !put(emitter, c) {
+					return false
+				}
+			}
+		}
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
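+
+// Editor's note -- illustrative: bytes outside the URI-safe set handled
+// above are percent-encoded by the nibble-to-hex loop, so a space (0x20)
+// in a tag suffix is written as the three bytes '%', '2', '0'.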
+
+func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	if !emitter.whitespace {
+		if !put(emitter, ' ') {
+			return false
+		}
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+
+	emitter.whitespace = false
+	emitter.indention = false
+	if emitter.root_context {
+		emitter.open_ended = true
+	}
+
+	return true
+}
+
+func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
+		return false
+	}
+
+	spaces := false
+	breaks := false
+	for i := 0; i < len(value); {
+		if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			spaces = true
+		} else if is_break(value, i) {
+			if !breaks && value[i] == '\n' {
+				if !put_break(emitter) {
+					return false
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if value[i] == '\'' {
+				if !put(emitter, '\'') {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			spaces = false
+			breaks = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
+
+func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
+	spaces := false
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
+		return false
+	}
+
+	for i := 0; i < len(value); {
+		if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
+			is_bom(value, i) || is_break(value, i) ||
+			value[i] == '"' || value[i] == '\\' {
+
+			octet := value[i]
+
+			var w int
+			var v rune
+			switch {
+			case octet&0x80 == 0x00:
+				w, v = 1, rune(octet&0x7F)
+			case octet&0xE0 == 0xC0:
+				w, v = 2, rune(octet&0x1F)
+			case octet&0xF0 == 0xE0:
+				w, v = 3, rune(octet&0x0F)
+			case octet&0xF8 == 0xF0:
+				w, v = 4, rune(octet&0x07)
+			}
+			for k := 1; k < w; k++ {
+				octet = value[i+k]
+				v = (v << 6) + (rune(octet) & 0x3F)
+			}
+			i += w
+
+			if !put(emitter, '\\') {
+				return false
+			}
+
+			var ok bool
+			switch v {
+			case 0x00:
+				ok = put(emitter, '0')
+			case 0x07:
+				ok = put(emitter, 'a')
+			case 0x08:
+				ok = put(emitter, 'b')
+			case 0x09:
+				ok = put(emitter, 't')
+			case 0x0A:
+				ok = put(emitter, 'n')
+			case 0x0b:
+				ok = put(emitter, 'v')
+			case 0x0c:
+				ok = put(emitter, 'f')
+			case 0x0d:
+				ok = put(emitter, 'r')
+			case 0x1b:
+				ok = put(emitter, 'e')
+			case 0x22:
+				ok = put(emitter, '"')
+			case 0x5c:
+				ok = put(emitter, '\\')
+			case 0x85:
+				ok = put(emitter, 'N')
+			case 0xA0:
+				ok = put(emitter, '_')
+			case 0x2028:
+				ok = put(emitter, 'L')
+			case 0x2029:
+				ok = put(emitter, 'P')
+			default:
+				if v <= 0xFF {
+					ok = put(emitter, 'x')
+					w = 2
+				} else if v <= 0xFFFF {
+					ok = put(emitter, 'u')
+					w = 4
+				} else {
+					ok = put(emitter, 'U')
+					w = 8
+				}
+				for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
+					digit := byte((v >> uint(k)) & 0x0F)
+					if digit < 10 {
+						ok = put(emitter, digit+'0')
+					} else {
+						ok = put(emitter, digit+'A'-10)
+					}
+				}
+			}
+			if !ok {
+				return false
+			}
+			spaces = false
+		} else if is_space(value, i) {
+			if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				if is_space(value, i+1) {
+					if !put(emitter, '\\') {
+						return false
+					}
+				}
+				i += width(value[i])
+			} else if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = true
+		} else {
+			if !write(emitter, value, &i) {
+				return false
+			}
+			spaces = false
+		}
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
+		return false
+	}
+	emitter.whitespace = false
+	emitter.indention = false
+	return true
+}
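+
+// Editor's note -- illustrative, not upstream code: with the escape
+// table above, a value containing a tab and U+2028 LINE SEPARATOR is
+// emitted roughly as the double-quoted scalar "a\tb\L", while code
+// points with no short escape fall back to \xXX, \uXXXX, or \UXXXXXXXX
+// depending on their magnitude.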
+
+func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
+	if is_space(value, 0) || is_break(value, 0) {
+		indent_hint := []byte{'0' + byte(emitter.best_indent)}
+		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
+			return false
+		}
+	}
+
+	emitter.open_ended = false
+
+	var chomp_hint [1]byte
+	if len(value) == 0 {
+		chomp_hint[0] = '-'
+	} else {
+		i := len(value) - 1
+		for value[i]&0xC0 == 0x80 {
+			i--
+		}
+		if !is_break(value, i) {
+			chomp_hint[0] = '-'
+		} else if i == 0 {
+			chomp_hint[0] = '+'
+			emitter.open_ended = true
+		} else {
+			i--
+			for value[i]&0xC0 == 0x80 {
+				i--
+			}
+			if is_break(value, i) {
+				chomp_hint[0] = '+'
+				emitter.open_ended = true
+			}
+		}
+	}
+	if chomp_hint[0] != 0 {
+		if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
+			return false
+		}
+	}
+	return true
+}
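+
+// Editor's note -- an illustrative summary, not upstream documentation:
+// the chomping logic above maps trailing breaks onto block-scalar hints
+// roughly as follows (shown for the literal style):
+//
+//	"text"     -> |-  (no trailing newline: strip)
+//	"text\n"   -> |   (one trailing newline: clip, no hint needed)
+//	"text\n\n" -> |+  (extra trailing breaks: keep, and open_ended is set)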
+
+func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+	if !put_break(emitter) {
+		return false
+	}
+	emitter.indention = true
+	emitter.whitespace = true
+	breaks := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+			if !write(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+
+	return true
+}
+
+func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
+		return false
+	}
+	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
+		return false
+	}
+
+	if !put_break(emitter) {
+		return false
+	}
+	emitter.indention = true
+	emitter.whitespace = true
+
+	breaks := true
+	leading_spaces := true
+	for i := 0; i < len(value); {
+		if is_break(value, i) {
+			if !breaks && !leading_spaces && value[i] == '\n' {
+				k := 0
+				for is_break(value, k) {
+					k += width(value[k])
+				}
+				if !is_blankz(value, k) {
+					if !put_break(emitter) {
+						return false
+					}
+				}
+			}
+			if !write_break(emitter, value, &i) {
+				return false
+			}
+			emitter.indention = true
+			breaks = true
+		} else {
+			if breaks {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				leading_spaces = is_blank(value, i)
+			}
+			if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+				i += width(value[i])
+			} else {
+				if !write(emitter, value, &i) {
+					return false
+				}
+			}
+			emitter.indention = false
+			breaks = false
+		}
+	}
+	return true
+}
diff --git a/switchq/vendor/gopkg.in/yaml.v2/encode.go b/switchq/vendor/gopkg.in/yaml.v2/encode.go
new file mode 100644
index 0000000..84f8499
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/encode.go
@@ -0,0 +1,306 @@
+package yaml
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type encoder struct {
+	emitter yaml_emitter_t
+	event   yaml_event_t
+	out     []byte
+	flow    bool
+}
+
+func newEncoder() (e *encoder) {
+	e = &encoder{}
+	e.must(yaml_emitter_initialize(&e.emitter))
+	yaml_emitter_set_output_string(&e.emitter, &e.out)
+	yaml_emitter_set_unicode(&e.emitter, true)
+	e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
+	e.emit()
+	e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
+	e.emit()
+	return e
+}
+
+func (e *encoder) finish() {
+	e.must(yaml_document_end_event_initialize(&e.event, true))
+	e.emit()
+	e.emitter.open_ended = false
+	e.must(yaml_stream_end_event_initialize(&e.event))
+	e.emit()
+}
+
+func (e *encoder) destroy() {
+	yaml_emitter_delete(&e.emitter)
+}
+
+func (e *encoder) emit() {
+	// This will internally delete the e.event value.
+	if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
+		e.must(false)
+	}
+}
+
+func (e *encoder) must(ok bool) {
+	if !ok {
+		msg := e.emitter.problem
+		if msg == "" {
+			msg = "unknown problem generating YAML content"
+		}
+		failf("%s", msg)
+	}
+}
+
+func (e *encoder) marshal(tag string, in reflect.Value) {
+	if !in.IsValid() {
+		e.nilv()
+		return
+	}
+	iface := in.Interface()
+	if m, ok := iface.(Marshaler); ok {
+		v, err := m.MarshalYAML()
+		if err != nil {
+			fail(err)
+		}
+		if v == nil {
+			e.nilv()
+			return
+		}
+		in = reflect.ValueOf(v)
+	} else if m, ok := iface.(encoding.TextMarshaler); ok {
+		text, err := m.MarshalText()
+		if err != nil {
+			fail(err)
+		}
+		in = reflect.ValueOf(string(text))
+	}
+	switch in.Kind() {
+	case reflect.Interface:
+		if in.IsNil() {
+			e.nilv()
+		} else {
+			e.marshal(tag, in.Elem())
+		}
+	case reflect.Map:
+		e.mapv(tag, in)
+	case reflect.Ptr:
+		if in.IsNil() {
+			e.nilv()
+		} else {
+			e.marshal(tag, in.Elem())
+		}
+	case reflect.Struct:
+		e.structv(tag, in)
+	case reflect.Slice:
+		if in.Type().Elem() == mapItemType {
+			e.itemsv(tag, in)
+		} else {
+			e.slicev(tag, in)
+		}
+	case reflect.String:
+		e.stringv(tag, in)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		if in.Type() == durationType {
+			e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
+		} else {
+			e.intv(tag, in)
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		e.uintv(tag, in)
+	case reflect.Float32, reflect.Float64:
+		e.floatv(tag, in)
+	case reflect.Bool:
+		e.boolv(tag, in)
+	default:
+		panic("cannot marshal type: " + in.Type().String())
+	}
+}
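+
+// As an illustration of the dispatch above, marshalling a value such as
+// struct{ Name string }{"x"} goes through the reflect.Struct case and
+// ultimately emits the scalar events for "name: x".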
+
+func (e *encoder) mapv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		keys := keyList(in.MapKeys())
+		sort.Sort(keys)
+		for _, k := range keys {
+			e.marshal("", k)
+			e.marshal("", in.MapIndex(k))
+		}
+	})
+}
+
+func (e *encoder) itemsv(tag string, in reflect.Value) {
+	e.mappingv(tag, func() {
+		slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
+		for _, item := range slice {
+			e.marshal("", reflect.ValueOf(item.Key))
+			e.marshal("", reflect.ValueOf(item.Value))
+		}
+	})
+}
+
+func (e *encoder) structv(tag string, in reflect.Value) {
+	sinfo, err := getStructInfo(in.Type())
+	if err != nil {
+		panic(err)
+	}
+	e.mappingv(tag, func() {
+		for _, info := range sinfo.FieldsList {
+			var value reflect.Value
+			if info.Inline == nil {
+				value = in.Field(info.Num)
+			} else {
+				value = in.FieldByIndex(info.Inline)
+			}
+			if info.OmitEmpty && isZero(value) {
+				continue
+			}
+			e.marshal("", reflect.ValueOf(info.Key))
+			e.flow = info.Flow
+			e.marshal("", value)
+		}
+		if sinfo.InlineMap >= 0 {
+			m := in.Field(sinfo.InlineMap)
+			if m.Len() > 0 {
+				e.flow = false
+				keys := keyList(m.MapKeys())
+				sort.Sort(keys)
+				for _, k := range keys {
+					if _, found := sinfo.FieldsMap[k.String()]; found {
+						panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
+					}
+					e.marshal("", k)
+					e.flow = false
+					e.marshal("", m.MapIndex(k))
+				}
+			}
+		}
+	})
+}
+
+func (e *encoder) mappingv(tag string, f func()) {
+	implicit := tag == ""
+	style := yaml_BLOCK_MAPPING_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_MAPPING_STYLE
+	}
+	e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	f()
+	e.must(yaml_mapping_end_event_initialize(&e.event))
+	e.emit()
+}
+
+func (e *encoder) slicev(tag string, in reflect.Value) {
+	implicit := tag == ""
+	style := yaml_BLOCK_SEQUENCE_STYLE
+	if e.flow {
+		e.flow = false
+		style = yaml_FLOW_SEQUENCE_STYLE
+	}
+	e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
+	e.emit()
+	n := in.Len()
+	for i := 0; i < n; i++ {
+		e.marshal("", in.Index(i))
+	}
+	e.must(yaml_sequence_end_event_initialize(&e.event))
+	e.emit()
+}
+
+// isBase60Float returns whether s is in base 60 notation as defined in YAML 1.1.
+//
+// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
+// in YAML 1.2 and by this package, but such values are still marshalled quoted
+// for the time being, for compatibility with other parsers.
+func isBase60Float(s string) (result bool) {
+	// Fast path.
+	if s == "" {
+		return false
+	}
+	c := s[0]
+	if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
+		return false
+	}
+	// Do the full match.
+	return base60float.MatchString(s)
+}
+
+// From http://yaml.org/type/float.html, except the regular expression there
+// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
+var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
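+
+// For example, "1:20" (80 in base 60) matches the pattern above, so it is
+// emitted double-quoted rather than as a plain scalar.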
+
+func (e *encoder) stringv(tag string, in reflect.Value) {
+	var style yaml_scalar_style_t
+	s := in.String()
+	rtag, rs := resolve("", s)
+	if rtag == yaml_BINARY_TAG {
+		if tag == "" || tag == yaml_STR_TAG {
+			tag = rtag
+			s = rs.(string)
+		} else if tag == yaml_BINARY_TAG {
+			failf("explicitly tagged !!binary data must be base64-encoded")
+		} else {
+			failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
+		}
+	}
+	if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	} else if strings.Contains(s, "\n") {
+		style = yaml_LITERAL_SCALAR_STYLE
+	} else {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+	e.emitScalar(s, "", tag, style)
+}
+
+func (e *encoder) boolv(tag string, in reflect.Value) {
+	var s string
+	if in.Bool() {
+		s = "true"
+	} else {
+		s = "false"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) intv(tag string, in reflect.Value) {
+	s := strconv.FormatInt(in.Int(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) uintv(tag string, in reflect.Value) {
+	s := strconv.FormatUint(in.Uint(), 10)
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) floatv(tag string, in reflect.Value) {
+	// FIXME: Handle 64 bits here.
+	s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
+	switch s {
+	case "+Inf":
+		s = ".inf"
+	case "-Inf":
+		s = "-.inf"
+	case "NaN":
+		s = ".nan"
+	}
+	e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) nilv() {
+	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
+	implicit := tag == ""
+	e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
+	e.emit()
+}
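+
+// Illustrative use through the package's public API (defined in yaml.go,
+// not in this file):
+//
+//	out, err := yaml.Marshal(map[string]int{"a": 1})
+//	// on success, out is []byte("a: 1\n")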
diff --git a/switchq/vendor/gopkg.in/yaml.v2/parserc.go b/switchq/vendor/gopkg.in/yaml.v2/parserc.go
new file mode 100644
index 0000000..0a7037a
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/parserc.go
@@ -0,0 +1,1096 @@
+package yaml
+
+import (
+	"bytes"
+)
+
+// The parser implements the following grammar:
+//
+// stream               ::= STREAM-START implicit_document? explicit_document* STREAM-END
+// implicit_document    ::= block_node DOCUMENT-END*
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          | properties (block_content | indentless_block_sequence)?
+//                          | block_content
+//                          | indentless_block_sequence
+// block_node           ::= ALIAS
+//                          | properties block_content?
+//                          | block_content
+// flow_node            ::= ALIAS
+//                          | properties flow_content?
+//                          | flow_content
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+// block_content        ::= block_collection | flow_collection | SCALAR
+// flow_content         ::= flow_collection | SCALAR
+// block_collection     ::= block_sequence | block_mapping
+// flow_collection      ::= flow_sequence | flow_mapping
+// block_sequence       ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                          BLOCK-END
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                          flow_sequence_entry?
+//                          FLOW-SEQUENCE-END
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                          flow_mapping_entry?
+//                          FLOW-MAPPING-END
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
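+//
+// As a small example, the document
+//
+//      key:
+//      - item1
+//      - item2
+//
+// parses as an implicit_document whose block_node is a block_mapping with an
+// indentless_sequence as its value.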
+
+// Peek the next token in the token queue.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+	if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
+		return &parser.tokens[parser.tokens_head]
+	}
+	return nil
+}
+
+// Remove the next token from the queue (must be called after peek_token).
+func skip_token(parser *yaml_parser_t) {
+	parser.token_available = false
+	parser.tokens_parsed++
+	parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
+	parser.tokens_head++
+}
+
+// Get the next event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+	// Erase the event object.
+	*event = yaml_event_t{}
+
+	// No events after the end of the stream or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
+		return true
+	}
+
+	// Generate the next event.
+	return yaml_parser_state_machine(parser, event)
+}
+
+// Set parser error.
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = problem_mark
+	return false
+}
+
+// State dispatcher.
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+	//trace("yaml_parser_state_machine", "state:", parser.state.String())
+
+	switch parser.state {
+	case yaml_PARSE_STREAM_START_STATE:
+		return yaml_parser_parse_stream_start(parser, event)
+
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, true)
+
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, false)
+
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return yaml_parser_parse_document_content(parser, event)
+
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return yaml_parser_parse_document_end(parser, event)
+
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, true, false)
+
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return yaml_parser_parse_node(parser, event, true, true)
+
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, false, false)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_block_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, true)
+
+	default:
+		panic("invalid parser state")
+	}
+	return false
+}
+
+// Parse the production:
+// stream   ::= STREAM-START implicit_document? explicit_document* STREAM-END
+//              ************
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_STREAM_START_TOKEN {
+		return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark)
+	}
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                          *
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                          *************************
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// Parse extra document end indicators.
+	if !implicit {
+		for token.typ == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.typ != yaml_DOCUMENT_START_TOKEN &&
+		token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an implicit document.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			typ:        yaml_DOCUMENT_START_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+	} else if token.typ != yaml_STREAM_END_TOKEN {
+		// Parse an explicit document.
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_DOCUMENT_START_TOKEN {
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			typ:               yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+
+	} else {
+		// Parse the stream end.
+		parser.state = yaml_PARSE_END_STATE
+		*event = yaml_event_t{
+			typ:        yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+
+	return true
+}
+
+// Parse the productions:
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//                                                    ***********
+//
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
+		token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
+		token.typ == yaml_DOCUMENT_START_TOKEN ||
+		token.typ == yaml_DOCUMENT_END_TOKEN ||
+		token.typ == yaml_STREAM_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		return yaml_parser_process_empty_scalar(parser, event,
+			token.start_mark)
+	}
+	return yaml_parser_parse_node(parser, event, true, false)
+}
+
+// Parse the productions:
+// implicit_document    ::= block_node DOCUMENT-END*
+//                                     *************
+// explicit_document    ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	implicit := true
+	if token.typ == yaml_DOCUMENT_END_TOKEN {
+		end_mark = token.end_mark
+		skip_token(parser)
+		implicit = false
+	}
+
+	parser.tag_directives = parser.tag_directives[:0]
+
+	parser.state = yaml_PARSE_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		typ:        yaml_DOCUMENT_END_EVENT,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		implicit:   implicit,
+	}
+	return true
+}
+
+// Parse the productions:
+// block_node_or_indentless_sequence    ::=
+//                          ALIAS
+//                          *****
+//                          | properties (block_content | indentless_block_sequence)?
+//                            **********  *
+//                          | block_content | indentless_block_sequence
+//                            *
+// block_node           ::= ALIAS
+//                          *****
+//                          | properties block_content?
+//                            ********** *
+//                          | block_content
+//                            *
+// flow_node            ::= ALIAS
+//                          *****
+//                          | properties flow_content?
+//                            ********** *
+//                          | flow_content
+//                            *
+// properties           ::= TAG ANCHOR? | ANCHOR TAG?
+//                          *************************
+// block_content        ::= block_collection | flow_collection | SCALAR
+//                                                               ******
+// flow_content         ::= flow_collection | SCALAR
+//                                            ******
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
+	//defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_ALIAS_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		*event = yaml_event_t{
+			typ:        yaml_ALIAS_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+			anchor:     token.value,
+		}
+		skip_token(parser)
+		return true
+	}
+
+	start_mark := token.start_mark
+	end_mark := token.start_mark
+
+	var tag_token bool
+	var tag_handle, tag_suffix, anchor []byte
+	var tag_mark yaml_mark_t
+	if token.typ == yaml_ANCHOR_TOKEN {
+		anchor = token.value
+		start_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_TAG_TOKEN {
+			tag_token = true
+			tag_handle = token.value
+			tag_suffix = token.suffix
+			tag_mark = token.start_mark
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	} else if token.typ == yaml_TAG_TOKEN {
+		tag_token = true
+		tag_handle = token.value
+		tag_suffix = token.suffix
+		start_mark = token.start_mark
+		tag_mark = token.start_mark
+		end_mark = token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ == yaml_ANCHOR_TOKEN {
+			anchor = token.value
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	var tag []byte
+	if tag_token {
+		if len(tag_handle) == 0 {
+			tag = tag_suffix
+			tag_suffix = nil
+		} else {
+			for i := range parser.tag_directives {
+				if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
+					tag = append([]byte(nil), parser.tag_directives[i].prefix...)
+					tag = append(tag, tag_suffix...)
+					break
+				}
+			}
+			if len(tag) == 0 {
+				yaml_parser_set_parser_error_context(parser,
+					"while parsing a node", start_mark,
+					"found undefined tag handle", tag_mark)
+				return false
+			}
+		}
+	}
+
+	implicit := len(tag) == 0
+	if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_SCALAR_TOKEN {
+		var plain_implicit, quoted_implicit bool
+		end_mark = token.end_mark
+		if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
+			plain_implicit = true
+		} else if len(tag) == 0 {
+			quoted_implicit = true
+		}
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			value:           token.value,
+			implicit:        plain_implicit,
+			quoted_implicit: quoted_implicit,
+			style:           yaml_style_t(token.style),
+		}
+		skip_token(parser)
+		return true
+	}
+	if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
+		// [Go] Some of the events below can be merged as they differ only on style.
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+		}
+		return true
+	}
+	if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
+		end_mark = token.end_mark
+		parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_START_EVENT,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			anchor:     anchor,
+			tag:        tag,
+			implicit:   implicit,
+			style:      yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+		}
+		return true
+	}
+	if len(anchor) > 0 || len(tag) > 0 {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			typ:             yaml_SCALAR_EVENT,
+			start_mark:      start_mark,
+			end_mark:        end_mark,
+			anchor:          anchor,
+			tag:             tag,
+			implicit:        implicit,
+			quoted_implicit: false,
+			style:           yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+		}
+		return true
+	}
+
+	context := "while parsing a flow node"
+	if block {
+		context = "while parsing a block node"
+	}
+	yaml_parser_set_parser_error_context(parser, context, start_mark,
+		"did not find expected node content", token.start_mark)
+	return false
+}
+
+// Parse the productions:
+// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+//                    ********************  *********** *             *********
+//
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	}
+	if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+
+		*event = yaml_event_t{
+			typ:        yaml_SEQUENCE_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block collection", context_mark,
+		"did not find expected '-' indicator", token.start_mark)
+}
+
+// Parse the productions:
+// indentless_sequence  ::= (BLOCK-ENTRY block_node?)+
+//                           *********** *
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
+			token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		}
+		parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be token.end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//                          *******************
+//                          ((KEY block_node_or_indentless_sequence?)?
+//                            *** *
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//
+//                          BLOCK-END
+//                          *********
+//
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ == yaml_KEY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	} else if token.typ == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+		*event = yaml_event_t{
+			typ:        yaml_MAPPING_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+		return true
+	}
+
+	context_mark := parser.marks[len(parser.marks)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	return yaml_parser_set_parser_error_context(parser,
+		"while parsing a block mapping", context_mark,
+		"did not find expected key", token.start_mark)
+}
+
+// Parse the productions:
+// block_mapping        ::= BLOCK-MAPPING_START
+//
+//                          ((KEY block_node_or_indentless_sequence?)?
+//
+//                          (VALUE block_node_or_indentless_sequence?)?)*
+//                           ***** *
+//                          BLOCK-END
+//
+//
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_KEY_TOKEN &&
+			token.typ != yaml_VALUE_TOKEN &&
+			token.typ != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		}
+		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+	parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence        ::= FLOW-SEQUENCE-START
+//                          *******************
+//                          (flow_sequence_entry FLOW-ENTRY)*
+//                           *                   **********
+//                          flow_sequence_entry?
+//                          *
+//                          FLOW-SEQUENCE-END
+//                          *****************
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *
+//
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow sequence", context_mark,
+					"did not find expected ',' or ']'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+			*event = yaml_event_t{
+				typ:        yaml_MAPPING_START_EVENT,
+				start_mark: token.start_mark,
+				end_mark:   token.end_mark,
+				implicit:   true,
+				style:      yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+			}
+			skip_token(parser)
+			return true
+		} else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+
+	*event = yaml_event_t{
+		typ:        yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+
+	skip_token(parser)
+	return true
+}
+
+//
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                      *** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ != yaml_VALUE_TOKEN &&
+		token.typ != yaml_FLOW_ENTRY_TOKEN &&
+		token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+		parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+		return yaml_parser_parse_node(parser, event, false, false)
+	}
+	mark := token.end_mark
+	skip_token(parser)
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+	return yaml_parser_process_empty_scalar(parser, event, mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                      ***** *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token := peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Parse the productions:
+// flow_sequence_entry  ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                                                      *
+//
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.start_mark, // [Go] Shouldn't this be end_mark?
+	}
+	return true
+}
+
+// Parse the productions:
+// flow_mapping         ::= FLOW-MAPPING-START
+//                          ******************
+//                          (flow_mapping_entry FLOW-ENTRY)*
+//                           *                  **********
+//                          flow_mapping_entry?
+//                          ******************
+//                          FLOW-MAPPING-END
+//                          ****************
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                          *           *** *
+//
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
+	if first {
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+		if !first {
+			if token.typ == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				context_mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow mapping", context_mark,
+					"did not find expected ',' or '}'", token.start_mark)
+			}
+		}
+
+		if token.typ == yaml_KEY_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+			if token.typ != yaml_VALUE_TOKEN &&
+				token.typ != yaml_FLOW_ENTRY_TOKEN &&
+				token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+				return yaml_parser_parse_node(parser, event, false, false)
+			} else {
+				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+				return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+			}
+		} else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	*event = yaml_event_t{
+		typ:        yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+	}
+	skip_token(parser)
+	return true
+}
+
+// Parse the productions:
+// flow_mapping_entry   ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+//                                   *                  ***** *
+//
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+	if empty {
+		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+	}
+	if token.typ == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+// Generate an empty scalar event.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
+	*event = yaml_event_t{
+		typ:        yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		value:      nil, // Empty
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	return true
+}
+
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
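+
+// With the defaults above, a tag such as "!!str" resolves its handle "!!"
+// against the prefix "tag:yaml.org,2002:", giving "tag:yaml.org,2002:str".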
+
+// Parse directives.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+	version_directive_ref **yaml_version_directive_t,
+	tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+	var version_directive *yaml_version_directive_t
+	var tag_directives []yaml_tag_directive_t
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+		if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
+			if version_directive != nil {
+				yaml_parser_set_parser_error(parser,
+					"found duplicate %YAML directive", token.start_mark)
+				return false
+			}
+			if token.major != 1 || token.minor != 1 {
+				yaml_parser_set_parser_error(parser,
+					"found incompatible YAML document", token.start_mark)
+				return false
+			}
+			version_directive = &yaml_version_directive_t{
+				major: token.major,
+				minor: token.minor,
+			}
+		} else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
+			value := yaml_tag_directive_t{
+				handle: token.value,
+				prefix: token.prefix,
+			}
+			if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
+				return false
+			}
+			tag_directives = append(tag_directives, value)
+		}
+
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+	}
+
+	for i := range default_tag_directives {
+		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+			return false
+		}
+	}
+
+	if version_directive_ref != nil {
+		*version_directive_ref = version_directive
+	}
+	if tag_directives_ref != nil {
+		*tag_directives_ref = tag_directives
+	}
+	return true
+}
+
+// Append a tag directive to the directives stack.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
+	for i := range parser.tag_directives {
+		if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+		}
+	}
+
+	// [Go] I suspect the copy is unnecessary. This was likely done
+	// because there was no way to track ownership of the data.
+	value_copy := yaml_tag_directive_t{
+		handle: make([]byte, len(value.handle)),
+		prefix: make([]byte, len(value.prefix)),
+	}
+	copy(value_copy.handle, value.handle)
+	copy(value_copy.prefix, value.prefix)
+	parser.tag_directives = append(parser.tag_directives, value_copy)
+	return true
+}
diff --git a/switchq/vendor/gopkg.in/yaml.v2/readerc.go b/switchq/vendor/gopkg.in/yaml.v2/readerc.go
new file mode 100644
index 0000000..f450791
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/readerc.go
@@ -0,0 +1,394 @@
+package yaml
+
+import (
+	"io"
+)
+
+// Set the reader error and return 0.
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+	return false
+}
+
+// Byte order marks.
+const (
+	bom_UTF8    = "\xef\xbb\xbf"
+	bom_UTF16LE = "\xff\xfe"
+	bom_UTF16BE = "\xfe\xff"
+)
+
+// Determine the input stream encoding by checking the BOM symbol. If no BOM is
+// found, the UTF-8 encoding is assumed. Return true on success, false on failure.
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	// Ensure that we had enough bytes in the raw buffer.
+	for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	// Determine the encoding.
+	buf := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	avail := len(buf) - pos
+	if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+	return true
+}
+
+// Update the raw buffer.
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	// Return if the raw buffer is full.
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	// Return on EOF.
+	if parser.eof {
+		return true
+	}
+
+	// Move the remaining bytes in the raw buffer to the beginning.
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	// Call the read handler to fill the buffer.
+	size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
+	}
+	return true
+}
+
+// Ensure that the buffer contains at least `length` characters.
+// Return true on success, false on failure.
+//
+// The length is supposed to be significantly less than the buffer size.
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	// If the EOF flag is set and the raw buffer is empty, do nothing.
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		return true
+	}
+
+	// Return if the buffer contains enough characters.
+	if parser.unread >= length {
+		return true
+	}
+
+	// Determine the input encoding if it is not known yet.
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	// Move the unread characters to the beginning of the buffer.
+	buffer_len := len(parser.buffer)
+	if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_len -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_len {
+		buffer_len = 0
+		parser.buffer_pos = 0
+	}
+
+	// Open the whole buffer for writing, and cut it before returning.
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	// Fill the buffer until it has enough characters.
+	first := true
+	for parser.unread < length {
+
+		// Fill the raw buffer if necessary.
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_len]
+				return false
+			}
+		}
+		first = false
+
+		// Decode the raw buffer.
+	inner:
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var width int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+
+			// Decode the next character.
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+				// Decode a UTF-8 character.  Check RFC 3629
+				// (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				//
+				// The following table (taken from the RFC) is used for
+				// decoding.
+				//
+				//    Char. number range |        UTF-8 octet sequence
+				//      (hexadecimal)    |              (binary)
+				//   --------------------+------------------------------------
+				//   0000 0000-0000 007F | 0xxxxxxx
+				//   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				//   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				//   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				//
+				// Additionally, the characters in the range 0xD800-0xDFFF
+				// are prohibited as they are reserved for use with UTF-16
+				// surrogate pairs.
+
+				// Determine the length of the UTF-8 sequence.
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				switch {
+				case octet&0x80 == 0x00:
+					width = 1
+				case octet&0xE0 == 0xC0:
+					width = 2
+				case octet&0xF0 == 0xE0:
+					width = 3
+				case octet&0xF8 == 0xF0:
+					width = 4
+				default:
+					// The leading octet is invalid.
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				// Check if the raw buffer contains an incomplete character.
+				if width > raw_unread {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-8 octet sequence",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Decode the leading octet.
+				switch {
+				case octet&0x80 == 0x00:
+					value = rune(octet & 0x7F)
+				case octet&0xE0 == 0xC0:
+					value = rune(octet & 0x1F)
+				case octet&0xF0 == 0xE0:
+					value = rune(octet & 0x0F)
+				case octet&0xF8 == 0xF0:
+					value = rune(octet & 0x07)
+				default:
+					value = 0
+				}
+
+				// Check and decode the trailing octets.
+				for k := 1; k < width; k++ {
+					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+					// Check if the octet is valid.
+					if (octet & 0xC0) != 0x80 {
+						return yaml_parser_set_reader_error(parser,
+							"invalid trailing UTF-8 octet",
+							parser.offset+k, int(octet))
+					}
+
+					// Decode the octet.
+					value = (value << 6) + rune(octet&0x3F)
+				}
+
+				// Check the length of the sequence against the value.
+				switch {
+				case width == 1:
+				case width == 2 && value >= 0x80:
+				case width == 3 && value >= 0x800:
+				case width == 4 && value >= 0x10000:
+				default:
+					return yaml_parser_set_reader_error(parser,
+						"invalid length of a UTF-8 sequence",
+						parser.offset, -1)
+				}
+
+				// Check the range of the value.
+				if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
+					return yaml_parser_set_reader_error(parser,
+						"invalid Unicode character",
+						parser.offset, int(value))
+				}
+
+			case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
+				var low, high int
+				if parser.encoding == yaml_UTF16LE_ENCODING {
+					low, high = 0, 1
+				} else {
+					low, high = 1, 0
+				}
+
+				// The UTF-16 encoding is not as simple as one might
+				// naively think.  Check RFC 2781
+				// (http://www.ietf.org/rfc/rfc2781.txt).
+				//
+				// Normally, two subsequent bytes describe a Unicode
+				// character.  However a special technique (called a
+				// surrogate pair) is used for specifying character
+				// values larger than 0xFFFF.
+				//
+				// A surrogate pair consists of two pseudo-characters:
+				//      high surrogate area (0xD800-0xDBFF)
+				//      low surrogate area (0xDC00-0xDFFF)
+				//
+				// The following formulas are used for decoding
+				// and encoding characters using surrogate pairs:
+				//
+				//  U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
+				//  U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
+				//  W1 = 110110yyyyyyyyyy
+				//  W2 = 110111xxxxxxxxxx
+				//
+				// where U is the character value, W1 is the high surrogate
+				// area, W2 is the low surrogate area.
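+				//
+				// For example, U+1F600 is 0x0F600 after subtracting 0x10000,
+				// which encodes as W1 = 0xD83D and W2 = 0xDE00.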
+
+				// Check for incomplete UTF-16 character.
+				if raw_unread < 2 {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-16 character",
+							parser.offset, -1)
+					}
+					break inner
+				}
+
+				// Get the character.
+				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+				// Check for unexpected low surrogate area.
+				if value&0xFC00 == 0xDC00 {
+					return yaml_parser_set_reader_error(parser,
+						"unexpected low surrogate area",
+						parser.offset, int(value))
+				}
+
+				// Check for a high surrogate area.
+				if value&0xFC00 == 0xD800 {
+					width = 4
+
+					// Check for incomplete surrogate pair.
+					if raw_unread < 4 {
+						if parser.eof {
+							return yaml_parser_set_reader_error(parser,
+								"incomplete UTF-16 surrogate pair",
+								parser.offset, -1)
+						}
+						break inner
+					}
+
+					// Get the next character.
+					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+					// Check for a low surrogate area.
+					if value2&0xFC00 != 0xDC00 {
+						return yaml_parser_set_reader_error(parser,
+							"expected low surrogate area",
+							parser.offset+2, int(value2))
+					}
+
+					// Generate the value of the surrogate pair.
+					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+				} else {
+					width = 2
+				}
+
+			default:
+				panic("impossible")
+			}
+
+			// Check if the character is in the allowed range:
+			//      #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
+			//      | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
+			//      | [#x10000-#x10FFFF]                        (32 bit)
+			switch {
+			case value == 0x09:
+			case value == 0x0A:
+			case value == 0x0D:
+			case value >= 0x20 && value <= 0x7E:
+			case value == 0x85:
+			case value >= 0xA0 && value <= 0xD7FF:
+			case value >= 0xE000 && value <= 0xFFFD:
+			case value >= 0x10000 && value <= 0x10FFFF:
+			default:
+				return yaml_parser_set_reader_error(parser,
+					"control characters are not allowed",
+					parser.offset, int(value))
+			}
+
+			// Move the raw pointers.
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/switchq/vendor/gopkg.in/yaml.v2/resolve.go b/switchq/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 0000000..93a8632
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,203 @@
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+)
+
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", yaml_MERGE_TAG, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+	// TODO This can easily be made faster and produce less garbage.
+	if strings.HasPrefix(tag, longTagPrefix) {
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
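+
+// Editorial sketch (not part of the upstream library): shortTag and longTag
+// are inverses on the standard tag namespace; unrecognized tags pass through
+// both functions unchanged.
+func sketch_tag_roundtrip() bool {
+	long := longTagPrefix + "str" // "tag:yaml.org,2002:str"
+	return shortTag(long) == "!!str" && longTag("!!str") == long
+}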
+
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
+		return true
+	}
+	return false
+}
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+			return
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+		// Handle things we can lookup in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			plain := strings.Replace(in, "_", "", -1)
+			intv, err := strconv.ParseInt(plain, 0, 64)
+			if err == nil {
+				if intv == int64(int(intv)) {
+					return yaml_INT_TAG, int(intv)
+				} else {
+					return yaml_INT_TAG, intv
+				}
+			}
+			uintv, err := strconv.ParseUint(plain, 0, 64)
+			if err == nil {
+				return yaml_INT_TAG, uintv
+			}
+			floatv, err := strconv.ParseFloat(plain, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+			if strings.HasPrefix(plain, "0b") {
+				intv, err := strconv.ParseInt(plain[2:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, int(intv)
+					} else {
+						return yaml_INT_TAG, intv
+					}
+				}
+				uintv, err := strconv.ParseUint(plain[2:], 2, 64)
+				if err == nil {
+					return yaml_INT_TAG, uintv
+				}
+			} else if strings.HasPrefix(plain, "-0b") {
+				intv, err := strconv.ParseInt(plain[3:], 2, 64)
+				if err == nil {
+					if intv == int64(int(intv)) {
+						return yaml_INT_TAG, -int(intv)
+					} else {
+						return yaml_INT_TAG, -intv
+					}
+				}
+			}
+			// XXX Handle timestamps here.
+
+		default:
+			panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
+		}
+	}
+	if tag == yaml_BINARY_TAG {
+		return yaml_BINARY_TAG, in
+	}
+	if utf8.ValidString(in) {
+		return yaml_STR_TAG, in
+	}
+	return yaml_BINARY_TAG, encodeBase64(in)
+}
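+
+// Editorial sketch (not part of the upstream library): with an empty tag,
+// resolve falls through the hint table above, so plain scalars type
+// themselves: "true" resolves to a bool, "12" to an int, "1.5" to a float,
+// and anything unrecognized stays a string.
+func sketch_resolve_examples() {
+	_, b := resolve("", "true") // yaml_BOOL_TAG, true
+	_, i := resolve("", "12")   // yaml_INT_TAG, 12
+	_, f := resolve("", "1.5")  // yaml_FLOAT_TAG, 1.5
+	_, s := resolve("", "hi")   // yaml_STR_TAG, "hi"
+	_, _, _, _ = b, i, f, s
+}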
+
+// encodeBase64 encodes s as base64 that is broken up into multiple lines
+// as appropriate for the resulting length.
+func encodeBase64(s string) string {
+	const lineLen = 70
+	encLen := base64.StdEncoding.EncodedLen(len(s))
+	lines := encLen/lineLen + 1
+	buf := make([]byte, encLen*2+lines)
+	in := buf[0:encLen]
+	out := buf[encLen:]
+	base64.StdEncoding.Encode(in, []byte(s))
+	k := 0
+	for i := 0; i < len(in); i += lineLen {
+		j := i + lineLen
+		if j > len(in) {
+			j = len(in)
+		}
+		k += copy(out[k:], in[i:j])
+		if lines > 1 {
+			out[k] = '\n'
+			k++
+		}
+	}
+	return string(out[:k])
+}
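+
+// Editorial sketch (not part of the upstream library): short inputs fit on a
+// single line and get no trailing break, so the helper is safe for !!binary
+// scalars of any length.
+func sketch_encode_base64() string {
+	return encodeBase64("hello") // "aGVsbG8="
+}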
diff --git a/switchq/vendor/gopkg.in/yaml.v2/scannerc.go b/switchq/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 0000000..2580800
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2710 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/cvs/current.html).  We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// Actually there are only two aspects of Scanning that might be called
+// "clever"; the rest is quite straightforward.  These are "block collection
+// start" and "simple keys".  Both are explained below in detail.
+//
+// Here the Scanning step is explained and implemented.  We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for tags:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !   !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+//      1. An implicit document:
+//
+//          'a scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          STREAM-END
+//
+//      2. An explicit document:
+//
+//          ---
+//          'a scalar'
+//          ...
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-END
+//          STREAM-END
+//
+//      3. Several documents in a stream:
+//
+//          'a scalar'
+//          ---
+//          'another scalar'
+//          ---
+//          'yet another scalar'
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          SCALAR("a scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("another scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("yet another scalar",single-quoted)
+//          STREAM-END
+//
+// We have already introduced the SCALAR token above.  The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+//      1. A recursive sequence:
+//
+//          &A [ *A ]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          ANCHOR("A")
+//          FLOW-SEQUENCE-START
+//          ALIAS("A")
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A tagged scalar:
+//
+//          !!float "3.14"  # A good approximation.
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          TAG("!!","float")
+//          SCALAR("3.14",double-quoted)
+//          STREAM-END
+//
+//      3. Various scalar styles:
+//
+//          --- # Implicit empty plain scalars do not produce tokens.
+//          --- a plain scalar
+//          --- 'a single-quoted scalar'
+//          --- "a double-quoted scalar"
+//          --- |-
+//            a literal scalar
+//          --- >-
+//            a folded
+//            scalar
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          DOCUMENT-START
+//          DOCUMENT-START
+//          SCALAR("a plain scalar",plain)
+//          DOCUMENT-START
+//          SCALAR("a single-quoted scalar",single-quoted)
+//          DOCUMENT-START
+//          SCALAR("a double-quoted scalar",double-quoted)
+//          DOCUMENT-START
+//          SCALAR("a literal scalar",literal)
+//          DOCUMENT-START
+//          SCALAR("a folded scalar",folded)
+//          STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// respectively.  FLOW-ENTRY represents the ',' indicator.  Finally, the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+//      1. A flow sequence:
+//
+//          [item 1, item 2, item 3]
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-SEQUENCE-START
+//          SCALAR("item 1",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 2",plain)
+//          FLOW-ENTRY
+//          SCALAR("item 3",plain)
+//          FLOW-SEQUENCE-END
+//          STREAM-END
+//
+//      2. A flow mapping:
+//
+//          {
+//              a simple key: a value,  # Note that the KEY token is produced.
+//              ? a complex key: another value,
+//          }
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          FLOW-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          FLOW-ENTRY
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          FLOW-ENTRY
+//          FLOW-MAPPING-END
+//          STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator.  Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote an indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes an indentation decrease that ends a block
+// collection (cf. the DEDENT token in Python).  However, YAML has some syntax
+// peculiarities that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' respectively.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+//      1. Block sequences:
+//
+//          - item 1
+//          - item 2
+//          -
+//            - item 3.1
+//            - item 3.2
+//          -
+//            key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 3.1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 3.2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Block mappings:
+//
+//          a simple key: a value   # The KEY token is produced here.
+//          ? a complex key
+//          : another value
+//          a mapping:
+//            key 1: value 1
+//            key 2: value 2
+//          a sequence:
+//            - item 1
+//            - item 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a simple key",plain)
+//          VALUE
+//          SCALAR("a value",plain)
+//          KEY
+//          SCALAR("a complex key",plain)
+//          VALUE
+//          SCALAR("another value",plain)
+//          KEY
+//          SCALAR("a mapping",plain)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML does not always require a new block collection to start on a new
+// line.  If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line.  The following examples
+// illustrate this case:
+//
+//      1. Collections in a sequence:
+//
+//          - - item 1
+//            - item 2
+//          - key 1: value 1
+//            key 2: value 2
+//          - ? complex key
+//            : complex value
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-ENTRY
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("complex key")
+//          VALUE
+//          SCALAR("complex value")
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+//      2. Collections in a mapping:
+//
+//          ? a sequence
+//          : - item 1
+//            - item 2
+//          ? a mapping
+//          : key 1: value 1
+//            key 2: value 2
+//
+//      Tokens:
+//
+//          STREAM-START(utf-8)
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("a sequence",plain)
+//          VALUE
+//          BLOCK-SEQUENCE-START
+//          BLOCK-ENTRY
+//          SCALAR("item 1",plain)
+//          BLOCK-ENTRY
+//          SCALAR("item 2",plain)
+//          BLOCK-END
+//          KEY
+//          SCALAR("a mapping",plain)
+//          VALUE
+//          BLOCK-MAPPING-START
+//          KEY
+//          SCALAR("key 1",plain)
+//          VALUE
+//          SCALAR("value 1",plain)
+//          KEY
+//          SCALAR("key 2",plain)
+//          VALUE
+//          SCALAR("value 2",plain)
+//          BLOCK-END
+//          BLOCK-END
+//          STREAM-END
+//
+// YAML also permits non-indented sequences if they are included in a block
+// mapping.  In this case, the token BLOCK-SEQUENCE-START is not produced:
+//
+//      key:
+//      - item 1    # BLOCK-SEQUENCE-START is NOT produced here.
+//      - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key",plain)
+//      VALUE
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      STREAM-END
+//
+
+// Ensure that the buffer contains the required number of characters.
+// Return true on success, false on failure (reader error or memory error).
+func cache(parser *yaml_parser_t, length int) bool {
+	// [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
+	return parser.unread >= length || yaml_parser_update_buffer(parser, length)
+}
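+
+// Editorial note: the call sites below use the inlined form of cache
+// directly, for example:
+//
+//	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+//		return false
+//	}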
+
+// Advance the buffer pointer.
+func skip(parser *yaml_parser_t) {
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+	if is_crlf(parser.buffer, parser.buffer_pos) {
+		parser.mark.index += 2
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread -= 2
+		parser.buffer_pos += 2
+	} else if is_break(parser.buffer, parser.buffer_pos) {
+		parser.mark.index++
+		parser.mark.column = 0
+		parser.mark.line++
+		parser.unread--
+		parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+	}
+}
+
+// Copy a character to a string buffer and advance pointers.
+func read(parser *yaml_parser_t, s []byte) []byte {
+	w := width(parser.buffer[parser.buffer_pos])
+	if w == 0 {
+		panic("invalid character sequence")
+	}
+	if len(s) == 0 {
+		s = make([]byte, 0, 32)
+	}
+	if w == 1 && len(s)+w <= cap(s) {
+		s = s[:len(s)+1]
+		s[len(s)-1] = parser.buffer[parser.buffer_pos]
+		parser.buffer_pos++
+	} else {
+		s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+		parser.buffer_pos += w
+	}
+	parser.mark.index++
+	parser.mark.column++
+	parser.unread--
+	return s
+}
+
+// Copy a line break character to a string buffer and advance pointers.
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+	buf := parser.buffer
+	pos := parser.buffer_pos
+	switch {
+	case buf[pos] == '\r' && buf[pos+1] == '\n':
+		// CR LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+		parser.mark.index++
+		parser.unread--
+	case buf[pos] == '\r' || buf[pos] == '\n':
+		// CR|LF . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 1
+	case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
+		// NEL . LF
+		s = append(s, '\n')
+		parser.buffer_pos += 2
+	case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
+		// LS|PS . LS|PS
+		s = append(s, buf[parser.buffer_pos:pos+3]...)
+		parser.buffer_pos += 3
+	default:
+		return s
+	}
+	parser.mark.index++
+	parser.mark.column = 0
+	parser.mark.line++
+	parser.unread--
+	return s
+}
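+
+// Editorial note: the break sequences recognized above and their
+// normalization are
+//
+//	CR LF  (\r\n)                        -> single LF
+//	CR or LF                             -> LF
+//	NEL    (\xC2\x85)                    -> LF
+//	LS/PS  (\xE2\x80\xA8, \xE2\x80\xA9)  -> copied through unchanged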
+
+// Get the next token.
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Erase the token object.
+	*token = yaml_token_t{} // [Go] Is this necessary?
+
+	// No tokens after STREAM-END or error.
+	if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+		return true
+	}
+
+	// Ensure that the tokens queue contains enough tokens.
+	if !parser.token_available {
+		if !yaml_parser_fetch_more_tokens(parser) {
+			return false
+		}
+	}
+
+	// Fetch the next token from the queue.
+	*token = parser.tokens[parser.tokens_head]
+	parser.tokens_head++
+	parser.tokens_parsed++
+	parser.token_available = false
+
+	if token.typ == yaml_STREAM_END_TOKEN {
+		parser.stream_end_produced = true
+	}
+	return true
+}
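+
+// Editorial sketch (not part of the upstream library): a caller drains the
+// scanner by fetching tokens until STREAM-END, treating a false result as a
+// reader or scanner error recorded on the parser.
+func sketch_drain_tokens(parser *yaml_parser_t) bool {
+	for {
+		var token yaml_token_t
+		if !yaml_parser_scan(parser, &token) {
+			return false // parser.problem describes the failure
+		}
+		if token.typ == yaml_STREAM_END_TOKEN {
+			return true
+		}
+	}
+}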
+
+// Set the scanner error and return false.
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
+	parser.error = yaml_SCANNER_ERROR
+	parser.context = context
+	parser.context_mark = context_mark
+	parser.problem = problem
+	parser.problem_mark = parser.mark
+	return false
+}
+
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
+	context := "while parsing a tag"
+	if directive {
+		context = "while parsing a %TAG directive"
+	}
+	return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
+}
+
+func trace(args ...interface{}) func() {
+	pargs := append([]interface{}{"+++"}, args...)
+	fmt.Println(pargs...)
+	pargs = append([]interface{}{"---"}, args...)
+	return func() { fmt.Println(pargs...) }
+}
+
+// Ensure that the tokens queue contains at least one token which can be
+// returned to the Parser.
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+	// While we need more tokens to fetch, do it.
+	for {
+		// Check if we really need to fetch more tokens.
+		need_more_tokens := false
+
+		if parser.tokens_head == len(parser.tokens) {
+			// Queue is empty.
+			need_more_tokens = true
+		} else {
+			// Check if any potential simple key may occupy the head position.
+			if !yaml_parser_stale_simple_keys(parser) {
+				return false
+			}
+
+			for i := range parser.simple_keys {
+				simple_key := &parser.simple_keys[i]
+				if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
+					need_more_tokens = true
+					break
+				}
+			}
+		}
+
+		// We are finished.
+		if !need_more_tokens {
+			break
+		}
+		// Fetch the next token.
+		if !yaml_parser_fetch_next_token(parser) {
+			return false
+		}
+	}
+
+	parser.token_available = true
+	return true
+}
+
+// The dispatcher for token fetchers.
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+	// Ensure that the buffer is initialized.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check if we just started scanning.  Fetch STREAM-START then.
+	if !parser.stream_start_produced {
+		return yaml_parser_fetch_stream_start(parser)
+	}
+
+	// Eat whitespaces and comments until we reach the next token.
+	if !yaml_parser_scan_to_next_token(parser) {
+		return false
+	}
+
+	// Remove obsolete potential simple keys.
+	if !yaml_parser_stale_simple_keys(parser) {
+		return false
+	}
+
+	// Check the indentation level against the current column.
+	if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+		return false
+	}
+
+	// Ensure that the buffer contains at least 4 characters.  4 is the length
+	// of the longest indicators ('--- ' and '... ').
+	if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+		return false
+	}
+
+	// Is it the end of the stream?
+	if is_z(parser.buffer, parser.buffer_pos) {
+		return yaml_parser_fetch_stream_end(parser)
+	}
+
+	// Is it a directive?
+	if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
+		return yaml_parser_fetch_directive(parser)
+	}
+
+	buf := parser.buffer
+	pos := parser.buffer_pos
+
+	// Is it the document start indicator?
+	if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
+	}
+
+	// Is it the document end indicator?
+	if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
+		return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
+	}
+
+	// Is it the flow sequence start indicator?
+	if buf[pos] == '[' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
+	}
+
+	// Is it the flow mapping start indicator?
+	if parser.buffer[parser.buffer_pos] == '{' {
+		return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
+	}
+
+	// Is it the flow sequence end indicator?
+	if parser.buffer[parser.buffer_pos] == ']' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_SEQUENCE_END_TOKEN)
+	}
+
+	// Is it the flow mapping end indicator?
+	if parser.buffer[parser.buffer_pos] == '}' {
+		return yaml_parser_fetch_flow_collection_end(parser,
+			yaml_FLOW_MAPPING_END_TOKEN)
+	}
+
+	// Is it the flow entry indicator?
+	if parser.buffer[parser.buffer_pos] == ',' {
+		return yaml_parser_fetch_flow_entry(parser)
+	}
+
+	// Is it the block entry indicator?
+	if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
+		return yaml_parser_fetch_block_entry(parser)
+	}
+
+	// Is it the key indicator?
+	if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_key(parser)
+	}
+
+	// Is it the value indicator?
+	if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_value(parser)
+	}
+
+	// Is it an alias?
+	if parser.buffer[parser.buffer_pos] == '*' {
+		return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+	}
+
+	// Is it an anchor?
+	if parser.buffer[parser.buffer_pos] == '&' {
+		return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+	}
+
+	// Is it a tag?
+	if parser.buffer[parser.buffer_pos] == '!' {
+		return yaml_parser_fetch_tag(parser)
+	}
+
+	// Is it a literal scalar?
+	if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, true)
+	}
+
+	// Is it a folded scalar?
+	if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
+		return yaml_parser_fetch_block_scalar(parser, false)
+	}
+
+	// Is it a single-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '\'' {
+		return yaml_parser_fetch_flow_scalar(parser, true)
+	}
+
+	// Is it a double-quoted scalar?
+	if parser.buffer[parser.buffer_pos] == '"' {
+		return yaml_parser_fetch_flow_scalar(parser, false)
+	}
+
+	// Is it a plain scalar?
+	//
+	// A plain scalar may start with any non-blank character except
+	//
+	//      '-', '?', ':', ',', '[', ']', '{', '}',
+	//      '#', '&', '*', '!', '|', '>', '\'', '\"',
+	//      '%', '@', '`'.
+	//
+	// In the block context (and, for the '-' indicator, in the flow context
+	// too), it may also start with the characters
+	//
+	//      '-', '?', ':'
+	//
+	// if it is followed by a non-space character.
+	//
+	// The last rule is more restrictive than the specification requires.
+	// [Go] Make this logic more reasonable.
+	//switch parser.buffer[parser.buffer_pos] {
+	//case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
+	//}
+	if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
+		parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
+		parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+		parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
+		parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
+		parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
+		(parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
+		(parser.flow_level == 0 &&
+			(parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
+			!is_blankz(parser.buffer, parser.buffer_pos+1)) {
+		return yaml_parser_fetch_plain_scalar(parser)
+	}
+
+	// If we haven't determined the token type so far, it is an error.
+	return yaml_parser_set_scanner_error(parser,
+		"while scanning for the next token", parser.mark,
+		"found character that cannot start any token")
+}
+
+// Check the list of potential simple keys and remove the positions that
+// cannot contain simple keys anymore.
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+	// Check for a potential simple key for each flow level.
+	for i := range parser.simple_keys {
+		simple_key := &parser.simple_keys[i]
+
+		// The specification requires that a simple key
+		//
+		//  - is limited to a single line,
+		//  - is shorter than 1024 characters.
+		if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
+
+			// Check if the potential simple key to be removed is required.
+			if simple_key.required {
+				return yaml_parser_set_scanner_error(parser,
+					"while scanning a simple key", simple_key.mark,
+					"could not find expected ':'")
+			}
+			simple_key.possible = false
+		}
+	}
+	return true
+}
+
+// Check if a simple key may start at the current position and add it if
+// needed.
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	// A simple key is required only when it is the first token in the current
+	// line.  Therefore it is always allowed.  But we add a check anyway.
+	if required && !parser.simple_key_allowed {
+		panic("should not happen")
+	}
+
+	//
+	// If the current position may start a simple key, save it.
+	//
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		}
+		simple_key.mark = parser.mark
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+	}
+	// Remove the key from the stack.
+	parser.simple_keys[i].possible = false
+	return true
+}
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// Increase the flow level.
+	parser.flow_level++
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+	}
+	return true
+}
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level.  In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+
+		// Create a token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        typ,
+			start_mark: mark,
+			end_mark:   mark,
+		}
+		if number > -1 {
+			number -= parser.tokens_parsed
+		}
+		yaml_insert_token(parser, number, &token)
+	}
+	return true
+}
+
+// Pop indentation levels from the indents stack until the current level
+// becomes less than or equal to the column.  For each indentation level, append
+// the BLOCK-END token.
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	// Loop through the indentation levels in the stack.
+	for parser.indent > column {
+		// Create a token and append it to the queue.
+		token := yaml_token_t{
+			typ:        yaml_BLOCK_END_TOKEN,
+			start_mark: parser.mark,
+			end_mark:   parser.mark,
+		}
+		yaml_insert_token(parser, -1, &token)
+
+		// Pop the indentation level.
+		parser.indent = parser.indents[len(parser.indents)-1]
+		parser.indents = parser.indents[:len(parser.indents)-1]
+	}
+	return true
+}
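+
+// Editorial worked example (illustrative): scanning
+//
+//	a:
+//	  b: 1
+//
+// roll_indent fires at the "a" key (pushes -1, sets indent to 0, inserts
+// BLOCK-MAPPING-START) and again at the "b" key (pushes 0, sets indent to 2,
+// inserts another BLOCK-MAPPING-START); at the end of the stream
+// unroll_indent(parser, -1) pops both levels, appending one BLOCK-END per pop.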
+
+// Initialize the scanner and produce the STREAM-START token.
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+
+	// Set the initial indentation.
+	parser.indent = -1
+
+	// Initialize the simple key stack.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+	// A simple key is allowed at the beginning of the stream.
+	parser.simple_key_allowed = true
+
+	// We have started.
+	parser.stream_start_produced = true
+
+	// Create the STREAM-START token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_START_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+		encoding:   parser.encoding,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the STREAM-END token and shut down the scanner.
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+
+	// Force new line.
+	if parser.mark.column != 0 {
+		parser.mark.column = 0
+		parser.mark.line++
+	}
+
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the STREAM-END token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_STREAM_END_TOKEN,
+		start_mark: parser.mark,
+		end_mark:   parser.mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Create the VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+	token := yaml_token_t{}
+	if !yaml_parser_scan_directive(parser, &token) {
+		return false
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the DOCUMENT-START or DOCUMENT-END token.
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset the indentation level.
+	if !yaml_parser_unroll_indent(parser, -1) {
+		return false
+	}
+
+	// Reset simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it, because the Parser is
+		// able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if simple_key.possible {
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_VALUE_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the ALIAS or ANCHOR token.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// An anchor or an alias could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow an anchor or an alias.
+	parser.simple_key_allowed = false
+
+	// Create the ALIAS or ANCHOR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_anchor(parser, &token, typ) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the TAG token.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+	// A tag could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a tag.
+	parser.simple_key_allowed = false
+
+	// Create the TAG token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_tag(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+	// Remove any potential simple keys.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// A simple key may follow a block scalar.
+	parser.simple_key_allowed = true
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+	// A flow scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a flow scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the SCALAR(...,plain) token.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	// A plain scalar could be a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// A simple key cannot follow a plain scalar.
+	parser.simple_key_allowed = false
+
+	// Create the SCALAR token and append it to the queue.
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Eat whitespaces and comments until the next token is found.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+
+	// Loop until the next token is found.
+	for {
+		// Allow the BOM mark to start a line.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		// Eat whitespaces.
+		// Tabs are allowed:
+		//  - in the flow context
+		//  - in the block context, but not at the beginning of the line or
+		//  after '-', '?', or ':' (complex value).
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Eat a comment until a line break.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			for !is_breakz(parser.buffer, parser.buffer_pos) {
+				skip(parser)
+				if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+					return false
+				}
+			}
+		}
+
+		// If it is a line break, eat it.
+		if is_break(parser.buffer, parser.buffer_pos) {
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+			skip_line(parser)
+
+			// In the block context, a new line may start a simple key.
+			if parser.flow_level == 0 {
+				parser.simple_key_allowed = true
+			}
+		} else {
+			break // We have found a token.
+		}
+	}
+
+	return true
+}
+
+// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+//
+// Scope:
+//      %YAML    1.1    # a comment \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	// Eat '%'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the directive name.
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	// Is it a YAML directive?
+	if bytes.Equal(name, []byte("YAML")) {
+		// Scan the VERSION directive value.
+		var major, minor int8
+		if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a VERSION-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+
+		// Is it a TAG directive?
+	} else if bytes.Equal(name, []byte("TAG")) {
+		// Scan the TAG directive value.
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
+			return false
+		}
+		end_mark := parser.mark
+
+		// Create a TAG-DIRECTIVE token.
+		*token = yaml_token_t{
+			typ:        yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+
+		// Unknown directive.
+	} else {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unknown directive name")
+		return false
+	}
+
+	// Eat the rest of the line including any comments.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+// Scan the directive name.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is a digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long.
+		length++
+		if length > max_number_length {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the number was present.
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+	*number = value
+	return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+//
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a handle.
+	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+		return false
+	}
+
+	// Expect a whitespace.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blank(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace")
+		return false
+	}
+
+	// Eat whitespaces.
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Scan a prefix.
+	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+		return false
+	}
+
+	// Expect a whitespace or line break.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	*handle = handle_value
+	*prefix = prefix_value
+	return true
+}
+
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
+	var s []byte
+
+	// Eat the indicator character.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the value.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	end_mark := parser.mark
+
+	// Check that the length of the anchor is greater than 0 and that it is
+	// followed by a whitespace character or one of the indicators:
+	//
+	//      '?', ':', ',', ']', '}', '%', '@', '`'.
+
+	if len(s) == 0 ||
+		!(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
+			parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
+			parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
+			parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
+			parser.buffer[parser.buffer_pos] == '`') {
+		context := "while scanning an alias"
+		if typ == yaml_ANCHOR_TOKEN {
+			context = "while scanning an anchor"
+		}
+		yaml_parser_set_scanner_error(parser, context, start_mark,
+			"did not find expected alphabetic or numeric character")
+		return false
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+	}
+
+	return true
+}
+
+// Scan a TAG token.
+
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+	var handle, suffix []byte
+
+	start_mark := parser.mark
+
+	// Check if the tag is in the canonical form.
+	if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+		return false
+	}
+
+	if parser.buffer[parser.buffer_pos+1] == '<' {
+		// Keep the handle as ''
+
+		// Eat '!<'
+		skip(parser)
+		skip(parser)
+
+		// Consume the tag value.
+		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+			return false
+		}
+
+		// Check for '>' and eat it.
+		if parser.buffer[parser.buffer_pos] != '>' {
+			yaml_parser_set_scanner_error(parser, "while scanning a tag",
+				start_mark, "did not find the expected '>'")
+			return false
+		}
+
+		skip(parser)
+	} else {
+		// The tag has either the '!suffix' or the '!handle!suffix' form.
+
+		// First, try to scan a handle.
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		// Check if it is, indeed, a handle.
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			// Scan the suffix now.
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			// It wasn't a handle after all.  Scan the rest of the tag.
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			// Set the handle to '!'.
+			handle = []byte{'!'}
+
+			// A special case: the '!' tag.  Set the handle to '' and the
+			// suffix to '!'.
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+		}
+	}
+
+	// Check the character which ends the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+	return true
+}
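+
+// For illustration (not part of libyaml), how the three tag forms above scan:
+// "!!str" yields handle "!!" and suffix "str"; "!local" yields handle "!" and
+// suffix "local"; the verbatim form "!<tag:yaml.org,2002:str>" yields an
+// empty handle with the full URI as the suffix.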
+
+// Scan a tag handle.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
+	// Check the initial '!' character.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	var s []byte
+
+	// Copy the '!' character.
+	s = read(parser, s)
+
+	// Copy all subsequent alphabetical and numerical characters.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the trailing character is '!' and copy it.
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		// It's either the '!' tag or not really a tag handle.  If it's a %TAG
+		// directive, it's an error.  If it's a tag token, it must be a part of a URI.
+		// Compare against "!" rather than checking s[1], which would read out of
+		// range when the handle is a lone '!'.
+		if directive && string(s) != "!" {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+	return true
+}
+
+// Scan a tag.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
+	//size_t length = head ? strlen((char *)head) : 0
+	var s []byte
+
+	// Copy the head if needed.
+	//
+	// Note that we don't copy the leading '!' character.
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	// Scan the tag.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// The set of characters that may appear in URI is as follows:
+	//
+	//      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	//      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	//      '%'.
+	// [Go] Convert this into more reasonable logic.
+	for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
+		parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
+		parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
+		parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
+		parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
+		parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
+		parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
+		parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
+		parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
+		parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
+		parser.buffer[parser.buffer_pos] == '%' {
+		// Check if it is a URI-escape sequence.
+		if parser.buffer[parser.buffer_pos] == '%' {
+			if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the tag is non-empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+	*uri = s
+	return true
+}
+
+// Decode a URI-escape sequence corresponding to a single UTF-8 character.
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
+
+	// Decode the required number of characters.
+	w := 1024 // Sentinel: the width of the UTF-8 sequence is not yet known.
+	for w > 0 {
+		// Check for a URI-escaped octet.
+		if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+			return false
+		}
+
+		if !(parser.buffer[parser.buffer_pos] == '%' &&
+			is_hex(parser.buffer, parser.buffer_pos+1) &&
+			is_hex(parser.buffer, parser.buffer_pos+2)) {
+			return yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find URI escaped octet")
+		}
+
+		// Get the octet.
+		octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
+
+		// If it is the leading octet, determine the length of the UTF-8 sequence.
+		if w == 1024 {
+			w = width(octet)
+			if w == 0 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect leading UTF-8 octet")
+			}
+		} else {
+			// Check if the trailing octet is correct.
+			if octet&0xC0 != 0x80 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect trailing UTF-8 octet")
+			}
+		}
+
+		// Copy the octet and move the pointers.
+		*s = append(*s, octet)
+		skip(parser)
+		skip(parser)
+		skip(parser)
+		w--
+	}
+	return true
+}
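+
+// For illustration (not part of libyaml): the escaped tag text "%C3%A9"
+// decodes in two iterations of the loop above.  The leading octet 0xC3 has
+// width 2, so one trailing octet is expected; 0xA9 passes the 10xxxxxx check
+// and the bytes 0xC3 0xA9 (UTF-8 for U+00E9) are appended to s.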
+
+// Scan a block scalar.
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
+	// Eat the indicator '|' or '>'.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Scan the additional block scalar indicators.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	// Check for a chomping indicator.
+	var chomping, increment int
+	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+		// Set the chomping method and eat the indicator.
+		if parser.buffer[parser.buffer_pos] == '+' {
+			chomping = +1
+		} else {
+			chomping = -1
+		}
+		skip(parser)
+
+		// Check for an indentation indicator.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if is_digit(parser.buffer, parser.buffer_pos) {
+			// Check that the indentation is greater than 0.
+			if parser.buffer[parser.buffer_pos] == '0' {
+				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+					start_mark, "found an indentation indicator equal to 0")
+				return false
+			}
+
+			// Get the indentation level and eat the indicator.
+			increment = as_digit(parser.buffer, parser.buffer_pos)
+			skip(parser)
+		}
+
+	} else if is_digit(parser.buffer, parser.buffer_pos) {
+		// Do the same as above, but in the opposite order.
+
+		if parser.buffer[parser.buffer_pos] == '0' {
+			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found an indentation indicator equal to 0")
+			return false
+		}
+		increment = as_digit(parser.buffer, parser.buffer_pos)
+		skip(parser)
+
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+			if parser.buffer[parser.buffer_pos] == '+' {
+				chomping = +1
+			} else {
+				chomping = -1
+			}
+			skip(parser)
+		}
+	}
+
+	// Eat whitespaces and comments to the end of the line.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	// Check if we are at the end of the line.
+	if !is_breakz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	// Eat a line break.
+	if is_break(parser.buffer, parser.buffer_pos) {
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	end_mark := parser.mark
+
+	// Set the indentation level if it was specified.
+	var indent int
+	if increment > 0 {
+		if parser.indent >= 0 {
+			indent = parser.indent + increment
+		} else {
+			indent = increment
+		}
+	}
+
+	// Scan the leading line breaks and determine the indentation level if needed.
+	var s, leading_break, trailing_breaks []byte
+	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+		return false
+	}
+
+	// Scan the block scalar content.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var leading_blank, trailing_blank bool
+	for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
+		// We are at the beginning of a non-empty line.
+
+		// Is it a trailing whitespace?
+		trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Check if we need to fold the leading line break.
+		if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
+			// Do we need to join the lines by space?
+			if len(trailing_breaks) == 0 {
+				s = append(s, ' ')
+			}
+		} else {
+			s = append(s, leading_break...)
+		}
+		leading_break = leading_break[:0]
+
+		// Append the remaining line breaks.
+		s = append(s, trailing_breaks...)
+		trailing_breaks = trailing_breaks[:0]
+
+		// Is it a leading whitespace?
+		leading_blank = is_blank(parser.buffer, parser.buffer_pos)
+
+		// Consume the current line.
+		for !is_breakz(parser.buffer, parser.buffer_pos) {
+			s = read(parser, s)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+
+		leading_break = read_line(parser, leading_break)
+
+		// Eat the following indentation spaces and line breaks.
+		if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
+			return false
+		}
+	}
+
+	// Chomp the tail.
+	if chomping != -1 {
+		s = append(s, leading_break...)
+	}
+	if chomping == 1 {
+		s = append(s, trailing_breaks...)
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_LITERAL_SCALAR_STYLE,
+	}
+	if !literal {
+		token.style = yaml_FOLDED_SCALAR_STYLE
+	}
+	return true
+}
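+
+// For illustration (not part of libyaml): given the block scalar header "|2-",
+// the function above records literal == true, increment == 2 and
+// chomping == -1, so the content is kept verbatim at the parent indentation
+// plus two columns and the trailing line breaks are dropped.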
+
+// Scan indentation spaces and line breaks for a block scalar.  Determine the
+// indentation level if needed.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
+	*end_mark = parser.mark
+
+	// Eat the indentation spaces and line breaks.
+	max_indent := 0
+	for {
+		// Eat the indentation spaces.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+		for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		// Check for a tab character messing up the indentation.
+		if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		// Have we found a non-empty line?
+		if !is_break(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		// Consume the line break.
+		if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+			return false
+		}
+		// [Go] Should really be returning breaks instead.
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	// Determine the indentation level if needed.
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+	return true
+}
+
+// Scan a quoted scalar.
+func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
+	// Eat the left quote.
+	start_mark := parser.mark
+	skip(parser)
+
+	// Consume the content of the quoted scalar.
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	for {
+		// Check that there are no document indicators at the beginning of the line.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexdecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
+					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+							start_mark, "found invalid Unicode character escape code")
+						return false
+					}
+					if value <= 0x7F {
+						s = append(s, byte(value))
+					} else if value <= 0x7FF {
+						s = append(s, byte(0xC0+(value>>6)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else if value <= 0xFFFF {
+						s = append(s, byte(0xE0+(value>>12)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					} else {
+						s = append(s, byte(0xF0+(value>>18)))
+						s = append(s, byte(0x80+((value>>12)&0x3F)))
+						s = append(s, byte(0x80+((value>>6)&0x3F)))
+						s = append(s, byte(0x80+(value&0x3F)))
+					}
+
+					// Advance the pointer.
+					for k := 0; k < code_length; k++ {
+						skip(parser)
+					}
+				}
+			} else {
+				// It is a non-escaped non-blank character.
+				s = read(parser, s)
+			}
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Check if we are at the end of the scalar.
+		if single {
+			if parser.buffer[parser.buffer_pos] == '\'' {
+				break
+			}
+		} else {
+			if parser.buffer[parser.buffer_pos] == '"' {
+				break
+			}
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Join the whitespaces or fold line breaks.
+		if leading_blanks {
+			// Do we need to fold line breaks?
+			if len(leading_break) > 0 && leading_break[0] == '\n' {
+				if len(trailing_breaks) == 0 {
+					s = append(s, ' ')
+				} else {
+					s = append(s, trailing_breaks...)
+				}
+			} else {
+				s = append(s, leading_break...)
+				s = append(s, trailing_breaks...)
+			}
+			trailing_breaks = trailing_breaks[:0]
+			leading_break = leading_break[:0]
+		} else {
+			s = append(s, whitespaces...)
+			whitespaces = whitespaces[:0]
+		}
+	}
+
+	// Eat the right quote.
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
+	}
+	if !single {
+		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+	return true
+}
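+
+// For illustration (not part of libyaml): inside a double-quoted scalar the
+// escape "\u00E9" is scanned with code_length == 4 and value == 0xE9, which
+// is re-encoded as the UTF-8 bytes 0xC3 0xA9; "\x41" appends 'A' directly.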
+
+// Scan a plain scalar.
+func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
+
+	var s, leading_break, trailing_breaks, whitespaces []byte
+	var leading_blanks bool
+	var indent = parser.indent + 1
+
+	start_mark := parser.mark
+	end_mark := parser.mark
+
+	// Consume the content of the plain scalar.
+	for {
+		// Check for a document indicator.
+		if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
+			return false
+		}
+		if parser.mark.column == 0 &&
+			((parser.buffer[parser.buffer_pos+0] == '-' &&
+				parser.buffer[parser.buffer_pos+1] == '-' &&
+				parser.buffer[parser.buffer_pos+2] == '-') ||
+				(parser.buffer[parser.buffer_pos+0] == '.' &&
+					parser.buffer[parser.buffer_pos+1] == '.' &&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			break
+		}
+
+		// Check for a comment.
+		if parser.buffer[parser.buffer_pos] == '#' {
+			break
+		}
+
+		// Consume non-blank characters.
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+
+			// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
+			if parser.flow_level > 0 &&
+				parser.buffer[parser.buffer_pos] == ':' &&
+				!is_blankz(parser.buffer, parser.buffer_pos+1) {
+				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+					start_mark, "found unexpected ':'")
+				return false
+			}
+
+			// Check for indicators that may end a plain scalar.
+			if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
+				(parser.flow_level > 0 &&
+					(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
+						parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
+						parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
+						parser.buffer[parser.buffer_pos] == '}')) {
+				break
+			}
+
+			// Check if we need to join whitespaces and breaks.
+			if leading_blanks || len(whitespaces) > 0 {
+				if leading_blanks {
+					// Do we need to fold line breaks?
+					if leading_break[0] == '\n' {
+						if len(trailing_breaks) == 0 {
+							s = append(s, ' ')
+						} else {
+							s = append(s, trailing_breaks...)
+						}
+					} else {
+						s = append(s, leading_break...)
+						s = append(s, trailing_breaks...)
+					}
+					trailing_breaks = trailing_breaks[:0]
+					leading_break = leading_break[:0]
+					leading_blanks = false
+				} else {
+					s = append(s, whitespaces...)
+					whitespaces = whitespaces[:0]
+				}
+			}
+
+			// Copy the character.
+			s = read(parser, s)
+
+			end_mark = parser.mark
+			if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+				return false
+			}
+		}
+
+		// Is it the end?
+		if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
+			break
+		}
+
+		// Consume blank characters.
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+
+		for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
+			if is_blank(parser.buffer, parser.buffer_pos) {
+
+				// Check for a tab character that abuses indentation.
+				if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
+					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
+						start_mark, "found a tab character that violates indentation")
+					return false
+				}
+
+				// Consume a space or a tab character.
+				if !leading_blanks {
+					whitespaces = read(parser, whitespaces)
+				} else {
+					skip(parser)
+				}
+			} else {
+				if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
+					return false
+				}
+
+				// Check if it is a first line break.
+				if !leading_blanks {
+					whitespaces = whitespaces[:0]
+					leading_break = read_line(parser, leading_break)
+					leading_blanks = true
+				} else {
+					trailing_breaks = read_line(parser, trailing_breaks)
+				}
+			}
+			if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+				return false
+			}
+		}
+
+		// Check indentation level.
+		if parser.flow_level == 0 && parser.mark.column < indent {
+			break
+		}
+	}
+
+	// Create a token.
+	*token = yaml_token_t{
+		typ:        yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_PLAIN_SCALAR_STYLE,
+	}
+
+	// Note that we change the 'simple_key_allowed' flag.
+	if leading_blanks {
+		parser.simple_key_allowed = true
+	}
+	return true
+}
diff --git a/switchq/vendor/gopkg.in/yaml.v2/sorter.go b/switchq/vendor/gopkg.in/yaml.v2/sorter.go
new file mode 100644
index 0000000..5958822
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/sorter.go
@@ -0,0 +1,104 @@
+package yaml
+
+import (
+	"reflect"
+	"unicode"
+)
+
+type keyList []reflect.Value
+
+func (l keyList) Len() int      { return len(l) }
+func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
+func (l keyList) Less(i, j int) bool {
+	a := l[i]
+	b := l[j]
+	ak := a.Kind()
+	bk := b.Kind()
+	for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
+		a = a.Elem()
+		ak = a.Kind()
+	}
+	for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
+		b = b.Elem()
+		bk = b.Kind()
+	}
+	af, aok := keyFloat(a)
+	bf, bok := keyFloat(b)
+	if aok && bok {
+		if af != bf {
+			return af < bf
+		}
+		if ak != bk {
+			return ak < bk
+		}
+		return numLess(a, b)
+	}
+	if ak != reflect.String || bk != reflect.String {
+		return ak < bk
+	}
+	ar, br := []rune(a.String()), []rune(b.String())
+	for i := 0; i < len(ar) && i < len(br); i++ {
+		if ar[i] == br[i] {
+			continue
+		}
+		al := unicode.IsLetter(ar[i])
+		bl := unicode.IsLetter(br[i])
+		if al && bl {
+			return ar[i] < br[i]
+		}
+		if al || bl {
+			return bl
+		}
+		var ai, bi int
+		var an, bn int64
+		for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
+			an = an*10 + int64(ar[ai]-'0')
+		}
+		for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
+			bn = bn*10 + int64(br[bi]-'0')
+		}
+		if an != bn {
+			return an < bn
+		}
+		if ai != bi {
+			return ai < bi
+		}
+		return ar[i] < br[i]
+	}
+	return len(ar) < len(br)
+}
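+
+// For illustration (not part of the library): sorting with this comparator
+// produces a "natural" key order, e.g.
+//
+//     a1, a2, a10, b        (digit runs compare numerically, so 2 < 10)
+//     true, 1, 2.5, "x"     (numbers and bools sort before strings)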
+
+// keyFloat returns a float value for v if it is a number or a bool,
+// and reports whether it is.
+func keyFloat(v reflect.Value) (f float64, ok bool) {
+	switch v.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return float64(v.Int()), true
+	case reflect.Float32, reflect.Float64:
+		return v.Float(), true
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return float64(v.Uint()), true
+	case reflect.Bool:
+		if v.Bool() {
+			return 1, true
+		}
+		return 0, true
+	}
+	return 0, false
+}
+
+// numLess returns whether a < b.
+// a and b must have the same kind.
+func numLess(a, b reflect.Value) bool {
+	switch a.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return a.Int() < b.Int()
+	case reflect.Float32, reflect.Float64:
+		return a.Float() < b.Float()
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return a.Uint() < b.Uint()
+	case reflect.Bool:
+		return !a.Bool() && b.Bool()
+	}
+	panic("not a number")
+}
diff --git a/switchq/vendor/gopkg.in/yaml.v2/writerc.go b/switchq/vendor/gopkg.in/yaml.v2/writerc.go
new file mode 100644
index 0000000..190362f
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/writerc.go
@@ -0,0 +1,89 @@
+package yaml
+
+// Set the writer error and return false.
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.error = yaml_WRITER_ERROR
+	emitter.problem = problem
+	return false
+}
+
+// Flush the output buffer.
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+	if emitter.write_handler == nil {
+		panic("write handler not set")
+	}
+
+	// Check if the buffer is empty.
+	if emitter.buffer_pos == 0 {
+		return true
+	}
+
+	// If the output encoding is UTF-8, we don't need to recode the buffer.
+	if emitter.encoding == yaml_UTF8_ENCODING {
+		if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
+			return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+		}
+		emitter.buffer_pos = 0
+		return true
+	}
+
+	// Recode the buffer into the raw buffer.
+	var low, high int
+	if emitter.encoding == yaml_UTF16LE_ENCODING {
+		low, high = 0, 1
+	} else {
+		high, low = 1, 0
+	}
+
+	pos := 0
+	for pos < emitter.buffer_pos {
+		// See the "reader.c" code for more details on UTF-8 encoding.  Note
+		// that we assume that the buffer contains a valid UTF-8 sequence.
+
+		// Read the next UTF-8 character.
+		octet := emitter.buffer[pos]
+
+		var w int
+		var value rune
+		switch {
+		case octet&0x80 == 0x00:
+			w, value = 1, rune(octet&0x7F)
+		case octet&0xE0 == 0xC0:
+			w, value = 2, rune(octet&0x1F)
+		case octet&0xF0 == 0xE0:
+			w, value = 3, rune(octet&0x0F)
+		case octet&0xF8 == 0xF0:
+			w, value = 4, rune(octet&0x07)
+		}
+		for k := 1; k < w; k++ {
+			octet = emitter.buffer[pos+k]
+			value = (value << 6) + (rune(octet) & 0x3F)
+		}
+		pos += w
+
+		// Write the character.
+		if value < 0x10000 {
+			var b [2]byte
+			b[high] = byte(value >> 8)
+			b[low] = byte(value & 0xFF)
+			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
+		} else {
+			// Write the character using a surrogate pair (check "reader.c").
+			var b [4]byte
+			value -= 0x10000
+			b[high] = byte(0xD8 + (value >> 18))
+			b[low] = byte((value >> 10) & 0xFF)
+			b[high+2] = byte(0xDC + ((value >> 8) & 0x03)) // only bits 8-9 of value belong to the low surrogate
+			b[low+2] = byte(value & 0xFF)
+			emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
+		}
+	}
+
+	// Write the raw buffer.
+	if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+		return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+	}
+	emitter.buffer_pos = 0
+	emitter.raw_buffer = emitter.raw_buffer[:0]
+	return true
+}
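+
+// For illustration (not part of libyaml): with yaml_UTF16LE_ENCODING the
+// buffered UTF-8 bytes 0xC3 0xA9 (U+00E9) are recoded to E9 00, and the
+// astral character U+1F600 becomes the surrogate pair D83D DE00, emitted
+// little-endian as 3D D8 00 DE.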
diff --git a/switchq/vendor/gopkg.in/yaml.v2/yaml.go b/switchq/vendor/gopkg.in/yaml.v2/yaml.go
new file mode 100644
index 0000000..36d6b88
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/yaml.go
@@ -0,0 +1,346 @@
+// Package yaml implements YAML support for the Go language.
+//
+// Source code and other details for the project are available at GitHub:
+//
+//   https://github.com/go-yaml/yaml
+//
+package yaml
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// MapSlice encodes and decodes as a YAML map.
+// The order of keys is preserved when encoding and decoding.
+type MapSlice []MapItem
+
+// MapItem is an item in a MapSlice.
+type MapItem struct {
+	Key, Value interface{}
+}
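+
+// For illustration (not part of the library), MapSlice keeps keys in
+// insertion order rather than sorting them:
+//
+//     data, err := yaml.Marshal(yaml.MapSlice{
+//         {Key: "zebra", Value: 1},
+//         {Key: "apple", Value: 2},
+//     })
+//     // data == []byte("zebra: 1\napple: 2\n")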
+
+// The Unmarshaler interface may be implemented by types to customize their
+// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
+// method receives a function that may be called to unmarshal the original
+// YAML value into a field or variable. It is safe to call the unmarshal
+// function parameter more than once if necessary.
+type Unmarshaler interface {
+	UnmarshalYAML(unmarshal func(interface{}) error) error
+}
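+
+// For illustration (not part of the library), a hypothetical type that relies
+// on being able to call unmarshal more than once, accepting either a single
+// string or a list of strings:
+//
+//     type StringList []string
+//
+//     func (l *StringList) UnmarshalYAML(unmarshal func(interface{}) error) error {
+//         var one string
+//         if err := unmarshal(&one); err == nil {
+//             *l = StringList{one}
+//             return nil
+//         }
+//         var many []string
+//         if err := unmarshal(&many); err != nil {
+//             return err
+//         }
+//         *l = StringList(many)
+//         return nil
+//     }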
+
+// The Marshaler interface may be implemented by types to customize their
+// behavior when being marshaled into a YAML document. The returned value
+// is marshaled in place of the original value implementing Marshaler.
+//
+// If an error is returned by MarshalYAML, the marshaling procedure stops
+// and returns with the provided error.
+type Marshaler interface {
+	MarshalYAML() (interface{}, error)
+}
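+
+// For illustration (not part of the library), a hypothetical wrapper that
+// always marshals as an RFC 3339 string:
+//
+//     type Timestamp struct{ time.Time }
+//
+//     func (t Timestamp) MarshalYAML() (interface{}, error) {
+//         return t.Format(time.RFC3339), nil
+//     }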
+
+// Unmarshal decodes the first document found within the in byte slice
+// and assigns decoded values into the out value.
+//
+// Maps and pointers (to a struct, string, int, etc) are accepted as out
+// values. If an internal pointer within a struct is not initialized,
+// the yaml package will initialize it if necessary for unmarshalling
+// the provided data. The out parameter must not be nil.
+//
+// The type of the decoded values should be compatible with the respective
+// values in out. If one or more values cannot be decoded due to type
+// mismatches, decoding continues partially until the end of the YAML
+// content, and a *yaml.TypeError is returned with details for all
+// missed values.
+//
+// Struct fields are only unmarshalled if they are exported (have an
+// upper case first letter), and are unmarshalled using the field name
+// lowercased as the default key. Custom keys may be defined via the
+// "yaml" name in the field tag: the content preceding the first comma
+// is used as the key, and the following comma-separated options are
+// used to tweak the marshalling process (see Marshal).
+// Conflicting names result in a runtime error.
+//
+// For example:
+//
+//     type T struct {
+//         F int `yaml:"a,omitempty"`
+//         B int
+//     }
+//     var t T
+//     yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
+//
+// See the documentation of Marshal for the format of tags and a list of
+// supported tag options.
+//
+func Unmarshal(in []byte, out interface{}) (err error) {
+	defer handleErr(&err)
+	d := newDecoder()
+	p := newParser(in)
+	defer p.destroy()
+	node := p.parse()
+	if node != nil {
+		v := reflect.ValueOf(out)
+		if v.Kind() == reflect.Ptr && !v.IsNil() {
+			v = v.Elem()
+		}
+		d.unmarshal(node, v)
+	}
+	if len(d.terrors) > 0 {
+		return &TypeError{d.terrors}
+	}
+	return nil
+}
+
+// Marshal serializes the value provided into a YAML document. The structure
+// of the generated document will reflect the structure of the value itself.
+// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
+//
+// Struct fields are only marshalled if they are exported (have an upper case
+// first letter), and are marshalled using the field name lowercased as the
+// default key. Custom keys may be defined via the "yaml" name in the field
+// tag: the content preceding the first comma is used as the key, and the
+// following comma-separated options are used to tweak the marshalling process.
+// Conflicting names result in a runtime error.
+//
+// The field tag format accepted is:
+//
+//     `(...) yaml:"[<key>][,<flag1>[,<flag2>]]" (...)`
+//
+// The following flags are currently supported:
+//
+//     omitempty    Only include the field if it's not set to the zero
+//                  value for the type or to empty slices or maps.
+//                  Does not apply to zero valued structs.
+//
+//     flow         Marshal using a flow style (useful for structs,
+//                  sequences and maps).
+//
+//     inline       Inline the field, which must be a struct or a map,
+//                  causing all of its fields or keys to be processed as if
+//                  they were part of the outer struct. For maps, keys must
+//                  not conflict with the yaml keys of other struct fields.
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//     type T struct {
+//         F int "a,omitempty"
+//         B int
+//     }
+//     yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//     yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshal("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
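+
+// For illustration (not part of the library): because decoding continues past
+// type mismatches, a caller can keep the partially decoded value when the
+// error is a *yaml.TypeError:
+//
+//     var cfg Config // hypothetical target struct
+//     if err := yaml.Unmarshal(data, &cfg); err != nil {
+//         if te, ok := err.(*yaml.TypeError); ok {
+//             log.Printf("ignored %d bad fields", len(te.Errors))
+//         } else {
+//             return err
+//         }
+//     }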
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+
+	// Inline holds the field index if the field is part of an inlined struct.
+	Inline []int
+}
+
+var structMap = make(map[reflect.Type]*structInfo)
+var fieldMapMutex sync.RWMutex
+
+func getStructInfo(st reflect.Type) (*structInfo, error) {
+	fieldMapMutex.RLock()
+	sinfo, found := structMap[st]
+	fieldMapMutex.RUnlock()
+	if found {
+		return sinfo, nil
+	}
+
+	n := st.NumField()
+	fieldsMap := make(map[string]fieldInfo)
+	fieldsList := make([]fieldInfo, 0, n)
+	inlineMap := -1
+	for i := 0; i != n; i++ {
+		field := st.Field(i)
+		if field.PkgPath != "" && !field.Anonymous {
+			continue // Private field
+		}
+
+		info := fieldInfo{Num: i}
+
+		tag := field.Tag.Get("yaml")
+		if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
+			tag = string(field.Tag)
+		}
+		if tag == "-" {
+			continue
+		}
+
+		inline := false
+		fields := strings.Split(tag, ",")
+		if len(fields) > 1 {
+			for _, flag := range fields[1:] {
+				switch flag {
+				case "omitempty":
+					info.OmitEmpty = true
+				case "flow":
+					info.Flow = true
+				case "inline":
+					inline = true
+				default:
+					return nil, fmt.Errorf("Unsupported flag %q in tag %q of type %s", flag, tag, st)
+				}
+			}
+			tag = fields[0]
+		}
+
+		if inline {
+			switch field.Type.Kind() {
+			case reflect.Map:
+				if inlineMap >= 0 {
+					return nil, errors.New("Multiple ,inline maps in struct " + st.String())
+				}
+				if field.Type.Key() != reflect.TypeOf("") {
+					return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
+				}
+				inlineMap = info.Num
+			case reflect.Struct:
+				sinfo, err := getStructInfo(field.Type)
+				if err != nil {
+					return nil, err
+				}
+				for _, finfo := range sinfo.FieldsList {
+					if _, found := fieldsMap[finfo.Key]; found {
+						msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
+						return nil, errors.New(msg)
+					}
+					if finfo.Inline == nil {
+						finfo.Inline = []int{i, finfo.Num}
+					} else {
+						finfo.Inline = append([]int{i}, finfo.Inline...)
+					}
+					fieldsMap[finfo.Key] = finfo
+					fieldsList = append(fieldsList, finfo)
+				}
+			default:
+				//return nil, errors.New("Option ,inline needs a struct value or map field")
+				return nil, errors.New("Option ,inline needs a struct value field")
+			}
+			continue
+		}
+
+		if tag != "" {
+			info.Key = tag
+		} else {
+			info.Key = strings.ToLower(field.Name)
+		}
+
+		if _, found = fieldsMap[info.Key]; found {
+			msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
+			return nil, errors.New(msg)
+		}
+
+		fieldsList = append(fieldsList, info)
+		fieldsMap[info.Key] = info
+	}
+
+	sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
+
+	fieldMapMutex.Lock()
+	structMap[st] = sinfo
+	fieldMapMutex.Unlock()
+	return sinfo, nil
+}
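+
+// For illustration (not part of the library): given
+//
+//     type T struct {
+//         Name string            `yaml:"name,omitempty"`
+//         Meta map[string]string `yaml:",inline"`
+//     }
+//
+// getStructInfo records Key "name" with OmitEmpty set for the first field and
+// marks the second field as the struct's inline map (InlineMap == 1).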
+
+func isZero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.String:
+		return len(v.String()) == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	case reflect.Slice:
+		return v.Len() == 0
+	case reflect.Map:
+		return v.Len() == 0
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Struct:
+		vt := v.Type()
+		for i := v.NumField() - 1; i >= 0; i-- {
+			if vt.Field(i).PkgPath != "" {
+				continue // Private field
+			}
+			if !isZero(v.Field(i)) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
diff --git a/switchq/vendor/gopkg.in/yaml.v2/yamlh.go b/switchq/vendor/gopkg.in/yaml.v2/yamlh.go
new file mode 100644
index 0000000..d60a6b6
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/yamlh.go
@@ -0,0 +1,716 @@
+package yaml
+
+import (
+	"io"
+)
+
+// The version directive data.
+type yaml_version_directive_t struct {
+	major int8 // The major version number.
+	minor int8 // The minor version number.
+}
+
+// The tag directive data.
+type yaml_tag_directive_t struct {
+	handle []byte // The tag handle.
+	prefix []byte // The tag prefix.
+}
+
+type yaml_encoding_t int
+
+// The stream encoding.
+const (
+	// Let the parser choose the encoding.
+	yaml_ANY_ENCODING yaml_encoding_t = iota
+
+	yaml_UTF8_ENCODING    // The default UTF-8 encoding.
+	yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
+	yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
+)
+
+type yaml_break_t int
+
+// Line break types.
+const (
+	// Let the parser choose the break type.
+	yaml_ANY_BREAK yaml_break_t = iota
+
+	yaml_CR_BREAK   // Use CR for line breaks (Mac style).
+	yaml_LN_BREAK   // Use LN for line breaks (Unix style).
+	yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
+)
+
+type yaml_error_type_t int
+
+// Many bad things could happen with the parser and emitter.
+const (
+	// No error is produced.
+	yaml_NO_ERROR yaml_error_type_t = iota
+
+	yaml_MEMORY_ERROR   // Cannot allocate or reallocate a block of memory.
+	yaml_READER_ERROR   // Cannot read or decode the input stream.
+	yaml_SCANNER_ERROR  // Cannot scan the input stream.
+	yaml_PARSER_ERROR   // Cannot parse the input stream.
+	yaml_COMPOSER_ERROR // Cannot compose a YAML document.
+	yaml_WRITER_ERROR   // Cannot write to the output stream.
+	yaml_EMITTER_ERROR  // Cannot emit a YAML stream.
+)
+
+// The pointer position.
+type yaml_mark_t struct {
+	index  int // The position index.
+	line   int // The position line.
+	column int // The position column.
+}
+
+// Node Styles
+
+type yaml_style_t int8
+
+type yaml_scalar_style_t yaml_style_t
+
+// Scalar styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+	yaml_PLAIN_SCALAR_STYLE         // The plain scalar style.
+	yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
+	yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
+	yaml_LITERAL_SCALAR_STYLE       // The literal scalar style.
+	yaml_FOLDED_SCALAR_STYLE        // The folded scalar style.
+)
+
+type yaml_sequence_style_t yaml_style_t
+
+// Sequence styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+	yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
+	yaml_FLOW_SEQUENCE_STYLE  // The flow sequence style.
+)
+
+type yaml_mapping_style_t yaml_style_t
+
+// Mapping styles.
+const (
+	// Let the emitter choose the style.
+	yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+	yaml_BLOCK_MAPPING_STYLE // The block mapping style.
+	yaml_FLOW_MAPPING_STYLE  // The flow mapping style.
+)
+
+// Tokens
+
+type yaml_token_type_t int
+
+// Token types.
+const (
+	// An empty token.
+	yaml_NO_TOKEN yaml_token_type_t = iota
+
+	yaml_STREAM_START_TOKEN // A STREAM-START token.
+	yaml_STREAM_END_TOKEN   // A STREAM-END token.
+
+	yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
+	yaml_TAG_DIRECTIVE_TOKEN     // A TAG-DIRECTIVE token.
+	yaml_DOCUMENT_START_TOKEN    // A DOCUMENT-START token.
+	yaml_DOCUMENT_END_TOKEN      // A DOCUMENT-END token.
+
+	yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
+	yaml_BLOCK_MAPPING_START_TOKEN  // A BLOCK-MAPPING-START token.
+	yaml_BLOCK_END_TOKEN            // A BLOCK-END token.
+
+	yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
+	yaml_FLOW_SEQUENCE_END_TOKEN   // A FLOW-SEQUENCE-END token.
+	yaml_FLOW_MAPPING_START_TOKEN  // A FLOW-MAPPING-START token.
+	yaml_FLOW_MAPPING_END_TOKEN    // A FLOW-MAPPING-END token.
+
+	yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
+	yaml_FLOW_ENTRY_TOKEN  // A FLOW-ENTRY token.
+	yaml_KEY_TOKEN         // A KEY token.
+	yaml_VALUE_TOKEN       // A VALUE token.
+
+	yaml_ALIAS_TOKEN  // An ALIAS token.
+	yaml_ANCHOR_TOKEN // An ANCHOR token.
+	yaml_TAG_TOKEN    // A TAG token.
+	yaml_SCALAR_TOKEN // A SCALAR token.
+)
+
+func (tt yaml_token_type_t) String() string {
+	switch tt {
+	case yaml_NO_TOKEN:
+		return "yaml_NO_TOKEN"
+	case yaml_STREAM_START_TOKEN:
+		return "yaml_STREAM_START_TOKEN"
+	case yaml_STREAM_END_TOKEN:
+		return "yaml_STREAM_END_TOKEN"
+	case yaml_VERSION_DIRECTIVE_TOKEN:
+		return "yaml_VERSION_DIRECTIVE_TOKEN"
+	case yaml_TAG_DIRECTIVE_TOKEN:
+		return "yaml_TAG_DIRECTIVE_TOKEN"
+	case yaml_DOCUMENT_START_TOKEN:
+		return "yaml_DOCUMENT_START_TOKEN"
+	case yaml_DOCUMENT_END_TOKEN:
+		return "yaml_DOCUMENT_END_TOKEN"
+	case yaml_BLOCK_SEQUENCE_START_TOKEN:
+		return "yaml_BLOCK_SEQUENCE_START_TOKEN"
+	case yaml_BLOCK_MAPPING_START_TOKEN:
+		return "yaml_BLOCK_MAPPING_START_TOKEN"
+	case yaml_BLOCK_END_TOKEN:
+		return "yaml_BLOCK_END_TOKEN"
+	case yaml_FLOW_SEQUENCE_START_TOKEN:
+		return "yaml_FLOW_SEQUENCE_START_TOKEN"
+	case yaml_FLOW_SEQUENCE_END_TOKEN:
+		return "yaml_FLOW_SEQUENCE_END_TOKEN"
+	case yaml_FLOW_MAPPING_START_TOKEN:
+		return "yaml_FLOW_MAPPING_START_TOKEN"
+	case yaml_FLOW_MAPPING_END_TOKEN:
+		return "yaml_FLOW_MAPPING_END_TOKEN"
+	case yaml_BLOCK_ENTRY_TOKEN:
+		return "yaml_BLOCK_ENTRY_TOKEN"
+	case yaml_FLOW_ENTRY_TOKEN:
+		return "yaml_FLOW_ENTRY_TOKEN"
+	case yaml_KEY_TOKEN:
+		return "yaml_KEY_TOKEN"
+	case yaml_VALUE_TOKEN:
+		return "yaml_VALUE_TOKEN"
+	case yaml_ALIAS_TOKEN:
+		return "yaml_ALIAS_TOKEN"
+	case yaml_ANCHOR_TOKEN:
+		return "yaml_ANCHOR_TOKEN"
+	case yaml_TAG_TOKEN:
+		return "yaml_TAG_TOKEN"
+	case yaml_SCALAR_TOKEN:
+		return "yaml_SCALAR_TOKEN"
+	}
+	return "<unknown token>"
+}
+
+// The token structure.
+type yaml_token_t struct {
+	// The token type.
+	typ yaml_token_type_t
+
+	// The start/end of the token.
+	start_mark, end_mark yaml_mark_t
+
+	// The stream encoding (for yaml_STREAM_START_TOKEN).
+	encoding yaml_encoding_t
+
+	// The alias/anchor/scalar value or tag/tag directive handle
+	// (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
+	value []byte
+
+	// The tag suffix (for yaml_TAG_TOKEN).
+	suffix []byte
+
+	// The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
+	prefix []byte
+
+	// The scalar style (for yaml_SCALAR_TOKEN).
+	style yaml_scalar_style_t
+
+	// The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
+	major, minor int8
+}
+
+// Events
+
+type yaml_event_type_t int8
+
+// Event types.
+const (
+	// An empty event.
+	yaml_NO_EVENT yaml_event_type_t = iota
+
+	yaml_STREAM_START_EVENT   // A STREAM-START event.
+	yaml_STREAM_END_EVENT     // A STREAM-END event.
+	yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
+	yaml_DOCUMENT_END_EVENT   // A DOCUMENT-END event.
+	yaml_ALIAS_EVENT          // An ALIAS event.
+	yaml_SCALAR_EVENT         // A SCALAR event.
+	yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
+	yaml_SEQUENCE_END_EVENT   // A SEQUENCE-END event.
+	yaml_MAPPING_START_EVENT  // A MAPPING-START event.
+	yaml_MAPPING_END_EVENT    // A MAPPING-END event.
+)
+
+// The event structure.
+type yaml_event_t struct {
+
+	// The event type.
+	typ yaml_event_type_t
+
+	// The start and end of the event.
+	start_mark, end_mark yaml_mark_t
+
+	// The document encoding (for yaml_STREAM_START_EVENT).
+	encoding yaml_encoding_t
+
+	// The version directive (for yaml_DOCUMENT_START_EVENT).
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives (for yaml_DOCUMENT_START_EVENT).
+	tag_directives []yaml_tag_directive_t
+
+	// The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
+	anchor []byte
+
+	// The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	tag []byte
+
+	// The scalar value (for yaml_SCALAR_EVENT).
+	value []byte
+
+	// Is the document start/end indicator implicit, or the tag optional?
+	// (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
+	implicit bool
+
+	// Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
+	quoted_implicit bool
+
+	// The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
+	style yaml_style_t
+}
+
+func (e *yaml_event_t) scalar_style() yaml_scalar_style_t     { return yaml_scalar_style_t(e.style) }
+func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
+func (e *yaml_event_t) mapping_style() yaml_mapping_style_t   { return yaml_mapping_style_t(e.style) }
+
+// Nodes
+
+const (
+	yaml_NULL_TAG      = "tag:yaml.org,2002:null"      // The tag !!null with the only possible value: null.
+	yaml_BOOL_TAG      = "tag:yaml.org,2002:bool"      // The tag !!bool with the values: true and false.
+	yaml_STR_TAG       = "tag:yaml.org,2002:str"       // The tag !!str for string values.
+	yaml_INT_TAG       = "tag:yaml.org,2002:int"       // The tag !!int for integer values.
+	yaml_FLOAT_TAG     = "tag:yaml.org,2002:float"     // The tag !!float for float values.
+	yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
+
+	yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
+	yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
+
+	// Not in original libyaml.
+	yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+	yaml_MERGE_TAG  = "tag:yaml.org,2002:merge"
+
+	yaml_DEFAULT_SCALAR_TAG   = yaml_STR_TAG // The default scalar tag is !!str.
+	yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
+	yaml_DEFAULT_MAPPING_TAG  = yaml_MAP_TAG // The default mapping tag is !!map.
+)
+
+type yaml_node_type_t int
+
+// Node types.
+const (
+	// An empty node.
+	yaml_NO_NODE yaml_node_type_t = iota
+
+	yaml_SCALAR_NODE   // A scalar node.
+	yaml_SEQUENCE_NODE // A sequence node.
+	yaml_MAPPING_NODE  // A mapping node.
+)
+
+// An element of a sequence node.
+type yaml_node_item_t int
+
+// An element of a mapping node.
+type yaml_node_pair_t struct {
+	key   int // The key of the element.
+	value int // The value of the element.
+}
+
+// The node structure.
+type yaml_node_t struct {
+	typ yaml_node_type_t // The node type.
+	tag []byte           // The node tag.
+
+	// The node data.
+
+	// The scalar parameters (for yaml_SCALAR_NODE).
+	scalar struct {
+		value  []byte              // The scalar value.
+		length int                 // The length of the scalar value.
+		style  yaml_scalar_style_t // The scalar style.
+	}
+
+	// The sequence parameters (for yaml_SEQUENCE_NODE).
+	sequence struct {
+		items_data []yaml_node_item_t    // The stack of sequence items.
+		style      yaml_sequence_style_t // The sequence style.
+	}
+
+	// The mapping parameters (for yaml_MAPPING_NODE).
+	mapping struct {
+		pairs_data  []yaml_node_pair_t   // The stack of mapping pairs (key, value).
+		pairs_start *yaml_node_pair_t    // The beginning of the stack.
+		pairs_end   *yaml_node_pair_t    // The end of the stack.
+		pairs_top   *yaml_node_pair_t    // The top of the stack.
+		style       yaml_mapping_style_t // The mapping style.
+	}
+
+	start_mark yaml_mark_t // The beginning of the node.
+	end_mark   yaml_mark_t // The end of the node.
+
+}
+
+// The document structure.
+type yaml_document_t struct {
+
+	// The document nodes.
+	nodes []yaml_node_t
+
+	// The version directive.
+	version_directive *yaml_version_directive_t
+
+	// The list of tag directives.
+	tag_directives_data  []yaml_tag_directive_t
+	tag_directives_start int // The beginning of the tag directives list.
+	tag_directives_end   int // The end of the tag directives list.
+
+	start_implicit int // Is the document start indicator implicit?
+	end_implicit   int // Is the document end indicator implicit?
+
+	// The start/end of the document.
+	start_mark, end_mark yaml_mark_t
+}
+
+// The prototype of a read handler.
+//
+// The read handler is called when the parser needs to read more bytes from the
+// source. The handler should write at most len(buffer) bytes to buffer and
+// return the number of bytes written.
+//
+// On success, the handler returns the number of bytes read and a nil error.
+// At the end of the stream it returns 0 together with io.EOF; any other
+// error aborts parsing with a reader error.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
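+
+// For illustration (not part of libyaml), a minimal handler backed by an
+// io.Reader r (hypothetical; captured by the closure), where the end of the
+// stream is reported as io.EOF:
+//
+//     handler := func(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+//         return r.Read(buffer)
+//     }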
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
+)
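+
+// As a rough illustration, parsing a minimal stream such as "a: 1" steps
+// through a progression along the lines of:
+//
+//	yaml_PARSE_STREAM_START_STATE
+//	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+//	yaml_PARSE_BLOCK_NODE_STATE
+//	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+//	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+//	yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+//	yaml_PARSE_DOCUMENT_END_STATE
+//	yaml_PARSE_END_STATE
+//
+// with nested states pushed onto and popped from the parser's states stack.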
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return "<unknown parser state>"
+}
+
+// This structure holds aliases data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_file io.Reader // File input data.
+	input      []byte    // String input data.
+	input_pos  int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The input encoding.
+
+	offset int         // The offset of the current position (in bytes).
+	mark   yaml_mark_t // The mark of the current position.
+
+	// Scanner stuff
+
+	stream_start_produced bool // Have we started to scan the input stream?
+	stream_end_produced   bool // Have we reached the end of the input stream?
+
+	flow_level int // The number of unclosed '[' and '{' indicators.
+
+	tokens          []yaml_token_t // The tokens queue.
+	tokens_head     int            // The head of the tokens queue.
+	tokens_parsed   int            // The number of tokens fetched from the queue.
+	token_available bool           // Does the tokens queue contain a token ready for dequeueing?
+
+	indent  int   // The current indentation level.
+	indents []int // The indentation levels stack.
+
+	simple_key_allowed bool                // May a simple key occur at the current position?
+	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+
+	// Parser stuff
+
+	state          yaml_parser_state_t    // The current parser state.
+	states         []yaml_parser_state_t  // The parser states stack.
+	marks          []yaml_mark_t          // The stack of marks.
+	tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+	// Dumper stuff
+
+	aliases []yaml_alias_data_t // The alias data.
+
+	document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the
+// accumulated characters to the output. It should write all of buffer to
+// the output and return nil on success, or a non-nil error on failure.
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
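+
+// For illustration, a write handler that flushes to the emitter's io.Writer
+// output could be as simple as (a sketch, not necessarily the handler the
+// library installs):
+//
+//	func writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+//		_, err := emitter.output_file.Write(buffer)
+//		return err
+//	}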
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal.  Manage the structure using the @c yaml_emitter_
+// family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_file   io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // Is the output in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // Was the last character whitespace?
+	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
+	open_ended bool // Is an explicit document end required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // Has the stream been opened?
+	closed bool // Has the stream been closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // Has the node been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
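+
+// The state and mark stacks declared above are managed with plain slice
+// idioms throughout the parser and emitter, e.g. (a sketch of the pattern,
+// not an exported API):
+//
+//	parser.states = append(parser.states, state)         // push
+//	state = parser.states[len(parser.states)-1]          // peek
+//	parser.states = parser.states[:len(parser.states)-1] // pop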
diff --git a/switchq/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/switchq/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 0000000..8110ce3
--- /dev/null
+++ b/switchq/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
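+
+// The multipliers above are presumably worst-case re-encoding bounds: when
+// decoding UTF-16 input, a 2-byte unit can expand to 3 bytes of UTF-8, and
+// when encoding UTF-8 output as UTF-16, a 1-byte character doubles in size
+// (hence *2, plus 2 spare bytes).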
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the specified position can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a UTF-8 BOM. (Note that the index
+// argument is ignored; the first three bytes are always examined.)
+func is_bom(b []byte, i int) bool {
+	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+	return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+	return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+	//return is_space(b, i) || is_tab(b, i)
+	return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+	return (b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+	return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool {
+	//return is_break(b, i) || is_z(b, i)
+	return (        // is_break:
+	b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		// is_z:
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, or NUL.
+func is_spacez(b []byte, i int) bool {
+	//return is_space(b, i) || is_breakz(b, i)
+	return ( // is_space:
+	b[i] == ' ' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Check if the character is a line break, space, tab, or NUL.
+func is_blankz(b []byte, i int) bool {
+	//return is_blank(b, i) || is_breakz(b, i)
+	return ( // is_blank:
+	b[i] == ' ' || b[i] == '\t' ||
+		// is_breakz:
+		b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
+		b[i] == 0)
+}
+
+// Determine the width of the character.
+func width(b byte) int {
+	// Don't replace these by a switch without first
+	// confirming that it is being inlined.
+	if b&0x80 == 0x00 {
+		return 1
+	}
+	if b&0xE0 == 0xC0 {
+		return 2
+	}
+	if b&0xF0 == 0xE0 {
+		return 3
+	}
+	if b&0xF8 == 0xF0 {
+		return 4
+	}
+	return 0
+}
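+
+// For illustration, width can drive a character-by-character walk over a
+// UTF-8 buffer (a sketch, not a helper defined in this package):
+//
+//	for i := 0; i < len(b); {
+//		w := width(b[i])
+//		if w == 0 {
+//			break // not a valid leading byte
+//		}
+//		i += w
+//	}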
diff --git a/switchq/vendor/vendor.json b/switchq/vendor/vendor.json
new file mode 100644
index 0000000..5796e97
--- /dev/null
+++ b/switchq/vendor/vendor.json
@@ -0,0 +1,133 @@
+{
+	"comment": "",
+	"ignore": "test",
+	"package": [
+		{
+			"checksumSHA1": "dGXnnR7ZhsrZNnEqFimk6q7YCqs=",
+			"path": "github.com/Sirupsen/logrus",
+			"revision": "61e43dc76f7ee59a82bdf3d71033dc12bea4c77d",
+			"revisionTime": "2017-01-13T01:19:11Z"
+		},
+		{
+			"checksumSHA1": "F5dR3/i70EhSIMZfeIV+H8/PtvM=",
+			"path": "github.com/gorilla/mux",
+			"revision": "392c28fe23e1c45ddba891b0320b3b5df220beea",
+			"revisionTime": "2017-01-18T13:43:44Z"
+		},
+		{
+			"checksumSHA1": "b4Bd+o7lqSpiJr8eYHrfn6Y5iPE=",
+			"path": "github.com/juju/ansiterm",
+			"revision": "35c59b9e0fe275705a71bf5d58ee293f27efbbc4",
+			"revisionTime": "2016-11-07T20:46:39Z"
+		},
+		{
+			"checksumSHA1": "mqeBN4W37/KmjWBQT4xqIQGjGYE=",
+			"path": "github.com/juju/ansiterm/tabwriter",
+			"revision": "35c59b9e0fe275705a71bf5d58ee293f27efbbc4",
+			"revisionTime": "2016-11-07T20:46:39Z"
+		},
+		{
+			"checksumSHA1": "KIX/3RadQkfl4ZxCmOQ01vAGLEI=",
+			"path": "github.com/juju/errors",
+			"revision": "6f54ff6318409d31ff16261533ce2c8381a4fd5d",
+			"revisionTime": "2016-08-09T03:08:48Z"
+		},
+		{
+			"checksumSHA1": "kFWNEYI94c34qJ3twdktZxjLQPE=",
+			"path": "github.com/juju/gomaasapi",
+			"revision": "f0300c96aa5e0deb3cc599ca8a8949097b955e60",
+			"revisionTime": "2016-11-02T12:07:14Z"
+		},
+		{
+			"checksumSHA1": "WttBhWbKX60YUiSiyF+mQKpfdVo=",
+			"path": "github.com/juju/loggo",
+			"revision": "3b7ece48644d35850f4ced4c2cbc2cf8413f58e0",
+			"revisionTime": "2016-08-18T02:57:24Z"
+		},
+		{
+			"checksumSHA1": "WsQOT3cgol8O3aenURnzUokuIAA=",
+			"path": "github.com/juju/schema",
+			"revision": "e4e05803c9a103fdfa880476044100ac17e54830",
+			"revisionTime": "2016-09-16T14:28:50Z"
+		},
+		{
+			"checksumSHA1": "uBuPqeE5+azM1/N7YtTU2IUXuTc=",
+			"path": "github.com/juju/utils",
+			"revision": "e5eb4d99a51c3ded8e4e8bdfe527260ceeec997d",
+			"revisionTime": "2017-01-16T11:32:47Z"
+		},
+		{
+			"checksumSHA1": "CEj9efal05UefJa0E6s1Uskv5XU=",
+			"path": "github.com/juju/utils/clock",
+			"revision": "e5eb4d99a51c3ded8e4e8bdfe527260ceeec997d",
+			"revisionTime": "2017-01-16T11:32:47Z"
+		},
+		{
+			"checksumSHA1": "i9VUhzmNRF4jDTwpDuAgoAru+k0=",
+			"path": "github.com/juju/utils/set",
+			"revision": "e5eb4d99a51c3ded8e4e8bdfe527260ceeec997d",
+			"revisionTime": "2017-01-16T11:32:47Z"
+		},
+		{
+			"checksumSHA1": "DqG7AvEBxxWLRzQPgLM69dUpkL0=",
+			"path": "github.com/juju/version",
+			"revision": "1f41e27e54f21acccf9b2dddae063a782a8a7ceb",
+			"revisionTime": "2016-10-31T05:19:06Z"
+		},
+		{
+			"checksumSHA1": "pNria08/hqW7nnWb0erRBMdJU3M=",
+			"path": "github.com/kelseyhightower/envconfig",
+			"revision": "4069f29f08928c54bcb1fdf632b31b1bb54b4fdb",
+			"revisionTime": "2017-01-13T19:16:37Z"
+		},
+		{
+			"checksumSHA1": "0nXta8PKYGYicKWeydn0A7ELKcs=",
+			"path": "github.com/lunixbochs/vtclean",
+			"revision": "4fbf7632a2c6d3fbdb9931439bdbbeded02cbe36",
+			"revisionTime": "2016-01-25T03:51:06Z"
+		},
+		{
+			"checksumSHA1": "I4njd26dG5hxFT2nawuByM4pxzY=",
+			"path": "github.com/mattn/go-colorable",
+			"revision": "d228849504861217f796da67fae4f6e347643f15",
+			"revisionTime": "2016-11-03T16:00:40Z"
+		},
+		{
+			"checksumSHA1": "xZuhljnmBysJPta/lMyYmJdujCg=",
+			"path": "github.com/mattn/go-isatty",
+			"revision": "30a891c33c7cde7b02a981314b4228ec99380cca",
+			"revisionTime": "2016-11-23T14:36:37Z"
+		},
+		{
+			"checksumSHA1": "1MGpGDQqnUoRpv7VEcQrXOBydXE=",
+			"path": "golang.org/x/crypto/pbkdf2",
+			"revision": "b8a2a83acfe6e6770b75de42d5ff4c67596675c0",
+			"revisionTime": "2017-01-13T19:21:00Z"
+		},
+		{
+			"checksumSHA1": "TuN733jTVL7T0QVzNx6Tn6BAgrg=",
+			"path": "gopkg.in/juju/names.v2",
+			"revision": "0847c26d322a121e52614f969fb82eae2820c715",
+			"revisionTime": "2016-11-02T13:43:03Z"
+		},
+		{
+			"checksumSHA1": "YsB2DChSV9HxdzHaKATllAUKWSI=",
+			"path": "gopkg.in/mgo.v2/bson",
+			"revision": "3f83fa5005286a7fe593b055f0d7771a7dce4655",
+			"revisionTime": "2016-08-18T02:01:20Z"
+		},
+		{
+			"checksumSHA1": "XQsrqoNT1U0KzLxOFcAZVvqhLfk=",
+			"path": "gopkg.in/mgo.v2/internal/json",
+			"revision": "3f83fa5005286a7fe593b055f0d7771a7dce4655",
+			"revisionTime": "2016-08-18T02:01:20Z"
+		},
+		{
+			"checksumSHA1": "12GqsW8PiRPnezDDy0v4brZrndM=",
+			"path": "gopkg.in/yaml.v2",
+			"revision": "a5b47d31c556af34a302ce5d659e6fea44d90de0",
+			"revisionTime": "2016-09-28T15:37:09Z"
+		}
+	],
+	"rootPath": "gerrit.opencord.com/maas/switchq"
+}