cord-776 create build / runtime containers for automation uservices

Change-Id: I246973192adef56a250ffe93a5f65fff488840c1
diff --git a/automation/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/automation/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000..f2c2bc2
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,66 @@
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/automation/vendor/github.com/Sirupsen/logrus/LICENSE b/automation/vendor/github.com/Sirupsen/logrus/LICENSE
new file mode 100644
index 0000000..f090cb4
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Simon Eskildsen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/automation/vendor/github.com/Sirupsen/logrus/README.md b/automation/vendor/github.com/Sirupsen/logrus/README.md
new file mode 100644
index 0000000..206c746
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/README.md
@@ -0,0 +1,433 @@
+# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/>&nbsp;[![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus)&nbsp;[![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
+
+**Seeing weird case-sensitive problems?** See [this
+issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
+This change has been reverted. I apologize for causing this. I greatly
+underestimated the impact this would have. Logrus strives for stability and
+backwards compatibility and failed to provide that.
+
+Logrus is a structured logger for Go (golang), completely API compatible with
+the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+Nicely color-coded in development (when a TTY is attached, otherwise just
+plain text):
+
+![Colored](http://i.imgur.com/PY7qMwd.png)
+
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
+or Splunk:
+
+```json
+{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
+ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
+
+{"level":"warning","msg":"The group's number increased tremendously!",
+"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
+"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
+
+{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
+"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
+
+{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
+"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
+```
+
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
+attached, the output is compatible with the
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
+
+```text
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
+```
+
+#### Example
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+```go
+package main
+
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+  }).Info("A walrus appears")
+}
+```
+
+Note that it's completely API-compatible with the stdlib logger, so you can
+replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+and you'll now have the flexibility of Logrus. You can customize it all you
+want:
+
+```go
+package main
+
+import (
+  "os"
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // Log as JSON instead of the default ASCII formatter.
+  log.SetFormatter(&log.JSONFormatter{})
+
+  // Output to stdout instead of the default stderr, could also be a file.
+  log.SetOutput(os.Stdout)
+
+  // Only log the warning severity or above.
+  log.SetLevel(log.WarnLevel)
+}
+
+func main() {
+  log.WithFields(log.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 122,
+  }).Warn("The group's number increased tremendously!")
+
+  log.WithFields(log.Fields{
+    "omg":    true,
+    "number": 100,
+  }).Fatal("The ice breaks!")
+
+  // A common pattern is to re-use fields between logging statements by re-using
+  // the logrus.Entry returned from WithFields()
+  contextLogger := log.WithFields(log.Fields{
+    "common": "this is a common field",
+    "other": "I also should be logged always",
+  })
+
+  contextLogger.Info("I'll be logged with common and other field")
+  contextLogger.Info("Me too")
+}
+```
+
+For more advanced usage such as logging to multiple locations from the same
+application, you can also create an instance of the `logrus` Logger:
+
+```go
+package main
+
+import (
+  "os"
+
+  "github.com/Sirupsen/logrus"
+)
+
+// Create a new instance of the logger. You can have any number of instances.
+var log = logrus.New()
+
+func main() {
+  // The API for setting attributes is a little different than the package level
+  // exported logger. See Godoc.
+  log.Out = os.Stderr
+
+  log.WithFields(logrus.Fields{
+    "animal": "walrus",
+    "size":   10,
+  }).Info("A group of walrus emerges from the ocean")
+}
+```
+
+#### Fields
+
+Logrus encourages careful, structured logging through logging fields instead of
+long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
+to send event %s to topic %s with key %d")`, you should log the much more
+discoverable:
+
+```go
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key": key,
+}).Fatal("Failed to send event")
+```
+
+We've found this API forces you to think about logging in a way that produces
+much more useful logging messages. We've been in countless situations where just
+a single added field to a log statement that was already there would've saved us
+hours. The `WithFields` call is optional.
+
+In general, with Logrus, using any of the `printf`-family functions should be
+seen as a hint that you should add a field; however, you can still use the
+`printf`-family functions with Logrus.
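+
+For instance, a quick sketch of the two styles, using the same hypothetical
+`event`, `topic` and `key` variables as in the example above:
+
+```go
+// printf-style: the values are buried inside the message string
+log.Warnf("Failed to send event %s to topic %s with key %d", event, topic, key)
+
+// field-style: the same data is attached as structured, queryable fields
+log.WithFields(log.Fields{
+  "event": event,
+  "topic": topic,
+  "key":   key,
+}).Warn("Failed to send event")
+```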
+
+#### Hooks
+
+You can add hooks for logging levels. For example, to send errors to an exception
+tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
+multiple places simultaneously, e.g. syslog.
+
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+  "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
+  logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
+  "log/syslog"
+)
+
+func init() {
+
+  // Use the Airbrake hook to report errors that have Error severity or above to
+  // an exception tracker. You can create custom hooks, see the Hooks section.
+  log.AddHook(airbrake.NewHook(123, "xyz", "production"))
+
+  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
+  if err != nil {
+    log.Error("Unable to connect to local syslog daemon")
+  } else {
+    log.AddHook(hook)
+  }
+}
+```
+Note: The syslog hook also supports connecting to a local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).
+
+| Hook  | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
+
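+You can also write your own hook by implementing the `Hook` interface, which
+only requires `Levels()` and `Fire(*Entry)`. A minimal sketch (the hook type and
+its behavior here are purely illustrative):
+
+```go
+import log "github.com/Sirupsen/logrus"
+
+// ErrorCounterHook is a hypothetical hook that counts entries logged at
+// error severity or above.
+type ErrorCounterHook struct {
+  count int
+}
+
+// Levels restricts the hook to error, fatal and panic entries.
+func (h *ErrorCounterHook) Levels() []log.Level {
+  return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
+}
+
+// Fire is called by logrus for every entry logged at one of those levels.
+func (h *ErrorCounterHook) Fire(entry *log.Entry) error {
+  h.count++
+  return nil
+}
+```
+
+Register it with `log.AddHook(&ErrorCounterHook{})`, just like the built-in
+hooks above.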
+
+#### Level logging
+
+Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
+
+```go
+log.Debug("Useful debugging information.")
+log.Info("Something noteworthy happened!")
+log.Warn("You should probably take a look at this.")
+log.Error("Something failed but I'm not quitting.")
+// Calls os.Exit(1) after logging
+log.Fatal("Bye.")
+// Calls panic() after logging
+log.Panic("I'm bailing.")
+```
+
+You can set the logging level on a `Logger`; it will then only log entries with
+that severity or anything above it:
+
+```go
+// Will log anything that is info or above (warn, error, fatal, panic). Default.
+log.SetLevel(log.InfoLevel)
+```
+
+It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
+environment if your application has that.
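+
+For example, a minimal sketch that keys the level off an environment variable
+(the `DEBUG` variable name here is just an assumption):
+
+```go
+// enable verbose logging only when DEBUG=true is set in the environment
+if os.Getenv("DEBUG") == "true" {
+  log.SetLevel(log.DebugLevel)
+}
+```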
+
+#### Entries
+
+Besides the fields added with `WithField` or `WithFields` some fields are
+automatically added to all logging events:
+
+1. `time`. The timestamp when the entry was created.
+2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
+   the `AddFields` call. E.g. `Failed to send event.`
+3. `level`. The logging level. E.g. `info`.
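+
+For instance, a single call like the sketch below ends up with all three of
+those default fields plus the one you added (the timestamp shown is only
+illustrative):
+
+```go
+log.WithFields(log.Fields{"event": "evt-1"}).Info("Failed to send event.")
+// with the default TextFormatter this prints roughly:
+// time="2015-09-07T08:48:33Z" level=info msg="Failed to send event." event=evt-1
+```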
+
+#### Environments
+
+Logrus has no notion of environment.
+
+If you wish for hooks and formatters to only be used in specific environments,
+you should handle that yourself. For example, if your application has a global
+variable `Environment`, which is a string representation of the environment you
+could do:
+
+```go
+import (
+  log "github.com/Sirupsen/logrus"
+)
+
+func init() {
+  // do something here to set environment depending on an environment variable
+  // or command-line flag
+  if Environment == "production" {
+    log.SetFormatter(&log.JSONFormatter{})
+  } else {
+    // The TextFormatter is default, you don't actually have to do this.
+    log.SetFormatter(&log.TextFormatter{})
+  }
+}
+```
+
+This configuration is how `logrus` was intended to be used, but JSON in
+production is mostly only useful if you do log aggregation with tools like
+Splunk or Logstash.
+
+#### Formatters
+
+The built-in logging formatters are:
+
+* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
+  without colors.
+  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
+    field to `true`. To force no colored output even if there is a TTY, set the
+    `DisableColors` field to `true`.
+* `logrus.JSONFormatter`. Logs fields as JSON.
+
+Third party logging formatters:
+
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+
+You can define your formatter by implementing the `Formatter` interface,
+requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
+`Fields` type (`map[string]interface{}`) with all your fields as well as the
+default ones (see Entries section above):
+
+```go
+type MyJSONFormatter struct {
+}
+
+log.SetFormatter(new(MyJSONFormatter))
+
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
+  // Note this doesn't include Time, Level and Message which are available on
+  // the Entry. Consult `godoc` on information about those fields or read the
+  // source of the official loggers.
+  serialized, err := json.Marshal(entry.Data)
+  if err != nil {
+    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+  }
+  return append(serialized, '\n'), nil
+}
+```
+
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+    // create a stdlib log.Logger that writes to
+    // logrus.Logger.
+    ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
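+
+Since it is a plain `io.Writer`, anything that writes lines can be pointed at
+it; a minimal sketch:
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+// this line ends up as a regular logrus entry at level info
+fmt.Fprintln(w, "something happened")
+```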
+
+#### Rotation
+
+Log rotation is not provided with Logrus. Log rotation should be done by an
+external program (like `logrotate(8)`) that can compress and delete old log
+entries. It should not be a feature of the application-level logger.
+
+#### Tools
+
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus Mate is a tool for Logrus to manage loggers: you can set a logger's level, hooks and formatter from a config file, so the logger is generated with a different config for each environment.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|A helper around Logrus that wraps spf13/Viper to load configuration with fangs, and simplifies Logrus configuration using some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example)|
+
+#### Testing
+
+Logrus has a built-in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for an existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+import (
+  "testing"
+
+  "github.com/Sirupsen/logrus"
+  "github.com/Sirupsen/logrus/hooks/test"
+  "github.com/stretchr/testify/assert"
+)
+
+func TestSomething(t *testing.T) {
+  logger, hook := test.NewNullLogger()
+  logger.Error("Hello error")
+
+  assert.Equal(t, 1, len(hook.Entries))
+  assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+  assert.Equal(t, "Hello error", hook.LastEntry().Message)
+
+  hook.Reset()
+  assert.Nil(t, hook.LastEntry())
+}
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs an `os.Exit(1)`. This behavior may be helpful if callers need
+to shut down gracefully. Unlike a `panic("Something went wrong...")` call, which can be intercepted with a deferred `recover`, a call to `os.Exit(1)` cannot be intercepted.
+
+```
+...
+handler := func() {
+  // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default the Logger is protected by a mutex for concurrent writes; this mutex is held while calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable it (see the sketch after this list).
+
+Situations where locking is not needed include:
+
+* You have no hooks registered, or calling the hooks is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+  1) logger.Out is protected by locks.
+
+  2) logger.Out is an os.File handle opened with the `O_APPEND` flag, and every write is smaller than 4k. (This allows multi-thread/multi-process writing.)
+
+     (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
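+
+A minimal sketch of that escape hatch, assuming the conditions above hold for
+your output and hooks:
+
+```go
+logger := logrus.New()
+// Out is an O_APPEND file and no hooks need synchronization, so the
+// internal mutex can be disabled.
+logger.SetNoLock()
+```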
diff --git a/automation/vendor/github.com/Sirupsen/logrus/alt_exit.go b/automation/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000..b4c9e84
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://bitbucket.org/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+	"fmt"
+	"os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+	defer func() {
+		if err := recover(); err != nil {
+			fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+		}
+	}()
+
+	handler()
+}
+
+func runHandlers() {
+	for _, handler := range handlers {
+		runHandler(handler)
+	}
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+	runHandlers()
+	os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to gracefully shut down. An example use case could be
+// closing database connections, or sending an alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+	handlers = append(handlers, handler)
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/doc.go b/automation/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 0000000..dddd5f8
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+  package main
+
+  import (
+    log "github.com/Sirupsen/logrus"
+  )
+
+  func main() {
+    log.WithFields(log.Fields{
+      "animal": "walrus",
+      "number": 1,
+      "size":   10,
+    }).Info("A walrus appears")
+  }
+
+Output:
+  time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/automation/vendor/github.com/Sirupsen/logrus/entry.go b/automation/vendor/github.com/Sirupsen/logrus/entry.go
new file mode 100644
index 0000000..4edbe7a
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/entry.go
@@ -0,0 +1,275 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"sync"
+	"time"
+)
+
+var bufferPool *sync.Pool
+
+func init() {
+	bufferPool = &sync.Pool{
+		New: func() interface{} {
+			return new(bytes.Buffer)
+		},
+	}
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
+// An entry is the final or intermediate Logrus logging entry. It contains all
+// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
+// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
+// passed around as much as you wish to avoid field duplication.
+type Entry struct {
+	Logger *Logger
+
+	// Contains all the fields set by the user.
+	Data Fields
+
+	// Time at which the log entry was created
+	Time time.Time
+
+	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
+	Level Level
+
+	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
+	Message string
+
+	// When formatter is called in entry.log(), a Buffer may be set to the entry
+	Buffer *bytes.Buffer
+}
+
+func NewEntry(logger *Logger) *Entry {
+	return &Entry{
+		Logger: logger,
+		// Default is three fields, give a little extra room
+		Data: make(Fields, 5),
+	}
+}
+
+// Returns the string representation from the reader and ultimately the
+// formatter.
+func (entry *Entry) String() (string, error) {
+	serialized, err := entry.Logger.Formatter.Format(entry)
+	if err != nil {
+		return "", err
+	}
+	str := string(serialized)
+	return str, nil
+}
+
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+	return entry.WithField(ErrorKey, err)
+}
+
+// Add a single field to the Entry.
+func (entry *Entry) WithField(key string, value interface{}) *Entry {
+	return entry.WithFields(Fields{key: value})
+}
+
+// Add a map of fields to the Entry.
+func (entry *Entry) WithFields(fields Fields) *Entry {
+	data := make(Fields, len(entry.Data)+len(fields))
+	for k, v := range entry.Data {
+		data[k] = v
+	}
+	for k, v := range fields {
+		data[k] = v
+	}
+	return &Entry{Logger: entry.Logger, Data: data}
+}
+
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+	var buffer *bytes.Buffer
+	entry.Time = time.Now()
+	entry.Level = level
+	entry.Message = msg
+
+	if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
+		entry.Logger.mu.Unlock()
+	}
+	buffer = bufferPool.Get().(*bytes.Buffer)
+	buffer.Reset()
+	defer bufferPool.Put(buffer)
+	entry.Buffer = buffer
+	serialized, err := entry.Logger.Formatter.Format(&entry)
+	entry.Buffer = nil
+	if err != nil {
+		entry.Logger.mu.Lock()
+		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
+		entry.Logger.mu.Unlock()
+	} else {
+		entry.Logger.mu.Lock()
+		_, err = entry.Logger.Out.Write(serialized)
+		if err != nil {
+			fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+		}
+		entry.Logger.mu.Unlock()
+	}
+
+	// To avoid Entry#log() returning a value that only would make sense for
+	// panic() to use in Entry#Panic(), we avoid the allocation by checking
+	// directly here.
+	if level <= PanicLevel {
+		panic(&entry)
+	}
+}
+
+func (entry *Entry) Debug(args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.log(DebugLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Print(args ...interface{}) {
+	entry.Info(args...)
+}
+
+func (entry *Entry) Info(args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.log(InfoLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warn(args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.log(WarnLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Warning(args ...interface{}) {
+	entry.Warn(args...)
+}
+
+func (entry *Entry) Error(args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.log(ErrorLevel, fmt.Sprint(args...))
+	}
+}
+
+func (entry *Entry) Fatal(args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.log(FatalLevel, fmt.Sprint(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panic(args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.log(PanicLevel, fmt.Sprint(args...))
+	}
+	panic(fmt.Sprint(args...))
+}
+
+// Entry Printf family functions
+
+func (entry *Entry) Debugf(format string, args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.Debug(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Infof(format string, args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.Info(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Printf(format string, args ...interface{}) {
+	entry.Infof(format, args...)
+}
+
+func (entry *Entry) Warnf(format string, args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.Warn(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Warningf(format string, args ...interface{}) {
+	entry.Warnf(format, args...)
+}
+
+func (entry *Entry) Errorf(format string, args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.Error(fmt.Sprintf(format, args...))
+	}
+}
+
+func (entry *Entry) Fatalf(format string, args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.Fatal(fmt.Sprintf(format, args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicf(format string, args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.Panic(fmt.Sprintf(format, args...))
+	}
+}
+
+// Entry Println family functions
+
+func (entry *Entry) Debugln(args ...interface{}) {
+	if entry.Logger.Level >= DebugLevel {
+		entry.Debug(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Infoln(args ...interface{}) {
+	if entry.Logger.Level >= InfoLevel {
+		entry.Info(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Println(args ...interface{}) {
+	entry.Infoln(args...)
+}
+
+func (entry *Entry) Warnln(args ...interface{}) {
+	if entry.Logger.Level >= WarnLevel {
+		entry.Warn(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Warningln(args ...interface{}) {
+	entry.Warnln(args...)
+}
+
+func (entry *Entry) Errorln(args ...interface{}) {
+	if entry.Logger.Level >= ErrorLevel {
+		entry.Error(entry.sprintlnn(args...))
+	}
+}
+
+func (entry *Entry) Fatalln(args ...interface{}) {
+	if entry.Logger.Level >= FatalLevel {
+		entry.Fatal(entry.sprintlnn(args...))
+	}
+	Exit(1)
+}
+
+func (entry *Entry) Panicln(args ...interface{}) {
+	if entry.Logger.Level >= PanicLevel {
+		entry.Panic(entry.sprintlnn(args...))
+	}
+}
+
+// Sprintlnn => Sprint no newline. This is to get the behavior of
+// fmt.Sprintln, where spaces are always added between operands regardless of
+// their type. Instead of vendoring the Sprintln implementation to spare a
+// string allocation, we do the simplest thing.
+func (entry *Entry) sprintlnn(args ...interface{}) string {
+	msg := fmt.Sprintln(args...)
+	return msg[:len(msg)-1]
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/exported.go b/automation/vendor/github.com/Sirupsen/logrus/exported.go
new file mode 100644
index 0000000..9a0120a
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/exported.go
@@ -0,0 +1,193 @@
+package logrus
+
+import (
+	"io"
+)
+
+var (
+	// std is the name of the standard logger in stdlib `log`
+	std = New()
+)
+
+func StandardLogger() *Logger {
+	return std
+}
+
+// SetOutput sets the standard logger output.
+func SetOutput(out io.Writer) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Out = out
+}
+
+// SetFormatter sets the standard logger formatter.
+func SetFormatter(formatter Formatter) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Formatter = formatter
+}
+
+// SetLevel sets the standard logger level.
+func SetLevel(level Level) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Level = level
+}
+
+// GetLevel returns the standard logger level.
+func GetLevel() Level {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	return std.Level
+}
+
+// AddHook adds a hook to the standard logger hooks.
+func AddHook(hook Hook) {
+	std.mu.Lock()
+	defer std.mu.Unlock()
+	std.Hooks.Add(hook)
+}
+
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+	return std.WithField(ErrorKey, err)
+}
+
+// WithField creates an entry from the standard logger and adds a field to
+// it. If you want multiple fields, use `WithFields`.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithField(key string, value interface{}) *Entry {
+	return std.WithField(key, value)
+}
+
+// WithFields creates an entry from the standard logger and adds multiple
+// fields to it. This is simply a helper for `WithField`, invoking it
+// once for each field.
+//
+// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
+// or Panic on the Entry it returns.
+func WithFields(fields Fields) *Entry {
+	return std.WithFields(fields)
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func Debug(args ...interface{}) {
+	std.Debug(args...)
+}
+
+// Print logs a message at level Info on the standard logger.
+func Print(args ...interface{}) {
+	std.Print(args...)
+}
+
+// Info logs a message at level Info on the standard logger.
+func Info(args ...interface{}) {
+	std.Info(args...)
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func Warn(args ...interface{}) {
+	std.Warn(args...)
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func Warning(args ...interface{}) {
+	std.Warning(args...)
+}
+
+// Error logs a message at level Error on the standard logger.
+func Error(args ...interface{}) {
+	std.Error(args...)
+}
+
+// Panic logs a message at level Panic on the standard logger.
+func Panic(args ...interface{}) {
+	std.Panic(args...)
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func Fatal(args ...interface{}) {
+	std.Fatal(args...)
+}
+
+// Debugf logs a message at level Debug on the standard logger.
+func Debugf(format string, args ...interface{}) {
+	std.Debugf(format, args...)
+}
+
+// Printf logs a message at level Info on the standard logger.
+func Printf(format string, args ...interface{}) {
+	std.Printf(format, args...)
+}
+
+// Infof logs a message at level Info on the standard logger.
+func Infof(format string, args ...interface{}) {
+	std.Infof(format, args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func Warnf(format string, args ...interface{}) {
+	std.Warnf(format, args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func Warningf(format string, args ...interface{}) {
+	std.Warningf(format, args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func Errorf(format string, args ...interface{}) {
+	std.Errorf(format, args...)
+}
+
+// Panicf logs a message at level Panic on the standard logger.
+func Panicf(format string, args ...interface{}) {
+	std.Panicf(format, args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func Fatalf(format string, args ...interface{}) {
+	std.Fatalf(format, args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger.
+func Debugln(args ...interface{}) {
+	std.Debugln(args...)
+}
+
+// Println logs a message at level Info on the standard logger.
+func Println(args ...interface{}) {
+	std.Println(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger.
+func Infoln(args ...interface{}) {
+	std.Infoln(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger.
+func Warnln(args ...interface{}) {
+	std.Warnln(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger.
+func Warningln(args ...interface{}) {
+	std.Warningln(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger.
+func Errorln(args ...interface{}) {
+	std.Errorln(args...)
+}
+
+// Panicln logs a message at level Panic on the standard logger.
+func Panicln(args ...interface{}) {
+	std.Panicln(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger.
+func Fatalln(args ...interface{}) {
+	std.Fatalln(args...)
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/formatter.go b/automation/vendor/github.com/Sirupsen/logrus/formatter.go
new file mode 100644
index 0000000..b5fbe93
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -0,0 +1,45 @@
+package logrus
+
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
+// The Formatter interface is used to implement a custom Formatter. It takes an
+// `Entry`. It exposes all the fields, including the default ones:
+//
+// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ...
+// * `entry.Data["time"]`. The timestamp.
+// * `entry.Data["level"]`. The level the entry was logged at.
+//
+// Any additional fields added with `WithField` or `WithFields` are also in
+// `entry.Data`. Format is expected to return an array of bytes which are then
+// logged to `logger.Out`.
+type Formatter interface {
+	Format(*Entry) ([]byte, error)
+}
+
+// This is to not silently overwrite `time`, `msg` and `level` fields when
+// dumping it. If this code wasn't there doing:
+//
+//  logrus.WithField("level", 1).Info("hello")
+//
+// Would just silently drop the user provided level. Instead with this code
+// it'll be logged as:
+//
+//  {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
+//
+// It's not exported because it's still using Data in an opinionated way. It's to
+// avoid code duplication between the two default formatters.
+func prefixFieldClashes(data Fields) {
+	if t, ok := data["time"]; ok {
+		data["fields.time"] = t
+	}
+
+	if m, ok := data["msg"]; ok {
+		data["fields.msg"] = m
+	}
+
+	if l, ok := data["level"]; ok {
+		data["fields.level"] = l
+	}
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/hooks.go b/automation/vendor/github.com/Sirupsen/logrus/hooks.go
new file mode 100644
index 0000000..3f151cd
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -0,0 +1,34 @@
+package logrus
+
+// A hook to be fired when logging on the logging levels returned from
+// `Levels()` on your implementation of the interface. Note that this is not
+// fired in a goroutine or a channel with workers, you should handle such
+// functionality yourself if your call is non-blocking and you don't wish for
+// the logging calls for levels returned from `Levels()` to block.
+type Hook interface {
+	Levels() []Level
+	Fire(*Entry) error
+}
+
+// Internal type for storing the hooks on a logger instance.
+type LevelHooks map[Level][]Hook
+
+// Add a hook to an instance of logger. This is called with
+// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
+func (hooks LevelHooks) Add(hook Hook) {
+	for _, level := range hook.Levels() {
+		hooks[level] = append(hooks[level], hook)
+	}
+}
+
+// Fire all the hooks for the passed level. Used by `entry.log` to fire
+// appropriate hooks for a log entry.
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
+	for _, hook := range hooks[level] {
+		if err := hook.Fire(entry); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/json_formatter.go b/automation/vendor/github.com/Sirupsen/logrus/json_formatter.go
new file mode 100644
index 0000000..266554e
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -0,0 +1,74 @@
+package logrus
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+type fieldKey string
+type FieldMap map[fieldKey]string
+
+const (
+	FieldKeyMsg   = "msg"
+	FieldKeyLevel = "level"
+	FieldKeyTime  = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+	if k, ok := f[key]; ok {
+		return k
+	}
+
+	return string(key)
+}
+
+type JSONFormatter struct {
+	// TimestampFormat sets the format used for marshaling timestamps.
+	TimestampFormat string
+
+	// DisableTimestamp allows disabling automatic timestamps in output
+	DisableTimestamp bool
+
+	// FieldMap allows users to customize the names of keys for various fields.
+	// As an example:
+	// formatter := &JSONFormatter{
+	//   	FieldMap: FieldMap{
+	// 		 FieldKeyTime: "@timestamp",
+	// 		 FieldKeyLevel: "@level",
+	// 		 FieldKeyMsg: "@message",
+	//    },
+	// }
+	FieldMap FieldMap
+}
+
+func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+	data := make(Fields, len(entry.Data)+3)
+	for k, v := range entry.Data {
+		switch v := v.(type) {
+		case error:
+			// Otherwise errors are ignored by `encoding/json`
+			// https://github.com/Sirupsen/logrus/issues/137
+			data[k] = v.Error()
+		default:
+			data[k] = v
+		}
+	}
+	prefixFieldClashes(data)
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+
+	if !f.DisableTimestamp {
+		data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+	}
+	data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+	data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
+
+	serialized, err := json.Marshal(data)
+	if err != nil {
+		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
+	}
+	return append(serialized, '\n'), nil
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/logger.go b/automation/vendor/github.com/Sirupsen/logrus/logger.go
new file mode 100644
index 0000000..b769f3d
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/logger.go
@@ -0,0 +1,308 @@
+package logrus
+
+import (
+	"io"
+	"os"
+	"sync"
+)
+
+type Logger struct {
+	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
+	// file, or leave it default which is `os.Stderr`. You can also set this to
+	// something more adventurous, such as logging to Kafka.
+	Out io.Writer
+	// Hooks for the logger instance. These allow firing events based on logging
+	// levels and log entries. For example, to send errors to an error tracking
+	// service, log to StatsD or dump the core on fatal errors.
+	Hooks LevelHooks
+	// All log entries pass through the formatter before logged to Out. The
+	// included formatters are `TextFormatter` and `JSONFormatter` for which
+	// TextFormatter is the default. In development (when a TTY is attached) it
+	// logs with colors, but to a file it wouldn't. You can easily implement your
+	// own that implements the `Formatter` interface, see the `README` or included
+	// formatters for examples.
+	Formatter Formatter
+	// The logging level the logger should log at. This is typically (and defaults
+	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
+	// logged. `logrus.Debug` is useful in development and debugging.
+	Level Level
+	// Used to sync writing to the log. Locking is enabled by Default
+	mu MutexWrap
+	// Reusable empty entry
+	entryPool sync.Pool
+}
+
+type MutexWrap struct {
+	lock     sync.Mutex
+	disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+	if !mw.disabled {
+		mw.lock.Lock()
+	}
+}
+
+func (mw *MutexWrap) Unlock() {
+	if !mw.disabled {
+		mw.lock.Unlock()
+	}
+}
+
+func (mw *MutexWrap) Disable() {
+	mw.disabled = true
+}
+
+// Creates a new logger. Configuration should be set by changing `Formatter`,
+// `Out` and `Hooks` directly on the default logger instance. You can also just
+// instantiate your own:
+//
+//    var log = &Logger{
+//      Out: os.Stderr,
+//      Formatter: new(JSONFormatter),
+//      Hooks: make(LevelHooks),
+//      Level: logrus.DebugLevel,
+//    }
+//
+// It's recommended to make this a global instance called `log`.
+func New() *Logger {
+	return &Logger{
+		Out:       os.Stderr,
+		Formatter: new(TextFormatter),
+		Hooks:     make(LevelHooks),
+		Level:     InfoLevel,
+	}
+}
+
+func (logger *Logger) newEntry() *Entry {
+	entry, ok := logger.entryPool.Get().(*Entry)
+	if ok {
+		return entry
+	}
+	return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+	logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry, note that it doesn't log until you call
+// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
+// If you want multiple fields, use `WithFields`.
+func (logger *Logger) WithField(key string, value interface{}) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithField(key, value)
+}
+
+// Adds a struct of fields to the log entry. All it does is call `WithField` for
+// each `Field`.
+func (logger *Logger) WithFields(fields Fields) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithFields(fields)
+}
+
+// Add an error as single field to the log entry.  All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+	entry := logger.newEntry()
+	defer logger.releaseEntry(entry)
+	return entry.WithError(err)
+}
+
+func (logger *Logger) Debugf(format string, args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infof(format string, args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infof(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Printf(format string, args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Printf(format, args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnf(format string, args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningf(format string, args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorf(format string, args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalf(format string, args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalf(format, args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicf(format string, args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicf(format, args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debug(args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debug(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Info(args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Info(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Print(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Info(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warn(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warning(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warn(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Error(args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Error(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatal(args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatal(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panic(args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panic(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Debugln(args ...interface{}) {
+	if logger.Level >= DebugLevel {
+		entry := logger.newEntry()
+		entry.Debugln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Infoln(args ...interface{}) {
+	if logger.Level >= InfoLevel {
+		entry := logger.newEntry()
+		entry.Infoln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Println(args ...interface{}) {
+	entry := logger.newEntry()
+	entry.Println(args...)
+	logger.releaseEntry(entry)
+}
+
+func (logger *Logger) Warnln(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Warningln(args ...interface{}) {
+	if logger.Level >= WarnLevel {
+		entry := logger.newEntry()
+		entry.Warnln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Errorln(args ...interface{}) {
+	if logger.Level >= ErrorLevel {
+		entry := logger.newEntry()
+		entry.Errorln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+func (logger *Logger) Fatalln(args ...interface{}) {
+	if logger.Level >= FatalLevel {
+		entry := logger.newEntry()
+		entry.Fatalln(args...)
+		logger.releaseEntry(entry)
+	}
+	Exit(1)
+}
+
+func (logger *Logger) Panicln(args ...interface{}) {
+	if logger.Level >= PanicLevel {
+		entry := logger.newEntry()
+		entry.Panicln(args...)
+		logger.releaseEntry(entry)
+	}
+}
+
+// When the file is opened in append mode, it's safe to
+// write to it concurrently (for messages within 4k on Linux).
+// In these cases the user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+	logger.mu.Disable()
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/logrus.go b/automation/vendor/github.com/Sirupsen/logrus/logrus.go
new file mode 100644
index 0000000..e596691
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -0,0 +1,143 @@
+package logrus
+
+import (
+	"fmt"
+	"log"
+	"strings"
+)
+
+// Fields type, used to pass to `WithFields`.
+type Fields map[string]interface{}
+
+// Level type
+type Level uint8
+
+// Convert the Level to a string. E.g. PanicLevel becomes "panic".
+func (level Level) String() string {
+	switch level {
+	case DebugLevel:
+		return "debug"
+	case InfoLevel:
+		return "info"
+	case WarnLevel:
+		return "warning"
+	case ErrorLevel:
+		return "error"
+	case FatalLevel:
+		return "fatal"
+	case PanicLevel:
+		return "panic"
+	}
+
+	return "unknown"
+}
+
+// ParseLevel takes a string level and returns the Logrus log level constant.
+func ParseLevel(lvl string) (Level, error) {
+	switch strings.ToLower(lvl) {
+	case "panic":
+		return PanicLevel, nil
+	case "fatal":
+		return FatalLevel, nil
+	case "error":
+		return ErrorLevel, nil
+	case "warn", "warning":
+		return WarnLevel, nil
+	case "info":
+		return InfoLevel, nil
+	case "debug":
+		return DebugLevel, nil
+	}
+
+	var l Level
+	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
+}
+
+// A constant exposing all logging levels
+var AllLevels = []Level{
+	PanicLevel,
+	FatalLevel,
+	ErrorLevel,
+	WarnLevel,
+	InfoLevel,
+	DebugLevel,
+}
+
+// These are the different logging levels. You can set the logging level to log
+// on your instance of logger, obtained with `logrus.New()`.
+const (
+	// PanicLevel level, highest level of severity. Logs and then calls panic with the
+	// message passed to Debug, Info, ...
+	PanicLevel Level = iota
+	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
+	// logging level is set to Panic.
+	FatalLevel
+	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
+	// Commonly used for hooks to send errors to an error tracking service.
+	ErrorLevel
+	// WarnLevel level. Non-critical entries that deserve eyes.
+	WarnLevel
+	// InfoLevel level. General operational entries about what's going on inside the
+	// application.
+	InfoLevel
+	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
+	DebugLevel
+)
+
+// Won't compile if StdLogger can't be realized by a log.Logger
+var (
+	_ StdLogger = &log.Logger{}
+	_ StdLogger = &Entry{}
+	_ StdLogger = &Logger{}
+)
+
+// StdLogger is what your logrus-enabled library should take, that way
+// it'll accept a stdlib logger and a logrus logger. There's no standard
+// interface, this is the closest we get, unfortunately.
+type StdLogger interface {
+	Print(...interface{})
+	Printf(string, ...interface{})
+	Println(...interface{})
+
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+	Fatalln(...interface{})
+
+	Panic(...interface{})
+	Panicf(string, ...interface{})
+	Panicln(...interface{})
+}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+	WithField(key string, value interface{}) *Entry
+	WithFields(fields Fields) *Entry
+	WithError(err error) *Entry
+
+	Debugf(format string, args ...interface{})
+	Infof(format string, args ...interface{})
+	Printf(format string, args ...interface{})
+	Warnf(format string, args ...interface{})
+	Warningf(format string, args ...interface{})
+	Errorf(format string, args ...interface{})
+	Fatalf(format string, args ...interface{})
+	Panicf(format string, args ...interface{})
+
+	Debug(args ...interface{})
+	Info(args ...interface{})
+	Print(args ...interface{})
+	Warn(args ...interface{})
+	Warning(args ...interface{})
+	Error(args ...interface{})
+	Fatal(args ...interface{})
+	Panic(args ...interface{})
+
+	Debugln(args ...interface{})
+	Infoln(args ...interface{})
+	Println(args ...interface{})
+	Warnln(args ...interface{})
+	Warningln(args ...interface{})
+	Errorln(args ...interface{})
+	Fatalln(args ...interface{})
+	Panicln(args ...interface{})
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/automation/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
new file mode 100644
index 0000000..1960169
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package logrus
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	return true
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/automation/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000..5f6be4d
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/automation/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/automation/vendor/github.com/Sirupsen/logrus/terminal_linux.go
new file mode 100644
index 0000000..308160c
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -0,0 +1,14 @@
+// Based on ssh/terminal:
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TCGETS
+
+type Termios syscall.Termios
diff --git a/automation/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/automation/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
new file mode 100644
index 0000000..329038f
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -0,0 +1,22 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var termios Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/automation/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 0000000..a3c6f6e
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris,!appengine
+
+package logrus
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if stdout's file descriptor is a terminal.
+func IsTerminal() bool {
+	_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+	return err == nil
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/automation/vendor/github.com/Sirupsen/logrus/terminal_windows.go
new file mode 100644
index 0000000..3727e8a
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -0,0 +1,27 @@
+// Based on ssh/terminal:
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build windows,!appengine
+
+package logrus
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+var (
+	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+)
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal() bool {
+	fd := syscall.Stderr
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
diff --git a/automation/vendor/github.com/Sirupsen/logrus/text_formatter.go b/automation/vendor/github.com/Sirupsen/logrus/text_formatter.go
new file mode 100644
index 0000000..076de5d
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -0,0 +1,166 @@
+package logrus
+
+import (
+	"bytes"
+	"fmt"
+	"runtime"
+	"sort"
+	"strings"
+	"time"
+)
+
+const (
+	nocolor = 0
+	red     = 31
+	green   = 32
+	yellow  = 33
+	blue    = 34
+	gray    = 37
+)
+
+var (
+	baseTimestamp time.Time
+	isTerminal    bool
+)
+
+func init() {
+	baseTimestamp = time.Now()
+	isTerminal = IsTerminal()
+}
+
+type TextFormatter struct {
+	// Set to true to bypass checking for a TTY before outputting colors.
+	ForceColors bool
+
+	// Force disabling colors.
+	DisableColors bool
+
+	// Disable timestamp logging. Useful when output is redirected to a logging
+	// system that already adds timestamps.
+	DisableTimestamp bool
+
+	// Enable logging the full timestamp when a TTY is attached instead of just
+	// the time passed since beginning of execution.
+	FullTimestamp bool
+
+	// TimestampFormat to use for display when a full timestamp is printed
+	TimestampFormat string
+
+	// The fields are sorted by default for a consistent output. For applications
+	// that log extremely frequently and don't use the JSON formatter this may not
+	// be desired.
+	DisableSorting bool
+}
+
+func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
+	var b *bytes.Buffer
+	keys := make([]string, 0, len(entry.Data))
+	for k := range entry.Data {
+		keys = append(keys, k)
+	}
+
+	if !f.DisableSorting {
+		sort.Strings(keys)
+	}
+	if entry.Buffer != nil {
+		b = entry.Buffer
+	} else {
+		b = &bytes.Buffer{}
+	}
+
+	prefixFieldClashes(entry.Data)
+
+	isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+	isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+
+	timestampFormat := f.TimestampFormat
+	if timestampFormat == "" {
+		timestampFormat = DefaultTimestampFormat
+	}
+	if isColored {
+		f.printColored(b, entry, keys, timestampFormat)
+	} else {
+		if !f.DisableTimestamp {
+			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
+		}
+		f.appendKeyValue(b, "level", entry.Level.String())
+		if entry.Message != "" {
+			f.appendKeyValue(b, "msg", entry.Message)
+		}
+		for _, key := range keys {
+			f.appendKeyValue(b, key, entry.Data[key])
+		}
+	}
+
+	b.WriteByte('\n')
+	return b.Bytes(), nil
+}
+
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
+	var levelColor int
+	switch entry.Level {
+	case DebugLevel:
+		levelColor = gray
+	case WarnLevel:
+		levelColor = yellow
+	case ErrorLevel, FatalLevel, PanicLevel:
+		levelColor = red
+	default:
+		levelColor = blue
+	}
+
+	levelText := strings.ToUpper(entry.Level.String())[0:4]
+
+	if f.DisableTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+	} else if !f.FullTimestamp {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+	} else {
+		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+	}
+	for _, k := range keys {
+		v := entry.Data[k]
+		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+		f.appendValue(b, v)
+	}
+}
+
+func needsQuoting(text string) bool {
+	for _, ch := range text {
+		if !((ch >= 'a' && ch <= 'z') ||
+			(ch >= 'A' && ch <= 'Z') ||
+			(ch >= '0' && ch <= '9') ||
+			ch == '-' || ch == '.') {
+			return true
+		}
+	}
+	return false
+}
+
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+	b.WriteString(key)
+	b.WriteByte('=')
+	f.appendValue(b, value)
+	b.WriteByte(' ')
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+	switch value := value.(type) {
+	case string:
+		if !needsQuoting(value) {
+			b.WriteString(value)
+		} else {
+			fmt.Fprintf(b, "%q", value)
+		}
+	case error:
+		errmsg := value.Error()
+		if !needsQuoting(errmsg) {
+			b.WriteString(errmsg)
+		} else {
+			fmt.Fprintf(b, "%q", errmsg)
+		}
+	default:
+		fmt.Fprint(b, value)
+	}
+}
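For reference, a small sketch of how the formatter above is typically wired in; the package-level SetFormatter call comes from logrus itself, and the option values chosen here are purely illustrative:

```go
package main

import (
	"time"

	log "github.com/Sirupsen/logrus"
)

func main() {
	log.SetFormatter(&log.TextFormatter{
		FullTimestamp:   true,         // log the full timestamp rather than seconds since start
		TimestampFormat: time.RFC3339, // layout used when a full timestamp is printed
		DisableColors:   true,         // emit plain key=value pairs even on a TTY
	})
	log.WithField("answer", 42).Info("formatted entry")
}
```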
diff --git a/automation/vendor/github.com/Sirupsen/logrus/writer.go b/automation/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000..f74d2aa
--- /dev/null
+++ b/automation/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,53 @@
+package logrus
+
+import (
+	"bufio"
+	"io"
+	"runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+	return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+	reader, writer := io.Pipe()
+
+	var printFunc func(args ...interface{})
+	switch level {
+	case DebugLevel:
+		printFunc = logger.Debug
+	case InfoLevel:
+		printFunc = logger.Info
+	case WarnLevel:
+		printFunc = logger.Warn
+	case ErrorLevel:
+		printFunc = logger.Error
+	case FatalLevel:
+		printFunc = logger.Fatal
+	case PanicLevel:
+		printFunc = logger.Panic
+	default:
+		printFunc = logger.Print
+	}
+
+	go logger.writerScanner(reader, printFunc)
+	runtime.SetFinalizer(writer, writerFinalizer)
+
+	return writer
+}
+
+func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+	scanner := bufio.NewScanner(reader)
+	for scanner.Scan() {
+		printFunc(scanner.Text())
+	}
+	if err := scanner.Err(); err != nil {
+		logger.Errorf("Error while reading from Writer: %s", err)
+	}
+	reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+	writer.Close()
+}
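A sketch of routing another component's output through the pipe returned by WriterLevel; the http.Server wiring is only an example of a consumer that accepts a standard-library *log.Logger:

```go
package main

import (
	stdlog "log"
	"net/http"

	log "github.com/Sirupsen/logrus"
)

func main() {
	logger := log.New()

	w := logger.WriterLevel(log.WarnLevel)
	defer w.Close() // closing the pipe lets the scanner goroutine exit

	srv := &http.Server{
		Addr:     ":8080",
		ErrorLog: stdlog.New(w, "", 0), // stdlib log lines become logrus warnings
	}
	_ = srv.ListenAndServe()
}
```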
diff --git a/automation/vendor/github.com/juju/ansiterm/LICENSE b/automation/vendor/github.com/juju/ansiterm/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/ansiterm/Makefile b/automation/vendor/github.com/juju/ansiterm/Makefile
new file mode 100644
index 0000000..212fdcb
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/Makefile
@@ -0,0 +1,14 @@
+# Copyright 2016 Canonical Ltd.
+# Licensed under the LGPLv3, see LICENCE file for details.
+
+default: check
+
+check:
+	go test
+
+docs:
+	godoc2md github.com/juju/ansiterm > README.md
+	sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/ansiterm?status.svg)](https://godoc.org/github.com/juju/ansiterm)|' README.md 
+
+
+.PHONY: default check docs
diff --git a/automation/vendor/github.com/juju/ansiterm/README.md b/automation/vendor/github.com/juju/ansiterm/README.md
new file mode 100644
index 0000000..5674387
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/README.md
@@ -0,0 +1,323 @@
+
+# ansiterm
+    import "github.com/juju/ansiterm"
+
+Package ansiterm provides a Writer that writes out the ANSI escape
+codes for color and styles.
+
+
+
+
+
+
+
+## type Color
+``` go
+type Color int
+```
+Color represents one of the standard 16 ANSI colors.
+
+
+
+``` go
+const (
+    Default Color
+    Black
+    Red
+    Green
+    Yellow
+    Blue
+    Magenta
+    Cyan
+    Gray
+    DarkGray
+    BrightRed
+    BrightGreen
+    BrightYellow
+    BrightBlue
+    BrightMagenta
+    BrightCyan
+    White
+)
+```
+
+
+
+
+
+
+
+
+### func (Color) String
+``` go
+func (c Color) String() string
+```
+String returns the name of the color.
+
+
+
+## type Context
+``` go
+type Context struct {
+    Foreground Color
+    Background Color
+    Styles     []Style
+}
+```
+Context provides a way to specify both foreground and background colors
+along with other styles and write text to a Writer with those colors and
+styles.
+
+
+
+
+
+
+
+
+
+### func Background
+``` go
+func Background(color Color) *Context
+```
+Background is a convenience function that creates a Context with the
+specified color as the background color.
+
+
+### func Foreground
+``` go
+func Foreground(color Color) *Context
+```
+Foreground is a convenience function that creates a Context with the
+specified color as the foreground color.
+
+
+### func Styles
+``` go
+func Styles(styles ...Style) *Context
+```
+Styles is a convenience function that creates a Context with the
+specified styles set.
+
+
+
+
+### func (\*Context) Fprint
+``` go
+func (c *Context) Fprint(w sgrWriter, args ...interface{})
+```
+Fprint will set the sgr values of the writer to the specified foreground,
+background and styles, then format using the default formats for its
+operands and write to w, then reset the writer. Spaces are added between
+operands when neither is a string.
+
+
+
+### func (\*Context) Fprintf
+``` go
+func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{})
+```
+Fprintf will set the sgr values of the writer to the specified
+foreground, background and styles, then write the formatted string,
+then reset the writer.
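For example, a minimal sketch; the NewWriter construction and the chosen color are illustrative:

``` go
package main

import (
    "os"

    "github.com/juju/ansiterm"
)

func main() {
    w := ansiterm.NewWriter(os.Stdout)
    ansiterm.Foreground(ansiterm.BrightRed).Fprintf(w, "error: %s\n", "disk full")
}
```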
+
+
+
+### func (\*Context) SetBackground
+``` go
+func (c *Context) SetBackground(color Color) *Context
+```
+SetBackground sets the background to the specified color.
+
+
+
+### func (\*Context) SetForeground
+``` go
+func (c *Context) SetForeground(color Color) *Context
+```
+SetForeground sets the foreground to the specified color.
+
+
+
+### func (\*Context) SetStyle
+``` go
+func (c *Context) SetStyle(styles ...Style) *Context
+```
+SetStyle replaces the styles with the new values.
+
+
+
+## type Style
+``` go
+type Style int
+```
+
+
+``` go
+const (
+    Bold Style
+    Faint
+    Italic
+    Underline
+    Blink
+    Reverse
+    Strikethrough
+    Conceal
+)
+```
+
+
+
+
+
+
+
+
+### func (Style) String
+``` go
+func (s Style) String() string
+```
+
+
+## type TabWriter
+``` go
+type TabWriter struct {
+    Writer
+    // contains filtered or unexported fields
+}
+```
+TabWriter is a filter that inserts padding around tab-delimited
+columns in its input to align them in the output.
+
+It also allows setting of colors and styles over and above the standard
+tabwriter package.
+
+
+
+
+
+
+
+
+
+### func NewTabWriter
+``` go
+func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter
+```
+NewTabWriter returns a writer that is able to set colors and styles.
+The ansi escape codes are stripped for width calculations.
+
+
+
+
+### func (\*TabWriter) Flush
+``` go
+func (t *TabWriter) Flush() error
+```
+Flush should be called after the last call to Write to ensure
+that any data buffered in the Writer is written to output. Any
+incomplete escape sequence at the end is considered
+complete for formatting purposes.
+
+
+
+### func (\*TabWriter) Init
+``` go
+func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter
+```
+A TabWriter must be initialized with a call to Init. The first parameter (output)
+specifies the filter output. The remaining parameters control the formatting:
+
+
+	minwidth	minimal cell width including any padding
+	tabwidth	width of tab characters (equivalent number of spaces)
+	padding		padding added to a cell before computing its width
+	padchar		ASCII char used for padding
+			if padchar == '\t', the Writer will assume that the
+			width of a '\t' in the formatted output is tabwidth,
+			and cells are left-aligned independent of align_left
+			(for correct-looking results, tabwidth must correspond
+			to the tab width in the viewer displaying the result)
+	flags		formatting control
+
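A short usage sketch; the column data and parameter values are illustrative, and Flush must be called at the end as described above:

``` go
package main

import (
    "fmt"
    "os"

    "github.com/juju/ansiterm"
)

func main() {
    tw := ansiterm.NewTabWriter(os.Stdout, 0, 1, 2, ' ', 0)
    tw.SetColumnAlignRight(1) // right-align the second column until the next flush
    fmt.Fprintln(tw, "NAME\tCOUNT")
    fmt.Fprintln(tw, "widgets\t12")
    tw.Flush()
}
```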
+
+
+## type Writer
+``` go
+type Writer struct {
+    io.Writer
+    // contains filtered or unexported fields
+}
+```
+Writer allows colors and styles to be specified. If the io.Writer
+is not a terminal capable of color, all attempts to set colors or
+styles are no-ops.
+
+
+
+
+
+
+
+
+
+### func NewWriter
+``` go
+func NewWriter(w io.Writer) *Writer
+```
+NewWriter returns a Writer that allows the caller to specify colors and
+styles. If the io.Writer is not a terminal capable of color, all attempts
+to set colors or styles are no-ops.
+
+
+
+
+### func (\*Writer) ClearStyle
+``` go
+func (w *Writer) ClearStyle(s Style)
+```
+ClearStyle clears the text style.
+
+
+
+### func (\*Writer) Reset
+``` go
+func (w *Writer) Reset()
+```
+Reset returns the default foreground and background colors with no styles.
+
+
+
+### func (\*Writer) SetBackground
+``` go
+func (w *Writer) SetBackground(c Color)
+```
+SetBackground sets the background color.
+
+
+
+### func (\*Writer) SetForeground
+``` go
+func (w *Writer) SetForeground(c Color)
+```
+SetForeground sets the foreground color.
+
+
+
+### func (\*Writer) SetStyle
+``` go
+func (w *Writer) SetStyle(s Style)
+```
+SetStyle sets the text style.
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
diff --git a/automation/vendor/github.com/juju/ansiterm/attribute.go b/automation/vendor/github.com/juju/ansiterm/attribute.go
new file mode 100644
index 0000000..f2daa48
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/attribute.go
@@ -0,0 +1,50 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+type attribute int
+
+const (
+	unknownAttribute attribute = -1
+	reset            attribute = 0
+)
+
+// sgr returns the escape sequence for the Select Graphic Rendition
+// for the attribute.
+func (a attribute) sgr() string {
+	if a < 0 {
+		return ""
+	}
+	return fmt.Sprintf("\x1b[%dm", a)
+}
+
+type attributes []attribute
+
+func (a attributes) Len() int           { return len(a) }
+func (a attributes) Less(i, j int) bool { return a[i] < a[j] }
+func (a attributes) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+
+// sgr returns the combined escape sequence for the Select Graphic Rendition
+// for the sequence of attributes.
+func (a attributes) sgr() string {
+	switch len(a) {
+	case 0:
+		return ""
+	case 1:
+		return a[0].sgr()
+	default:
+		sort.Sort(a)
+		var values []string
+		for _, attr := range a {
+			values = append(values, fmt.Sprint(attr))
+		}
+		return fmt.Sprintf("\x1b[%sm", strings.Join(values, ";"))
+	}
+}
diff --git a/automation/vendor/github.com/juju/ansiterm/color.go b/automation/vendor/github.com/juju/ansiterm/color.go
new file mode 100644
index 0000000..0a97de3
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/color.go
@@ -0,0 +1,119 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+const (
+	_ Color = iota
+	Default
+	Black
+	Red
+	Green
+	Yellow
+	Blue
+	Magenta
+	Cyan
+	Gray
+	DarkGray
+	BrightRed
+	BrightGreen
+	BrightYellow
+	BrightBlue
+	BrightMagenta
+	BrightCyan
+	White
+)
+
+// Color represents one of the standard 16 ANSI colors.
+type Color int
+
+// String returns the name of the color.
+func (c Color) String() string {
+	switch c {
+	case Default:
+		return "default"
+	case Black:
+		return "black"
+	case Red:
+		return "red"
+	case Green:
+		return "green"
+	case Yellow:
+		return "yellow"
+	case Blue:
+		return "blue"
+	case Magenta:
+		return "magenta"
+	case Cyan:
+		return "cyan"
+	case Gray:
+		return "gray"
+	case DarkGray:
+		return "darkgray"
+	case BrightRed:
+		return "brightred"
+	case BrightGreen:
+		return "brightgreen"
+	case BrightYellow:
+		return "brightyellow"
+	case BrightBlue:
+		return "brightblue"
+	case BrightMagenta:
+		return "brightmagenta"
+	case BrightCyan:
+		return "brightcyan"
+	case White:
+		return "white"
+	default:
+		return ""
+	}
+}
+
+func (c Color) foreground() attribute {
+	switch c {
+	case Default:
+		return 39
+	case Black:
+		return 30
+	case Red:
+		return 31
+	case Green:
+		return 32
+	case Yellow:
+		return 33
+	case Blue:
+		return 34
+	case Magenta:
+		return 35
+	case Cyan:
+		return 36
+	case Gray:
+		return 37
+	case DarkGray:
+		return 90
+	case BrightRed:
+		return 91
+	case BrightGreen:
+		return 92
+	case BrightYellow:
+		return 93
+	case BrightBlue:
+		return 94
+	case BrightMagenta:
+		return 95
+	case BrightCyan:
+		return 96
+	case White:
+		return 97
+	default:
+		return unknownAttribute
+	}
+}
+
+func (c Color) background() attribute {
+	value := c.foreground()
+	if value != unknownAttribute {
+		return value + 10
+	}
+	return value
+}
diff --git a/automation/vendor/github.com/juju/ansiterm/context.go b/automation/vendor/github.com/juju/ansiterm/context.go
new file mode 100644
index 0000000..e61a867
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/context.go
@@ -0,0 +1,95 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"fmt"
+	"io"
+)
+
+// Context provides a way to specify both foreground and background colors
+// along with other styles and write text to a Writer with those colors and
+// styles.
+type Context struct {
+	Foreground Color
+	Background Color
+	Styles     []Style
+}
+
+// Foreground is a convenience function that creates a Context with the
+// specified color as the foreground color.
+func Foreground(color Color) *Context {
+	return &Context{Foreground: color}
+}
+
+// Background is a convenience function that creates a Context with the
+// specified color as the background color.
+func Background(color Color) *Context {
+	return &Context{Background: color}
+}
+
+// Styles is a convenience function that creates a Context with the
+// specified styles set.
+func Styles(styles ...Style) *Context {
+	return &Context{Styles: styles}
+}
+
+// SetForeground sets the foreground to the specified color.
+func (c *Context) SetForeground(color Color) *Context {
+	c.Foreground = color
+	return c
+}
+
+// SetBackground sets the background to the specified color.
+func (c *Context) SetBackground(color Color) *Context {
+	c.Background = color
+	return c
+}
+
+// SetStyle replaces the styles with the new values.
+func (c *Context) SetStyle(styles ...Style) *Context {
+	c.Styles = styles
+	return c
+}
+
+type sgrWriter interface {
+	io.Writer
+	writeSGR(value sgr)
+}
+
+// Fprintf will set the sgr values of the writer to the specified
+// foreground, background and styles, then write the formatted string,
+// then reset the writer.
+func (c *Context) Fprintf(w sgrWriter, format string, args ...interface{}) {
+	w.writeSGR(c)
+	fmt.Fprintf(w, format, args...)
+	w.writeSGR(reset)
+}
+
+// Fprint will set the sgr values of the writer to the specified foreground,
+// background and styles, then format using the default formats for its
+// operands and write to w, then reset the writer. Spaces are added between
+// operands when neither is a string.
+func (c *Context) Fprint(w sgrWriter, args ...interface{}) {
+	w.writeSGR(c)
+	fmt.Fprint(w, args...)
+	w.writeSGR(reset)
+}
+
+func (c *Context) sgr() string {
+	var values attributes
+	if foreground := c.Foreground.foreground(); foreground != unknownAttribute {
+		values = append(values, foreground)
+	}
+	if background := c.Background.background(); background != unknownAttribute {
+		values = append(values, background)
+	}
+	for _, style := range c.Styles {
+		if value := style.enable(); value != unknownAttribute {
+			values = append(values, value)
+		}
+	}
+	return values.sgr()
+}
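As a rough sketch of how a Context combining colors and styles might be used with this package's Writer; the specific color, style, and message are illustrative:

```go
package main

import (
	"os"

	"github.com/juju/ansiterm"
)

func main() {
	w := ansiterm.NewWriter(os.Stdout)

	// Build a reusable context: yellow, underlined text on the default background.
	warn := ansiterm.Foreground(ansiterm.Yellow).SetStyle(ansiterm.Underline)
	warn.Fprintf(w, "warning: %d retries left\n", 3)
}
```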
diff --git a/automation/vendor/github.com/juju/ansiterm/doc.go b/automation/vendor/github.com/juju/ansiterm/doc.go
new file mode 100644
index 0000000..7827007
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/doc.go
@@ -0,0 +1,6 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// Package ansiterm provides a Writer that writes out the ANSI escape
+// codes for color and styles.
+package ansiterm
diff --git a/automation/vendor/github.com/juju/ansiterm/style.go b/automation/vendor/github.com/juju/ansiterm/style.go
new file mode 100644
index 0000000..0be42da
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/style.go
@@ -0,0 +1,72 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+const (
+	_ Style = iota
+	Bold
+	Faint
+	Italic
+	Underline
+	Blink
+	Reverse
+	Strikethrough
+	Conceal
+)
+
+type Style int
+
+func (s Style) String() string {
+	switch s {
+	case Bold:
+		return "bold"
+	case Faint:
+		return "faint"
+	case Italic:
+		return "italic"
+	case Underline:
+		return "underline"
+	case Blink:
+		return "blink"
+	case Reverse:
+		return "reverse"
+	case Strikethrough:
+		return "strikethrough"
+	case Conceal:
+		return "conceal"
+	default:
+		return ""
+	}
+}
+
+func (s Style) enable() attribute {
+	switch s {
+	case Bold:
+		return 1
+	case Faint:
+		return 2
+	case Italic:
+		return 3
+	case Underline:
+		return 4
+	case Blink:
+		return 5
+	case Reverse:
+		return 7
+	case Conceal:
+		return 8
+	case Strikethrough:
+		return 9
+	default:
+		return unknownAttribute
+	}
+}
+
+func (s Style) disable() attribute {
+	value := s.enable()
+	if value != unknownAttribute {
+		return value + 20
+	}
+	return value
+}
diff --git a/automation/vendor/github.com/juju/ansiterm/tabwriter.go b/automation/vendor/github.com/juju/ansiterm/tabwriter.go
new file mode 100644
index 0000000..1ff6faa
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/tabwriter.go
@@ -0,0 +1,64 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"io"
+
+	"github.com/juju/ansiterm/tabwriter"
+)
+
+// NewTabWriter returns a writer that is able to set colors and styles.
+// The ansi escape codes are stripped for width calculations.
+func NewTabWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter {
+	return new(TabWriter).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
+
+// TabWriter is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// It also allows setting of colors and styles over and above the standard
+// tabwriter package.
+type TabWriter struct {
+	Writer
+	tw tabwriter.Writer
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is considered
+// complete for formatting purposes.
+//
+func (t *TabWriter) Flush() error {
+	return t.tw.Flush()
+}
+
+// SetColumnAlignRight will mark a particular column as align right.
+// This is reset on the next flush.
+func (t *TabWriter) SetColumnAlignRight(column int) {
+	t.tw.SetColumnAlignRight(column)
+}
+
+// A TabWriter must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+//	minwidth	minimal cell width including any padding
+//	tabwidth	width of tab characters (equivalent number of spaces)
+//	padding		padding added to a cell before computing its width
+//	padchar		ASCII char used for padding
+//			if padchar == '\t', the Writer will assume that the
+//			width of a '\t' in the formatted output is tabwidth,
+//			and cells are left-aligned independent of align_left
+//			(for correct-looking results, tabwidth must correspond
+//			to the tab width in the viewer displaying the result)
+//	flags		formatting control
+//
+func (t *TabWriter) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *TabWriter {
+	writer, colorCapable := colorEnabledWriter(output)
+	t.Writer = Writer{
+		Writer:  t.tw.Init(writer, minwidth, tabwidth, padding, padchar, flags),
+		noColor: !colorCapable,
+	}
+	return t
+}
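Because TabWriter embeds Writer, colors can be set around individual cells and the ANSI codes are ignored for width calculations. A sketch under that assumption, with illustrative data:

```go
package main

import (
	"fmt"
	"os"

	"github.com/juju/ansiterm"
)

func main() {
	tw := ansiterm.NewTabWriter(os.Stdout, 0, 1, 2, ' ', 0)

	fmt.Fprintln(tw, "SERVICE\tSTATUS")
	fmt.Fprint(tw, "api\t")
	tw.SetForeground(ansiterm.Green) // color only the status cell
	fmt.Fprint(tw, "running")
	tw.Reset()
	fmt.Fprintln(tw)

	tw.Flush()
}
```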
diff --git a/automation/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go b/automation/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go
new file mode 100644
index 0000000..98949d0
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/tabwriter/tabwriter.go
@@ -0,0 +1,587 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is mostly a copy of the Go standard library text/tabwriter, with
+// the additional stripping of ANSI control characters for width calculations.
+
+// Package tabwriter implements a write filter (tabwriter.Writer) that
+// translates tabbed columns in input into properly aligned text.
+//
+// The package is using the Elastic Tabstops algorithm described at
+// http://nickgravgaard.com/elastictabstops/index.html.
+//
+package tabwriter
+
+import (
+	"bytes"
+	"io"
+	"unicode/utf8"
+
+	"github.com/lunixbochs/vtclean"
+)
+
+// ----------------------------------------------------------------------------
+// Filter implementation
+
+// A cell represents a segment of text terminated by tabs or line breaks.
+// The text itself is stored in a separate buffer; cell only describes the
+// segment's size in bytes, its width in runes, and whether it's an htab
+// ('\t') terminated cell.
+//
+type cell struct {
+	size  int  // cell size in bytes
+	width int  // cell width in runes
+	htab  bool // true if the cell is terminated by an htab ('\t')
+}
+
+// A Writer is a filter that inserts padding around tab-delimited
+// columns in its input to align them in the output.
+//
+// The Writer treats incoming bytes as UTF-8 encoded text consisting
+// of cells terminated by (horizontal or vertical) tabs or line
+// breaks (newline or formfeed characters). Cells in adjacent lines
+// constitute a column. The Writer inserts padding as needed to
+// make all cells in a column have the same width, effectively
+// aligning the columns. It assumes that all characters have the
+// same width except for tabs for which a tabwidth must be specified.
+// Note that cells are tab-terminated, not tab-separated: trailing
+// non-tab text at the end of a line does not form a column cell.
+//
+// The Writer assumes that all Unicode code points have the same width;
+// this may not be true in some fonts.
+//
+// If DiscardEmptyColumns is set, empty columns that are terminated
+// entirely by vertical (or "soft") tabs are discarded. Columns
+// terminated by horizontal (or "hard") tabs are not affected by
+// this flag.
+//
+// If a Writer is configured to filter HTML, HTML tags and entities
+// are passed through. The widths of tags and entities are
+// assumed to be zero (tags) and one (entities) for formatting purposes.
+//
+// A segment of text may be escaped by bracketing it with Escape
+// characters. The tabwriter passes escaped text segments through
+// unchanged. In particular, it does not interpret any tabs or line
+// breaks within the segment. If the StripEscape flag is set, the
+// Escape characters are stripped from the output; otherwise they
+// are passed through as well. For the purpose of formatting, the
+// width of the escaped text is always computed excluding the Escape
+// characters.
+//
+// The formfeed character ('\f') acts like a newline but it also
+// terminates all columns in the current line (effectively calling
+// Flush). Cells in the next line start new columns. Unless found
+// inside an HTML tag or inside an escaped text segment, formfeed
+// characters appear as newlines in the output.
+//
+// The Writer must buffer input internally, because proper spacing
+// of one line may depend on the cells in future lines. Clients must
+// call Flush when done calling Write.
+//
+type Writer struct {
+	// configuration
+	output   io.Writer
+	minwidth int
+	tabwidth int
+	padding  int
+	padbytes [8]byte
+	flags    uint
+
+	// current state
+	buf       bytes.Buffer // collected text excluding tabs or line breaks
+	pos       int          // buffer position up to which cell.width of incomplete cell has been computed
+	cell      cell         // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
+	endChar   byte         // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
+	lines     [][]cell     // list of lines; each line is a list of cells
+	widths    []int        // list of column widths in runes - re-used during formatting
+	alignment map[int]uint // column alignment
+}
+
+func (b *Writer) addLine() { b.lines = append(b.lines, []cell{}) }
+
+// Reset the current state.
+func (b *Writer) reset() {
+	b.buf.Reset()
+	b.pos = 0
+	b.cell = cell{}
+	b.endChar = 0
+	b.lines = b.lines[0:0]
+	b.widths = b.widths[0:0]
+	b.alignment = make(map[int]uint)
+	b.addLine()
+}
+
+// Internal representation (current state):
+//
+// - all text written is appended to buf; tabs and line breaks are stripped away
+// - at any given time there is a (possibly empty) incomplete cell at the end
+//   (the cell starts after a tab or line break)
+// - cell.size is the number of bytes belonging to the cell so far
+// - cell.width is text width in runes of that cell from the start of the cell to
+//   position pos; html tags and entities are excluded from this width if html
+//   filtering is enabled
+// - the sizes and widths of processed text are kept in the lines list
+//   which contains a list of cells for each line
+// - the widths list is a temporary list with current widths used during
+//   formatting; it is kept in Writer because it's re-used
+//
+//                    |<---------- size ---------->|
+//                    |                            |
+//                    |<- width ->|<- ignored ->|  |
+//                    |           |             |  |
+// [---processed---tab------------<tag>...</tag>...]
+// ^                  ^                         ^
+// |                  |                         |
+// buf                start of incomplete cell  pos
+
+// Formatting can be controlled with these flags.
+const (
+	// Ignore html tags and treat entities (starting with '&'
+	// and ending in ';') as single characters (width = 1).
+	FilterHTML uint = 1 << iota
+
+	// Strip Escape characters bracketing escaped text segments
+	// instead of passing them through unchanged with the text.
+	StripEscape
+
+	// Force right-alignment of cell content.
+	// Default is left-alignment.
+	AlignRight
+
+	// Handle empty columns as if they were not present in
+	// the input in the first place.
+	DiscardEmptyColumns
+
+	// Always use tabs for indentation columns (i.e., padding of
+	// leading empty cells on the left) independent of padchar.
+	TabIndent
+
+	// Print a vertical bar ('|') between columns (after formatting).
+	// Discarded columns appear as zero-width columns ("||").
+	Debug
+)
+
+// A Writer must be initialized with a call to Init. The first parameter (output)
+// specifies the filter output. The remaining parameters control the formatting:
+//
+//	minwidth	minimal cell width including any padding
+//	tabwidth	width of tab characters (equivalent number of spaces)
+//	padding		padding added to a cell before computing its width
+//	padchar		ASCII char used for padding
+//			if padchar == '\t', the Writer will assume that the
+//			width of a '\t' in the formatted output is tabwidth,
+//			and cells are left-aligned independent of align_left
+//			(for correct-looking results, tabwidth must correspond
+//			to the tab width in the viewer displaying the result)
+//	flags		formatting control
+//
+func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+	if minwidth < 0 || tabwidth < 0 || padding < 0 {
+		panic("negative minwidth, tabwidth, or padding")
+	}
+	b.output = output
+	b.minwidth = minwidth
+	b.tabwidth = tabwidth
+	b.padding = padding
+	for i := range b.padbytes {
+		b.padbytes[i] = padchar
+	}
+	if padchar == '\t' {
+		// tab padding enforces left-alignment
+		flags &^= AlignRight
+	}
+	b.flags = flags
+
+	b.reset()
+
+	return b
+}
+
+// debugging support (keep code around)
+func (b *Writer) dump() {
+	pos := 0
+	for i, line := range b.lines {
+		print("(", i, ") ")
+		for _, c := range line {
+			print("[", string(b.buf.Bytes()[pos:pos+c.size]), "]")
+			pos += c.size
+		}
+		print("\n")
+	}
+	print("\n")
+}
+
+// local error wrapper so we can distinguish errors we want to return
+// as errors from genuine panics (which we don't want to return as errors)
+type osError struct {
+	err error
+}
+
+func (b *Writer) write0(buf []byte) {
+	n, err := b.output.Write(buf)
+	if n != len(buf) && err == nil {
+		err = io.ErrShortWrite
+	}
+	if err != nil {
+		panic(osError{err})
+	}
+}
+
+func (b *Writer) writeN(src []byte, n int) {
+	for n > len(src) {
+		b.write0(src)
+		n -= len(src)
+	}
+	b.write0(src[0:n])
+}
+
+var (
+	newline = []byte{'\n'}
+	tabs    = []byte("\t\t\t\t\t\t\t\t")
+)
+
+func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
+	if b.padbytes[0] == '\t' || useTabs {
+		// padding is done with tabs
+		if b.tabwidth == 0 {
+			return // tabs have no width - can't do any padding
+		}
+		// make cellw the smallest multiple of b.tabwidth
+		cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
+		n := cellw - textw // amount of padding
+		if n < 0 {
+			panic("internal error")
+		}
+		b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
+		return
+	}
+
+	// padding is done with non-tab characters
+	b.writeN(b.padbytes[0:], cellw-textw)
+}
+
+var vbar = []byte{'|'}
+
+func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
+	pos = pos0
+	for i := line0; i < line1; i++ {
+		line := b.lines[i]
+
+		// if TabIndent is set, use tabs to pad leading empty cells
+		useTabs := b.flags&TabIndent != 0
+
+		for j, c := range line {
+			if j > 0 && b.flags&Debug != 0 {
+				// indicate column break
+				b.write0(vbar)
+			}
+
+			if c.size == 0 {
+				// empty cell
+				if j < len(b.widths) {
+					b.writePadding(c.width, b.widths[j], useTabs)
+				}
+			} else {
+				// non-empty cell
+				useTabs = false
+				alignColumnRight := b.alignment[j] == AlignRight
+				if (b.flags&AlignRight == 0) && !alignColumnRight { // align left
+					b.write0(b.buf.Bytes()[pos : pos+c.size])
+					pos += c.size
+					if j < len(b.widths) {
+						b.writePadding(c.width, b.widths[j], false)
+					}
+				} else if alignColumnRight && j < len(b.widths) {
+					// just this column
+					internalSize := b.widths[j] - b.padding
+					if j < len(b.widths) {
+						b.writePadding(c.width, internalSize, false)
+					}
+					b.write0(b.buf.Bytes()[pos : pos+c.size])
+					if b.padding > 0 {
+						b.writePadding(0, b.padding, false)
+					}
+					pos += c.size
+				} else { // align right
+					if j < len(b.widths) {
+						b.writePadding(c.width, b.widths[j], false)
+					}
+					b.write0(b.buf.Bytes()[pos : pos+c.size])
+					pos += c.size
+				}
+			}
+		}
+
+		if i+1 == len(b.lines) {
+			// last buffered line - we don't have a newline, so just write
+			// any outstanding buffered data
+			b.write0(b.buf.Bytes()[pos : pos+b.cell.size])
+			pos += b.cell.size
+		} else {
+			// not the last line - write newline
+			b.write0(newline)
+		}
+	}
+	return
+}
+
+// Format the text between line0 and line1 (excluding line1); pos
+// is the buffer position corresponding to the beginning of line0.
+// Returns the buffer position corresponding to the beginning of
+// line1 and an error, if any.
+//
+func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
+	pos = pos0
+	column := len(b.widths)
+	for this := line0; this < line1; this++ {
+		line := b.lines[this]
+
+		if column < len(line)-1 {
+			// cell exists in this column => this line
+			// has more cells than the previous line
+			// (the last cell per line is ignored because cells are
+			// tab-terminated; the last cell per line describes the
+			// text before the newline/formfeed and does not belong
+			// to a column)
+
+			// print unprinted lines until beginning of block
+			pos = b.writeLines(pos, line0, this)
+			line0 = this
+
+			// column block begin
+			width := b.minwidth // minimal column width
+			discardable := true // true if all cells in this column are empty and "soft"
+			for ; this < line1; this++ {
+				line = b.lines[this]
+				if column < len(line)-1 {
+					// cell exists in this column
+					c := line[column]
+					// update width
+					if w := c.width + b.padding; w > width {
+						width = w
+					}
+					// update discardable
+					if c.width > 0 || c.htab {
+						discardable = false
+					}
+				} else {
+					break
+				}
+			}
+			// column block end
+
+			// discard empty columns if necessary
+			if discardable && b.flags&DiscardEmptyColumns != 0 {
+				width = 0
+			}
+
+			// format and print all columns to the right of this column
+			// (we know the widths of this column and all columns to the left)
+			b.widths = append(b.widths, width) // push width
+			pos = b.format(pos, line0, this)
+			b.widths = b.widths[0 : len(b.widths)-1] // pop width
+			line0 = this
+		}
+	}
+
+	// print unprinted lines until end
+	return b.writeLines(pos, line0, line1)
+}
+
+// Append text to current cell.
+func (b *Writer) append(text []byte) {
+	b.buf.Write(text)
+	b.cell.size += len(text)
+}
+
+// Update the cell width.
+func (b *Writer) updateWidth() {
+	// ---- Changes here -----
+	newChars := b.buf.Bytes()[b.pos:b.buf.Len()]
+	cleaned := vtclean.Clean(string(newChars), false) // false to strip colors
+	b.cell.width += utf8.RuneCount([]byte(cleaned))
+	// --- end of changes ----
+	b.pos = b.buf.Len()
+}
+
+// To escape a text segment, bracket it with Escape characters.
+// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
+// does not terminate a cell and constitutes a single character of
+// width one for formatting purposes.
+//
+// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
+//
+const Escape = '\xff'
+
+// Start escaped mode.
+func (b *Writer) startEscape(ch byte) {
+	switch ch {
+	case Escape:
+		b.endChar = Escape
+	case '<':
+		b.endChar = '>'
+	case '&':
+		b.endChar = ';'
+	}
+}
+
+// Terminate escaped mode. If the escaped text was an HTML tag, its width
+// is assumed to be zero for formatting purposes; if it was an HTML entity,
+// its width is assumed to be one. In all other cases, the width is the
+// unicode width of the text.
+//
+func (b *Writer) endEscape() {
+	switch b.endChar {
+	case Escape:
+		b.updateWidth()
+		if b.flags&StripEscape == 0 {
+			b.cell.width -= 2 // don't count the Escape chars
+		}
+	case '>': // tag of zero width
+	case ';':
+		b.cell.width++ // entity, count as one rune
+	}
+	b.pos = b.buf.Len()
+	b.endChar = 0
+}
+
+// Terminate the current cell by adding it to the list of cells of the
+// current line. Returns the number of cells in that line.
+//
+func (b *Writer) terminateCell(htab bool) int {
+	b.cell.htab = htab
+	line := &b.lines[len(b.lines)-1]
+	*line = append(*line, b.cell)
+	b.cell = cell{}
+	return len(*line)
+}
+
+func handlePanic(err *error, op string) {
+	if e := recover(); e != nil {
+		if nerr, ok := e.(osError); ok {
+			*err = nerr.err
+			return
+		}
+		panic("tabwriter: panic during " + op)
+	}
+}
+
+// Flush should be called after the last call to Write to ensure
+// that any data buffered in the Writer is written to output. Any
+// incomplete escape sequence at the end is considered
+// complete for formatting purposes.
+//
+func (b *Writer) Flush() (err error) {
+	defer b.reset() // even in the presence of errors
+	defer handlePanic(&err, "Flush")
+
+	// add current cell if not empty
+	if b.cell.size > 0 {
+		if b.endChar != 0 {
+			// inside escape - terminate it even if incomplete
+			b.endEscape()
+		}
+		b.terminateCell(false)
+	}
+
+	// format contents of buffer
+	b.format(0, 0, len(b.lines))
+
+	return
+}
+
+var hbar = []byte("---\n")
+
+// SetColumnAlignRight will mark a particular column as align right.
+// This is reset on the next flush.
+func (b *Writer) SetColumnAlignRight(column int) {
+	b.alignment[column] = AlignRight
+}
+
+// Write writes buf to the writer b.
+// The only errors returned are ones encountered
+// while writing to the underlying output stream.
+//
+func (b *Writer) Write(buf []byte) (n int, err error) {
+	defer handlePanic(&err, "Write")
+
+	// split text into cells
+	n = 0
+	for i, ch := range buf {
+		if b.endChar == 0 {
+			// outside escape
+			switch ch {
+			case '\t', '\v', '\n', '\f':
+				// end of cell
+				b.append(buf[n:i])
+				b.updateWidth()
+				n = i + 1 // ch consumed
+				ncells := b.terminateCell(ch == '\t')
+				if ch == '\n' || ch == '\f' {
+					// terminate line
+					b.addLine()
+					if ch == '\f' || ncells == 1 {
+						// A '\f' always forces a flush. Otherwise we flush only if
+						// the previous line has a single cell, which cannot affect
+						// the formatting of the following lines (the last cell per
+						// line is ignored by format()), so the Writer contents can
+						// be flushed safely.
+						if err = b.Flush(); err != nil {
+							return
+						}
+						if ch == '\f' && b.flags&Debug != 0 {
+							// indicate section break
+							b.write0(hbar)
+						}
+					}
+				}
+
+			case Escape:
+				// start of escaped sequence
+				b.append(buf[n:i])
+				b.updateWidth()
+				n = i
+				if b.flags&StripEscape != 0 {
+					n++ // strip Escape
+				}
+				b.startEscape(Escape)
+
+			case '<', '&':
+				// possibly an html tag/entity
+				if b.flags&FilterHTML != 0 {
+					// begin of tag/entity
+					b.append(buf[n:i])
+					b.updateWidth()
+					n = i
+					b.startEscape(ch)
+				}
+			}
+
+		} else {
+			// inside escape
+			if ch == b.endChar {
+				// end of tag/entity
+				j := i + 1
+				if ch == Escape && b.flags&StripEscape != 0 {
+					j = i // strip Escape
+				}
+				b.append(buf[n:j])
+				n = i + 1 // ch consumed
+				b.endEscape()
+			}
+		}
+	}
+
+	// append leftover text
+	b.append(buf[n:])
+	n = len(buf)
+	return
+}
+
+// NewWriter allocates and initializes a new tabwriter.Writer.
+// The parameters are the same as for the Init function.
+//
+func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
+	return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
+}
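The underlying tabwriter can also be used directly; a minimal sketch, with the padding parameters and data chosen only for illustration:

```go
package main

import (
	"fmt"
	"os"

	"github.com/juju/ansiterm/tabwriter"
)

func main() {
	w := tabwriter.NewWriter(os.Stdout, 0, 4, 2, ' ', tabwriter.AlignRight)
	fmt.Fprintln(w, "bytes\tfile")
	fmt.Fprintln(w, "1024\ta.txt")
	fmt.Fprintln(w, "57\tb.txt")
	w.Flush()
}
```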
diff --git a/automation/vendor/github.com/juju/ansiterm/terminal.go b/automation/vendor/github.com/juju/ansiterm/terminal.go
new file mode 100644
index 0000000..96fd11c
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/terminal.go
@@ -0,0 +1,32 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"io"
+	"os"
+
+	"github.com/mattn/go-colorable"
+	"github.com/mattn/go-isatty"
+)
+
+// colorEnabledWriter returns a writer that can handle the ansi color codes
+// and true if the writer passed in is a terminal capable of color. If the
+// TERM environment variable is set to "dumb", the terminal is not considered
+// color capable.
+func colorEnabledWriter(w io.Writer) (io.Writer, bool) {
+	f, ok := w.(*os.File)
+	if !ok {
+		return w, false
+	}
+	// Check the TERM environment variable specifically
+	// to check for "dumb" terminals.
+	if os.Getenv("TERM") == "dumb" {
+		return w, false
+	}
+	if !isatty.IsTerminal(f.Fd()) {
+		return w, false
+	}
+	return colorable.NewColorable(f), true
+}
diff --git a/automation/vendor/github.com/juju/ansiterm/writer.go b/automation/vendor/github.com/juju/ansiterm/writer.go
new file mode 100644
index 0000000..32437bb
--- /dev/null
+++ b/automation/vendor/github.com/juju/ansiterm/writer.go
@@ -0,0 +1,74 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package ansiterm
+
+import (
+	"fmt"
+	"io"
+)
+
+// Writer allows colors and styles to be specified. If the io.Writer
+// is not a terminal capable of color, all attempts to set colors or
+// styles are no-ops.
+type Writer struct {
+	io.Writer
+
+	noColor bool
+}
+
+// NewWriter returns a Writer that allows the caller to specify colors and
+// styles. If the io.Writer is not a terminal capable of color, all attempts
+// to set colors or styles are no-ops.
+func NewWriter(w io.Writer) *Writer {
+	writer, colorCapable := colorEnabledWriter(w)
+	return &Writer{
+		Writer:  writer,
+		noColor: !colorCapable,
+	}
+}
+
+// SetColorCapable forces the writer to either write the ANSI escape color
+// if capable is true, or to not write them if capable is false.
+func (w *Writer) SetColorCapable(capable bool) {
+	w.noColor = !capable
+}
+
+// SetForeground sets the foreground color.
+func (w *Writer) SetForeground(c Color) {
+	w.writeSGR(c.foreground())
+}
+
+// SetBackground sets the background color.
+func (w *Writer) SetBackground(c Color) {
+	w.writeSGR(c.background())
+}
+
+// SetStyle sets the text style.
+func (w *Writer) SetStyle(s Style) {
+	w.writeSGR(s.enable())
+}
+
+// ClearStyle clears the text style.
+func (w *Writer) ClearStyle(s Style) {
+	w.writeSGR(s.disable())
+}
+
+// Reset returns the default foreground and background colors with no styles.
+func (w *Writer) Reset() {
+	w.writeSGR(reset)
+}
+
+type sgr interface {
+	// sgr returns the combined escape sequence for the Select Graphic Rendition.
+	sgr() string
+}
+
+// writeSGR takes the appropriate integer SGR parameters
+// and writes out the ANSI escape code.
+func (w *Writer) writeSGR(value sgr) {
+	if w.noColor {
+		return
+	}
+	fmt.Fprint(w, value.sgr())
+}
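+
+// For example, a minimal usage sketch (assuming Color and Style constants
+// such as Red and Bold exported elsewhere in this package):
+//
+//	w := NewWriter(os.Stdout)
+//	w.SetForeground(Red)
+//	w.SetStyle(Bold)
+//	fmt.Fprintln(w, "attention")
+//	w.Reset()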
diff --git a/automation/vendor/github.com/juju/errors/LICENSE b/automation/vendor/github.com/juju/errors/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/errors/Makefile b/automation/vendor/github.com/juju/errors/Makefile
new file mode 100644
index 0000000..ab7c2e6
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/Makefile
@@ -0,0 +1,11 @@
+default: check
+
+check:
+	go test && go test -compiler gccgo
+
+docs:
+	godoc2md github.com/juju/errors > README.md
+	sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/errors?status.svg)](https://godoc.org/github.com/juju/errors)|' README.md 
+
+
+.PHONY: default check docs
diff --git a/automation/vendor/github.com/juju/errors/README.md b/automation/vendor/github.com/juju/errors/README.md
new file mode 100644
index 0000000..ee24891
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/README.md
@@ -0,0 +1,536 @@
+
+# errors
+    import "github.com/juju/errors"
+
+[![GoDoc](https://godoc.org/github.com/juju/errors?status.svg)](https://godoc.org/github.com/juju/errors)
+
+The juju/errors package provides an easy way to annotate errors without losing the
+original error context.
+
+The exported `New` and `Errorf` functions are designed to replace the
+`errors.New` and `fmt.Errorf` functions respectively. The same underlying
+error is there, but the package also records the location at which the error
+was created.
+
+A primary use case for this library is to add extra context any time an
+error is returned from a function.
+
+
+	if err := SomeFunc(); err != nil {
+		return err
+	}
+
+This instead becomes:
+
+
+	if err := SomeFunc(); err != nil {
+		return errors.Trace(err)
+	}
+
+which just records the file and line number of the Trace call, or
+
+
+	if err := SomeFunc(); err != nil {
+		return errors.Annotate(err, "more context")
+	}
+
+which also adds an annotation to the error.
+
+When you want to check to see if an error is of a particular type, a helper
+function is normally exported by the package that returned the error, like the
+`os` package does.  The underlying cause of the error is available using the
+`Cause` function.
+
+
+	os.IsNotExist(errors.Cause(err))
+
+The result of the `Error()` call on an annotated error is the annotations joined
+with colons, then the result of the `Error()` method for the underlying error
+that was the cause.
+
+
+	err := errors.Errorf("original")
+	err = errors.Annotatef(err, "context")
+	err = errors.Annotatef(err, "more context")
+	err.Error() -> "more context: context: original"
+
+Obviously recording the file, line and functions is not very useful if you
+cannot get them back out again.
+
+
+	errors.ErrorStack(err)
+
+will return something like:
+
+
+	first error
+	github.com/juju/errors/annotation_test.go:193:
+	github.com/juju/errors/annotation_test.go:194: annotation
+	github.com/juju/errors/annotation_test.go:195:
+	github.com/juju/errors/annotation_test.go:196: more context
+	github.com/juju/errors/annotation_test.go:197:
+
+The first error was generated by an external system, so there was no location
+associated. The second, fourth, and last lines were generated with Trace calls,
+and the other two through Annotate.
+
+Sometimes when responding to an error you want to return a more specific error
+for the situation.
+
+
+	if err := FindField(field); err != nil {
+		return errors.Wrap(err, errors.NotFoundf(field))
+	}
+
+This returns an error where the complete error stack is still available, and
+`errors.Cause()` will return the `NotFound` error.
+
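+Further up the stack, a caller can then branch on the specific error type using
+the helper functions documented below (a minimal sketch; `doLookup` and
+`defaults` are hypothetical):
+
+	if err := doLookup(); err != nil {
+		if errors.IsNotFound(err) {
+			return defaults, nil // fall back when the field is missing
+		}
+		return nil, errors.Trace(err)
+	}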
+
+
+
+
+
+## func AlreadyExistsf
+``` go
+func AlreadyExistsf(format string, args ...interface{}) error
+```
+AlreadyExistsf returns an error which satisfies IsAlreadyExists().
+
+
+## func Annotate
+``` go
+func Annotate(other error, message string) error
+```
+Annotate is used to add extra context to an existing error. The location of
+the Annotate call is recorded with the annotations. The file, line and
+function are also recorded.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    return errors.Annotate(err, "failed to frombulate")
+	}
+
+
+## func Annotatef
+``` go
+func Annotatef(other error, format string, args ...interface{}) error
+```
+Annotatef is used to add extra context to an existing error. The location of
+the Annotate call is recorded with the annotations. The file, line and
+function are also recorded.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    return errors.Annotatef(err, "failed to frombulate the %s", arg)
+	}
+
+
+## func Cause
+``` go
+func Cause(err error) error
+```
+Cause returns the cause of the given error.  This will be either the
+original error, or the result of a Wrap or Mask call.
+
+Cause is the usual way to diagnose errors that may have been wrapped by
+the other errors functions.
+
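+For example, comparing the cause of an annotated error against a sentinel
+value (a minimal sketch; `errSentinel` is a hypothetical package-level error):
+
+	err := errors.Annotate(errSentinel, "reading config")
+	if errors.Cause(err) == errSentinel {
+		// the annotation does not hide the original error
+	}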
+
+## func DeferredAnnotatef
+``` go
+func DeferredAnnotatef(err *error, format string, args ...interface{})
+```
+DeferredAnnotatef annotates the given error (when it is not nil) with the given
+format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
+does nothing. This method is used in a defer statement in order to annotate any
+resulting error with the same message.
+
+For example:
+
+
+	defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
+
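+A fuller sketch uses a named return value so the deferred call can observe and
+replace the error (`processFile` and `parse` are hypothetical):
+
+	func processFile(path string) (err error) {
+		defer errors.DeferredAnnotatef(&err, "cannot process %q", path)
+		data, err := ioutil.ReadFile(path)
+		if err != nil {
+			return err
+		}
+		return parse(data)
+	}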
+
+## func Details
+``` go
+func Details(err error) string
+```
+Details returns information about the stack of errors wrapped by err, in
+the format:
+
+
+	[{filename:99: error one} {otherfile:55: cause of error one}]
+
+This is a terse alternative to ErrorStack as it returns a single line.
+
+
+## func ErrorStack
+``` go
+func ErrorStack(err error) string
+```
+ErrorStack returns a string representation of the annotated error. If the
+error passed as the parameter is not an annotated error, the result is
+simply the result of the Error() method on that error.
+
+If the error is an annotated error, a multi-line string is returned where
+each line represents one entry in the annotation stack. The full filename
+from the call stack is used in the output.
+
+
+	first error
+	github.com/juju/errors/annotation_test.go:193:
+	github.com/juju/errors/annotation_test.go:194: annotation
+	github.com/juju/errors/annotation_test.go:195:
+	github.com/juju/errors/annotation_test.go:196: more context
+	github.com/juju/errors/annotation_test.go:197:
+
+
+## func Errorf
+``` go
+func Errorf(format string, args ...interface{}) error
+```
+Errorf creates a new annotated error and records the location at which the
+error is created. It should be a drop-in replacement for fmt.Errorf.
+
+For example:
+
+
+	return errors.Errorf("validation failed: %s", message)
+
+
+## func IsAlreadyExists
+``` go
+func IsAlreadyExists(err error) bool
+```
+IsAlreadyExists reports whether the error was created with
+AlreadyExistsf() or NewAlreadyExists().
+
+
+## func IsNotFound
+``` go
+func IsNotFound(err error) bool
+```
+IsNotFound reports whether err was created with NotFoundf() or
+NewNotFound().
+
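+A short sketch of the round trip between constructor and checker, in the same
+notation as the examples above:
+
+	err := errors.NotFoundf("user %q", "bob")
+	err.Error() -> `user "bob" not found`
+	errors.IsNotFound(err) -> true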
+
+## func IsNotImplemented
+``` go
+func IsNotImplemented(err error) bool
+```
+IsNotImplemented reports whether err was created with
+NotImplementedf() or NewNotImplemented().
+
+
+## func IsNotSupported
+``` go
+func IsNotSupported(err error) bool
+```
+IsNotSupported reports whether the error was created with
+NotSupportedf() or NewNotSupported().
+
+
+## func IsNotValid
+``` go
+func IsNotValid(err error) bool
+```
+IsNotValid reports whether the error was created with NotValidf() or
+NewNotValid().
+
+
+## func IsUnauthorized
+``` go
+func IsUnauthorized(err error) bool
+```
+IsUnauthorized reports whether err was created with Unauthorizedf() or
+NewUnauthorized().
+
+
+## func Mask
+``` go
+func Mask(other error) error
+```
+Mask hides the underlying error type, and records the location of the masking.
+
+
+## func Maskf
+``` go
+func Maskf(other error, format string, args ...interface{}) error
+```
+Maskf masks the given error with the given format string and arguments (like
+fmt.Sprintf), returning a new error that maintains the error stack, but
+hides the underlying error type.  The error string still contains the full
+annotations. If you want to hide the annotations, call Wrap.
+
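+For example (a sketch; `backend.Fetch` is a hypothetical call whose concrete
+error types should not leak to callers):
+
+	if err := backend.Fetch(key); err != nil {
+		return errors.Maskf(err, "fetching %q", key)
+	}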
+
+## func New
+``` go
+func New(message string) error
+```
+New is a drop-in replacement for the standard library errors.New that records
+the location at which the error is created.
+
+For example:
+
+
+	return errors.New("validation failed")
+
+
+## func NewAlreadyExists
+``` go
+func NewAlreadyExists(err error, msg string) error
+```
+NewAlreadyExists returns an error which wraps err and satisfies
+IsAlreadyExists().
+
+
+## func NewNotFound
+``` go
+func NewNotFound(err error, msg string) error
+```
+NewNotFound returns an error which wraps err and satisfies
+IsNotFound().
+
+
+## func NewNotImplemented
+``` go
+func NewNotImplemented(err error, msg string) error
+```
+NewNotImplemented returns an error which wraps err and satisfies
+IsNotImplemented().
+
+
+## func NewNotSupported
+``` go
+func NewNotSupported(err error, msg string) error
+```
+NewNotSupported returns an error which wraps err and satisfies
+IsNotSupported().
+
+
+## func NewNotValid
+``` go
+func NewNotValid(err error, msg string) error
+```
+NewNotValid returns an error which wraps err and satisfies IsNotValid().
+
+
+## func NewUnauthorized
+``` go
+func NewUnauthorized(err error, msg string) error
+```
+NewUnauthorized returns an error which wraps err and satisfies
+IsUnauthorized().
+
+
+## func NotFoundf
+``` go
+func NotFoundf(format string, args ...interface{}) error
+```
+NotFoundf returns an error which satisfies IsNotFound().
+
+
+## func NotImplementedf
+``` go
+func NotImplementedf(format string, args ...interface{}) error
+```
+NotImplementedf returns an error which satisfies IsNotImplemented().
+
+
+## func NotSupportedf
+``` go
+func NotSupportedf(format string, args ...interface{}) error
+```
+NotSupportedf returns an error which satisfies IsNotSupported().
+
+
+## func NotValidf
+``` go
+func NotValidf(format string, args ...interface{}) error
+```
+NotValidf returns an error which satisfies IsNotValid().
+
+
+## func Trace
+``` go
+func Trace(other error) error
+```
+Trace adds the location of the Trace call to the stack.  The Cause of the
+resulting error is the same as the error parameter.  If the other error is
+nil, the result will be nil.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    return errors.Trace(err)
+	}
+
+
+## func Unauthorizedf
+``` go
+func Unauthorizedf(format string, args ...interface{}) error
+```
+Unauthorizedf returns an error which satisfies IsUnauthorized().
+
+
+## func Wrap
+``` go
+func Wrap(other, newDescriptive error) error
+```
+Wrap changes the Cause of the error. The location of the Wrap call is also
+stored in the error stack.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    newErr := &packageError{"more context", private_value}
+	    return errors.Wrap(err, newErr)
+	}
+
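+After the wrap, `errors.Cause` reports the new descriptive error rather than
+the original one, so type checks follow the new error (a minimal sketch):
+
+	first := errors.New("boom")
+	err := errors.Wrap(first, errors.NotFoundf("widget"))
+	errors.IsNotFound(err) -> true
+	errors.Cause(err) == first -> false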
+
+## func Wrapf
+``` go
+func Wrapf(other, newDescriptive error, format string, args ...interface{}) error
+```
+Wrapf changes the Cause of the error, and adds an annotation. The location
+of the Wrap call is also stored in the error stack.
+
+For example:
+
+
+	if err := SomeFunc(); err != nil {
+	    return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
+	}
+
+
+
+## type Err
+``` go
+type Err struct {
+    // contains filtered or unexported fields
+}
+```
+Err holds a description of an error along with information about
+where the error was created.
+
+It may be embedded in custom error types to add extra information that
+this errors package can understand.
+
+
+
+
+
+
+
+
+
+### func NewErr
+``` go
+func NewErr(format string, args ...interface{}) Err
+```
+NewErr is used to return an Err for the purpose of embedding in other
+structures.  The location is not specified, and needs to be set with a call
+to SetLocation.
+
+For example:
+
+
+	type FooError struct {
+	    errors.Err
+	    code int
+	}
+	
+	func NewFooError(code int) error {
+	    err := &FooError{errors.NewErr("foo"), code}
+	    err.SetLocation(1)
+	    return err
+	}
+
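+The error types in this package follow the same embedding pattern, pairing a
+constructor with an `Is...` checker. A minimal sketch of a hypothetical timeout
+error built only from the exported API:
+
+	type timeout struct {
+	    errors.Err
+	}
+	
+	func Timeoutf(format string, args ...interface{}) error {
+	    err := &timeout{errors.NewErr(format+" timed out", args...)}
+	    err.SetLocation(1)
+	    return err
+	}
+	
+	func IsTimeout(err error) bool {
+	    _, ok := errors.Cause(err).(*timeout)
+	    return ok
+	}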
+
+
+
+### func (\*Err) Cause
+``` go
+func (e *Err) Cause() error
+```
+The Cause of an error is the most recent error in the error stack that
+meets one of these criteria: the original error that was raised; the new
+error that was passed into the Wrap function; the most recently masked
+error; or nil if the error itself is considered the Cause.  Normally this
+method is not invoked directly, but instead through the Cause stand alone
+function.
+
+
+
+### func (\*Err) Error
+``` go
+func (e *Err) Error() string
+```
+Error implements error.Error.
+
+
+
+### func (\*Err) Location
+``` go
+func (e *Err) Location() (filename string, line int)
+```
+Location is the file and line of where the error was most recently
+created or annotated.
+
+
+
+### func (\*Err) Message
+``` go
+func (e *Err) Message() string
+```
+Message returns the message stored with the most recent location. This is
+the empty string if the most recent call was Trace, or the message stored
+with Annotate or Mask.
+
+
+
+### func (\*Err) SetLocation
+``` go
+func (e *Err) SetLocation(callDepth int)
+```
+SetLocation records the source location of the error at callDepth stack
+frames above the call.
+
+
+
+### func (\*Err) StackTrace
+``` go
+func (e *Err) StackTrace() []string
+```
+StackTrace returns one string for each location recorded in the stack of
+errors. The first value is the originating error, with a line for each
+other annotation or tracing of the error.
+
+
+
+### func (\*Err) Underlying
+``` go
+func (e *Err) Underlying() error
+```
+Underlying returns the previous error in the error stack, if any. A client
+should not normally call this method; it is used to build the error
+stack and should not be introspected by client code. More
+specifically, clients should not depend on anything but the `Cause` of an
+error.
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
diff --git a/automation/vendor/github.com/juju/errors/doc.go b/automation/vendor/github.com/juju/errors/doc.go
new file mode 100644
index 0000000..35b119a
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/doc.go
@@ -0,0 +1,81 @@
+// Copyright 2013, 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+/*
+[godoc-link-here]
+
+The juju/errors package provides an easy way to annotate errors without losing the
+original error context.
+
+The exported `New` and `Errorf` functions are designed to replace the
+`errors.New` and `fmt.Errorf` functions respectively. The same underlying
+error is there, but the package also records the location at which the error
+was created.
+
+A primary use case for this library is to add extra context any time an
+error is returned from a function.
+
+    if err := SomeFunc(); err != nil {
+        return err
+    }
+
+This instead becomes:
+
+    if err := SomeFunc(); err != nil {
+        return errors.Trace(err)
+    }
+
+which just records the file and line number of the Trace call, or
+
+    if err := SomeFunc(); err != nil {
+        return errors.Annotate(err, "more context")
+    }
+
+which also adds an annotation to the error.
+
+When you want to check to see if an error is of a particular type, a helper
+function is normally exported by the package that returned the error, like the
+`os` package does.  The underlying cause of the error is available using the
+`Cause` function.
+
+	os.IsNotExist(errors.Cause(err))
+
+The result of the `Error()` call on an annotated error is the annotations joined
+with colons, then the result of the `Error()` method for the underlying error
+that was the cause.
+
+	err := errors.Errorf("original")
+	err = errors.Annotatef(err, "context")
+	err = errors.Annotatef(err, "more context")
+	err.Error() -> "more context: context: original"
+
+Obviously recording the file, line and functions is not very useful if you
+cannot get them back out again.
+
+	errors.ErrorStack(err)
+
+will return something like:
+
+	first error
+	github.com/juju/errors/annotation_test.go:193:
+	github.com/juju/errors/annotation_test.go:194: annotation
+	github.com/juju/errors/annotation_test.go:195:
+	github.com/juju/errors/annotation_test.go:196: more context
+	github.com/juju/errors/annotation_test.go:197:
+
+The first error was generated by an external system, so there was no location
+associated. The second, fourth, and last lines were generated with Trace calls,
+and the other two through Annotate.
+
+Sometimes when responding to an error you want to return a more specific error
+for the situation.
+
+    if err := FindField(field); err != nil {
+        return errors.Wrap(err, errors.NotFoundf(field))
+    }
+
+This returns an error where the complete error stack is still available, and
+`errors.Cause()` will return the `NotFound` error.
+
+*/
+package errors
diff --git a/automation/vendor/github.com/juju/errors/error.go b/automation/vendor/github.com/juju/errors/error.go
new file mode 100644
index 0000000..8c51c45
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/error.go
@@ -0,0 +1,145 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+	"fmt"
+	"reflect"
+	"runtime"
+)
+
+// Err holds a description of an error along with information about
+// where the error was created.
+//
+// It may be embedded in custom error types to add extra information that
+// this errors package can understand.
+type Err struct {
+	// message holds an annotation of the error.
+	message string
+
+	// cause holds the cause of the error as returned
+	// by the Cause method.
+	cause error
+
+	// previous holds the previous error in the error stack, if any.
+	previous error
+
+	// file and line hold the source code location where the error was
+	// created.
+	file string
+	line int
+}
+
+// NewErr is used to return an Err for the purpose of embedding in other
+// structures.  The location is not specified, and needs to be set with a call
+// to SetLocation.
+//
+// For example:
+//     type FooError struct {
+//         errors.Err
+//         code int
+//     }
+//
+//     func NewFooError(code int) error {
+//         err := &FooError{errors.NewErr("foo"), code}
+//         err.SetLocation(1)
+//         return err
+//     }
+func NewErr(format string, args ...interface{}) Err {
+	return Err{
+		message: fmt.Sprintf(format, args...),
+	}
+}
+
+// NewErrWithCause is used to return an Err with its cause set to another error, for the purpose of embedding in other
+// structures. The location is not specified, and needs to be set with a call
+// to SetLocation.
+//
+// For example:
+//     type FooError struct {
+//         errors.Err
+//         code int
+//     }
+//
+//     func (e *FooError) Annotate(format string, args ...interface{}) error {
+//         err := &FooError{errors.NewErrWithCause(e.Err, format, args...), e.code}
+//         err.SetLocation(1)
+//         return err
+//     }
+func NewErrWithCause(other error, format string, args ...interface{}) Err {
+	return Err{
+		message:  fmt.Sprintf(format, args...),
+		cause:    Cause(other),
+		previous: other,
+	}
+}
+
+// Location is the file and line of where the error was most recently
+// created or annotated.
+func (e *Err) Location() (filename string, line int) {
+	return e.file, e.line
+}
+
+// Underlying returns the previous error in the error stack, if any. A client
+// should not normally call this method; it is used to build the error
+// stack and should not be introspected by client code. More
+// specifically, clients should not depend on anything but the `Cause` of an
+// error.
+func (e *Err) Underlying() error {
+	return e.previous
+}
+
+// The Cause of an error is the most recent error in the error stack that
+// meets one of these criteria: the original error that was raised; the new
+// error that was passed into the Wrap function; the most recently masked
+// error; or nil if the error itself is considered the Cause.  Normally this
+// method is not invoked directly, but instead through the Cause stand alone
+// function.
+func (e *Err) Cause() error {
+	return e.cause
+}
+
+// Message returns the message stored with the most recent location. This is
+// the empty string if the most recent call was Trace, or the message stored
+// with Annotate or Mask.
+func (e *Err) Message() string {
+	return e.message
+}
+
+// Error implements error.Error.
+func (e *Err) Error() string {
+	// We want to walk up the stack of errors showing the annotations
+	// as long as the cause is the same.
+	err := e.previous
+	if !sameError(Cause(err), e.cause) && e.cause != nil {
+		err = e.cause
+	}
+	switch {
+	case err == nil:
+		return e.message
+	case e.message == "":
+		return err.Error()
+	}
+	return fmt.Sprintf("%s: %v", e.message, err)
+}
+
+// SetLocation records the source location of the error at callDepth stack
+// frames above the call.
+func (e *Err) SetLocation(callDepth int) {
+	_, file, line, _ := runtime.Caller(callDepth + 1)
+	e.file = trimGoPath(file)
+	e.line = line
+}
+
+// StackTrace returns one string for each location recorded in the stack of
+// errors. The first value is the originating error, with a line for each
+// other annotation or tracing of the error.
+func (e *Err) StackTrace() []string {
+	return errorStack(e)
+}
+
+// Ideally we'd have a way to check identity, but deep equals will do.
+func sameError(e1, e2 error) bool {
+	return reflect.DeepEqual(e1, e2)
+}
diff --git a/automation/vendor/github.com/juju/errors/errortypes.go b/automation/vendor/github.com/juju/errors/errortypes.go
new file mode 100644
index 0000000..10b3b19
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/errortypes.go
@@ -0,0 +1,284 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+	"fmt"
+)
+
+// wrap is a helper to construct an Err with a formatted message and an
+// optional previous error.
+func wrap(err error, format, suffix string, args ...interface{}) Err {
+	newErr := Err{
+		message:  fmt.Sprintf(format+suffix, args...),
+		previous: err,
+	}
+	newErr.SetLocation(2)
+	return newErr
+}
+
+// notFound represents an error when something has not been found.
+type notFound struct {
+	Err
+}
+
+// NotFoundf returns an error which satisfies IsNotFound().
+func NotFoundf(format string, args ...interface{}) error {
+	return &notFound{wrap(nil, format, " not found", args...)}
+}
+
+// NewNotFound returns an error which wraps err and satisfies
+// IsNotFound().
+func NewNotFound(err error, msg string) error {
+	return &notFound{wrap(err, msg, "")}
+}
+
+// IsNotFound reports whether err was created with NotFoundf() or
+// NewNotFound().
+func IsNotFound(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notFound)
+	return ok
+}
+
+// userNotFound represents an error when a nonexistent user is looked up.
+type userNotFound struct {
+	Err
+}
+
+// UserNotFoundf returns an error which satisfies IsUserNotFound().
+func UserNotFoundf(format string, args ...interface{}) error {
+	return &userNotFound{wrap(nil, format, " user not found", args...)}
+}
+
+// NewUserNotFound returns an error which wraps err and satisfies
+// IsUserNotFound().
+func NewUserNotFound(err error, msg string) error {
+	return &userNotFound{wrap(err, msg, "")}
+}
+
+// IsUserNotFound reports whether err was created with UserNotFoundf() or
+// NewUserNotFound().
+func IsUserNotFound(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*userNotFound)
+	return ok
+}
+
+// unauthorized represents an error when an operation is unauthorized.
+type unauthorized struct {
+	Err
+}
+
+// Unauthorizedf returns an error which satisfies IsUnauthorized().
+func Unauthorizedf(format string, args ...interface{}) error {
+	return &unauthorized{wrap(nil, format, "", args...)}
+}
+
+// NewUnauthorized returns an error which wraps err and satisfies
+// IsUnauthorized().
+func NewUnauthorized(err error, msg string) error {
+	return &unauthorized{wrap(err, msg, "")}
+}
+
+// IsUnauthorized reports whether err was created with Unauthorizedf() or
+// NewUnauthorized().
+func IsUnauthorized(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*unauthorized)
+	return ok
+}
+
+// notImplemented represents an error when something is not
+// implemented.
+type notImplemented struct {
+	Err
+}
+
+// NotImplementedf returns an error which satisfies IsNotImplemented().
+func NotImplementedf(format string, args ...interface{}) error {
+	return &notImplemented{wrap(nil, format, " not implemented", args...)}
+}
+
+// NewNotImplemented returns an error which wraps err and satisfies
+// IsNotImplemented().
+func NewNotImplemented(err error, msg string) error {
+	return &notImplemented{wrap(err, msg, "")}
+}
+
+// IsNotImplemented reports whether err was created with
+// NotImplementedf() or NewNotImplemented().
+func IsNotImplemented(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notImplemented)
+	return ok
+}
+
+// alreadyExists represents an error when something already exists.
+type alreadyExists struct {
+	Err
+}
+
+// AlreadyExistsf returns an error which satisfies IsAlreadyExists().
+func AlreadyExistsf(format string, args ...interface{}) error {
+	return &alreadyExists{wrap(nil, format, " already exists", args...)}
+}
+
+// NewAlreadyExists returns an error which wraps err and satisfies
+// IsAlreadyExists().
+func NewAlreadyExists(err error, msg string) error {
+	return &alreadyExists{wrap(err, msg, "")}
+}
+
+// IsAlreadyExists reports whether the error was created with
+// AlreadyExistsf() or NewAlreadyExists().
+func IsAlreadyExists(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*alreadyExists)
+	return ok
+}
+
+// notSupported represents an error when something is not supported.
+type notSupported struct {
+	Err
+}
+
+// NotSupportedf returns an error which satisfies IsNotSupported().
+func NotSupportedf(format string, args ...interface{}) error {
+	return &notSupported{wrap(nil, format, " not supported", args...)}
+}
+
+// NewNotSupported returns an error which wraps err and satisfies
+// IsNotSupported().
+func NewNotSupported(err error, msg string) error {
+	return &notSupported{wrap(err, msg, "")}
+}
+
+// IsNotSupported reports whether the error was created with
+// NotSupportedf() or NewNotSupported().
+func IsNotSupported(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notSupported)
+	return ok
+}
+
+// notValid represents an error when something is not valid.
+type notValid struct {
+	Err
+}
+
+// NotValidf returns an error which satisfies IsNotValid().
+func NotValidf(format string, args ...interface{}) error {
+	return &notValid{wrap(nil, format, " not valid", args...)}
+}
+
+// NewNotValid returns an error which wraps err and satisfies IsNotValid().
+func NewNotValid(err error, msg string) error {
+	return &notValid{wrap(err, msg, "")}
+}
+
+// IsNotValid reports whether the error was created with NotValidf() or
+// NewNotValid().
+func IsNotValid(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notValid)
+	return ok
+}
+
+// notProvisioned represents an error when something is not yet provisioned.
+type notProvisioned struct {
+	Err
+}
+
+// NotProvisionedf returns an error which satisfies IsNotProvisioned().
+func NotProvisionedf(format string, args ...interface{}) error {
+	return &notProvisioned{wrap(nil, format, " not provisioned", args...)}
+}
+
+// NewNotProvisioned returns an error which wraps err and satisfies
+// IsNotProvisioned().
+func NewNotProvisioned(err error, msg string) error {
+	return &notProvisioned{wrap(err, msg, "")}
+}
+
+// IsNotProvisioned reports whether err was created with NotProvisionedf() or
+// NewNotProvisioned().
+func IsNotProvisioned(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notProvisioned)
+	return ok
+}
+
+// notAssigned represents an error when something is not yet assigned to
+// something else.
+type notAssigned struct {
+	Err
+}
+
+// NotAssignedf returns an error which satisfies IsNotAssigned().
+func NotAssignedf(format string, args ...interface{}) error {
+	return &notAssigned{wrap(nil, format, " not assigned", args...)}
+}
+
+// NewNotAssigned returns an error which wraps err and satisfies
+// IsNotAssigned().
+func NewNotAssigned(err error, msg string) error {
+	return &notAssigned{wrap(err, msg, "")}
+}
+
+// IsNotAssigned reports whether err was created with NotAssignedf() or
+// NewNotAssigned().
+func IsNotAssigned(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*notAssigned)
+	return ok
+}
+
+// badRequest represents an error when a request has bad parameters.
+type badRequest struct {
+	Err
+}
+
+// BadRequestf returns an error which satisfies IsBadRequest().
+func BadRequestf(format string, args ...interface{}) error {
+	return &badRequest{wrap(nil, format, "", args...)}
+}
+
+// NewBadRequest returns an error which wraps err and satisfies
+// IsBadRequest().
+func NewBadRequest(err error, msg string) error {
+	return &badRequest{wrap(err, msg, "")}
+}
+
+// IsBadRequest reports whether err was created with BadRequestf() or
+// NewBadRequest().
+func IsBadRequest(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*badRequest)
+	return ok
+}
+
+// methodNotAllowed represents an error when an HTTP request
+// is made with an inappropriate method.
+type methodNotAllowed struct {
+	Err
+}
+
+// MethodNotAllowedf returns an error which satisfies IsMethodNotAllowed().
+func MethodNotAllowedf(format string, args ...interface{}) error {
+	return &methodNotAllowed{wrap(nil, format, "", args...)}
+}
+
+// NewMethodNotAllowed returns an error which wraps err and satisfies
+// IsMethodNotAllowed().
+func NewMethodNotAllowed(err error, msg string) error {
+	return &methodNotAllowed{wrap(err, msg, "")}
+}
+
+// IsMethodNotAllowed reports whether err was created with MethodNotAllowedf() or
+// NewMethodNotAllowed().
+func IsMethodNotAllowed(err error) bool {
+	err = Cause(err)
+	_, ok := err.(*methodNotAllowed)
+	return ok
+}
diff --git a/automation/vendor/github.com/juju/errors/functions.go b/automation/vendor/github.com/juju/errors/functions.go
new file mode 100644
index 0000000..994208d
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/functions.go
@@ -0,0 +1,330 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+	"fmt"
+	"strings"
+)
+
+// New is a drop-in replacement for the standard library errors.New that records
+// the location at which the error is created.
+//
+// For example:
+//    return errors.New("validation failed")
+//
+func New(message string) error {
+	err := &Err{message: message}
+	err.SetLocation(1)
+	return err
+}
+
+// Errorf creates a new annotated error and records the location at which the
+// error is created. It should be a drop-in replacement for fmt.Errorf.
+//
+// For example:
+//    return errors.Errorf("validation failed: %s", message)
+//
+func Errorf(format string, args ...interface{}) error {
+	err := &Err{message: fmt.Sprintf(format, args...)}
+	err.SetLocation(1)
+	return err
+}
+
+// Trace adds the location of the Trace call to the stack.  The Cause of the
+// resulting error is the same as the error parameter.  If the other error is
+// nil, the result will be nil.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       return errors.Trace(err)
+//   }
+//
+func Trace(other error) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{previous: other, cause: Cause(other)}
+	err.SetLocation(1)
+	return err
+}
+
+// Annotate is used to add extra context to an existing error. The location of
+// the Annotate call is recorded with the annotations. The file, line and
+// function are also recorded.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       return errors.Annotate(err, "failed to frombulate")
+//   }
+//
+func Annotate(other error, message string) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{
+		previous: other,
+		cause:    Cause(other),
+		message:  message,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Annotatef is used to add extra context to an existing error. The location of
+// the Annotate call is recorded with the annotations. The file, line and
+// function are also recorded.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       return errors.Annotatef(err, "failed to frombulate the %s", arg)
+//   }
+//
+func Annotatef(other error, format string, args ...interface{}) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{
+		previous: other,
+		cause:    Cause(other),
+		message:  fmt.Sprintf(format, args...),
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// DeferredAnnotatef annotates the given error (when it is not nil) with the given
+// format string and arguments (like fmt.Sprintf). If *err is nil, DeferredAnnotatef
+// does nothing. This method is used in a defer statement in order to annotate any
+// resulting error with the same message.
+//
+// For example:
+//
+//    defer DeferredAnnotatef(&err, "failed to frombulate the %s", arg)
+//
+func DeferredAnnotatef(err *error, format string, args ...interface{}) {
+	if *err == nil {
+		return
+	}
+	newErr := &Err{
+		message:  fmt.Sprintf(format, args...),
+		cause:    Cause(*err),
+		previous: *err,
+	}
+	newErr.SetLocation(1)
+	*err = newErr
+}
+
+// Wrap changes the Cause of the error. The location of the Wrap call is also
+// stored in the error stack.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       newErr := &packageError{"more context", private_value}
+//       return errors.Wrap(err, newErr)
+//   }
+//
+func Wrap(other, newDescriptive error) error {
+	err := &Err{
+		previous: other,
+		cause:    newDescriptive,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Wrapf changes the Cause of the error, and adds an annotation. The location
+// of the Wrap call is also stored in the error stack.
+//
+// For example:
+//   if err := SomeFunc(); err != nil {
+//       return errors.Wrapf(err, simpleErrorType, "invalid value %q", value)
+//   }
+//
+func Wrapf(other, newDescriptive error, format string, args ...interface{}) error {
+	err := &Err{
+		message:  fmt.Sprintf(format, args...),
+		previous: other,
+		cause:    newDescriptive,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Maskf masks the given error with the given format string and arguments (like
+// fmt.Sprintf), returning a new error that maintains the error stack, but
+// hides the underlying error type.  The error string still contains the full
+// annotations. If you want to hide the annotations, call Wrap.
+func Maskf(other error, format string, args ...interface{}) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{
+		message:  fmt.Sprintf(format, args...),
+		previous: other,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Mask hides the underlying error type, and records the location of the masking.
+func Mask(other error) error {
+	if other == nil {
+		return nil
+	}
+	err := &Err{
+		previous: other,
+	}
+	err.SetLocation(1)
+	return err
+}
+
+// Cause returns the cause of the given error.  This will be either the
+// original error, or the result of a Wrap or Mask call.
+//
+// Cause is the usual way to diagnose errors that may have been wrapped by
+// the other errors functions.
+func Cause(err error) error {
+	var diag error
+	if err, ok := err.(causer); ok {
+		diag = err.Cause()
+	}
+	if diag != nil {
+		return diag
+	}
+	return err
+}
+
+type causer interface {
+	Cause() error
+}
+
+type wrapper interface {
+	// Message returns the top level error message,
+	// not including the message from the Previous
+	// error.
+	Message() string
+
+	// Underlying returns the Previous error, or nil
+	// if there is none.
+	Underlying() error
+}
+
+type locationer interface {
+	Location() (string, int)
+}
+
+var (
+	_ wrapper    = (*Err)(nil)
+	_ locationer = (*Err)(nil)
+	_ causer     = (*Err)(nil)
+)
+
+// Details returns information about the stack of errors wrapped by err, in
+// the format:
+//
+// 	[{filename:99: error one} {otherfile:55: cause of error one}]
+//
+// This is a terse alternative to ErrorStack as it returns a single line.
+func Details(err error) string {
+	if err == nil {
+		return "[]"
+	}
+	var s []byte
+	s = append(s, '[')
+	for {
+		s = append(s, '{')
+		if err, ok := err.(locationer); ok {
+			file, line := err.Location()
+			if file != "" {
+				s = append(s, fmt.Sprintf("%s:%d", file, line)...)
+				s = append(s, ": "...)
+			}
+		}
+		if cerr, ok := err.(wrapper); ok {
+			s = append(s, cerr.Message()...)
+			err = cerr.Underlying()
+		} else {
+			s = append(s, err.Error()...)
+			err = nil
+		}
+		s = append(s, '}')
+		if err == nil {
+			break
+		}
+		s = append(s, ' ')
+	}
+	s = append(s, ']')
+	return string(s)
+}
+
+// ErrorStack returns a string representation of the annotated error. If the
+// error passed as the parameter is not an annotated error, the result is
+// simply the result of the Error() method on that error.
+//
+// If the error is an annotated error, a multi-line string is returned where
+// each line represents one entry in the annotation stack. The full filename
+// from the call stack is used in the output.
+//
+//     first error
+//     github.com/juju/errors/annotation_test.go:193:
+//     github.com/juju/errors/annotation_test.go:194: annotation
+//     github.com/juju/errors/annotation_test.go:195:
+//     github.com/juju/errors/annotation_test.go:196: more context
+//     github.com/juju/errors/annotation_test.go:197:
+func ErrorStack(err error) string {
+	return strings.Join(errorStack(err), "\n")
+}
+
+func errorStack(err error) []string {
+	if err == nil {
+		return nil
+	}
+
+	// We want the first error first
+	var lines []string
+	for {
+		var buff []byte
+		if err, ok := err.(locationer); ok {
+			file, line := err.Location()
+			// Strip off the leading GOPATH/src path elements.
+			file = trimGoPath(file)
+			if file != "" {
+				buff = append(buff, fmt.Sprintf("%s:%d", file, line)...)
+				buff = append(buff, ": "...)
+			}
+		}
+		if cerr, ok := err.(wrapper); ok {
+			message := cerr.Message()
+			buff = append(buff, message...)
+			// If there is a cause for this error, and it is different to the cause
+			// of the underlying error, then output the error string in the stack trace.
+			var cause error
+			if err1, ok := err.(causer); ok {
+				cause = err1.Cause()
+			}
+			err = cerr.Underlying()
+			if cause != nil && !sameError(Cause(err), cause) {
+				if message != "" {
+					buff = append(buff, ": "...)
+				}
+				buff = append(buff, cause.Error()...)
+			}
+		} else {
+			buff = append(buff, err.Error()...)
+			err = nil
+		}
+		lines = append(lines, string(buff))
+		if err == nil {
+			break
+		}
+	}
+	// reverse the lines to get the original error, which was at the end of
+	// the list, back to the start.
+	var result []string
+	for i := len(lines); i > 0; i-- {
+		result = append(result, lines[i-1])
+	}
+	return result
+}
diff --git a/automation/vendor/github.com/juju/errors/path.go b/automation/vendor/github.com/juju/errors/path.go
new file mode 100644
index 0000000..a7b726a
--- /dev/null
+++ b/automation/vendor/github.com/juju/errors/path.go
@@ -0,0 +1,38 @@
+// Copyright 2013, 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package errors
+
+import (
+	"runtime"
+	"strings"
+)
+
+// prefixSize is used internally to trim the user specific path from the
+// front of the returned filenames from the runtime call stack.
+var prefixSize int
+
+// goPath is the deduced path based on the location of this file as compiled.
+var goPath string
+
+func init() {
+	_, file, _, ok := runtime.Caller(0)
+	if file == "?" {
+		return
+	}
+	if ok {
+		// We know that the end of the file should be:
+		// github.com/juju/errors/path.go
+		size := len(file)
+		suffix := len("github.com/juju/errors/path.go")
+		goPath = file[:size-suffix]
+		prefixSize = len(goPath)
+	}
+}
+
+func trimGoPath(filename string) string {
+	if strings.HasPrefix(filename, goPath) {
+		return filename[prefixSize:]
+	}
+	return filename
+}
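+
+// For example (a sketch; the GOPATH prefix shown is hypothetical):
+//
+//	trimGoPath("/home/user/go/src/github.com/juju/errors/functions.go")
+//	// -> "github.com/juju/errors/functions.go"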
diff --git a/automation/vendor/github.com/juju/gomaasapi/LICENSE b/automation/vendor/github.com/juju/gomaasapi/LICENSE
new file mode 100644
index 0000000..d5836af
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2012-2016 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/gomaasapi/Makefile b/automation/vendor/github.com/juju/gomaasapi/Makefile
new file mode 100644
index 0000000..ea6cbb5
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/Makefile
@@ -0,0 +1,26 @@
+# Build, and run tests.
+check: examples
+	go test ./...
+
+example_source := $(wildcard example/*.go)
+example_binaries := $(patsubst %.go,%,$(example_source))
+
+# Clean up binaries.
+clean:
+	$(RM) $(example_binaries)
+
+# Reformat the source files to match our layout standards.
+format:
+	gofmt -w .
+
+# Invoke gofmt's "simplify" option to streamline the source code.
+simplify:
+	gofmt -w -s .
+
+# Build the examples (we have no tests for them).
+examples: $(example_binaries)
+
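+# Pattern rule: compile each example/<name>.go into a matching example/<name> binary.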
+%: %.go
+	go build -o $@ $<
+
+.PHONY: check clean format examples simplify
diff --git a/automation/vendor/github.com/juju/gomaasapi/README.rst b/automation/vendor/github.com/juju/gomaasapi/README.rst
new file mode 100644
index 0000000..c153cd3
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/README.rst
@@ -0,0 +1,12 @@
+.. -*- mode: rst -*-
+
+******************************
+MAAS API client library for Go
+******************************
+
+This library serves as a minimal client for communicating with the MAAS web
+API in Go programs.
+
+For more information see the `project homepage`_.
+
+.. _project homepage: https://github.com/juju/gomaasapi
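+
+A minimal usage sketch (the base URL and API key below are placeholders, and
+imports plus error handling are abbreviated)::
+
+    controller, err := gomaasapi.NewController(gomaasapi.ControllerArgs{
+        BaseURL: "http://maas.example.com/MAAS/",
+        APIKey:  "<consumer-key>:<token-key>:<token-secret>",
+    })
+    if err != nil {
+        log.Fatal(err)
+    }
+    machines, err := controller.Machines(gomaasapi.MachinesArgs{})
+    // ... work with machines ...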
diff --git a/automation/vendor/github.com/juju/gomaasapi/blockdevice.go b/automation/vendor/github.com/juju/gomaasapi/blockdevice.go
new file mode 100644
index 0000000..ad04f9d
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/blockdevice.go
@@ -0,0 +1,176 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type blockdevice struct {
+	resourceURI string
+
+	id      int
+	name    string
+	model   string
+	path    string
+	usedFor string
+	tags    []string
+
+	blockSize uint64
+	usedSize  uint64
+	size      uint64
+
+	partitions []*partition
+}
+
+// ID implements BlockDevice.
+func (b *blockdevice) ID() int {
+	return b.id
+}
+
+// Name implements BlockDevice.
+func (b *blockdevice) Name() string {
+	return b.name
+}
+
+// Model implements BlockDevice.
+func (b *blockdevice) Model() string {
+	return b.model
+}
+
+// Path implements BlockDevice.
+func (b *blockdevice) Path() string {
+	return b.path
+}
+
+// UsedFor implements BlockDevice.
+func (b *blockdevice) UsedFor() string {
+	return b.usedFor
+}
+
+// Tags implements BlockDevice.
+func (b *blockdevice) Tags() []string {
+	return b.tags
+}
+
+// BlockSize implements BlockDevice.
+func (b *blockdevice) BlockSize() uint64 {
+	return b.blockSize
+}
+
+// UsedSize implements BlockDevice.
+func (b *blockdevice) UsedSize() uint64 {
+	return b.usedSize
+}
+
+// Size implements BlockDevice.
+func (b *blockdevice) Size() uint64 {
+	return b.size
+}
+
+// Partitions implements BlockDevice.
+func (b *blockdevice) Partitions() []Partition {
+	result := make([]Partition, len(b.partitions))
+	for i, v := range b.partitions {
+		result[i] = v
+	}
+	return result
+}
+
+func readBlockDevices(controllerVersion version.Number, source interface{}) ([]*blockdevice, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "blockdevice base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
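+	// Pick the newest deserialisation function whose version does not exceed
+	// the controller's version.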
+	for v := range blockdeviceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no blockdevice read func for version %s", controllerVersion)
+	}
+	readFunc := blockdeviceDeserializationFuncs[deserialisationVersion]
+	return readBlockDeviceList(valid, readFunc)
+}
+
+// readBlockDeviceList expects the values of the sourceList to be string maps.
+func readBlockDeviceList(sourceList []interface{}, readFunc blockdeviceDeserializationFunc) ([]*blockdevice, error) {
+	result := make([]*blockdevice, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for blockdevice %d, %T", i, value)
+		}
+		blockdevice, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "blockdevice %d", i)
+		}
+		result = append(result, blockdevice)
+	}
+	return result, nil
+}
+
+type blockdeviceDeserializationFunc func(map[string]interface{}) (*blockdevice, error)
+
+var blockdeviceDeserializationFuncs = map[version.Number]blockdeviceDeserializationFunc{
+	twoDotOh: blockdevice_2_0,
+}
+
+func blockdevice_2_0(source map[string]interface{}) (*blockdevice, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"id":       schema.ForceInt(),
+		"name":     schema.String(),
+		"model":    schema.OneOf(schema.Nil(""), schema.String()),
+		"path":     schema.String(),
+		"used_for": schema.String(),
+		"tags":     schema.List(schema.String()),
+
+		"block_size": schema.ForceUint(),
+		"used_size":  schema.ForceUint(),
+		"size":       schema.ForceUint(),
+
+		"partitions": schema.List(schema.StringMap(schema.Any())),
+	}
+	checker := schema.FieldMap(fields, nil)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "blockdevice 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	partitions, err := readPartitionList(valid["partitions"].([]interface{}), partition_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	model, _ := valid["model"].(string)
+	result := &blockdevice{
+		resourceURI: valid["resource_uri"].(string),
+
+		id:      valid["id"].(int),
+		name:    valid["name"].(string),
+		model:   model,
+		path:    valid["path"].(string),
+		usedFor: valid["used_for"].(string),
+		tags:    convertToStringSlice(valid["tags"]),
+
+		blockSize: valid["block_size"].(uint64),
+		usedSize:  valid["used_size"].(uint64),
+		size:      valid["size"].(uint64),
+
+		partitions: partitions,
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/bootresource.go b/automation/vendor/github.com/juju/gomaasapi/bootresource.go
new file mode 100644
index 0000000..619a2a9
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/bootresource.go
@@ -0,0 +1,136 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/utils/set"
+	"github.com/juju/version"
+)
+
+type bootResource struct {
+	// Add the controller in when we need to do things with the bootResource.
+	// controller Controller
+
+	resourceURI string
+
+	id           int
+	name         string
+	type_        string
+	architecture string
+	subArches    string
+	kernelFlavor string
+}
+
+// ID implements BootResource.
+func (b *bootResource) ID() int {
+	return b.id
+}
+
+// Name implements BootResource.
+func (b *bootResource) Name() string {
+	return b.name
+}
+
+// Type implements BootResource.
+func (b *bootResource) Type() string {
+	return b.type_
+}
+
+// Architecture implements BootResource.
+func (b *bootResource) Architecture() string {
+	return b.architecture
+}
+
+// SubArchitectures implements BootResource.
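+// The comma-separated subarches string (for example "generic,hwe-t") is split
+// into a set of strings.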
+func (b *bootResource) SubArchitectures() set.Strings {
+	return set.NewStrings(strings.Split(b.subArches, ",")...)
+}
+
+// KernelFlavor implements BootResource.
+func (b *bootResource) KernelFlavor() string {
+	return b.kernelFlavor
+}
+
+func readBootResources(controllerVersion version.Number, source interface{}) ([]*bootResource, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "boot resource base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range bootResourceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no boot resource read func for version %s", controllerVersion)
+	}
+	readFunc := bootResourceDeserializationFuncs[deserialisationVersion]
+	return readBootResourceList(valid, readFunc)
+}
+
+// readBootResourceList expects the values of the sourceList to be string maps.
+func readBootResourceList(sourceList []interface{}, readFunc bootResourceDeserializationFunc) ([]*bootResource, error) {
+	result := make([]*bootResource, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for boot resource %d, %T", i, value)
+		}
+		bootResource, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "boot resource %d", i)
+		}
+		result = append(result, bootResource)
+	}
+	return result, nil
+}
+
+type bootResourceDeserializationFunc func(map[string]interface{}) (*bootResource, error)
+
+var bootResourceDeserializationFuncs = map[version.Number]bootResourceDeserializationFunc{
+	twoDotOh: bootResource_2_0,
+}
+
+func bootResource_2_0(source map[string]interface{}) (*bootResource, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+		"id":           schema.ForceInt(),
+		"name":         schema.String(),
+		"type":         schema.String(),
+		"architecture": schema.String(),
+		"subarches":    schema.String(),
+		"kflavor":      schema.String(),
+	}
+	defaults := schema.Defaults{
+		"subarches": "",
+		"kflavor":   "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "boot resource 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	result := &bootResource{
+		resourceURI:  valid["resource_uri"].(string),
+		id:           valid["id"].(int),
+		name:         valid["name"].(string),
+		type_:        valid["type"].(string),
+		architecture: valid["architecture"].(string),
+		subArches:    valid["subarches"].(string),
+		kernelFlavor: valid["kflavor"].(string),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/client.go b/automation/vendor/github.com/juju/gomaasapi/client.go
new file mode 100644
index 0000000..ef887e6
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/client.go
@@ -0,0 +1,314 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/juju/errors"
+)
+
+const (
+	// Number of retries performed when the server returns a 503
+	// response with a 'Retry-after' header.  A request will be issued
+	// at most NumberOfRetries + 1 times.
+	NumberOfRetries = 4
+
+	RetryAfterHeaderName = "Retry-After"
+)
+
+// Client represents a way of communicating with a MAAS API instance.
+// It is stateless, so it can have concurrent requests in progress.
+type Client struct {
+	APIURL *url.URL
+	Signer OAuthSigner
+}
+
+// ServerError is an http error (or at least, a non-2xx result) received from
+// the server.  It contains the numerical HTTP status code as well as an error
+// string and the response's headers.
+type ServerError struct {
+	error
+	StatusCode  int
+	Header      http.Header
+	BodyMessage string
+}
+
+// GetServerError returns the ServerError from the cause of the given error,
+// along with a bool indicating whether the cause was in fact a ServerError.
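+//
+// A typical check (sketch only):
+//
+//	if svrErr, ok := GetServerError(err); ok && svrErr.StatusCode == http.StatusNotFound {
+//		// the resource was not found
+//	}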
+func GetServerError(err error) (ServerError, bool) {
+	svrErr, ok := errors.Cause(err).(ServerError)
+	return svrErr, ok
+}
+
+// readAndClose reads and closes the given ReadCloser.
+//
+// Trying to read from a nil stream simply returns nil with no error.
+func readAndClose(stream io.ReadCloser) ([]byte, error) {
+	if stream == nil {
+		return nil, nil
+	}
+	defer stream.Close()
+	return ioutil.ReadAll(stream)
+}
+
+// dispatchRequest sends a request to the server, and interprets the response.
+// Client-side errors will return an empty response and a non-nil error.  For
+// server-side errors, however (i.e. responses with a non-2XX status code), the
+// returned error will be a ServerError and the returned body will reflect the
+// server's response.  If the server returns a 503 response with a 'Retry-After'
+// header, the request will be transparently retried.
+func (client Client) dispatchRequest(request *http.Request) ([]byte, error) {
+	// First, store the request's body into a byte[] to be able to restore it
+	// after each request.
+	bodyContent, err := readAndClose(request.Body)
+	if err != nil {
+		return nil, err
+	}
+	for retry := 0; retry < NumberOfRetries; retry++ {
+		// Restore body before issuing request.
+		newBody := ioutil.NopCloser(bytes.NewReader(bodyContent))
+		request.Body = newBody
+		body, err := client.dispatchSingleRequest(request)
+		// If this is a 503 response with a non-void "Retry-After" header: wait
+		// as instructed and retry the request.
+		if err != nil {
+			serverError, ok := errors.Cause(err).(ServerError)
+			if ok && serverError.StatusCode == http.StatusServiceUnavailable {
+				retry_time_int, errConv := strconv.Atoi(serverError.Header.Get(RetryAfterHeaderName))
+				if errConv == nil {
+					select {
+					case <-time.After(time.Duration(retry_time_int) * time.Second):
+					}
+					continue
+				}
+			}
+		}
+		return body, err
+	}
+	// Restore body before issuing request.
+	newBody := ioutil.NopCloser(bytes.NewReader(bodyContent))
+	request.Body = newBody
+	return client.dispatchSingleRequest(request)
+}
+
+func (client Client) dispatchSingleRequest(request *http.Request) ([]byte, error) {
+	client.Signer.OAuthSign(request)
+	httpClient := http.Client{}
+	// See https://code.google.com/p/go/issues/detail?id=4677
+	// We need to force the connection to close each time so that we don't
+	// hit the above Go bug.
+	request.Close = true
+	response, err := httpClient.Do(request)
+	if err != nil {
+		return nil, err
+	}
+	body, err := readAndClose(response.Body)
+	if err != nil {
+		return nil, err
+	}
+	if response.StatusCode < 200 || response.StatusCode > 299 {
+		err := errors.Errorf("ServerError: %v (%s)", response.Status, body)
+		return body, errors.Trace(ServerError{error: err, StatusCode: response.StatusCode, Header: response.Header, BodyMessage: string(body)})
+	}
+	return body, nil
+}
+
+// GetURL returns the URL to a given resource on the API, based on its URI.
+// The resource URI may be absolute or relative; either way the result is a
+// full absolute URL including the network part.
+func (client Client) GetURL(uri *url.URL) *url.URL {
+	return client.APIURL.ResolveReference(uri)
+}
+
+// Get performs an HTTP "GET" to the API.  This may be either an API method
+// invocation (if you pass its name in "operation") or plain resource
+// retrieval (if you leave "operation" blank).
+func (client Client) Get(uri *url.URL, operation string, parameters url.Values) ([]byte, error) {
+	if parameters == nil {
+		parameters = make(url.Values)
+	}
+	opParameter := parameters.Get("op")
+	if opParameter != "" {
+		msg := errors.Errorf("reserved parameter 'op' passed (with value '%s')", opParameter)
+		return nil, msg
+	}
+	if operation != "" {
+		parameters.Set("op", operation)
+	}
+	queryUrl := client.GetURL(uri)
+	queryUrl.RawQuery = parameters.Encode()
+	request, err := http.NewRequest("GET", queryUrl.String(), nil)
+	if err != nil {
+		return nil, err
+	}
+	return client.dispatchRequest(request)
+}
+
+// writeMultiPartFiles writes the given files as parts of a multipart message
+// using the given writer.
+func writeMultiPartFiles(writer *multipart.Writer, files map[string][]byte) error {
+	for fileName, fileContent := range files {
+
+		fw, err := writer.CreateFormFile(fileName, fileName)
+		if err != nil {
+			return err
+		}
+		io.Copy(fw, bytes.NewBuffer(fileContent))
+	}
+	return nil
+}
+
+// writeMultiPartParams writes the given parameters as parts of a multipart
+// message using the given writer.
+func writeMultiPartParams(writer *multipart.Writer, parameters url.Values) error {
+	for key, values := range parameters {
+		for _, value := range values {
+			fw, err := writer.CreateFormField(key)
+			if err != nil {
+				return err
+			}
+			buffer := bytes.NewBufferString(value)
+			io.Copy(fw, buffer)
+		}
+	}
+	return nil
+
+}
+
+// nonIdempotentRequestFiles implements the common functionality of PUT and
+// POST requests (but not GET or DELETE requests) when uploading files is
+// needed.
+func (client Client) nonIdempotentRequestFiles(method string, uri *url.URL, parameters url.Values, files map[string][]byte) ([]byte, error) {
+	buf := new(bytes.Buffer)
+	writer := multipart.NewWriter(buf)
+	err := writeMultiPartFiles(writer, files)
+	if err != nil {
+		return nil, err
+	}
+	err = writeMultiPartParams(writer, parameters)
+	if err != nil {
+		return nil, err
+	}
+	writer.Close()
+	url := client.GetURL(uri)
+	request, err := http.NewRequest(method, url.String(), buf)
+	if err != nil {
+		return nil, err
+	}
+	request.Header.Set("Content-Type", writer.FormDataContentType())
+	return client.dispatchRequest(request)
+
+}
+
+// nonIdempotentRequest implements the common functionality of PUT and POST
+// requests (but not GET or DELETE requests).
+func (client Client) nonIdempotentRequest(method string, uri *url.URL, parameters url.Values) ([]byte, error) {
+	url := client.GetURL(uri)
+	request, err := http.NewRequest(method, url.String(), strings.NewReader(string(parameters.Encode())))
+	if err != nil {
+		return nil, err
+	}
+	request.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	return client.dispatchRequest(request)
+}
+
+// Post performs an HTTP "POST" to the API.  This may be either an API method
+// invocation (if you pass its name in "operation") or plain resource
+// retrieval (if you leave "operation" blank).
+func (client Client) Post(uri *url.URL, operation string, parameters url.Values, files map[string][]byte) ([]byte, error) {
+	queryParams := url.Values{"op": {operation}}
+	uri.RawQuery = queryParams.Encode()
+	if files != nil {
+		return client.nonIdempotentRequestFiles("POST", uri, parameters, files)
+	}
+	return client.nonIdempotentRequest("POST", uri, parameters)
+}
+
+// Put updates an object on the API, using an HTTP "PUT" request.
+func (client Client) Put(uri *url.URL, parameters url.Values) ([]byte, error) {
+	return client.nonIdempotentRequest("PUT", uri, parameters)
+}
+
+// Delete deletes an object on the API, using an HTTP "DELETE" request.
+func (client Client) Delete(uri *url.URL) error {
+	url := client.GetURL(uri)
+	request, err := http.NewRequest("DELETE", url.String(), strings.NewReader(""))
+	if err != nil {
+		return err
+	}
+	_, err = client.dispatchRequest(request)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// Anonymous "signature method" implementation.
+type anonSigner struct{}
+
+func (signer anonSigner) OAuthSign(request *http.Request) error {
+	return nil
+}
+
+// anonSigner implements the OAuthSigner interface.
+var _ OAuthSigner = anonSigner{}
+
+func composeAPIURL(BaseURL string, apiVersion string) (*url.URL, error) {
+	baseurl := EnsureTrailingSlash(BaseURL)
+	apiurl := fmt.Sprintf("%sapi/%s/", baseurl, apiVersion)
+	return url.Parse(apiurl)
+}
+
+// NewAnonymousClient creates a client that issues anonymous requests.
+// BaseURL should refer to the root of the MAAS server path, e.g.
+// http://my.maas.server.example.com/MAAS/
+// apiVersion should contain the version of the MAAS API that you want to use.
+func NewAnonymousClient(BaseURL string, apiVersion string) (*Client, error) {
+	parsedBaseURL, err := composeAPIURL(BaseURL, apiVersion)
+	if err != nil {
+		return nil, err
+	}
+	return &Client{Signer: &anonSigner{}, APIURL: parsedBaseURL}, nil
+}
+
+// NewAuthenticatedClient parses the given MAAS API key into the individual
+// OAuth tokens and creates an Client that will use these tokens to sign the
+// requests it issues.
+// BaseURL should refer to the root of the MAAS server path, e.g.
+// http://my.maas.server.example.com/MAAS/
+// apiVersion should contain the version of the MAAS API that you want to use.
+func NewAuthenticatedClient(BaseURL string, apiKey string, apiVersion string) (*Client, error) {
+	elements := strings.Split(apiKey, ":")
+	if len(elements) != 3 {
+		errString := fmt.Sprintf("invalid API key %q; expected \"<consumer secret>:<token key>:<token secret>\"", apiKey)
+		return nil, errors.NewNotValid(nil, errString)
+	}
+	token := &OAuthToken{
+		ConsumerKey: elements[0],
+		// The consumer secret is the empty string in MAAS' authentication.
+		ConsumerSecret: "",
+		TokenKey:       elements[1],
+		TokenSecret:    elements[2],
+	}
+	signer, err := NewPlainTestOAuthSigner(token, "MAAS API")
+	if err != nil {
+		return nil, err
+	}
+	parsedBaseURL, err := composeAPIURL(BaseURL, apiVersion)
+	if err != nil {
+		return nil, err
+	}
+	return &Client{Signer: signer, APIURL: parsedBaseURL}, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/controller.go b/automation/vendor/github.com/juju/gomaasapi/controller.go
new file mode 100644
index 0000000..3c729c2
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/controller.go
@@ -0,0 +1,890 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"path"
+	"strings"
+	"sync/atomic"
+
+	"github.com/juju/errors"
+	"github.com/juju/loggo"
+	"github.com/juju/schema"
+	"github.com/juju/utils/set"
+	"github.com/juju/version"
+)
+
+var (
+	logger = loggo.GetLogger("maas")
+
+	// The supported versions should be ordered from most desirable version to
+	// least as they will be tried in order.
+	supportedAPIVersions = []string{"2.0"}
+
+	// Each of the api versions that change the request or response structure
+	// for any given call should have a value defined for easy definition of
+	// the deserialization functions.
+	twoDotOh = version.Number{Major: 2, Minor: 0}
+
+	// Current request number. Informational only for logging.
+	requestNumber int64
+)
+
+// ControllerArgs is an argument struct for passing the required parameters
+// to the NewController method.
+type ControllerArgs struct {
+	BaseURL string
+	APIKey  string
+}
+
+// NewController creates an authenticated client to the MAAS API, and checks
+// the capabilities of the server.
+//
+// If the APIKey is not valid, a NotValid error is returned.
+// If the credentials are incorrect, a PermissionError is returned.
+func NewController(args ControllerArgs) (Controller, error) {
+	// For now we don't need to test multiple versions. It is expected that at
+	// some time in the future, we will try the most up to date version and then
+	// work our way backwards.
+	for _, apiVersion := range supportedAPIVersions {
+		major, minor, err := version.ParseMajorMinor(apiVersion)
+		// We should not get an error here. See the test.
+		if err != nil {
+			return nil, errors.Errorf("bad version defined in supported versions: %q", apiVersion)
+		}
+		client, err := NewAuthenticatedClient(args.BaseURL, args.APIKey, apiVersion)
+		if err != nil {
+			// If the credentials aren't valid, return now.
+			if errors.IsNotValid(err) {
+				return nil, errors.Trace(err)
+			}
+			// Any other error attempting to create the authenticated client
+			// is an unexpected error and return now.
+			return nil, NewUnexpectedError(err)
+		}
+		controllerVersion := version.Number{
+			Major: major,
+			Minor: minor,
+		}
+		controller := &controller{client: client}
+		// The controllerVersion returned from the function will include any patch version.
+		controller.capabilities, controller.apiVersion, err = controller.readAPIVersion(controllerVersion)
+		if err != nil {
+			logger.Debugf("read version failed: %#v", err)
+			continue
+		}
+
+		if err := controller.checkCreds(); err != nil {
+			return nil, errors.Trace(err)
+		}
+		return controller, nil
+	}
+
+	return nil, NewUnsupportedVersionError("controller at %s does not support any of %s", args.BaseURL, supportedAPIVersions)
+}
+
+type controller struct {
+	client       *Client
+	apiVersion   version.Number
+	capabilities set.Strings
+}
+
+// Capabilities implements Controller.
+func (c *controller) Capabilities() set.Strings {
+	return c.capabilities
+}
+
+// BootResources implements Controller.
+func (c *controller) BootResources() ([]BootResource, error) {
+	source, err := c.get("boot-resources")
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	resources, err := readBootResources(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []BootResource
+	for _, r := range resources {
+		result = append(result, r)
+	}
+	return result, nil
+}
+
+// Fabrics implements Controller.
+func (c *controller) Fabrics() ([]Fabric, error) {
+	source, err := c.get("fabrics")
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	fabrics, err := readFabrics(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Fabric
+	for _, f := range fabrics {
+		result = append(result, f)
+	}
+	return result, nil
+}
+
+// Spaces implements Controller.
+func (c *controller) Spaces() ([]Space, error) {
+	source, err := c.get("spaces")
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	spaces, err := readSpaces(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Space
+	for _, space := range spaces {
+		result = append(result, space)
+	}
+	return result, nil
+}
+
+// Zones implements Controller.
+func (c *controller) Zones() ([]Zone, error) {
+	source, err := c.get("zones")
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	zones, err := readZones(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Zone
+	for _, z := range zones {
+		result = append(result, z)
+	}
+	return result, nil
+}
+
+// DevicesArgs is an argument struct for selecting Devices.
+// Only devices that match the specified criteria are returned.
+type DevicesArgs struct {
+	Hostname     []string
+	MACAddresses []string
+	SystemIDs    []string
+	Domain       string
+	Zone         string
+	AgentName    string
+}
+
+// Devices implements Controller.
+func (c *controller) Devices(args DevicesArgs) ([]Device, error) {
+	params := NewURLParams()
+	params.MaybeAddMany("hostname", args.Hostname)
+	params.MaybeAddMany("mac_address", args.MACAddresses)
+	params.MaybeAddMany("id", args.SystemIDs)
+	params.MaybeAdd("domain", args.Domain)
+	params.MaybeAdd("zone", args.Zone)
+	params.MaybeAdd("agent_name", args.AgentName)
+	source, err := c.getQuery("devices", params.Values)
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	devices, err := readDevices(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Device
+	for _, d := range devices {
+		d.controller = c
+		result = append(result, d)
+	}
+	return result, nil
+}
+
+// CreateDeviceArgs is an argument struct for passing information into CreateDevice.
+type CreateDeviceArgs struct {
+	Hostname     string
+	MACAddresses []string
+	Domain       string
+	Parent       string
+}
+
+// CreateDevice implements Controller.
+func (c *controller) CreateDevice(args CreateDeviceArgs) (Device, error) {
+	// There must be at least one mac address.
+	if len(args.MACAddresses) == 0 {
+		return nil, NewBadRequestError("at least one MAC address must be specified")
+	}
+	params := NewURLParams()
+	params.MaybeAdd("hostname", args.Hostname)
+	params.MaybeAdd("domain", args.Domain)
+	params.MaybeAddMany("mac_addresses", args.MACAddresses)
+	params.MaybeAdd("parent", args.Parent)
+	result, err := c.post("devices", "", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusBadRequest {
+				return nil, errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			}
+		}
+		// Translate http errors.
+		return nil, NewUnexpectedError(err)
+	}
+
+	device, err := readDevice(c.apiVersion, result)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	device.controller = c
+	return device, nil
+}
+
+// MachinesArgs is an argument struct for selecting Machines.
+// Only machines that match the specified criteria are returned.
+type MachinesArgs struct {
+	Hostnames    []string
+	MACAddresses []string
+	SystemIDs    []string
+	Domain       string
+	Zone         string
+	AgentName    string
+	OwnerData    map[string]string
+}
+
+// Machines implements Controller.
+func (c *controller) Machines(args MachinesArgs) ([]Machine, error) {
+	params := NewURLParams()
+	params.MaybeAddMany("hostname", args.Hostnames)
+	params.MaybeAddMany("mac_address", args.MACAddresses)
+	params.MaybeAddMany("id", args.SystemIDs)
+	params.MaybeAdd("domain", args.Domain)
+	params.MaybeAdd("zone", args.Zone)
+	params.MaybeAdd("agent_name", args.AgentName)
+	// At the moment the MAAS API doesn't support filtering by owner
+	// data so we do that ourselves below.
+	source, err := c.getQuery("machines", params.Values)
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	machines, err := readMachines(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Machine
+	for _, m := range machines {
+		m.controller = c
+		if ownerDataMatches(m.ownerData, args.OwnerData) {
+			result = append(result, m)
+		}
+	}
+	return result, nil
+}
+
+func ownerDataMatches(ownerData, filter map[string]string) bool {
+	for key, value := range filter {
+		if ownerData[key] != value {
+			return false
+		}
+	}
+	return true
+}
+
+// StorageSpec represents one element of storage constraints necessary
+// to be satisfied to allocate a machine.
+type StorageSpec struct {
+	// Label is optional and an arbitrary string. Labels need to be unique
+	// across the StorageSpec elements specified in the AllocateMachineArgs.
+	Label string
+	// Size is required and refers to the minimum size in GB.
+	Size int
+	// Tags holds zero or more tags associated with the disks.
+	Tags []string
+}
+
+// Validate ensures that there is a positive size and that there are no Empty
+// tag values.
+func (s *StorageSpec) Validate() error {
+	if s.Size <= 0 {
+		return errors.NotValidf("Size value %d", s.Size)
+	}
+	for _, v := range s.Tags {
+		if v == "" {
+			return errors.NotValidf("empty tag")
+		}
+	}
+	return nil
+}
+
+// String returns the string representation of the storage spec.
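+// For example, a spec with Label "root", Size 40 and Tags ["ssd"] renders as
+// "root:40(ssd)".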
+func (s *StorageSpec) String() string {
+	label := s.Label
+	if label != "" {
+		label += ":"
+	}
+	tags := strings.Join(s.Tags, ",")
+	if tags != "" {
+		tags = "(" + tags + ")"
+	}
+	return fmt.Sprintf("%s%d%s", label, s.Size, tags)
+}
+
+// InterfaceSpec represents one element of network-related constraints.
+type InterfaceSpec struct {
+	// Label is required and an arbitrary string. Labels need to be unique
+	// across the InterfaceSpec elements specified in the AllocateMachineArgs.
+	// The label is returned in the ConstraintMatches response from
+	// AllocateMachine.
+	Label string
+	Space string
+
+	// NOTE: there are other interface spec values that we are not exposing at
+	// this stage; they can be added on an as-needed basis. Other possible values are:
+	//     'fabric_class', 'not_fabric_class',
+	//     'subnet_cidr', 'not_subnet_cidr',
+	//     'vid', 'not_vid',
+	//     'fabric', 'not_fabric',
+	//     'subnet', 'not_subnet',
+	//     'mode'
+}
+
+// Validate ensures that a Label is specified and that a Space value is set.
+func (a *InterfaceSpec) Validate() error {
+	if a.Label == "" {
+		return errors.NotValidf("missing Label")
+	}
+	// Perhaps at some stage in the future other specs will be supported (like
+	// vid, subnet, etc.), but until then Space is the only value to check.
+	if a.Space == "" {
+		return errors.NotValidf("empty Space constraint")
+	}
+	return nil
+}
+
+// String returns the interface spec as MaaS requires it.
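+// For example, a spec with Label "0" and Space "admin" renders as "0:space=admin".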
+func (a *InterfaceSpec) String() string {
+	return fmt.Sprintf("%s:space=%s", a.Label, a.Space)
+}
+
+// AllocateMachineArgs is an argument struct for passing args into Machine.Allocate.
+type AllocateMachineArgs struct {
+	Hostname     string
+	Architecture string
+	MinCPUCount  int
+	// MinMemory represented in MB.
+	MinMemory int
+	Tags      []string
+	NotTags   []string
+	Zone      string
+	NotInZone []string
+	// Storage represents the required disks on the Machine. If any are specified
+	// the first value is used for the root disk.
+	Storage []StorageSpec
+	// Interfaces represents a number of required interfaces on the machine.
+	// Each InterfaceSpec relates to an individual network interface.
+	Interfaces []InterfaceSpec
+	// NotSpace is a machine level constraint, and applies to the entire machine
+	// rather than specific interfaces.
+	NotSpace  []string
+	AgentName string
+	Comment   string
+	DryRun    bool
+}
+
+// Validate makes sure that any labels specified in Storage or Interfaces
+// are unique, and that the required specifications are valid.
+func (a *AllocateMachineArgs) Validate() error {
+	storageLabels := set.NewStrings()
+	for _, spec := range a.Storage {
+		if err := spec.Validate(); err != nil {
+			return errors.Annotate(err, "Storage")
+		}
+		if spec.Label != "" {
+			if storageLabels.Contains(spec.Label) {
+				return errors.NotValidf("reusing storage label %q", spec.Label)
+			}
+			storageLabels.Add(spec.Label)
+		}
+	}
+	interfaceLabels := set.NewStrings()
+	for _, spec := range a.Interfaces {
+		if err := spec.Validate(); err != nil {
+			return errors.Annotate(err, "Interfaces")
+		}
+		if interfaceLabels.Contains(spec.Label) {
+			return errors.NotValidf("reusing interface label %q", spec.Label)
+		}
+		interfaceLabels.Add(spec.Label)
+	}
+	for _, v := range a.NotSpace {
+		if v == "" {
+			return errors.NotValidf("empty NotSpace constraint")
+		}
+	}
+	return nil
+}
+
+func (a *AllocateMachineArgs) storage() string {
+	var values []string
+	for _, spec := range a.Storage {
+		values = append(values, spec.String())
+	}
+	return strings.Join(values, ",")
+}
+
+func (a *AllocateMachineArgs) interfaces() string {
+	var values []string
+	for _, spec := range a.Interfaces {
+		values = append(values, spec.String())
+	}
+	return strings.Join(values, ";")
+}
+
+func (a *AllocateMachineArgs) notSubnets() []string {
+	var values []string
+	for _, v := range a.NotSpace {
+		values = append(values, "space:"+v)
+	}
+	return values
+}
+
+// ConstraintMatches provides a way for the caller of AllocateMachine to determine
+// how the allocated machine matched the storage and interface constraints specified.
+// The labels that were used in the constraints are the keys in the maps.
+type ConstraintMatches struct {
+	// Interface is a mapping of the constraint label specified to the Interfaces
+	// that match that constraint.
+	Interfaces map[string][]Interface
+
+	// Storage is a mapping of the constraint label specified to the BlockDevices
+	// that match that constraint.
+	Storage map[string][]BlockDevice
+}
+
+// AllocateMachine implements Controller.
+//
+// Returns an error that satisfies IsNoMatchError if the requested
+// constraints cannot be met.
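+//
+// A minimal sketch (the constraint values are illustrative only):
+//
+//	machine, matches, err := c.AllocateMachine(AllocateMachineArgs{
+//		MinCPUCount: 2,
+//		MinMemory:   4096,
+//		Storage:     []StorageSpec{{Label: "root", Size: 20}},
+//	})
+//
+// matches.Storage["root"] then lists the block devices that satisfied the
+// labelled storage constraint.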
+func (c *controller) AllocateMachine(args AllocateMachineArgs) (Machine, ConstraintMatches, error) {
+	var matches ConstraintMatches
+	params := NewURLParams()
+	params.MaybeAdd("name", args.Hostname)
+	params.MaybeAdd("arch", args.Architecture)
+	params.MaybeAddInt("cpu_count", args.MinCPUCount)
+	params.MaybeAddInt("mem", args.MinMemory)
+	params.MaybeAddMany("tags", args.Tags)
+	params.MaybeAddMany("not_tags", args.NotTags)
+	params.MaybeAdd("storage", args.storage())
+	params.MaybeAdd("interfaces", args.interfaces())
+	params.MaybeAddMany("not_subnets", args.notSubnets())
+	params.MaybeAdd("zone", args.Zone)
+	params.MaybeAddMany("not_in_zone", args.NotInZone)
+	params.MaybeAdd("agent_name", args.AgentName)
+	params.MaybeAdd("comment", args.Comment)
+	params.MaybeAddBool("dry_run", args.DryRun)
+	result, err := c.post("machines", "allocate", params.Values)
+	if err != nil {
+		// A 409 Status code is "No Matching Machines"
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusConflict {
+				return nil, matches, errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			}
+		}
+		// Translate http errors.
+		return nil, matches, NewUnexpectedError(err)
+	}
+
+	machine, err := readMachine(c.apiVersion, result)
+	if err != nil {
+		return nil, matches, errors.Trace(err)
+	}
+	machine.controller = c
+
+	// Parse the constraint matches.
+	matches, err = parseAllocateConstraintsResponse(result, machine)
+	if err != nil {
+		return nil, matches, errors.Trace(err)
+	}
+
+	return machine, matches, nil
+}
+
+// ReleaseMachinesArgs is an argument struct for passing the machine system IDs
+// and an optional comment into the ReleaseMachines method.
+type ReleaseMachinesArgs struct {
+	SystemIDs []string
+	Comment   string
+}
+
+// ReleaseMachines implements Controller.
+//
+// Release multiple machines at once. Returns
+//  - BadRequestError if any of the machines cannot be found
+//  - PermissionError if the user does not have permission to release any of the machines
+//  - CannotCompleteError if any of the machines could not be released due to their current state
+func (c *controller) ReleaseMachines(args ReleaseMachinesArgs) error {
+	params := NewURLParams()
+	params.MaybeAddMany("machines", args.SystemIDs)
+	params.MaybeAdd("comment", args.Comment)
+	_, err := c.post("machines", "release", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusBadRequest:
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			case http.StatusConflict:
+				return errors.Wrap(err, NewCannotCompleteError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	return nil
+}
+
+// Files implements Controller.
+func (c *controller) Files(prefix string) ([]File, error) {
+	params := NewURLParams()
+	params.MaybeAdd("prefix", prefix)
+	source, err := c.getQuery("files", params.Values)
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	files, err := readFiles(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []File
+	for _, f := range files {
+		f.controller = c
+		result = append(result, f)
+	}
+	return result, nil
+}
+
+// GetFile implements Controller.
+func (c *controller) GetFile(filename string) (File, error) {
+	if filename == "" {
+		return nil, errors.NotValidf("missing filename")
+	}
+	source, err := c.get("files/" + filename)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusNotFound {
+				return nil, errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			}
+		}
+		return nil, NewUnexpectedError(err)
+	}
+	file, err := readFile(c.apiVersion, source)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	file.controller = c
+	return file, nil
+}
+
+// AddFileArgs is an argument struct for passing information into AddFile.
+// One of Content or (Reader, Length) must be specified.
+type AddFileArgs struct {
+	Filename string
+	Content  []byte
+	Reader   io.Reader
+	Length   int64
+}
+
+// Validate checks to make sure the filename has no slashes, and that one of
+// Content or (Reader, Length) is specified.
+func (a *AddFileArgs) Validate() error {
+	dir, _ := path.Split(a.Filename)
+	if dir != "" {
+		return errors.NotValidf("paths in Filename %q", a.Filename)
+	}
+	if a.Filename == "" {
+		return errors.NotValidf("missing Filename")
+	}
+	if a.Content == nil {
+		if a.Reader == nil {
+			return errors.NotValidf("missing Content or Reader")
+		}
+		if a.Length == 0 {
+			return errors.NotValidf("missing Length")
+		}
+	} else {
+		if a.Reader != nil {
+			return errors.NotValidf("specifying Content and Reader")
+		}
+		if a.Length != 0 {
+			return errors.NotValidf("specifying Length and Content")
+		}
+	}
+	return nil
+}
+
+// AddFile implements Controller.
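+//
+// A minimal sketch (the filename and payload are illustrative only):
+//
+//	err := c.AddFile(AddFileArgs{
+//		Filename: "config.yaml",
+//		Content:  []byte("key: value\n"),
+//	})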
+func (c *controller) AddFile(args AddFileArgs) error {
+	if err := args.Validate(); err != nil {
+		return errors.Trace(err)
+	}
+	fileContent := args.Content
+	if fileContent == nil {
+		content, err := ioutil.ReadAll(io.LimitReader(args.Reader, args.Length))
+		if err != nil {
+			return errors.Annotatef(err, "cannot read file content")
+		}
+		fileContent = content
+	}
+	params := url.Values{"filename": {args.Filename}}
+	_, err := c.postFile("files", "", params, fileContent)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusBadRequest {
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
+
+func (c *controller) checkCreds() error {
+	if _, err := c.getOp("users", "whoami"); err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			if svrErr.StatusCode == http.StatusUnauthorized {
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
+
+func (c *controller) put(path string, params url.Values) (interface{}, error) {
+	path = EnsureTrailingSlash(path)
+	requestID := nextRequestID()
+	logger.Tracef("request %x: PUT %s%s, params: %s", requestID, c.client.APIURL, path, params.Encode())
+	bytes, err := c.client.Put(&url.URL{Path: path}, params)
+	if err != nil {
+		logger.Tracef("response %x: error: %q", requestID, err.Error())
+		logger.Tracef("error detail: %#v", err)
+		return nil, errors.Trace(err)
+	}
+	logger.Tracef("response %x: %s", requestID, string(bytes))
+
+	var parsed interface{}
+	err = json.Unmarshal(bytes, &parsed)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return parsed, nil
+}
+
+func (c *controller) post(path, op string, params url.Values) (interface{}, error) {
+	bytes, err := c._postRaw(path, op, params, nil)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	var parsed interface{}
+	err = json.Unmarshal(bytes, &parsed)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return parsed, nil
+}
+
+func (c *controller) postFile(path, op string, params url.Values, fileContent []byte) (interface{}, error) {
+	// Only one file is ever sent at a time.
+	files := map[string][]byte{"file": fileContent}
+	return c._postRaw(path, op, params, files)
+}
+
+func (c *controller) _postRaw(path, op string, params url.Values, files map[string][]byte) ([]byte, error) {
+	path = EnsureTrailingSlash(path)
+	requestID := nextRequestID()
+	if logger.IsTraceEnabled() {
+		opArg := ""
+		if op != "" {
+			opArg = "?op=" + op
+		}
+		logger.Tracef("request %x: POST %s%s%s, params=%s", requestID, c.client.APIURL, path, opArg, params.Encode())
+	}
+	bytes, err := c.client.Post(&url.URL{Path: path}, op, params, files)
+	if err != nil {
+		logger.Tracef("response %x: error: %q", requestID, err.Error())
+		logger.Tracef("error detail: %#v", err)
+		return nil, errors.Trace(err)
+	}
+	logger.Tracef("response %x: %s", requestID, string(bytes))
+	return bytes, nil
+}
+
+func (c *controller) delete(path string) error {
+	path = EnsureTrailingSlash(path)
+	requestID := nextRequestID()
+	logger.Tracef("request %x: DELETE %s%s", requestID, c.client.APIURL, path)
+	err := c.client.Delete(&url.URL{Path: path})
+	if err != nil {
+		logger.Tracef("response %x: error: %q", requestID, err.Error())
+		logger.Tracef("error detail: %#v", err)
+		return errors.Trace(err)
+	}
+	logger.Tracef("response %x: complete", requestID)
+	return nil
+}
+
+func (c *controller) getQuery(path string, params url.Values) (interface{}, error) {
+	return c._get(path, "", params)
+}
+
+func (c *controller) get(path string) (interface{}, error) {
+	return c._get(path, "", nil)
+}
+
+func (c *controller) getOp(path, op string) (interface{}, error) {
+	return c._get(path, op, nil)
+}
+
+func (c *controller) _get(path, op string, params url.Values) (interface{}, error) {
+	bytes, err := c._getRaw(path, op, params)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var parsed interface{}
+	err = json.Unmarshal(bytes, &parsed)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	return parsed, nil
+}
+
+func (c *controller) _getRaw(path, op string, params url.Values) ([]byte, error) {
+	path = EnsureTrailingSlash(path)
+	requestID := nextRequestID()
+	if logger.IsTraceEnabled() {
+		var query string
+		if params != nil {
+			query = "?" + params.Encode()
+		}
+		logger.Tracef("request %x: GET %s%s%s", requestID, c.client.APIURL, path, query)
+	}
+	bytes, err := c.client.Get(&url.URL{Path: path}, op, params)
+	if err != nil {
+		logger.Tracef("response %x: error: %q", requestID, err.Error())
+		logger.Tracef("error detail: %#v", err)
+		return nil, errors.Trace(err)
+	}
+	logger.Tracef("response %x: %s", requestID, string(bytes))
+	return bytes, nil
+}
+
+func nextRequestID() int64 {
+	return atomic.AddInt64(&requestNumber, 1)
+}
+
+func (c *controller) readAPIVersion(apiVersion version.Number) (set.Strings, version.Number, error) {
+	parsed, err := c.get("version")
+	if err != nil {
+		return nil, apiVersion, errors.Trace(err)
+	}
+
+	// If we come to care about other fields, add them here.
+	fields := schema.Fields{
+		"capabilities": schema.List(schema.String()),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(parsed, nil)
+	if err != nil {
+		return nil, apiVersion, WrapWithDeserializationError(err, "version response")
+	}
+	// For now, we don't append any subversion, but as it becomes used, we
+	// should parse and check.
+
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+	capabilities := set.NewStrings()
+	capabilityValues := valid["capabilities"].([]interface{})
+	for _, value := range capabilityValues {
+		capabilities.Add(value.(string))
+	}
+
+	return capabilities, apiVersion, nil
+}
+
+func parseAllocateConstraintsResponse(source interface{}, machine *machine) (ConstraintMatches, error) {
+	var empty ConstraintMatches
+	matchFields := schema.Fields{
+		"storage":    schema.StringMap(schema.List(schema.ForceInt())),
+		"interfaces": schema.StringMap(schema.List(schema.ForceInt())),
+	}
+	matchDefaults := schema.Defaults{
+		"storage":    schema.Omit,
+		"interfaces": schema.Omit,
+	}
+	fields := schema.Fields{
+		"constraints_by_type": schema.FieldMap(matchFields, matchDefaults),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return empty, WrapWithDeserializationError(err, "allocation constraints response schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	constraintsMap := valid["constraints_by_type"].(map[string]interface{})
+	result := ConstraintMatches{
+		Interfaces: make(map[string][]Interface),
+		Storage:    make(map[string][]BlockDevice),
+	}
+
+	if interfaceMatches, found := constraintsMap["interfaces"]; found {
+		matches := convertConstraintMatches(interfaceMatches)
+		for label, ids := range matches {
+			interfaces := make([]Interface, len(ids))
+			for index, id := range ids {
+				iface := machine.Interface(id)
+				if iface == nil {
+					return empty, NewDeserializationError("constraint match interface %q: %d does not match an interface for the machine", label, id)
+				}
+				interfaces[index] = iface
+			}
+			result.Interfaces[label] = interfaces
+		}
+	}
+
+	if storageMatches, found := constraintsMap["storage"]; found {
+		matches := convertConstraintMatches(storageMatches)
+		for label, ids := range matches {
+			blockDevices := make([]BlockDevice, len(ids))
+			for index, id := range ids {
+				blockDevice := machine.PhysicalBlockDevice(id)
+				if blockDevice == nil {
+					return empty, NewDeserializationError("constraint match storage %q: %d does not match a physical block device for the machine", label, id)
+				}
+				blockDevices[index] = blockDevice
+			}
+			result.Storage[label] = blockDevices
+		}
+	}
+	return result, nil
+}
+
+func convertConstraintMatches(source interface{}) map[string][]int {
+	// These casts are all safe because of the schema check.
+	result := make(map[string][]int)
+	matchMap := source.(map[string]interface{})
+	for label, values := range matchMap {
+		items := values.([]interface{})
+		result[label] = make([]int, len(items))
+		for index, value := range items {
+			result[label][index] = value.(int)
+		}
+	}
+	return result
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/dependencies.tsv b/automation/vendor/github.com/juju/gomaasapi/dependencies.tsv
new file mode 100644
index 0000000..4cd966f
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/dependencies.tsv
@@ -0,0 +1,11 @@
+github.com/juju/errors	git	1b5e39b83d1835fa480e0c2ddefb040ee82d58b3	2015-09-16T12:56:42Z
+github.com/juju/loggo	git	8477fc936adf0e382d680310047ca27e128a309a	2015-05-27T03:58:39Z
+github.com/juju/names	git	8a0aa0963bbacdc790914892e9ff942e94d6f795	2016-03-30T15:05:33Z
+github.com/juju/schema	git	075de04f9b7d7580d60a1e12a0b3f50bb18e6998	2016-04-20T04:42:03Z
+github.com/juju/testing	git	162fafccebf20a4207ab93d63b986c230e3f4d2e	2016-04-04T09:43:17Z
+github.com/juju/utils	git	eb6cb958762135bb61aed1e0951f657c674d427f	2016-04-11T02:40:59Z
+github.com/juju/version	git	ef897ad7f130870348ce306f61332f5335355063	2015-11-27T20:34:00Z
+golang.org/x/crypto	git	aedad9a179ec1ea11b7064c57cbc6dc30d7724ec	2015-08-30T18:06:42Z
+gopkg.in/check.v1	git	4f90aeace3a26ad7021961c297b22c42160c7b25	2016-01-05T16:49:36Z
+gopkg.in/mgo.v2	git	4d04138ffef2791c479c0c8bbffc30b34081b8d9	2015-10-26T16:34:53Z
+gopkg.in/yaml.v2	git	a83829b6f1293c91addabc89d0571c246397bbf4	2016-03-01T20:40:22Z
diff --git a/automation/vendor/github.com/juju/gomaasapi/device.go b/automation/vendor/github.com/juju/gomaasapi/device.go
new file mode 100644
index 0000000..7c9bc70
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/device.go
@@ -0,0 +1,293 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type device struct {
+	controller *controller
+
+	resourceURI string
+
+	systemID string
+	hostname string
+	fqdn     string
+
+	parent string
+	owner  string
+
+	ipAddresses  []string
+	interfaceSet []*interface_
+	zone         *zone
+}
+
+// SystemID implements Device.
+func (d *device) SystemID() string {
+	return d.systemID
+}
+
+// Hostname implements Device.
+func (d *device) Hostname() string {
+	return d.hostname
+}
+
+// FQDN implements Device.
+func (d *device) FQDN() string {
+	return d.fqdn
+}
+
+// Parent implements Device.
+func (d *device) Parent() string {
+	return d.parent
+}
+
+// Owner implements Device.
+func (d *device) Owner() string {
+	return d.owner
+}
+
+// IPAddresses implements Device.
+func (d *device) IPAddresses() []string {
+	return d.ipAddresses
+}
+
+// Zone implements Device.
+func (d *device) Zone() Zone {
+	if d.zone == nil {
+		return nil
+	}
+	return d.zone
+}
+
+// InterfaceSet implements Device.
+func (d *device) InterfaceSet() []Interface {
+	result := make([]Interface, len(d.interfaceSet))
+	for i, v := range d.interfaceSet {
+		v.controller = d.controller
+		result[i] = v
+	}
+	return result
+}
+
+// CreateInterfaceArgs is an argument struct for passing parameters to
+// the Machine.CreateInterface method.
+type CreateInterfaceArgs struct {
+	// Name of the interface (required).
+	Name string
+	// MACAddress is the MAC address of the interface (required).
+	MACAddress string
+	// VLAN is the untagged VLAN the interface is connected to (required).
+	VLAN VLAN
+	// Tags to attach to the interface (optional).
+	Tags []string
+	// MTU - Maximum transmission unit. (optional)
+	MTU int
+	// AcceptRA - Accept router advertisements. (IPv6 only)
+	AcceptRA bool
+	// Autoconf - Perform stateless autoconfiguration. (IPv6 only)
+	Autoconf bool
+}
+
+// Validate checks the required fields are set for the arg structure.
+func (a *CreateInterfaceArgs) Validate() error {
+	if a.Name == "" {
+		return errors.NotValidf("missing Name")
+	}
+	if a.MACAddress == "" {
+		return errors.NotValidf("missing MACAddress")
+	}
+	if a.VLAN == nil {
+		return errors.NotValidf("missing VLAN")
+	}
+	return nil
+}
+
+// interfacesURI is the URI used to add interfaces for this device. The operations
+// are on the nodes endpoint, not devices.
+func (d *device) interfacesURI() string {
+	return strings.Replace(d.resourceURI, "devices", "nodes", 1) + "interfaces/"
+}
+
+// CreateInterface implements Device.
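+//
+// A minimal sketch (the name and MAC address are illustrative; vlan must be a
+// VLAN previously obtained from the controller):
+//
+//	iface, err := d.CreateInterface(CreateInterfaceArgs{
+//		Name:       "eth1",
+//		MACAddress: "52:54:00:12:34:56",
+//		VLAN:       vlan,
+//	})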
+func (d *device) CreateInterface(args CreateInterfaceArgs) (Interface, error) {
+	if err := args.Validate(); err != nil {
+		return nil, errors.Trace(err)
+	}
+	params := NewURLParams()
+	params.Values.Add("name", args.Name)
+	params.Values.Add("mac_address", args.MACAddress)
+	params.Values.Add("vlan", fmt.Sprint(args.VLAN.ID()))
+	params.MaybeAdd("tags", strings.Join(args.Tags, ","))
+	params.MaybeAddInt("mtu", args.MTU)
+	params.MaybeAddBool("accept_ra", args.AcceptRA)
+	params.MaybeAddBool("autoconf", args.Autoconf)
+	result, err := d.controller.post(d.interfacesURI(), "create_physical", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound, http.StatusConflict:
+				return nil, errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return nil, errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			case http.StatusServiceUnavailable:
+				return nil, errors.Wrap(err, NewCannotCompleteError(svrErr.BodyMessage))
+			}
+		}
+		return nil, NewUnexpectedError(err)
+	}
+
+	iface, err := readInterface(d.controller.apiVersion, result)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	iface.controller = d.controller
+
+	// TODO: add to the interfaces for the device when the interfaces are returned.
+	// lp:bug 1567213.
+	return iface, nil
+}
+
+// Delete implements Device.
+func (d *device) Delete() error {
+	err := d.controller.delete(d.resourceURI)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
+
+func readDevice(controllerVersion version.Number, source interface{}) (*device, error) {
+	readFunc, err := getDeviceDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.StringMap(schema.Any())
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "device base schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	return readFunc(valid)
+}
+
+func readDevices(controllerVersion version.Number, source interface{}) ([]*device, error) {
+	readFunc, err := getDeviceDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "device base schema check failed")
+	}
+	valid := coerced.([]interface{})
+	return readDeviceList(valid, readFunc)
+}
+
+func getDeviceDeserializationFunc(controllerVersion version.Number) (deviceDeserializationFunc, error) {
+	var deserialisationVersion version.Number
+	for v := range deviceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no device read func for version %s", controllerVersion)
+	}
+	return deviceDeserializationFuncs[deserialisationVersion], nil
+}
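+
+// As a worked example of the selection above (assuming twoDotOh denotes
+// version 2.0.0): a controller reporting 2.1 selects the 2.0 read function,
+// since 2.0 is the highest registered version not greater than 2.1, while a
+// 1.9 controller matches nothing and yields an UnsupportedVersionError.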
+
+// readDeviceList expects the values of the sourceList to be string maps.
+func readDeviceList(sourceList []interface{}, readFunc deviceDeserializationFunc) ([]*device, error) {
+	result := make([]*device, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for device %d, %T", i, value)
+		}
+		device, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "device %d", i)
+		}
+		result = append(result, device)
+	}
+	return result, nil
+}
+
+type deviceDeserializationFunc func(map[string]interface{}) (*device, error)
+
+var deviceDeserializationFuncs = map[version.Number]deviceDeserializationFunc{
+	twoDotOh: device_2_0,
+}
+
+func device_2_0(source map[string]interface{}) (*device, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"system_id": schema.String(),
+		"hostname":  schema.String(),
+		"fqdn":      schema.String(),
+		"parent":    schema.OneOf(schema.Nil(""), schema.String()),
+		"owner":     schema.OneOf(schema.Nil(""), schema.String()),
+
+		"ip_addresses":  schema.List(schema.String()),
+		"interface_set": schema.List(schema.StringMap(schema.Any())),
+		"zone":          schema.StringMap(schema.Any()),
+	}
+	defaults := schema.Defaults{
+		"owner":  "",
+		"parent": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "device 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	interfaceSet, err := readInterfaceList(valid["interface_set"].([]interface{}), interface_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	zone, err := zone_2_0(valid["zone"].(map[string]interface{}))
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	owner, _ := valid["owner"].(string)
+	parent, _ := valid["parent"].(string)
+	result := &device{
+		resourceURI: valid["resource_uri"].(string),
+
+		systemID: valid["system_id"].(string),
+		hostname: valid["hostname"].(string),
+		fqdn:     valid["fqdn"].(string),
+		parent:   parent,
+		owner:    owner,
+
+		ipAddresses:  convertToStringSlice(valid["ip_addresses"]),
+		interfaceSet: interfaceSet,
+		zone:         zone,
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/enum.go b/automation/vendor/github.com/juju/gomaasapi/enum.go
new file mode 100644
index 0000000..a516d6b
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/enum.go
@@ -0,0 +1,57 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+const (
+	// NodeStatus* values represent the vocabulary of a Node's possible statuses.
+
+	// The node has been created and has a system ID assigned to it.
+	NodeStatusDeclared = "0"
+
+	// Testing and other commissioning steps are taking place.
+	NodeStatusCommissioning = "1"
+
+	// Smoke or burn-in testing has found a problem.
+	NodeStatusFailedTests = "2"
+
+	// The node can’t be contacted.
+	NodeStatusMissing = "3"
+
+	// The node is in the general pool ready to be deployed.
+	NodeStatusReady = "4"
+
+	// The node is ready for named deployment.
+	NodeStatusReserved = "5"
+
+	// The node is powering a service from a charm or is ready for use with a fresh Ubuntu install.
+	NodeStatusDeployed = "6"
+
+	// The node has been removed from service manually until an admin overrides the retirement.
+	NodeStatusRetired = "7"
+
+	// The node is broken: a step in the node lifecycle failed. More details
+	// can be found in the node's event log.
+	NodeStatusBroken = "8"
+
+	// The node is being installed.
+	NodeStatusDeploying = "9"
+
+	// The node has been allocated to a user and is ready for deployment.
+	NodeStatusAllocated = "10"
+
+	// The deployment of the node failed.
+	NodeStatusFailedDeployment = "11"
+
+	// The node is powering down after a release request.
+	NodeStatusReleasing = "12"
+
+	// The releasing of the node failed.
+	NodeStatusFailedReleasing = "13"
+
+	// The node is erasing its disks.
+	NodeStatusDiskErasing = "14"
+
+	// The node failed to erase its disks.
+	NodeStatusFailedDiskErasing = "15"
+)
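+
+// isReadyOrAllocated is an illustrative sketch, not part of the gomaasapi API.
+// The NodeStatus* constants above are plain strings, so callers compare them
+// directly against the status value reported by the MAAS API, as shown here.
+func isReadyOrAllocated(status string) bool {
+	return status == NodeStatusReady || status == NodeStatusAllocated
+}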
diff --git a/automation/vendor/github.com/juju/gomaasapi/errors.go b/automation/vendor/github.com/juju/gomaasapi/errors.go
new file mode 100644
index 0000000..8931d56
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/errors.go
@@ -0,0 +1,161 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+
+	"github.com/juju/errors"
+)
+
+// NoMatchError is returned when the requested action cannot be performed
+// because there are no entities available that match the request.
+type NoMatchError struct {
+	errors.Err
+}
+
+// NewNoMatchError constructs a new NoMatchError and sets the location.
+func NewNoMatchError(message string) error {
+	err := &NoMatchError{Err: errors.NewErr(message)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsNoMatchError returns true if err is a NoMatchError.
+func IsNoMatchError(err error) bool {
+	_, ok := errors.Cause(err).(*NoMatchError)
+	return ok
+}
+
+// UnexpectedError is an error for a condition that hasn't been determined.
+type UnexpectedError struct {
+	errors.Err
+}
+
+// NewUnexpectedError constructs a new UnexpectedError and sets the location.
+func NewUnexpectedError(err error) error {
+	uerr := &UnexpectedError{Err: errors.NewErr("unexpected: %v", err)}
+	uerr.SetLocation(1)
+	return errors.Wrap(err, uerr)
+}
+
+// IsUnexpectedError returns true if err is an UnexpectedError.
+func IsUnexpectedError(err error) bool {
+	_, ok := errors.Cause(err).(*UnexpectedError)
+	return ok
+}
+
+// UnsupportedVersionError refers to calls made to an unsupported api version.
+type UnsupportedVersionError struct {
+	errors.Err
+}
+
+// NewUnsupportedVersionError constructs a new UnsupportedVersionError and sets the location.
+func NewUnsupportedVersionError(format string, args ...interface{}) error {
+	err := &UnsupportedVersionError{Err: errors.NewErr(format, args...)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsUnsupportedVersionError returns true if err is an UnsupportedVersionError.
+func IsUnsupportedVersionError(err error) bool {
+	_, ok := errors.Cause(err).(*UnsupportedVersionError)
+	return ok
+}
+
+// DeserializationError types are returned when the returned JSON data from
+// the controller doesn't match the code's expectations.
+type DeserializationError struct {
+	errors.Err
+}
+
+// NewDeserializationError constructs a new DeserializationError and sets the location.
+func NewDeserializationError(format string, args ...interface{}) error {
+	err := &DeserializationError{Err: errors.NewErr(format, args...)}
+	err.SetLocation(1)
+	return err
+}
+
+// WrapWithDeserializationError constructs a new DeserializationError with the
+// specified message, and sets the location and returns a new error with the
+// full error stack set including the error passed in.
+func WrapWithDeserializationError(err error, format string, args ...interface{}) error {
+	message := fmt.Sprintf(format, args...)
+	// We want the deserialization error message to include the error text of the
+	// previous error, but wrap it in the new type.
+	derr := &DeserializationError{Err: errors.NewErr(message + ": " + err.Error())}
+	derr.SetLocation(1)
+	wrapped := errors.Wrap(err, derr)
+	// We want the location of the wrapped error to be the caller of this function,
+	// not the line above.
+	if errType, ok := wrapped.(*errors.Err); ok {
+		// We know it is because that is what Wrap returns.
+		errType.SetLocation(1)
+	}
+	return wrapped
+}
+
+// IsDeserializationError returns true if err is a DeserializationError.
+func IsDeserializationError(err error) bool {
+	_, ok := errors.Cause(err).(*DeserializationError)
+	return ok
+}
+
+// BadRequestError is returned when the requested action cannot be performed
+// due to bad or incorrect parameters passed to the server.
+type BadRequestError struct {
+	errors.Err
+}
+
+// NewBadRequestError constructs a new BadRequestError and sets the location.
+func NewBadRequestError(message string) error {
+	err := &BadRequestError{Err: errors.NewErr(message)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsBadRequestError returns true if err is a BadRequestError.
+func IsBadRequestError(err error) bool {
+	_, ok := errors.Cause(err).(*BadRequestError)
+	return ok
+}
+
+// PermissionError is returned when the user does not have permission to do the
+// requested action.
+type PermissionError struct {
+	errors.Err
+}
+
+// NewPermissionError constructs a new PermissionError and sets the location.
+func NewPermissionError(message string) error {
+	err := &PermissionError{Err: errors.NewErr(message)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsPermissionError returns true if err is a PermissionError.
+func IsPermissionError(err error) bool {
+	_, ok := errors.Cause(err).(*PermissionError)
+	return ok
+}
+
+// CannotCompleteError is returned when the requested action is unable to
+// complete for some server side reason.
+type CannotCompleteError struct {
+	errors.Err
+}
+
+// NewCannotCompleteError constructs a new CannotCompleteError and sets the location.
+func NewCannotCompleteError(message string) error {
+	err := &CannotCompleteError{Err: errors.NewErr(message)}
+	err.SetLocation(1)
+	return err
+}
+
+// IsCannotCompleteError returns true if err is a NoMatchError.
+func IsCannotCompleteError(err error) bool {
+	_, ok := errors.Cause(err).(*CannotCompleteError)
+	return ok
+}
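+
+// describeError is an illustrative sketch, not part of the gomaasapi API. It
+// shows how callers are expected to classify failures using the Is*Error
+// helpers defined above rather than inspecting error strings.
+func describeError(err error) string {
+	switch {
+	case IsNoMatchError(err):
+		return "no matching entity"
+	case IsBadRequestError(err):
+		return "bad request"
+	case IsPermissionError(err):
+		return "permission denied"
+	case IsCannotCompleteError(err):
+		return "cannot complete"
+	case IsUnsupportedVersionError(err):
+		return "unsupported API version"
+	case IsDeserializationError(err):
+		return "unexpected response format"
+	case IsUnexpectedError(err):
+		return "unexpected error"
+	default:
+		return err.Error()
+	}
+}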
diff --git a/automation/vendor/github.com/juju/gomaasapi/fabric.go b/automation/vendor/github.com/juju/gomaasapi/fabric.go
new file mode 100644
index 0000000..e38a61a
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/fabric.go
@@ -0,0 +1,128 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type fabric struct {
+	// Add the controller in when we need to do things with the fabric.
+	// controller Controller
+
+	resourceURI string
+
+	id        int
+	name      string
+	classType string
+
+	vlans []*vlan
+}
+
+// ID implements Fabric.
+func (f *fabric) ID() int {
+	return f.id
+}
+
+// Name implements Fabric.
+func (f *fabric) Name() string {
+	return f.name
+}
+
+// ClassType implements Fabric.
+func (f *fabric) ClassType() string {
+	return f.classType
+}
+
+// VLANs implements Fabric.
+func (f *fabric) VLANs() []VLAN {
+	var result []VLAN
+	for _, v := range f.vlans {
+		result = append(result, v)
+	}
+	return result
+}
+
+func readFabrics(controllerVersion version.Number, source interface{}) ([]*fabric, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "fabric base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range fabricDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no fabric read func for version %s", controllerVersion)
+	}
+	readFunc := fabricDeserializationFuncs[deserialisationVersion]
+	return readFabricList(valid, readFunc)
+}
+
+// readFabricList expects the values of the sourceList to be string maps.
+func readFabricList(sourceList []interface{}, readFunc fabricDeserializationFunc) ([]*fabric, error) {
+	result := make([]*fabric, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for fabric %d, %T", i, value)
+		}
+		fabric, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "fabric %d", i)
+		}
+		result = append(result, fabric)
+	}
+	return result, nil
+}
+
+type fabricDeserializationFunc func(map[string]interface{}) (*fabric, error)
+
+var fabricDeserializationFuncs = map[version.Number]fabricDeserializationFunc{
+	twoDotOh: fabric_2_0,
+}
+
+func fabric_2_0(source map[string]interface{}) (*fabric, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+		"id":           schema.ForceInt(),
+		"name":         schema.String(),
+		"class_type":   schema.OneOf(schema.Nil(""), schema.String()),
+		"vlans":        schema.List(schema.StringMap(schema.Any())),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "fabric 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	vlans, err := readVLANList(valid["vlans"].([]interface{}), vlan_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	// Since the class_type is optional, we use the two part cast assignment. If
+	// the cast fails, then we get the default value we care about, which is the
+	// empty string.
+	classType, _ := valid["class_type"].(string)
+
+	result := &fabric{
+		resourceURI: valid["resource_uri"].(string),
+		id:          valid["id"].(int),
+		name:        valid["name"].(string),
+		classType:   classType,
+		vlans:       vlans,
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/file.go b/automation/vendor/github.com/juju/gomaasapi/file.go
new file mode 100644
index 0000000..63fb854
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/file.go
@@ -0,0 +1,181 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/base64"
+	"net/http"
+	"net/url"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type file struct {
+	controller *controller
+
+	resourceURI  string
+	filename     string
+	anonymousURI *url.URL
+	content      string
+}
+
+// Filename implements File.
+func (f *file) Filename() string {
+	return f.filename
+}
+
+// AnonymousURL implements File.
+func (f *file) AnonymousURL() string {
+	url := f.controller.client.GetURL(f.anonymousURI)
+	return url.String()
+}
+
+// Delete implements File.
+func (f *file) Delete() error {
+	err := f.controller.delete(f.resourceURI)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
+
+// ReadAll implements File.
+func (f *file) ReadAll() ([]byte, error) {
+	if f.content == "" {
+		return f.readFromServer()
+	}
+	bytes, err := base64.StdEncoding.DecodeString(f.content)
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+	return bytes, nil
+}
+
+func (f *file) readFromServer() ([]byte, error) {
+	// The content isn't held locally, so fetch the raw file content from the server.
+	args := make(url.Values)
+	args.Add("filename", f.filename)
+	bytes, err := f.controller._getRaw("files", "get", args)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return nil, errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return nil, errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return nil, NewUnexpectedError(err)
+	}
+	return bytes, nil
+}
+
+func readFiles(controllerVersion version.Number, source interface{}) ([]*file, error) {
+	readFunc, err := getFileDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "file base schema check failed")
+	}
+	valid := coerced.([]interface{})
+	return readFileList(valid, readFunc)
+}
+
+func readFile(controllerVersion version.Number, source interface{}) (*file, error) {
+	readFunc, err := getFileDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.StringMap(schema.Any())
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "file base schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	return readFunc(valid)
+}
+
+func getFileDeserializationFunc(controllerVersion version.Number) (fileDeserializationFunc, error) {
+	var deserialisationVersion version.Number
+	for v := range fileDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no file read func for version %s", controllerVersion)
+	}
+	return fileDeserializationFuncs[deserialisationVersion], nil
+}
+
+// readFileList expects the values of the sourceList to be string maps.
+func readFileList(sourceList []interface{}, readFunc fileDeserializationFunc) ([]*file, error) {
+	result := make([]*file, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for file %d, %T", i, value)
+		}
+		file, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "file %d", i)
+		}
+		result = append(result, file)
+	}
+	return result, nil
+}
+
+type fileDeserializationFunc func(map[string]interface{}) (*file, error)
+
+var fileDeserializationFuncs = map[version.Number]fileDeserializationFunc{
+	twoDotOh: file_2_0,
+}
+
+func file_2_0(source map[string]interface{}) (*file, error) {
+	fields := schema.Fields{
+		"resource_uri":      schema.String(),
+		"filename":          schema.String(),
+		"anon_resource_uri": schema.String(),
+		"content":           schema.String(),
+	}
+	defaults := schema.Defaults{
+		"content": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "file 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	anonURI, err := url.ParseRequestURI(valid["anon_resource_uri"].(string))
+	if err != nil {
+		return nil, NewUnexpectedError(err)
+	}
+
+	result := &file{
+		resourceURI:  valid["resource_uri"].(string),
+		filename:     valid["filename"].(string),
+		anonymousURI: anonURI,
+		content:      valid["content"].(string),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/filesystem.go b/automation/vendor/github.com/juju/gomaasapi/filesystem.go
new file mode 100644
index 0000000..4514e52
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/filesystem.go
@@ -0,0 +1,69 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import "github.com/juju/schema"
+
+type filesystem struct {
+	fstype     string
+	mountPoint string
+	label      string
+	uuid       string
+	// no idea what the mount_options are as a value type, so ignoring for now.
+}
+
+// Type implements FileSystem.
+func (f *filesystem) Type() string {
+	return f.fstype
+}
+
+// MountPoint implements FileSystem.
+func (f *filesystem) MountPoint() string {
+	return f.mountPoint
+}
+
+// Label implements FileSystem.
+func (f *filesystem) Label() string {
+	return f.label
+}
+
+// UUID implements FileSystem.
+func (f *filesystem) UUID() string {
+	return f.uuid
+}
+
+// Controller-based parsing of filesystems isn't needed yet; filesystem
+// reading is currently only invoked from the Partition parsing.
+
+func filesystem2_0(source map[string]interface{}) (*filesystem, error) {
+	fields := schema.Fields{
+		"fstype":      schema.String(),
+		"mount_point": schema.OneOf(schema.Nil(""), schema.String()),
+		"label":       schema.OneOf(schema.Nil(""), schema.String()),
+		"uuid":        schema.String(),
+		// TODO: mount_options when we know the type (note it can be
+		// nil).
+	}
+	defaults := schema.Defaults{
+		"mount_point": "",
+		"label":       "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "filesystem 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+	mount_point, _ := valid["mount_point"].(string)
+	label, _ := valid["label"].(string)
+	result := &filesystem{
+		fstype:     valid["fstype"].(string),
+		mountPoint: mount_point,
+		label:      label,
+		uuid:       valid["uuid"].(string),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/gomaasapi.go b/automation/vendor/github.com/juju/gomaasapi/gomaasapi.go
new file mode 100644
index 0000000..f457e29
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/gomaasapi.go
@@ -0,0 +1,4 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
diff --git a/automation/vendor/github.com/juju/gomaasapi/interface.go b/automation/vendor/github.com/juju/gomaasapi/interface.go
new file mode 100644
index 0000000..f30a9a8
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/interface.go
@@ -0,0 +1,440 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+// "interface" is a Go keyword, so it can't be used as the type name; add an underscore.
+type interface_ struct {
+	controller *controller
+
+	resourceURI string
+
+	id      int
+	name    string
+	type_   string
+	enabled bool
+	tags    []string
+
+	vlan  *vlan
+	links []*link
+
+	macAddress   string
+	effectiveMTU int
+
+	parents  []string
+	children []string
+}
+
+func (i *interface_) updateFrom(other *interface_) {
+	i.resourceURI = other.resourceURI
+	i.id = other.id
+	i.name = other.name
+	i.type_ = other.type_
+	i.enabled = other.enabled
+	i.tags = other.tags
+	i.vlan = other.vlan
+	i.links = other.links
+	i.macAddress = other.macAddress
+	i.effectiveMTU = other.effectiveMTU
+	i.parents = other.parents
+	i.children = other.children
+}
+
+// ID implements Interface.
+func (i *interface_) ID() int {
+	return i.id
+}
+
+// Name implements Interface.
+func (i *interface_) Name() string {
+	return i.name
+}
+
+// Parents implements Interface.
+func (i *interface_) Parents() []string {
+	return i.parents
+}
+
+// Children implements Interface.
+func (i *interface_) Children() []string {
+	return i.children
+}
+
+// Type implements Interface.
+func (i *interface_) Type() string {
+	return i.type_
+}
+
+// Enabled implements Interface.
+func (i *interface_) Enabled() bool {
+	return i.enabled
+}
+
+// Tags implements Interface.
+func (i *interface_) Tags() []string {
+	return i.tags
+}
+
+// VLAN implements Interface.
+func (i *interface_) VLAN() VLAN {
+	if i.vlan == nil {
+		return nil
+	}
+	return i.vlan
+}
+
+// Links implements Interface.
+func (i *interface_) Links() []Link {
+	result := make([]Link, len(i.links))
+	for i, link := range i.links {
+		result[i] = link
+	}
+	return result
+}
+
+// MACAddress implements Interface.
+func (i *interface_) MACAddress() string {
+	return i.macAddress
+}
+
+// EffectiveMTU implements Interface.
+func (i *interface_) EffectiveMTU() int {
+	return i.effectiveMTU
+}
+
+// UpdateInterfaceArgs is an argument struct for calling Interface.Update.
+type UpdateInterfaceArgs struct {
+	Name       string
+	MACAddress string
+	VLAN       VLAN
+}
+
+func (a *UpdateInterfaceArgs) vlanID() int {
+	if a.VLAN == nil {
+		return 0
+	}
+	return a.VLAN.ID()
+}
+
+// Update implements Interface.
+func (i *interface_) Update(args UpdateInterfaceArgs) error {
+	var empty UpdateInterfaceArgs
+	if args == empty {
+		return nil
+	}
+	params := NewURLParams()
+	params.MaybeAdd("name", args.Name)
+	params.MaybeAdd("mac_address", args.MACAddress)
+	params.MaybeAddInt("vlan", args.vlanID())
+	source, err := i.controller.put(i.resourceURI, params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	response, err := readInterface(i.controller.apiVersion, source)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	i.updateFrom(response)
+	return nil
+}
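+
+// exampleRenameInterface is an illustrative sketch, not part of the gomaasapi
+// API. Judging by the MaybeAdd helpers used in Update, zero-valued fields in
+// UpdateInterfaceArgs appear to be omitted from the request (an assumption,
+// not verified here), so this call would change only the interface name.
+func exampleRenameInterface(iface Interface, newName string) error {
+	return iface.Update(UpdateInterfaceArgs{Name: newName})
+}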
+
+// Delete implements Interface.
+func (i *interface_) Delete() error {
+	err := i.controller.delete(i.resourceURI)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound:
+				return errors.Wrap(err, NewNoMatchError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+	return nil
+}
+
+// InterfaceLinkMode is the type of the various link mode constants used for
+// LinkSubnetArgs.
+type InterfaceLinkMode string
+
+const (
+	// LinkModeDHCP - Bring the interface up with DHCP on the given subnet. Only
+	// one subnet can be set to DHCP. If the subnet is managed this interface
+	// will pull from the dynamic IP range.
+	LinkModeDHCP InterfaceLinkMode = "DHCP"
+
+	// LinkModeStatic - Bring the interface up with a STATIC IP address on the
+	// given subnet. Any number of STATIC links can exist on an interface.
+	LinkModeStatic InterfaceLinkMode = "STATIC"
+
+	// LinkModeLinkUp - Bring the interface up only on the given subnet. No IP
+	// address will be assigned to this interface. The interface cannot have any
+	// current DHCP or STATIC links.
+	LinkModeLinkUp InterfaceLinkMode = "LINK_UP"
+)
+
+// LinkSubnetArgs is an argument struct for passing parameters to
+// the Interface.LinkSubnet method.
+type LinkSubnetArgs struct {
+	// Mode is used to describe how the address is provided for the Link.
+	// Required field.
+	Mode InterfaceLinkMode
+	// Subnet is the subnet to link to. Required field.
+	Subnet Subnet
+	// IPAddress is only valid when the Mode is set to LinkModeStatic. If
+	// not specified with a Mode of LinkModeStatic, an IP address from the
+	// subnet will be auto selected.
+	IPAddress string
+	// DefaultGateway will set the gateway IP address for the Subnet as the
+	// default gateway for the machine or device the interface belongs to.
+	// Option can only be used with mode LinkModeStatic.
+	DefaultGateway bool
+}
+
+// Validate ensures that the Mode and Subnet are set, and that the other options
+// are consistent with the Mode.
+func (a *LinkSubnetArgs) Validate() error {
+	switch a.Mode {
+	case LinkModeDHCP, LinkModeLinkUp, LinkModeStatic:
+	case "":
+		return errors.NotValidf("missing Mode")
+	default:
+		return errors.NotValidf("unknown Mode value (%q)", a.Mode)
+	}
+	if a.Subnet == nil {
+		return errors.NotValidf("missing Subnet")
+	}
+	if a.IPAddress != "" && a.Mode != LinkModeStatic {
+		return errors.NotValidf("setting IP Address when Mode is not LinkModeStatic")
+	}
+	if a.DefaultGateway && a.Mode != LinkModeStatic {
+		return errors.NotValidf("specifying DefaultGateway for Mode %q", a.Mode)
+	}
+	return nil
+}
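+
+// exampleStaticLink is an illustrative sketch, not part of the gomaasapi API.
+// It shows the validation rules above: an explicit IPAddress and DefaultGateway
+// are only valid together with LinkModeStatic. The subnet is assumed to have
+// been obtained from the controller.
+func exampleStaticLink(iface Interface, subnet Subnet) error {
+	args := LinkSubnetArgs{
+		Mode:           LinkModeStatic,
+		Subnet:         subnet,
+		IPAddress:      "192.168.100.10",
+		DefaultGateway: true,
+	}
+	if err := args.Validate(); err != nil {
+		return err
+	}
+	return iface.LinkSubnet(args)
+}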
+
+// LinkSubnet implements Interface.
+func (i *interface_) LinkSubnet(args LinkSubnetArgs) error {
+	if err := args.Validate(); err != nil {
+		return errors.Trace(err)
+	}
+	params := NewURLParams()
+	params.Values.Add("mode", string(args.Mode))
+	params.Values.Add("subnet", fmt.Sprint(args.Subnet.ID()))
+	params.MaybeAdd("ip_address", args.IPAddress)
+	params.MaybeAddBool("default_gateway", args.DefaultGateway)
+	source, err := i.controller.post(i.resourceURI, "link_subnet", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound, http.StatusBadRequest:
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			case http.StatusServiceUnavailable:
+				return errors.Wrap(err, NewCannotCompleteError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	response, err := readInterface(i.controller.apiVersion, source)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	i.updateFrom(response)
+	return nil
+}
+
+func (i *interface_) linkForSubnet(subnet Subnet) *link {
+	for _, link := range i.links {
+		if s := link.Subnet(); s != nil && s.ID() == subnet.ID() {
+			return link
+		}
+	}
+	return nil
+}
+
+// UnlinkSubnet implements Interface.
+func (i *interface_) UnlinkSubnet(subnet Subnet) error {
+	if subnet == nil {
+		return errors.NotValidf("missing Subnet")
+	}
+	link := i.linkForSubnet(subnet)
+	if link == nil {
+		return errors.NotValidf("unlinked Subnet")
+	}
+	params := NewURLParams()
+	params.Values.Add("id", fmt.Sprint(link.ID()))
+	source, err := i.controller.post(i.resourceURI, "unlink_subnet", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound, http.StatusBadRequest:
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	response, err := readInterface(i.controller.apiVersion, source)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	i.updateFrom(response)
+
+	return nil
+}
+
+func readInterface(controllerVersion version.Number, source interface{}) (*interface_, error) {
+	readFunc, err := getInterfaceDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.StringMap(schema.Any())
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "interface base schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	return readFunc(valid)
+}
+
+func readInterfaces(controllerVersion version.Number, source interface{}) ([]*interface_, error) {
+	readFunc, err := getInterfaceDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "interface base schema check failed")
+	}
+	valid := coerced.([]interface{})
+	return readInterfaceList(valid, readFunc)
+}
+
+func getInterfaceDeserializationFunc(controllerVersion version.Number) (interfaceDeserializationFunc, error) {
+	var deserialisationVersion version.Number
+	for v := range interfaceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no interface read func for version %s", controllerVersion)
+	}
+	return interfaceDeserializationFuncs[deserialisationVersion], nil
+}
+
+func readInterfaceList(sourceList []interface{}, readFunc interfaceDeserializationFunc) ([]*interface_, error) {
+	result := make([]*interface_, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for interface %d, %T", i, value)
+		}
+		read, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "interface %d", i)
+		}
+		result = append(result, read)
+	}
+	return result, nil
+}
+
+type interfaceDeserializationFunc func(map[string]interface{}) (*interface_, error)
+
+var interfaceDeserializationFuncs = map[version.Number]interfaceDeserializationFunc{
+	twoDotOh: interface_2_0,
+}
+
+func interface_2_0(source map[string]interface{}) (*interface_, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"id":      schema.ForceInt(),
+		"name":    schema.String(),
+		"type":    schema.String(),
+		"enabled": schema.Bool(),
+		"tags":    schema.OneOf(schema.Nil(""), schema.List(schema.String())),
+
+		"vlan":  schema.OneOf(schema.Nil(""), schema.StringMap(schema.Any())),
+		"links": schema.List(schema.StringMap(schema.Any())),
+
+		"mac_address":   schema.OneOf(schema.Nil(""), schema.String()),
+		"effective_mtu": schema.ForceInt(),
+
+		"parents":  schema.List(schema.String()),
+		"children": schema.List(schema.String()),
+	}
+	defaults := schema.Defaults{
+		"mac_address": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "interface 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	var vlan *vlan
+	// If it's not an attribute map then we know it's nil from the schema check.
+	if vlanMap, ok := valid["vlan"].(map[string]interface{}); ok {
+		vlan, err = vlan_2_0(vlanMap)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+
+	links, err := readLinkList(valid["links"].([]interface{}), link_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	macAddress, _ := valid["mac_address"].(string)
+	result := &interface_{
+		resourceURI: valid["resource_uri"].(string),
+
+		id:      valid["id"].(int),
+		name:    valid["name"].(string),
+		type_:   valid["type"].(string),
+		enabled: valid["enabled"].(bool),
+		tags:    convertToStringSlice(valid["tags"]),
+
+		vlan:  vlan,
+		links: links,
+
+		macAddress:   macAddress,
+		effectiveMTU: valid["effective_mtu"].(int),
+
+		parents:  convertToStringSlice(valid["parents"]),
+		children: convertToStringSlice(valid["children"]),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/interfaces.go b/automation/vendor/github.com/juju/gomaasapi/interfaces.go
new file mode 100644
index 0000000..6b80115
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/interfaces.go
@@ -0,0 +1,362 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import "github.com/juju/utils/set"
+
+const (
+	// Capability constants.
+	NetworksManagement      = "networks-management"
+	StaticIPAddresses       = "static-ipaddresses"
+	IPv6DeploymentUbuntu    = "ipv6-deployment-ubuntu"
+	DevicesManagement       = "devices-management"
+	StorageDeploymentUbuntu = "storage-deployment-ubuntu"
+	NetworkDeploymentUbuntu = "network-deployment-ubuntu"
+)
+
+// Controller represents an API connection to a MAAS Controller. Since the API
+// is RESTful, there is no long-held connection to the API server; instead,
+// HTTP calls are made and JSON response structures parsed.
+type Controller interface {
+
+	// Capabilities returns a set of capabilities as defined by the string
+	// constants.
+	Capabilities() set.Strings
+
+	BootResources() ([]BootResource, error)
+
+	// Fabrics returns the list of Fabrics defined in the MAAS controller.
+	Fabrics() ([]Fabric, error)
+
+	// Spaces returns the list of Spaces defined in the MAAS controller.
+	Spaces() ([]Space, error)
+
+	// Zones lists all the zones known to the MAAS controller.
+	Zones() ([]Zone, error)
+
+	// Machines returns a list of machines that match the params.
+	Machines(MachinesArgs) ([]Machine, error)
+
+	// AllocateMachine will attempt to allocate a machine to the user.
+	// If successful, the allocated machine is returned.
+	AllocateMachine(AllocateMachineArgs) (Machine, ConstraintMatches, error)
+
+	// ReleaseMachines will stop the specified machines, and release them
+	// from the user making them available to be allocated again.
+	ReleaseMachines(ReleaseMachinesArgs) error
+
+	// Devices returns a list of devices that match the params.
+	Devices(DevicesArgs) ([]Device, error)
+
+	// CreateDevice creates and returns a new Device.
+	CreateDevice(CreateDeviceArgs) (Device, error)
+
+	// Files returns all the files that match the specified prefix.
+	Files(prefix string) ([]File, error)
+
+	// GetFile returns a single file by its filename.
+	GetFile(filename string) (File, error)
+
+	// AddFile adds or replaces the content of the specified filename.
+	// If or when the MAAS api is able to return metadata about a single
+	// file without sending the content of the file, we can return a File
+	// instance here too.
+	AddFile(AddFileArgs) error
+}
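+
+// exampleListHostnames is an illustrative sketch, not part of the gomaasapi
+// API. It shows the intended use of the Controller interface: issue a query
+// with an argument struct (an empty MachinesArgs is assumed to apply no
+// filters) and work with the returned entities through their interfaces.
+func exampleListHostnames(c Controller) ([]string, error) {
+	machines, err := c.Machines(MachinesArgs{})
+	if err != nil {
+		return nil, err
+	}
+	names := make([]string, 0, len(machines))
+	for _, m := range machines {
+		names = append(names, m.Hostname())
+	}
+	return names, nil
+}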
+
+// File represents a file stored in the MAAS controller.
+type File interface {
+	// Filename is the name of the file. No path, just the filename.
+	Filename() string
+
+	// AnonymousURL is a URL that can be used to retrieve the contents of the
+	// file without credentials.
+	AnonymousURL() string
+
+	// Delete removes the file from the MAAS controller.
+	Delete() error
+
+	// ReadAll returns the content of the file.
+	ReadAll() ([]byte, error)
+}
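+
+// exampleFetchFile is an illustrative sketch, not part of the gomaasapi API.
+// It combines Controller.GetFile with File.ReadAll to retrieve the contents
+// of a named file stored in the MAAS controller.
+func exampleFetchFile(c Controller, name string) ([]byte, error) {
+	f, err := c.GetFile(name)
+	if err != nil {
+		return nil, err
+	}
+	return f.ReadAll()
+}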
+
+// Fabric represents a set of interconnected VLANs that are capable of mutual
+// communication. A fabric can be thought of as a logical grouping in which
+// VLANs can be considered unique.
+//
+// For example, a distributed network may have a fabric in London containing
+// VLAN 100, while a separate fabric in San Francisco may contain a VLAN 100,
+// whose attached subnets are completely different and unrelated.
+type Fabric interface {
+	ID() int
+	Name() string
+	ClassType() string
+
+	VLANs() []VLAN
+}
+
+// VLAN represents an instance of a Virtual LAN. VLANs are a common way to
+// create logically separate networks using the same physical infrastructure.
+//
+// Managed switches can assign VLANs to each port in either a “tagged” or an
+// “untagged” manner. A VLAN is said to be “untagged” on a particular port when
+// it is the default VLAN for that port, and requires no special configuration
+// in order to access.
+//
+// “Tagged” VLANs (traditionally used by network administrators in order to
+// aggregate multiple networks over inter-switch “trunk” lines) can also be used
+// with nodes in MAAS. That is, if a switch port is configured such that
+// “tagged” VLAN frames can be sent and received by a MAAS node, that MAAS node
+// can be configured to automatically bring up VLAN interfaces, so that the
+// deployed node can make use of them.
+//
+// A “Default VLAN” is created for every Fabric, to which every new VLAN-aware
+// object in the fabric will be associated to by default (unless otherwise
+// specified).
+type VLAN interface {
+	ID() int
+	Name() string
+	Fabric() string
+
+	// VID is the VLAN ID. eth0.10 -> VID = 10.
+	VID() int
+	// MTU (maximum transmission unit) is the largest size packet or frame,
+	// specified in octets (eight-bit bytes), that can be sent.
+	MTU() int
+	DHCP() bool
+
+	PrimaryRack() string
+	SecondaryRack() string
+}
+
+// Zone represents a physical zone that a Machine is in. The meaning of a
+// physical zone is up to you: it could identify e.g. a server rack, a network,
+// or a data centre. Users can then allocate nodes from specific physical zones,
+// to suit their redundancy or performance requirements.
+type Zone interface {
+	Name() string
+	Description() string
+}
+
+// BootResource represents a boot image resource (for example an operating
+// system image) that the MAAS controller can use to provision nodes.
+type BootResource interface {
+	ID() int
+	Name() string
+	Type() string
+	Architecture() string
+	SubArchitectures() set.Strings
+	KernelFlavor() string
+}
+
+// Device represents some form of device in MAAS.
+type Device interface {
+	// TODO: add domain
+	SystemID() string
+	Hostname() string
+	FQDN() string
+	IPAddresses() []string
+	Zone() Zone
+
+	// Parent returns the SystemID of the Parent. Most often this will be a
+	// Machine.
+	Parent() string
+
+	// Owner is the username of the user that created the device.
+	Owner() string
+
+	// InterfaceSet returns all the interfaces for the Device.
+	InterfaceSet() []Interface
+
+	// CreateInterface will create a physical interface for this machine.
+	CreateInterface(CreateInterfaceArgs) (Interface, error)
+
+	// Delete will remove this Device.
+	Delete() error
+}
+
+// Machine represents a physical machine.
+type Machine interface {
+	OwnerDataHolder
+
+	SystemID() string
+	Hostname() string
+	FQDN() string
+	Tags() []string
+
+	OperatingSystem() string
+	DistroSeries() string
+	Architecture() string
+	Memory() int
+	CPUCount() int
+
+	IPAddresses() []string
+	PowerState() string
+
+	// Devices returns a list of devices that match the params and have
+	// this Machine as the parent.
+	Devices(DevicesArgs) ([]Device, error)
+
+	// Consider bundling the status values into a single struct.
+	// but need to check for consistent representation if exposed on other
+	// entities.
+
+	StatusName() string
+	StatusMessage() string
+
+	// BootInterface returns the interface that was used to boot the Machine.
+	BootInterface() Interface
+	// InterfaceSet returns all the interfaces for the Machine.
+	InterfaceSet() []Interface
+	// Interface returns the interface for the machine that matches the id
+	// specified. If there is no match, nil is returned.
+	Interface(id int) Interface
+
+	// PhysicalBlockDevices returns all the physical block devices on the machine.
+	PhysicalBlockDevices() []BlockDevice
+	// PhysicalBlockDevice returns the physical block device for the machine
+	// that matches the id specified. If there is no match, nil is returned.
+	PhysicalBlockDevice(id int) BlockDevice
+
+	// BlockDevices returns all the physical and virtual block devices on the machine.
+	BlockDevices() []BlockDevice
+
+	Zone() Zone
+
+	// Start the machine and install the operating system specified in the args.
+	Start(StartArgs) error
+
+	// CreateDevice creates a new Device with this Machine as the parent.
+	// The device will have one interface that is linked to the specified subnet.
+	CreateDevice(CreateMachineDeviceArgs) (Device, error)
+}
+
+// Space is a name for a collection of Subnets.
+type Space interface {
+	ID() int
+	Name() string
+	Subnets() []Subnet
+}
+
+// Subnet refers to an IP range on a VLAN.
+type Subnet interface {
+	ID() int
+	Name() string
+	Space() string
+	VLAN() VLAN
+
+	Gateway() string
+	CIDR() string
+	// dns_mode
+
+	// DNSServers is a list of ip addresses of the DNS servers for the subnet.
+	// This list may be empty.
+	DNSServers() []string
+}
+
+// Interface represents a physical or virtual network interface on a Machine.
+type Interface interface {
+	ID() int
+	Name() string
+	// The parents of an interface are the names of interfaces that must exist
+	// for this interface to exist. For example a parent of "eth0.100" would be
+	// "eth0". Parents may be empty.
+	Parents() []string
+	// The children interfaces are the names of those that are dependent on this
+	// interface existing. Children may be empty.
+	Children() []string
+	Type() string
+	Enabled() bool
+	Tags() []string
+
+	VLAN() VLAN
+	Links() []Link
+
+	MACAddress() string
+	EffectiveMTU() int
+
+	// Params is a JSON field, and defaults to an empty string, but is almost
+	// always a JSON object in practice. Gleefully ignoring it until we need it.
+
+	// Update the name, mac address or VLAN.
+	Update(UpdateInterfaceArgs) error
+
+	// Delete this interface.
+	Delete() error
+
+	// LinkSubnet will attempt to make this interface available on the specified
+	// Subnet.
+	LinkSubnet(LinkSubnetArgs) error
+
+	// UnlinkSubnet will remove the Link to the subnet, and release the IP
+	// address associated if there is one.
+	UnlinkSubnet(Subnet) error
+}
+
+// Link represents a network link between an Interface and a Subnet.
+type Link interface {
+	ID() int
+	Mode() string
+	Subnet() Subnet
+	// IPAddress returns the address if one has been assigned.
+	// If unavailable, the address will be empty.
+	IPAddress() string
+}
+
+// FileSystem represents a formatted filesystem mounted at a location.
+type FileSystem interface {
+	// Type is the format type, e.g. "ext4".
+	Type() string
+
+	MountPoint() string
+	Label() string
+	UUID() string
+}
+
+// Partition represents a partition of a block device. It may be mounted
+// as a filesystem.
+type Partition interface {
+	ID() int
+	Path() string
+	// FileSystem may be nil if not mounted.
+	FileSystem() FileSystem
+	UUID() string
+	// UsedFor is a human readable string.
+	UsedFor() string
+	// Size is the number of bytes in the partition.
+	Size() uint64
+}
+
+// BlockDevice represents an entire block device on the machine.
+type BlockDevice interface {
+	ID() int
+	Name() string
+	Model() string
+	Path() string
+	UsedFor() string
+	Tags() []string
+
+	BlockSize() uint64
+	UsedSize() uint64
+	Size() uint64
+
+	Partitions() []Partition
+
+	// There are some other attributes for block devices, but we can
+	// expose them on an as needed basis.
+}
+
+// OwnerDataHolder represents any MAAS object that can store key/value
+// data.
+type OwnerDataHolder interface {
+	// OwnerData returns a copy of the key/value data stored for this
+	// object.
+	OwnerData() map[string]string
+
+	// SetOwnerData updates the key/value data stored for this object
+	// with the values passed in. Existing keys that aren't specified
+	// in the map passed in will be left in place; to clear a key set
+	// its value to "". All owner data is cleared when the object is
+	// released.
+	SetOwnerData(map[string]string) error
+}
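+
+// exampleUpdateOwnerData is an illustrative sketch, not part of the gomaasapi
+// API. It demonstrates the merge semantics documented above: keys that are
+// not mentioned are left in place, and setting a value to "" clears that key.
+func exampleUpdateOwnerData(m Machine) error {
+	return m.SetOwnerData(map[string]string{
+		"deployed-by": "automation", // add or update this key
+		"scratch":     "",           // clear any existing "scratch" key
+	})
+}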
diff --git a/automation/vendor/github.com/juju/gomaasapi/jsonobject.go b/automation/vendor/github.com/juju/gomaasapi/jsonobject.go
new file mode 100644
index 0000000..cdd3dc1
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/jsonobject.go
@@ -0,0 +1,215 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+)
+
+// JSONObject is a wrapper around a JSON structure which provides
+// methods to extract data from that structure.
+// A JSONObject provides a simple structure consisting of the data types
+// defined in JSON: string, number, object, list, and bool.  To get the
+// value you want out of a JSONObject, you must know (or figure out) which
+// kind of value you have, and then call the appropriate Get*() method to
+// get at it.  Reading an item as the wrong type will return an error.
+// For instance, if your JSONObject consists of a number, call GetFloat64()
+// to get the value as a float64.  If it's a list, call GetArray() to get
+// a slice of JSONObjects.  To read any given item from the slice, you'll
+// need to "Get" that as the right type as well.
+// There is one exception: a MAASObject is really a special kind of map,
+// so you can read it as either.
+// Reading a null item is also an error.  So before you try obj.Get*(),
+// first check obj.IsNil().
+type JSONObject struct {
+	// Parsed value.  May actually be any of the types a JSONObject can
+	// wrap, except raw bytes.  If the object can only be interpreted
+	// as raw bytes, this will be nil.
+	value interface{}
+	// Raw bytes, if this object was parsed directly from an API response.
+	// Is nil for sub-objects found within other objects.  An object that
+	// was parsed directly from a response can be both raw bytes and some
+	// other value at the same time.
+	// For example, "[]" looks like a JSON list, so you can read it as an
+	// array.  But it may also be the raw contents of a file that just
+	// happens to look like JSON, and so you can read it as raw bytes as
+	// well.
+	bytes []byte
+	// Client for further communication with the API.
+	client Client
+	// Is this a JSON null?
+	isNull bool
+}
+
+// Our JSON processor distinguishes a MAASObject from a jsonMap by the fact
+// that it contains a key "resource_uri".  (A regular map might contain the
+// same key through sheer coincidence, but never mind: you can still treat it
+// as a jsonMap and never notice the difference.)
+const resourceURI = "resource_uri"
+
+// maasify turns a completely untyped json.Unmarshal result into a JSONObject
+// (with the appropriate implementation of course).  This function is
+// recursive.  Maps and arrays are deep-copied, with each individual value
+// being converted to a JSONObject type.
+func maasify(client Client, value interface{}) JSONObject {
+	if value == nil {
+		return JSONObject{isNull: true}
+	}
+	switch value.(type) {
+	case string, float64, bool:
+		return JSONObject{value: value}
+	case map[string]interface{}:
+		original := value.(map[string]interface{})
+		result := make(map[string]JSONObject, len(original))
+		for key, value := range original {
+			result[key] = maasify(client, value)
+		}
+		return JSONObject{value: result, client: client}
+	case []interface{}:
+		original := value.([]interface{})
+		result := make([]JSONObject, len(original))
+		for index, value := range original {
+			result[index] = maasify(client, value)
+		}
+		return JSONObject{value: result}
+	}
+	msg := fmt.Sprintf("Unknown JSON type, can't be converted to JSONObject: %v", value)
+	panic(msg)
+}
+
+// Parse a JSON blob into a JSONObject.
+func Parse(client Client, input []byte) (JSONObject, error) {
+	var obj JSONObject
+	if input == nil {
+		panic(errors.New("Parse() called with nil input"))
+	}
+	var parsed interface{}
+	err := json.Unmarshal(input, &parsed)
+	if err == nil {
+		obj = maasify(client, parsed)
+		obj.bytes = input
+	} else {
+		switch err.(type) {
+		case *json.InvalidUTF8Error:
+		case *json.SyntaxError:
+			// This isn't JSON.  Treat it as raw binary data.
+		default:
+			return obj, err
+		}
+		obj = JSONObject{value: nil, client: client, bytes: input}
+	}
+	return obj, nil
+}
+
+// JSONObjectFromStruct takes a struct and converts it to a JSONObject
+func JSONObjectFromStruct(client Client, input interface{}) (JSONObject, error) {
+	j, err := json.MarshalIndent(input, "", "  ")
+	if err != nil {
+		return JSONObject{}, err
+	}
+	return Parse(client, j)
+}
+
+// Return error value for failed type conversion.
+func failConversion(wantedType string, obj JSONObject) error {
+	msg := fmt.Sprintf("Requested %v, got %T.", wantedType, obj.value)
+	return errors.New(msg)
+}
+
+// MarshalJSON tells the standard json package how to serialize a JSONObject
+// back to JSON.
+func (obj JSONObject) MarshalJSON() ([]byte, error) {
+	if obj.IsNil() {
+		return json.Marshal(nil)
+	}
+	return json.MarshalIndent(obj.value, "", "  ")
+}
+
+// With MarshalJSON, JSONObject implements json.Marshaler.
+var _ json.Marshaler = (*JSONObject)(nil)
+
+// IsNil tells you whether a JSONObject is a JSON "null."
+// There is one irregularity.  If the original JSON blob was actually raw
+// data, not JSON, then its IsNil will return false because the object
+// contains the binary data as a non-nil value.  But, if the original JSON
+// blob consisted of a null, then IsNil returns true even though you can
+// still retrieve binary data from it.
+func (obj JSONObject) IsNil() bool {
+	if obj.value != nil {
+		return false
+	}
+	if obj.bytes == nil {
+		return true
+	}
+	// This may be a JSON null.  We can't expect every JSON null to look
+	// the same; there may be leading or trailing space.
+	return obj.isNull
+}
+
+// GetString retrieves the object's value as a string.  If the value wasn't
+// a JSON string, that's an error.
+func (obj JSONObject) GetString() (value string, err error) {
+	value, ok := obj.value.(string)
+	if !ok {
+		err = failConversion("string", obj)
+	}
+	return
+}
+
+// GetFloat64 retrieves the object's value as a float64.  If the value wasn't
+// a JSON number, that's an error.
+func (obj JSONObject) GetFloat64() (value float64, err error) {
+	value, ok := obj.value.(float64)
+	if !ok {
+		err = failConversion("float64", obj)
+	}
+	return
+}
+
+// GetMap retrieves the object's value as a map.  If the value wasn't a JSON
+// object, that's an error.
+func (obj JSONObject) GetMap() (value map[string]JSONObject, err error) {
+	value, ok := obj.value.(map[string]JSONObject)
+	if !ok {
+		err = failConversion("map", obj)
+	}
+	return
+}
+
+// GetArray retrieves the object's value as an array.  If the value wasn't a
+// JSON list, that's an error.
+func (obj JSONObject) GetArray() (value []JSONObject, err error) {
+	value, ok := obj.value.([]JSONObject)
+	if !ok {
+		err = failConversion("array", obj)
+	}
+	return
+}
+
+// GetBool retrieves the object's value as a bool.  If the value wasn't a JSON
+// bool, that's an error.
+func (obj JSONObject) GetBool() (value bool, err error) {
+	value, ok := obj.value.(bool)
+	if !ok {
+		err = failConversion("bool", obj)
+	}
+	return
+}
+
+// GetBytes retrieves the object's value as raw bytes.  A JSONObject that was
+// parsed from the original input (as opposed to one that's embedded in
+// another JSONObject) can contain both the raw bytes and the parsed JSON
+// value, but either can be the case without the other.
+// If this object wasn't parsed directly from the original input, that's an
+// error.
+// If the object was parsed from an original input that just said "null", then
+// IsNil will return true but the raw bytes are still available from GetBytes.
+func (obj JSONObject) GetBytes() ([]byte, error) {
+	if obj.bytes == nil {
+		return nil, failConversion("bytes", obj)
+	}
+	return obj.bytes, nil
+}
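+
+// parseExample is a hypothetical sketch added for illustration only; it is
+// not part of upstream gomaasapi.  It shows how Parse and the typed getters
+// above fit together.  The function name and the sample JSON are made up,
+// and passing a zero-value Client is an assumption that only holds when the
+// result is used for local decoding rather than follow-up API calls.
+func parseExample() (string, float64, error) {
+	obj, err := Parse(Client{}, []byte(`{"hostname": "node-1", "cpu_count": 4}`))
+	if err != nil {
+		return "", 0, err
+	}
+	attrs, err := obj.GetMap()
+	if err != nil {
+		return "", 0, err
+	}
+	hostname, err := attrs["hostname"].GetString()
+	if err != nil {
+		return "", 0, err
+	}
+	// JSON numbers are decoded as float64, so GetFloat64 is the matching getter.
+	cpus, err := attrs["cpu_count"].GetFloat64()
+	if err != nil {
+		return "", 0, err
+	}
+	return hostname, cpus, nil
+}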
diff --git a/automation/vendor/github.com/juju/gomaasapi/link.go b/automation/vendor/github.com/juju/gomaasapi/link.go
new file mode 100644
index 0000000..9e930e1
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/link.go
@@ -0,0 +1,124 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type link struct {
+	id        int
+	mode      string
+	subnet    *subnet
+	ipAddress string
+}
+
+// NOTE: a lowercase 'l' is deliberately avoided as the receiver name because
+// it is too easily confused with the digit '1'; 'k' is used instead.
+
+// ID implements Link.
+func (k *link) ID() int {
+	return k.id
+}
+
+// Mode implements Link.
+func (k *link) Mode() string {
+	return k.mode
+}
+
+// Subnet implements Link.
+func (k *link) Subnet() Subnet {
+	if k.subnet == nil {
+		return nil
+	}
+	return k.subnet
+}
+
+// IPAddress implements Link.
+func (k *link) IPAddress() string {
+	return k.ipAddress
+}
+
+func readLinks(controllerVersion version.Number, source interface{}) ([]*link, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "link base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range linkDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no link read func for version %s", controllerVersion)
+	}
+	readFunc := linkDeserializationFuncs[deserialisationVersion]
+	return readLinkList(valid, readFunc)
+}
+
+// readLinkList expects the values of the sourceList to be string maps.
+func readLinkList(sourceList []interface{}, readFunc linkDeserializationFunc) ([]*link, error) {
+	result := make([]*link, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for link %d, %T", i, value)
+		}
+		link, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "link %d", i)
+		}
+		result = append(result, link)
+	}
+	return result, nil
+}
+
+type linkDeserializationFunc func(map[string]interface{}) (*link, error)
+
+var linkDeserializationFuncs = map[version.Number]linkDeserializationFunc{
+	twoDotOh: link_2_0,
+}
+
+func link_2_0(source map[string]interface{}) (*link, error) {
+	fields := schema.Fields{
+		"id":         schema.ForceInt(),
+		"mode":       schema.String(),
+		"subnet":     schema.StringMap(schema.Any()),
+		"ip_address": schema.String(),
+	}
+	defaults := schema.Defaults{
+		"ip_address": "",
+		"subnet":     schema.Omit,
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "link 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	var subnet *subnet
+	if value, ok := valid["subnet"]; ok {
+		subnet, err = subnet_2_0(value.(map[string]interface{}))
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+
+	result := &link{
+		id:        valid["id"].(int),
+		mode:      valid["mode"].(string),
+		subnet:    subnet,
+		ipAddress: valid["ip_address"].(string),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/maas.go b/automation/vendor/github.com/juju/gomaasapi/maas.go
new file mode 100644
index 0000000..cd6ce29
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/maas.go
@@ -0,0 +1,11 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+// NewMAAS returns an interface to the MAAS API as a *MAASObject.
+func NewMAAS(client Client) *MAASObject {
+	attrs := map[string]interface{}{resourceURI: client.APIURL.String()}
+	obj := newJSONMAASObject(attrs, client)
+	return &obj
+}
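+
+// newMAASExample is a hypothetical sketch, not part of upstream gomaasapi.
+// It shows one way to obtain the root object, using the anonymous client
+// constructor that the test helpers in this package also rely on;
+// authenticated access would instead build the Client from an API key.
+func newMAASExample(serverURL, apiVersion string) (*MAASObject, error) {
+	client, err := NewAnonymousClient(serverURL, apiVersion)
+	if err != nil {
+		return nil, err
+	}
+	return NewMAAS(*client), nil
+}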
diff --git a/automation/vendor/github.com/juju/gomaasapi/maasobject.go b/automation/vendor/github.com/juju/gomaasapi/maasobject.go
new file mode 100644
index 0000000..3978252
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/maasobject.go
@@ -0,0 +1,197 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/url"
+)
+
+// MAASObject represents a MAAS object as returned by the MAAS API, such as a
+// Node or a Tag.
+// You can extract a MAASObject out of a JSONObject using
+// JSONObject.GetMAASObject.  A MAAS API call will usually return either a
+// MAASObject or a list of MAASObjects.  The list itself would be wrapped in
+// a JSONObject, so if an API call returns a list of objects "l," you first
+// obtain the array using l.GetArray().  Then, for each item "i" in the array,
+// obtain the matching MAASObject using i.GetMAASObject().
+type MAASObject struct {
+	values map[string]JSONObject
+	client Client
+	uri    *url.URL
+}
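+
+// maasObjectListExample is a hypothetical sketch, not part of upstream
+// gomaasapi.  It illustrates the listing pattern described above: a
+// JSONObject holding a JSON list is unpacked with GetArray, and each element
+// is then converted with GetMAASObject.
+func maasObjectListExample(listing JSONObject) ([]MAASObject, error) {
+	items, err := listing.GetArray()
+	if err != nil {
+		return nil, err
+	}
+	result := make([]MAASObject, 0, len(items))
+	for _, item := range items {
+		obj, err := item.GetMAASObject()
+		if err != nil {
+			return nil, err
+		}
+		result = append(result, obj)
+	}
+	return result, nil
+}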
+
+// newJSONMAASObject creates a new MAAS object.  It will panic if the given map
+// does not contain a valid URL for the 'resource_uri' key.
+func newJSONMAASObject(jmap map[string]interface{}, client Client) MAASObject {
+	obj, err := maasify(client, jmap).GetMAASObject()
+	if err != nil {
+		panic(err)
+	}
+	return obj
+}
+
+// MarshalJSON tells the standard json package how to serialize a MAASObject.
+func (obj MAASObject) MarshalJSON() ([]byte, error) {
+	return json.MarshalIndent(obj.GetMap(), "", "  ")
+}
+
+// With MarshalJSON, MAASObject implements json.Marshaler.
+var _ json.Marshaler = (*MAASObject)(nil)
+
+func marshalNode(node MAASObject) string {
+	res, _ := json.MarshalIndent(node, "", "  ")
+	return string(res)
+}
+
+var noResourceURI = errors.New("not a MAAS object: no 'resource_uri' key")
+
+// extractURI obtains the "resource_uri" string from a JSONObject map.
+func extractURI(attrs map[string]JSONObject) (*url.URL, error) {
+	uriEntry, ok := attrs[resourceURI]
+	if !ok {
+		return nil, noResourceURI
+	}
+	uri, err := uriEntry.GetString()
+	if err != nil {
+		return nil, fmt.Errorf("invalid resource_uri: %v", uri)
+	}
+	resourceURL, err := url.Parse(uri)
+	if err != nil {
+		return nil, fmt.Errorf("resource_uri does not contain a valid URL: %v", uri)
+	}
+	return resourceURL, nil
+}
+
+// GetMAASObject converts this JSONObject into a MAASObject.  From a decoding
+// perspective, a MAASObject is just like a map except that it contains a key
+// "resource_uri", and it keeps track of the Client it came from so that API
+// methods can be invoked directly on the resulting MAAS object.
+func (obj JSONObject) GetMAASObject() (MAASObject, error) {
+	attrs, err := obj.GetMap()
+	if err != nil {
+		return MAASObject{}, err
+	}
+	uri, err := extractURI(attrs)
+	if err != nil {
+		return MAASObject{}, err
+	}
+	return MAASObject{values: attrs, client: obj.client, uri: uri}, nil
+}
+
+// GetField extracts a string field from this MAAS object.
+func (obj MAASObject) GetField(name string) (string, error) {
+	return obj.values[name].GetString()
+}
+
+// URI is the resource URI for this MAAS object.  It is an absolute path, but
+// without a network part.
+func (obj MAASObject) URI() *url.URL {
+	// Duplicate the URL.
+	uri, err := url.Parse(obj.uri.String())
+	if err != nil {
+		panic(err)
+	}
+	return uri
+}
+
+// URL returns a full absolute URL (including network part) for this MAAS
+// object on the API.
+func (obj MAASObject) URL() *url.URL {
+	return obj.client.GetURL(obj.URI())
+}
+
+// GetMap returns all of the object's attributes in the form of a map.
+func (obj MAASObject) GetMap() map[string]JSONObject {
+	return obj.values
+}
+
+// GetSubObject returns a new MAASObject representing the API resource found
+// at a given sub-path of the current object's resource URI.
+func (obj MAASObject) GetSubObject(name string) MAASObject {
+	uri := obj.URI()
+	newURL := url.URL{Path: name}
+	resUrl := uri.ResolveReference(&newURL)
+	resUrl.Path = EnsureTrailingSlash(resUrl.Path)
+	input := map[string]interface{}{resourceURI: resUrl.String()}
+	return newJSONMAASObject(input, obj.client)
+}
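+
+// getSubObjectExample is a hypothetical sketch, not part of upstream
+// gomaasapi.  Starting from the root object returned by NewMAAS, GetSubObject
+// walks to a sub-resource and CallGet invokes an operation on it.  The
+// "nodes" path and the "list" operation name are assumptions about the MAAS
+// API rather than anything defined in this file.
+func getSubObjectExample(root MAASObject) (JSONObject, error) {
+	nodeListing := root.GetSubObject("nodes")
+	return nodeListing.CallGet("list", url.Values{})
+}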
+
+var NotImplemented = errors.New("Not implemented")
+
+// Get retrieves a fresh copy of this MAAS object from the API.
+func (obj MAASObject) Get() (MAASObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Get(uri, "", url.Values{})
+	if err != nil {
+		return MAASObject{}, err
+	}
+	jsonObj, err := Parse(obj.client, result)
+	if err != nil {
+		return MAASObject{}, err
+	}
+	return jsonObj.GetMAASObject()
+}
+
+// Post overwrites this object's existing value on the API with those given
+// in "params."  It returns the object's new value as received from the API.
+func (obj MAASObject) Post(params url.Values) (JSONObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Post(uri, "", params, nil)
+	if err != nil {
+		return JSONObject{}, err
+	}
+	return Parse(obj.client, result)
+}
+
+// Update modifies this object on the API, based on the values given in
+// "params."  It returns the object's new value as received from the API.
+func (obj MAASObject) Update(params url.Values) (MAASObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Put(uri, params)
+	if err != nil {
+		return MAASObject{}, err
+	}
+	jsonObj, err := Parse(obj.client, result)
+	if err != nil {
+		return MAASObject{}, err
+	}
+	return jsonObj.GetMAASObject()
+}
+
+// Delete removes this object on the API.
+func (obj MAASObject) Delete() error {
+	uri := obj.URI()
+	return obj.client.Delete(uri)
+}
+
+// CallGet invokes an idempotent API method on this object.
+func (obj MAASObject) CallGet(operation string, params url.Values) (JSONObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Get(uri, operation, params)
+	if err != nil {
+		return JSONObject{}, err
+	}
+	return Parse(obj.client, result)
+}
+
+// CallPost invokes a non-idempotent API method on this object.
+func (obj MAASObject) CallPost(operation string, params url.Values) (JSONObject, error) {
+	return obj.CallPostFiles(operation, params, nil)
+}
+
+// CallPostFiles invokes a non-idempotent API method on this object.  It is
+// similar to CallPost but has an extra parameter, 'files', which should
+// contain the files that will be uploaded to the API.
+func (obj MAASObject) CallPostFiles(operation string, params url.Values, files map[string][]byte) (JSONObject, error) {
+	uri := obj.URI()
+	result, err := obj.client.Post(uri, operation, params, files)
+	if err != nil {
+		return JSONObject{}, err
+	}
+	return Parse(obj.client, result)
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/machine.go b/automation/vendor/github.com/juju/gomaasapi/machine.go
new file mode 100644
index 0000000..8518d94
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/machine.go
@@ -0,0 +1,584 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type machine struct {
+	controller *controller
+
+	resourceURI string
+
+	systemID  string
+	hostname  string
+	fqdn      string
+	tags      []string
+	ownerData map[string]string
+
+	operatingSystem string
+	distroSeries    string
+	architecture    string
+	memory          int
+	cpuCount        int
+
+	ipAddresses []string
+	powerState  string
+
+	// NOTE: consider some form of status struct
+	statusName    string
+	statusMessage string
+
+	bootInterface *interface_
+	interfaceSet  []*interface_
+	zone          *zone
+	// Don't really know the difference between these two lists:
+	physicalBlockDevices []*blockdevice
+	blockDevices         []*blockdevice
+}
+
+func (m *machine) updateFrom(other *machine) {
+	m.resourceURI = other.resourceURI
+	m.systemID = other.systemID
+	m.hostname = other.hostname
+	m.fqdn = other.fqdn
+	m.operatingSystem = other.operatingSystem
+	m.distroSeries = other.distroSeries
+	m.architecture = other.architecture
+	m.memory = other.memory
+	m.cpuCount = other.cpuCount
+	m.ipAddresses = other.ipAddresses
+	m.powerState = other.powerState
+	m.statusName = other.statusName
+	m.statusMessage = other.statusMessage
+	m.zone = other.zone
+	m.tags = other.tags
+	m.ownerData = other.ownerData
+}
+
+// SystemID implements Machine.
+func (m *machine) SystemID() string {
+	return m.systemID
+}
+
+// Hostname implements Machine.
+func (m *machine) Hostname() string {
+	return m.hostname
+}
+
+// FQDN implements Machine.
+func (m *machine) FQDN() string {
+	return m.fqdn
+}
+
+// Tags implements Machine.
+func (m *machine) Tags() []string {
+	return m.tags
+}
+
+// IPAddresses implements Machine.
+func (m *machine) IPAddresses() []string {
+	return m.ipAddresses
+}
+
+// Memory implements Machine.
+func (m *machine) Memory() int {
+	return m.memory
+}
+
+// CPUCount implements Machine.
+func (m *machine) CPUCount() int {
+	return m.cpuCount
+}
+
+// PowerState implements Machine.
+func (m *machine) PowerState() string {
+	return m.powerState
+}
+
+// Zone implements Machine.
+func (m *machine) Zone() Zone {
+	if m.zone == nil {
+		return nil
+	}
+	return m.zone
+}
+
+// BootInterface implements Machine.
+func (m *machine) BootInterface() Interface {
+	if m.bootInterface == nil {
+		return nil
+	}
+	m.bootInterface.controller = m.controller
+	return m.bootInterface
+}
+
+// InterfaceSet implements Machine.
+func (m *machine) InterfaceSet() []Interface {
+	result := make([]Interface, len(m.interfaceSet))
+	for i, v := range m.interfaceSet {
+		v.controller = m.controller
+		result[i] = v
+	}
+	return result
+}
+
+// Interface implements Machine.
+func (m *machine) Interface(id int) Interface {
+	for _, iface := range m.interfaceSet {
+		if iface.ID() == id {
+			iface.controller = m.controller
+			return iface
+		}
+	}
+	return nil
+}
+
+// OperatingSystem implements Machine.
+func (m *machine) OperatingSystem() string {
+	return m.operatingSystem
+}
+
+// DistroSeries implements Machine.
+func (m *machine) DistroSeries() string {
+	return m.distroSeries
+}
+
+// Architecture implements Machine.
+func (m *machine) Architecture() string {
+	return m.architecture
+}
+
+// StatusName implements Machine.
+func (m *machine) StatusName() string {
+	return m.statusName
+}
+
+// StatusMessage implements Machine.
+func (m *machine) StatusMessage() string {
+	return m.statusMessage
+}
+
+// PhysicalBlockDevices implements Machine.
+func (m *machine) PhysicalBlockDevices() []BlockDevice {
+	result := make([]BlockDevice, len(m.physicalBlockDevices))
+	for i, v := range m.physicalBlockDevices {
+		result[i] = v
+	}
+	return result
+}
+
+// PhysicalBlockDevice implements Machine.
+func (m *machine) PhysicalBlockDevice(id int) BlockDevice {
+	for _, blockDevice := range m.physicalBlockDevices {
+		if blockDevice.ID() == id {
+			return blockDevice
+		}
+	}
+	return nil
+}
+
+// BlockDevices implements Machine.
+func (m *machine) BlockDevices() []BlockDevice {
+	result := make([]BlockDevice, len(m.blockDevices))
+	for i, v := range m.blockDevices {
+		result[i] = v
+	}
+	return result
+}
+
+// Devices implements Machine.
+func (m *machine) Devices(args DevicesArgs) ([]Device, error) {
+	// Perhaps in the future, MAAS will give us a way to query just for the
+	// devices for a particular parent.
+	devices, err := m.controller.Devices(args)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	var result []Device
+	for _, device := range devices {
+		if device.Parent() == m.SystemID() {
+			result = append(result, device)
+		}
+	}
+	return result, nil
+}
+
+// StartArgs is an argument struct for passing parameters to the Machine.Start
+// method.
+type StartArgs struct {
+	// UserData needs to be Base64 encoded user data for cloud-init.
+	UserData     string
+	DistroSeries string
+	Kernel       string
+	Comment      string
+}
+
+// Start implements Machine.
+func (m *machine) Start(args StartArgs) error {
+	params := NewURLParams()
+	params.MaybeAdd("user_data", args.UserData)
+	params.MaybeAdd("distro_series", args.DistroSeries)
+	params.MaybeAdd("hwe_kernel", args.Kernel)
+	params.MaybeAdd("comment", args.Comment)
+	result, err := m.controller.post(m.resourceURI, "deploy", params.Values)
+	if err != nil {
+		if svrErr, ok := errors.Cause(err).(ServerError); ok {
+			switch svrErr.StatusCode {
+			case http.StatusNotFound, http.StatusConflict:
+				return errors.Wrap(err, NewBadRequestError(svrErr.BodyMessage))
+			case http.StatusForbidden:
+				return errors.Wrap(err, NewPermissionError(svrErr.BodyMessage))
+			case http.StatusServiceUnavailable:
+				return errors.Wrap(err, NewCannotCompleteError(svrErr.BodyMessage))
+			}
+		}
+		return NewUnexpectedError(err)
+	}
+
+	machine, err := readMachine(m.controller.apiVersion, result)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	m.updateFrom(machine)
+	return nil
+}
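+
+// startExample is a hypothetical sketch, not part of upstream gomaasapi, and
+// it assumes the Machine interface exposes Start with the signature
+// implemented above.  It illustrates the contract stated on StartArgs:
+// UserData must already be base64-encoded cloud-init data (encoding is left
+// to the caller, e.g. with encoding/base64), and the remaining fields are
+// optional and only sent when non-empty (via MaybeAdd above).  The series
+// name and comment are made-up values.
+func startExample(m Machine, encodedUserData string) error {
+	return m.Start(StartArgs{
+		UserData:     encodedUserData, // already base64 encoded by the caller
+		DistroSeries: "xenial",
+		Comment:      "deployed by automation",
+	})
+}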
+
+// CreateMachineDeviceArgs is an argument structure for Machine.CreateDevice.
+// Only InterfaceName and MACAddress fields are required, the others are only
+// used if set. If Subnet and VLAN are both set, Subnet.VLAN() must match the
+// given VLAN. On failure, returns an error satisfying errors.IsNotValid().
+type CreateMachineDeviceArgs struct {
+	Hostname      string
+	InterfaceName string
+	MACAddress    string
+	Subnet        Subnet
+	VLAN          VLAN
+}
+
+// Validate ensures that all required values are non-empty.
+func (a *CreateMachineDeviceArgs) Validate() error {
+	if a.InterfaceName == "" {
+		return errors.NotValidf("missing InterfaceName")
+	}
+
+	if a.MACAddress == "" {
+		return errors.NotValidf("missing MACAddress")
+	}
+
+	if a.Subnet != nil && a.VLAN != nil && a.Subnet.VLAN() != a.VLAN {
+		msg := fmt.Sprintf(
+			"given subnet %q on VLAN %d does not match given VLAN %d",
+			a.Subnet.CIDR(), a.Subnet.VLAN().ID(), a.VLAN.ID(),
+		)
+		return errors.NewNotValid(nil, msg)
+	}
+
+	return nil
+}
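+
+// createDeviceArgsExample is a hypothetical sketch, not part of upstream
+// gomaasapi.  It shows the minimal argument set Validate accepts: only
+// InterfaceName and MACAddress are required, and when just a Subnet is given
+// CreateDevice derives the VLAN from Subnet.VLAN().  The hostname, interface
+// name and MAC address are made-up values.
+func createDeviceArgsExample(subnet Subnet) CreateMachineDeviceArgs {
+	return CreateMachineDeviceArgs{
+		Hostname:      "container-1",
+		InterfaceName: "eth1",
+		MACAddress:    "52:54:00:12:34:56",
+		Subnet:        subnet, // optional; VLAN left nil, so Subnet.VLAN() is used
+	}
+}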
+
+// CreateDevice implements Machine
+func (m *machine) CreateDevice(args CreateMachineDeviceArgs) (_ Device, err error) {
+	if err := args.Validate(); err != nil {
+		return nil, errors.Trace(err)
+	}
+	device, err := m.controller.CreateDevice(CreateDeviceArgs{
+		Hostname:     args.Hostname,
+		MACAddresses: []string{args.MACAddress},
+		Parent:       m.SystemID(),
+	})
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	defer func(err *error) {
+		// If there is an error return, at least try to delete the device we just created.
+		if *err != nil {
+			if innerErr := device.Delete(); innerErr != nil {
+				logger.Warningf("could not delete device %q", device.SystemID())
+			}
+		}
+	}(&err)
+
+	// Update the VLAN to use for the interface, if given.
+	vlanToUse := args.VLAN
+	if vlanToUse == nil && args.Subnet != nil {
+		vlanToUse = args.Subnet.VLAN()
+	}
+
+	// There should be one interface created for each MAC Address, and since we
+	// only specified one, there should just be one response.
+	interfaces := device.InterfaceSet()
+	if count := len(interfaces); count != 1 {
+		err := errors.Errorf("unexpected interface count for device: %d", count)
+		return nil, NewUnexpectedError(err)
+	}
+	iface := interfaces[0]
+	nameToUse := args.InterfaceName
+
+	if err := m.updateDeviceInterface(iface, nameToUse, vlanToUse); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	if args.Subnet == nil {
+		// Nothing further to update.
+		return device, nil
+	}
+
+	if err := m.linkDeviceInterfaceToSubnet(iface, args.Subnet); err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	return device, nil
+}
+
+func (m *machine) updateDeviceInterface(iface Interface, nameToUse string, vlanToUse VLAN) error {
+	updateArgs := UpdateInterfaceArgs{}
+	updateArgs.Name = nameToUse
+
+	if vlanToUse != nil {
+		updateArgs.VLAN = vlanToUse
+	}
+
+	if err := iface.Update(updateArgs); err != nil {
+		return errors.Annotatef(err, "updating device interface %q failed", iface.Name())
+	}
+
+	return nil
+}
+
+func (m *machine) linkDeviceInterfaceToSubnet(iface Interface, subnetToUse Subnet) error {
+	err := iface.LinkSubnet(LinkSubnetArgs{
+		Mode:   LinkModeStatic,
+		Subnet: subnetToUse,
+	})
+	if err != nil {
+		return errors.Annotatef(
+			err, "linking device interface %q to subnet %q failed",
+			iface.Name(), subnetToUse.CIDR())
+	}
+
+	return nil
+}
+
+// OwnerData implements OwnerDataHolder.
+func (m *machine) OwnerData() map[string]string {
+	result := make(map[string]string)
+	for key, value := range m.ownerData {
+		result[key] = value
+	}
+	return result
+}
+
+// SetOwnerData implements OwnerDataHolder.
+func (m *machine) SetOwnerData(ownerData map[string]string) error {
+	params := make(url.Values)
+	for key, value := range ownerData {
+		params.Add(key, value)
+	}
+	result, err := m.controller.post(m.resourceURI, "set_owner_data", params)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	machine, err := readMachine(m.controller.apiVersion, result)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	m.updateFrom(machine)
+	return nil
+}
+
+func readMachine(controllerVersion version.Number, source interface{}) (*machine, error) {
+	readFunc, err := getMachineDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.StringMap(schema.Any())
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "machine base schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	return readFunc(valid)
+}
+
+func readMachines(controllerVersion version.Number, source interface{}) ([]*machine, error) {
+	readFunc, err := getMachineDeserializationFunc(controllerVersion)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "machine base schema check failed")
+	}
+	valid := coerced.([]interface{})
+	return readMachineList(valid, readFunc)
+}
+
+func getMachineDeserializationFunc(controllerVersion version.Number) (machineDeserializationFunc, error) {
+	var deserialisationVersion version.Number
+	for v := range machineDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no machine read func for version %s", controllerVersion)
+	}
+	return machineDeserializationFuncs[deserialisationVersion], nil
+}
+
+func readMachineList(sourceList []interface{}, readFunc machineDeserializationFunc) ([]*machine, error) {
+	result := make([]*machine, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for machine %d, %T", i, value)
+		}
+		machine, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "machine %d", i)
+		}
+		result = append(result, machine)
+	}
+	return result, nil
+}
+
+type machineDeserializationFunc func(map[string]interface{}) (*machine, error)
+
+var machineDeserializationFuncs = map[version.Number]machineDeserializationFunc{
+	twoDotOh: machine_2_0,
+}
+
+func machine_2_0(source map[string]interface{}) (*machine, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"system_id":  schema.String(),
+		"hostname":   schema.String(),
+		"fqdn":       schema.String(),
+		"tag_names":  schema.List(schema.String()),
+		"owner_data": schema.StringMap(schema.String()),
+
+		"osystem":       schema.String(),
+		"distro_series": schema.String(),
+		"architecture":  schema.OneOf(schema.Nil(""), schema.String()),
+		"memory":        schema.ForceInt(),
+		"cpu_count":     schema.ForceInt(),
+
+		"ip_addresses":   schema.List(schema.String()),
+		"power_state":    schema.String(),
+		"status_name":    schema.String(),
+		"status_message": schema.OneOf(schema.Nil(""), schema.String()),
+
+		"boot_interface": schema.OneOf(schema.Nil(""), schema.StringMap(schema.Any())),
+		"interface_set":  schema.List(schema.StringMap(schema.Any())),
+		"zone":           schema.StringMap(schema.Any()),
+
+		"physicalblockdevice_set": schema.List(schema.StringMap(schema.Any())),
+		"blockdevice_set":         schema.List(schema.StringMap(schema.Any())),
+	}
+	defaults := schema.Defaults{
+		"architecture": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "machine 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	var bootInterface *interface_
+	if ifaceMap, ok := valid["boot_interface"].(map[string]interface{}); ok {
+		bootInterface, err = interface_2_0(ifaceMap)
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+
+	interfaceSet, err := readInterfaceList(valid["interface_set"].([]interface{}), interface_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	zone, err := zone_2_0(valid["zone"].(map[string]interface{}))
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	physicalBlockDevices, err := readBlockDeviceList(valid["physicalblockdevice_set"].([]interface{}), blockdevice_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	blockDevices, err := readBlockDeviceList(valid["blockdevice_set"].([]interface{}), blockdevice_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+	architecture, _ := valid["architecture"].(string)
+	statusMessage, _ := valid["status_message"].(string)
+	result := &machine{
+		resourceURI: valid["resource_uri"].(string),
+
+		systemID:  valid["system_id"].(string),
+		hostname:  valid["hostname"].(string),
+		fqdn:      valid["fqdn"].(string),
+		tags:      convertToStringSlice(valid["tag_names"]),
+		ownerData: convertToStringMap(valid["owner_data"]),
+
+		operatingSystem: valid["osystem"].(string),
+		distroSeries:    valid["distro_series"].(string),
+		architecture:    architecture,
+		memory:          valid["memory"].(int),
+		cpuCount:        valid["cpu_count"].(int),
+
+		ipAddresses:   convertToStringSlice(valid["ip_addresses"]),
+		powerState:    valid["power_state"].(string),
+		statusName:    valid["status_name"].(string),
+		statusMessage: statusMessage,
+
+		bootInterface:        bootInterface,
+		interfaceSet:         interfaceSet,
+		zone:                 zone,
+		physicalBlockDevices: physicalBlockDevices,
+		blockDevices:         blockDevices,
+	}
+
+	return result, nil
+}
+
+func convertToStringSlice(field interface{}) []string {
+	if field == nil {
+		return nil
+	}
+	fieldSlice := field.([]interface{})
+	result := make([]string, len(fieldSlice))
+	for i, value := range fieldSlice {
+		result[i] = value.(string)
+	}
+	return result
+}
+
+func convertToStringMap(field interface{}) map[string]string {
+	if field == nil {
+		return nil
+	}
+	// This function is only called after a schema Coerce, so it's
+	// safe.
+	fieldMap := field.(map[string]interface{})
+	result := make(map[string]string)
+	for key, value := range fieldMap {
+		result[key] = value.(string)
+	}
+	return result
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/oauth.go b/automation/vendor/github.com/juju/gomaasapi/oauth.go
new file mode 100644
index 0000000..920960d
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/oauth.go
@@ -0,0 +1,80 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"crypto/rand"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Not a true UUID generator, but it produces a random hex string of
+// comparable length.
+func generateNonce() (string, error) {
+	randBytes := make([]byte, 16)
+	_, err := rand.Read(randBytes)
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%16x", randBytes), nil
+}
+
+func generateTimestamp() string {
+	return strconv.Itoa(int(time.Now().Unix()))
+}
+
+type OAuthSigner interface {
+	OAuthSign(request *http.Request) error
+}
+
+type OAuthToken struct {
+	ConsumerKey    string
+	ConsumerSecret string
+	TokenKey       string
+	TokenSecret    string
+}
+
+// Trick to ensure *plainTextOAuthSigner implements the OAuthSigner interface.
+var _ OAuthSigner = (*plainTextOAuthSigner)(nil)
+
+type plainTextOAuthSigner struct {
+	token *OAuthToken
+	realm string
+}
+
+func NewPlainTestOAuthSigner(token *OAuthToken, realm string) (OAuthSigner, error) {
+	return &plainTextOAuthSigner{token, realm}, nil
+}
+
+// OAuthSign signs the provided request using the OAuth PLAINTEXT
+// method: http://oauth.net/core/1.0/#anchor22.
+func (signer plainTextOAuthSigner) OAuthSign(request *http.Request) error {
+	signature := signer.token.ConsumerSecret + `&` + signer.token.TokenSecret
+	nonce, err := generateNonce()
+	if err != nil {
+		return err
+	}
+	authData := map[string]string{
+		"realm":                  signer.realm,
+		"oauth_consumer_key":     signer.token.ConsumerKey,
+		"oauth_token":            signer.token.TokenKey,
+		"oauth_signature_method": "PLAINTEXT",
+		"oauth_signature":        signature,
+		"oauth_timestamp":        generateTimestamp(),
+		"oauth_nonce":            nonce,
+		"oauth_version":          "1.0",
+	}
+	// Build OAuth header.
+	var authHeader []string
+	for key, value := range authData {
+		authHeader = append(authHeader, fmt.Sprintf(`%s="%s"`, key, url.QueryEscape(value)))
+	}
+	strHeader := "OAuth " + strings.Join(authHeader, ", ")
+	request.Header.Add("Authorization", strHeader)
+	return nil
+}
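+
+// oauthSignExample is a hypothetical sketch, not part of upstream gomaasapi.
+// It shows how a PLAINTEXT signer is built and applied to a request.  The key
+// values are placeholders; the empty ConsumerSecret reflects the usual MAAS
+// convention where an API key carries only consumer key, token key and token
+// secret, and the realm string here is an arbitrary choice.
+func oauthSignExample(request *http.Request) error {
+	token := &OAuthToken{
+		ConsumerKey: "consumer-key",
+		TokenKey:    "token-key",
+		TokenSecret: "token-secret",
+	}
+	signer, err := NewPlainTestOAuthSigner(token, "OAuth API")
+	if err != nil {
+		return err
+	}
+	return signer.OAuthSign(request)
+}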
diff --git a/automation/vendor/github.com/juju/gomaasapi/partition.go b/automation/vendor/github.com/juju/gomaasapi/partition.go
new file mode 100644
index 0000000..f6d6afa
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/partition.go
@@ -0,0 +1,145 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type partition struct {
+	resourceURI string
+
+	id   int
+	path string
+	uuid string
+
+	usedFor string
+	size    uint64
+
+	filesystem *filesystem
+}
+
+// ID implements Partition.
+func (p *partition) ID() int {
+	return p.id
+}
+
+// Path implements Partition.
+func (p *partition) Path() string {
+	return p.path
+}
+
+// FileSystem implements Partition.
+func (p *partition) FileSystem() FileSystem {
+	if p.filesystem == nil {
+		return nil
+	}
+	return p.filesystem
+}
+
+// UUID implements Partition.
+func (p *partition) UUID() string {
+	return p.uuid
+}
+
+// UsedFor implements Partition.
+func (p *partition) UsedFor() string {
+	return p.usedFor
+}
+
+// Size implements Partition.
+func (p *partition) Size() uint64 {
+	return p.size
+}
+
+func readPartitions(controllerVersion version.Number, source interface{}) ([]*partition, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "partition base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range partitionDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, NewUnsupportedVersionError("no partition read func for version %s", controllerVersion)
+	}
+	readFunc := partitionDeserializationFuncs[deserialisationVersion]
+	return readPartitionList(valid, readFunc)
+}
+
+// readPartitionList expects the values of the sourceList to be string maps.
+func readPartitionList(sourceList []interface{}, readFunc partitionDeserializationFunc) ([]*partition, error) {
+	result := make([]*partition, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, NewDeserializationError("unexpected value for partition %d, %T", i, value)
+		}
+		partition, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "partition %d", i)
+		}
+		result = append(result, partition)
+	}
+	return result, nil
+}
+
+type partitionDeserializationFunc func(map[string]interface{}) (*partition, error)
+
+var partitionDeserializationFuncs = map[version.Number]partitionDeserializationFunc{
+	twoDotOh: partition_2_0,
+}
+
+func partition_2_0(source map[string]interface{}) (*partition, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+
+		"id":   schema.ForceInt(),
+		"path": schema.String(),
+		"uuid": schema.OneOf(schema.Nil(""), schema.String()),
+
+		"used_for": schema.String(),
+		"size":     schema.ForceUint(),
+
+		"filesystem": schema.OneOf(schema.Nil(""), schema.StringMap(schema.Any())),
+	}
+	defaults := schema.Defaults{
+		"uuid": "",
+	}
+	checker := schema.FieldMap(fields, defaults)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, WrapWithDeserializationError(err, "partition 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	var filesystem *filesystem
+	if fsSource := valid["filesystem"]; fsSource != nil {
+		filesystem, err = filesystem2_0(fsSource.(map[string]interface{}))
+		if err != nil {
+			return nil, errors.Trace(err)
+		}
+	}
+	uuid, _ := valid["uuid"].(string)
+	result := &partition{
+		resourceURI: valid["resource_uri"].(string),
+		id:          valid["id"].(int),
+		path:        valid["path"].(string),
+		uuid:        uuid,
+		usedFor:     valid["used_for"].(string),
+		size:        valid["size"].(uint64),
+		filesystem:  filesystem,
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/space.go b/automation/vendor/github.com/juju/gomaasapi/space.go
new file mode 100644
index 0000000..5b8b8cf
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/space.go
@@ -0,0 +1,115 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type space struct {
+	// Add the controller in when we need to do things with the space.
+	// controller Controller
+
+	resourceURI string
+
+	id   int
+	name string
+
+	subnets []*subnet
+}
+
+// Id implements Space.
+func (s *space) ID() int {
+	return s.id
+}
+
+// Name implements Space.
+func (s *space) Name() string {
+	return s.name
+}
+
+// Subnets implements Space.
+func (s *space) Subnets() []Subnet {
+	var result []Subnet
+	for _, subnet := range s.subnets {
+		result = append(result, subnet)
+	}
+	return result
+}
+
+func readSpaces(controllerVersion version.Number, source interface{}) ([]*space, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "space base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range spaceDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no space read func for version %s", controllerVersion)
+	}
+	readFunc := spaceDeserializationFuncs[deserialisationVersion]
+	return readSpaceList(valid, readFunc)
+}
+
+// readSpaceList expects the values of the sourceList to be string maps.
+func readSpaceList(sourceList []interface{}, readFunc spaceDeserializationFunc) ([]*space, error) {
+	result := make([]*space, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for space %d, %T", i, value)
+		}
+		space, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "space %d", i)
+		}
+		result = append(result, space)
+	}
+	return result, nil
+}
+
+type spaceDeserializationFunc func(map[string]interface{}) (*space, error)
+
+var spaceDeserializationFuncs = map[version.Number]spaceDeserializationFunc{
+	twoDotOh: space_2_0,
+}
+
+func space_2_0(source map[string]interface{}) (*space, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+		"id":           schema.ForceInt(),
+		"name":         schema.String(),
+		"subnets":      schema.List(schema.StringMap(schema.Any())),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "space 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	subnets, err := readSubnetList(valid["subnets"].([]interface{}), subnet_2_0)
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	result := &space{
+		resourceURI: valid["resource_uri"].(string),
+		id:          valid["id"].(int),
+		name:        valid["name"].(string),
+		subnets:     subnets,
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/subnet.go b/automation/vendor/github.com/juju/gomaasapi/subnet.go
new file mode 100644
index 0000000..f509ccd
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/subnet.go
@@ -0,0 +1,152 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type subnet struct {
+	// Add the controller in when we need to do things with the subnet.
+	// controller Controller
+
+	resourceURI string
+
+	id    int
+	name  string
+	space string
+	vlan  *vlan
+
+	gateway string
+	cidr    string
+
+	dnsServers []string
+}
+
+// ID implements Subnet.
+func (s *subnet) ID() int {
+	return s.id
+}
+
+// Name implements Subnet.
+func (s *subnet) Name() string {
+	return s.name
+}
+
+// Space implements Subnet.
+func (s *subnet) Space() string {
+	return s.space
+}
+
+// VLAN implements Subnet.
+func (s *subnet) VLAN() VLAN {
+	if s.vlan == nil {
+		return nil
+	}
+	return s.vlan
+}
+
+// Gateway implements Subnet.
+func (s *subnet) Gateway() string {
+	return s.gateway
+}
+
+// CIDR implements Subnet.
+func (s *subnet) CIDR() string {
+	return s.cidr
+}
+
+// DNSServers implements Subnet.
+func (s *subnet) DNSServers() []string {
+	return s.dnsServers
+}
+
+func readSubnets(controllerVersion version.Number, source interface{}) ([]*subnet, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "subnet base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range subnetDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no subnet read func for version %s", controllerVersion)
+	}
+	readFunc := subnetDeserializationFuncs[deserialisationVersion]
+	return readSubnetList(valid, readFunc)
+}
+
+// readSubnetList expects the values of the sourceList to be string maps.
+func readSubnetList(sourceList []interface{}, readFunc subnetDeserializationFunc) ([]*subnet, error) {
+	result := make([]*subnet, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for subnet %d, %T", i, value)
+		}
+		subnet, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "subnet %d", i)
+		}
+		result = append(result, subnet)
+	}
+	return result, nil
+}
+
+type subnetDeserializationFunc func(map[string]interface{}) (*subnet, error)
+
+var subnetDeserializationFuncs = map[version.Number]subnetDeserializationFunc{
+	twoDotOh: subnet_2_0,
+}
+
+func subnet_2_0(source map[string]interface{}) (*subnet, error) {
+	fields := schema.Fields{
+		"resource_uri": schema.String(),
+		"id":           schema.ForceInt(),
+		"name":         schema.String(),
+		"space":        schema.String(),
+		"gateway_ip":   schema.OneOf(schema.Nil(""), schema.String()),
+		"cidr":         schema.String(),
+		"vlan":         schema.StringMap(schema.Any()),
+		"dns_servers":  schema.OneOf(schema.Nil(""), schema.List(schema.String())),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "subnet 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	vlan, err := vlan_2_0(valid["vlan"].(map[string]interface{}))
+	if err != nil {
+		return nil, errors.Trace(err)
+	}
+
+	// Since gateway_ip is optional, we use the two-value ("comma ok") type
+	// assertion.  If the assertion fails we get the zero value we want: the
+	// empty string.
+	gateway, _ := valid["gateway_ip"].(string)
+
+	result := &subnet{
+		resourceURI: valid["resource_uri"].(string),
+		id:          valid["id"].(int),
+		name:        valid["name"].(string),
+		space:       valid["space"].(string),
+		vlan:        vlan,
+		gateway:     gateway,
+		cidr:        valid["cidr"].(string),
+		dnsServers:  convertToStringSlice(valid["dns_servers"]),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/testing.go b/automation/vendor/github.com/juju/gomaasapi/testing.go
new file mode 100644
index 0000000..54d67aa
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/testing.go
@@ -0,0 +1,222 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+	"net/http/httptest"
+	"strings"
+)
+
+type singleServingServer struct {
+	*httptest.Server
+	requestContent *string
+	requestHeader  *http.Header
+}
+
+// newSingleServingServer creates a single-serving test http server which will
+// return only one response as defined by the passed arguments.
+func newSingleServingServer(uri string, response string, code int) *singleServingServer {
+	var requestContent string
+	var requestHeader http.Header
+	var requested bool
+	handler := func(writer http.ResponseWriter, request *http.Request) {
+		if requested {
+			http.Error(writer, "Already requested", http.StatusServiceUnavailable)
+		}
+		res, err := readAndClose(request.Body)
+		if err != nil {
+			panic(err)
+		}
+		requestContent = string(res)
+		requestHeader = request.Header
+		if request.URL.String() != uri {
+			errorMsg := fmt.Sprintf("Error 404: page not found (expected '%v', got '%v').", uri, request.URL.String())
+			http.Error(writer, errorMsg, http.StatusNotFound)
+		} else {
+			writer.WriteHeader(code)
+			fmt.Fprint(writer, response)
+		}
+		requested = true
+	}
+	server := httptest.NewServer(http.HandlerFunc(handler))
+	return &singleServingServer{server, &requestContent, &requestHeader}
+}
+
+type flakyServer struct {
+	*httptest.Server
+	nbRequests *int
+	requests   *[][]byte
+}
+
+// newFlakyServer creates a "flaky" test http server which will
+// return `nbFlakyResponses` responses with the given code and then a 200 response.
+func newFlakyServer(uri string, code int, nbFlakyResponses int) *flakyServer {
+	nbRequests := 0
+	requests := make([][]byte, nbFlakyResponses+1)
+	handler := func(writer http.ResponseWriter, request *http.Request) {
+		nbRequests += 1
+		body, err := readAndClose(request.Body)
+		if err != nil {
+			panic(err)
+		}
+		requests[nbRequests-1] = body
+		if request.URL.String() != uri {
+			errorMsg := fmt.Sprintf("Error 404: page not found (expected '%v', got '%v').", uri, request.URL.String())
+			http.Error(writer, errorMsg, http.StatusNotFound)
+		} else if nbRequests <= nbFlakyResponses {
+			if code == http.StatusServiceUnavailable {
+				writer.Header().Set("Retry-After", "0")
+			}
+			writer.WriteHeader(code)
+			fmt.Fprint(writer, "flaky")
+		} else {
+			writer.WriteHeader(http.StatusOK)
+			fmt.Fprint(writer, "ok")
+		}
+
+	}
+	server := httptest.NewServer(http.HandlerFunc(handler))
+	return &flakyServer{server, &nbRequests, &requests}
+}
+
+type simpleResponse struct {
+	status int
+	body   string
+}
+
+type SimpleTestServer struct {
+	*httptest.Server
+
+	getResponses        map[string][]simpleResponse
+	getResponseIndex    map[string]int
+	putResponses        map[string][]simpleResponse
+	putResponseIndex    map[string]int
+	postResponses       map[string][]simpleResponse
+	postResponseIndex   map[string]int
+	deleteResponses     map[string][]simpleResponse
+	deleteResponseIndex map[string]int
+
+	requests []*http.Request
+}
+
+func NewSimpleServer() *SimpleTestServer {
+	server := &SimpleTestServer{
+		getResponses:        make(map[string][]simpleResponse),
+		getResponseIndex:    make(map[string]int),
+		putResponses:        make(map[string][]simpleResponse),
+		putResponseIndex:    make(map[string]int),
+		postResponses:       make(map[string][]simpleResponse),
+		postResponseIndex:   make(map[string]int),
+		deleteResponses:     make(map[string][]simpleResponse),
+		deleteResponseIndex: make(map[string]int),
+	}
+	server.Server = httptest.NewUnstartedServer(http.HandlerFunc(server.handler))
+	return server
+}
+
+func (s *SimpleTestServer) AddGetResponse(path string, status int, body string) {
+	logger.Debugf("add get response for: %s, %d", path, status)
+	s.getResponses[path] = append(s.getResponses[path], simpleResponse{status: status, body: body})
+}
+
+func (s *SimpleTestServer) AddPutResponse(path string, status int, body string) {
+	logger.Debugf("add put response for: %s, %d", path, status)
+	s.putResponses[path] = append(s.putResponses[path], simpleResponse{status: status, body: body})
+}
+
+func (s *SimpleTestServer) AddPostResponse(path string, status int, body string) {
+	logger.Debugf("add post response for: %s, %d", path, status)
+	s.postResponses[path] = append(s.postResponses[path], simpleResponse{status: status, body: body})
+}
+
+func (s *SimpleTestServer) AddDeleteResponse(path string, status int, body string) {
+	logger.Debugf("add delete response for: %s, %d", path, status)
+	s.deleteResponses[path] = append(s.deleteResponses[path], simpleResponse{status: status, body: body})
+}
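+
+// simpleServerExample is a hypothetical sketch, not part of upstream
+// gomaasapi.  It shows the intended test flow for SimpleTestServer: queue
+// canned responses per method and path, start the unstarted server, then
+// point a client at server.URL.  The paths and response bodies are made-up
+// values.
+func simpleServerExample() *SimpleTestServer {
+	server := NewSimpleServer()
+	server.AddGetResponse("/api/2.0/machines/", http.StatusOK, "[]")
+	server.AddPostResponse("/api/2.0/machines/?op=allocate", http.StatusConflict, "no machine available")
+	// SimpleTestServer embeds *httptest.Server and is created unstarted;
+	// callers must Start it and are responsible for calling Close when done.
+	server.Start()
+	return server
+}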
+
+func (s *SimpleTestServer) LastRequest() *http.Request {
+	pos := len(s.requests) - 1
+	if pos < 0 {
+		return nil
+	}
+	return s.requests[pos]
+}
+
+func (s *SimpleTestServer) LastNRequests(n int) []*http.Request {
+	start := len(s.requests) - n
+	if start < 0 {
+		start = 0
+	}
+	return s.requests[start:]
+}
+
+func (s *SimpleTestServer) RequestCount() int {
+	return len(s.requests)
+}
+
+func (s *SimpleTestServer) ResetRequests() {
+	s.requests = nil
+}
+
+func (s *SimpleTestServer) handler(writer http.ResponseWriter, request *http.Request) {
+	method := request.Method
+	var (
+		err           error
+		responses     map[string][]simpleResponse
+		responseIndex map[string]int
+	)
+	switch method {
+	case "GET":
+		responses = s.getResponses
+		responseIndex = s.getResponseIndex
+		_, err = readAndClose(request.Body)
+		if err != nil {
+			panic(err) // it is a test, panic should be fine
+		}
+	case "PUT":
+		responses = s.putResponses
+		responseIndex = s.putResponseIndex
+		err = request.ParseForm()
+		if err != nil {
+			panic(err)
+		}
+	case "POST":
+		responses = s.postResponses
+		responseIndex = s.postResponseIndex
+		contentType := request.Header.Get("Content-Type")
+		if strings.HasPrefix(contentType, "multipart/form-data;") {
+			err = request.ParseMultipartForm(2 << 20)
+		} else {
+			err = request.ParseForm()
+		}
+		if err != nil {
+			panic(err)
+		}
+	case "DELETE":
+		responses = s.deleteResponses
+		responseIndex = s.deleteResponseIndex
+		_, err := readAndClose(request.Body)
+		if err != nil {
+			panic(err)
+		}
+	default:
+		panic("unsupported method " + method)
+	}
+	s.requests = append(s.requests, request)
+	uri := request.URL.String()
+	testResponses, found := responses[uri]
+	if !found {
+		errorMsg := fmt.Sprintf("Error 404: page not found ('%v').", uri)
+		http.Error(writer, errorMsg, http.StatusNotFound)
+	} else {
+		index := responseIndex[uri]
+		response := testResponses[index]
+		responseIndex[uri] = index + 1
+
+		writer.WriteHeader(response.status)
+		fmt.Fprint(writer, response.body)
+	}
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/testservice.go b/automation/vendor/github.com/juju/gomaasapi/testservice.go
new file mode 100644
index 0000000..aa582da
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/testservice.go
@@ -0,0 +1,1672 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"bufio"
+	"bytes"
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"mime/multipart"
+	"net"
+	"net/http"
+	"net/http/httptest"
+	"net/url"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"text/template"
+	"time"
+
+	"gopkg.in/mgo.v2/bson"
+)
+
+// TestMAASObject is a fake MAAS server MAASObject.
+type TestMAASObject struct {
+	MAASObject
+	TestServer *TestServer
+}
+
+// checkError is a shorthand helper that panics if err is not nil.
+func checkError(err error) {
+	if err != nil {
+		panic(err)
+	}
+}
+
+// NewTestMAAS returns a TestMAASObject that implements the MAASObject
+// interface and thus can be used as a test object instead of the one returned
+// by gomaasapi.NewMAAS().
+func NewTestMAAS(version string) *TestMAASObject {
+	server := NewTestServer(version)
+	authClient, err := NewAnonymousClient(server.URL, version)
+	checkError(err)
+	maas := NewMAAS(*authClient)
+	return &TestMAASObject{*maas, server}
+}
+
+// Close shuts down the test server.
+func (testMAASObject *TestMAASObject) Close() {
+	testMAASObject.TestServer.Close()
+}
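+
+// testMAASExample is a hypothetical sketch, not part of upstream gomaasapi.
+// It shows the intended usage of the fake server: create it for an API
+// version, exercise the embedded MAASObject against it, and close it when
+// done.  The "nodes" sub-object and the "list" operation are assumptions
+// about the MAAS API, and errors are ignored purely for brevity.
+func testMAASExample() {
+	testMAAS := NewTestMAAS("1.0")
+	defer testMAAS.Close()
+	nodeListing := testMAAS.GetSubObject("nodes")
+	// The fake server starts out empty, so listing returns an empty result.
+	_, _ = nodeListing.CallGet("list", url.Values{})
+}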
+
+// A TestServer is an HTTP server listening on a system-chosen port on the
+// local loopback interface, which simulates the behavior of a MAAS server.
+// It is intended for use in end-to-end HTTP tests using the gomaasapi
+// library.
+type TestServer struct {
+	*httptest.Server
+	serveMux   *http.ServeMux
+	client     Client
+	nodes      map[string]MAASObject
+	ownedNodes map[string]bool
+	// mapping system_id -> list of operations performed.
+	nodeOperations map[string][]string
+	// list of operations performed at the /nodes/ level.
+	nodesOperations []string
+	// mapping system_id -> list of Values passed when performing
+	// operations
+	nodeOperationRequestValues map[string][]url.Values
+	// list of Values passed when performing operations at the
+	// /nodes/ level.
+	nodesOperationRequestValues []url.Values
+	nodeMetadata                map[string]Node
+	files                       map[string]MAASObject
+	networks                    map[string]MAASObject
+	networksPerNode             map[string][]string
+	ipAddressesPerNetwork       map[string][]string
+	version                     string
+	macAddressesPerNetwork      map[string]map[string]JSONObject
+	nodeDetails                 map[string]string
+	zones                       map[string]JSONObject
+	// bootImages is a map of nodegroup UUIDs to boot-image objects.
+	bootImages map[string][]JSONObject
+	// nodegroupsInterfaces is a map of nodegroup UUIDs to interface
+	// objects.
+	nodegroupsInterfaces map[string][]JSONObject
+
+	// versionJSON is the response to the /version/ endpoint listing the
+	// capabilities of the MAAS server.
+	versionJSON string
+
+	// devices is a map of device UUIDs to devices.
+	devices map[string]*TestDevice
+
+	subnets        map[uint]TestSubnet
+	subnetNameToID map[string]uint
+	nextSubnet     uint
+	spaces         map[uint]*TestSpace
+	spaceNameToID  map[string]uint
+	nextSpace      uint
+	vlans          map[int]TestVLAN
+	nextVLAN       int
+}
+
+type TestDevice struct {
+	IPAddresses  []string
+	SystemId     string
+	MACAddresses []string
+	Parent       string
+	Hostname     string
+
+	// Not part of the device definition but used by the template.
+	APIVersion string
+}
+
+func getNodesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/nodes/", version)
+}
+
+func getNodeURL(version, systemId string) string {
+	return fmt.Sprintf("/api/%s/nodes/%s/", version, systemId)
+}
+
+func getNodeURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/nodes/([^/]*)/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getDevicesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/devices/", version)
+}
+
+func getDeviceURL(version, systemId string) string {
+	return fmt.Sprintf("/api/%s/devices/%s/", version, systemId)
+}
+
+func getDeviceURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/devices/([^/]*)/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getFilesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/files/", version)
+}
+
+func getFileURL(version, filename string) string {
+	// Uses URL object so filename is correctly percent-escaped
+	url := url.URL{}
+	url.Path = fmt.Sprintf("/api/%s/files/%s/", version, filename)
+	return url.String()
+}
+
+func getFileURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/files/(.*)/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getNetworksEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/networks/", version)
+}
+
+func getNetworkURL(version, name string) string {
+	return fmt.Sprintf("/api/%s/networks/%s/", version, name)
+}
+
+func getNetworkURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/networks/(.*)/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getIPAddressesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/ipaddresses/", version)
+}
+
+func getMACAddressURL(version, systemId, macAddress string) string {
+	return fmt.Sprintf("/api/%s/nodes/%s/macs/%s/", version, systemId, url.QueryEscape(macAddress))
+}
+
+func getVersionURL(version string) string {
+	return fmt.Sprintf("/api/%s/version/", version)
+}
+
+func getNodegroupsEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/nodegroups/", version)
+}
+
+func getNodegroupURL(version, uuid string) string {
+	return fmt.Sprintf("/api/%s/nodegroups/%s/", version, uuid)
+}
+
+func getNodegroupsInterfacesURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/nodegroups/([^/]*)/interfaces/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getBootimagesURLRE(version string) *regexp.Regexp {
+	reString := fmt.Sprintf("^/api/%s/nodegroups/([^/]*)/boot-images/$", regexp.QuoteMeta(version))
+	return regexp.MustCompile(reString)
+}
+
+func getZonesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/zones/", version)
+}
+
+// Clear clears all the fake data stored and recorded by the test server
+// (nodes, recorded operations, etc.).
+func (server *TestServer) Clear() {
+	server.nodes = make(map[string]MAASObject)
+	server.ownedNodes = make(map[string]bool)
+	server.nodesOperations = make([]string, 0)
+	server.nodeOperations = make(map[string][]string)
+	server.nodesOperationRequestValues = make([]url.Values, 0)
+	server.nodeOperationRequestValues = make(map[string][]url.Values)
+	server.nodeMetadata = make(map[string]Node)
+	server.files = make(map[string]MAASObject)
+	server.networks = make(map[string]MAASObject)
+	server.networksPerNode = make(map[string][]string)
+	server.ipAddressesPerNetwork = make(map[string][]string)
+	server.macAddressesPerNetwork = make(map[string]map[string]JSONObject)
+	server.nodeDetails = make(map[string]string)
+	server.bootImages = make(map[string][]JSONObject)
+	server.nodegroupsInterfaces = make(map[string][]JSONObject)
+	server.zones = make(map[string]JSONObject)
+	server.versionJSON = `{"capabilities": ["networks-management","static-ipaddresses","devices-management","network-deployment-ubuntu"]}`
+	server.devices = make(map[string]*TestDevice)
+	server.subnets = make(map[uint]TestSubnet)
+	server.subnetNameToID = make(map[string]uint)
+	server.nextSubnet = 1
+	server.spaces = make(map[uint]*TestSpace)
+	server.spaceNameToID = make(map[string]uint)
+	server.nextSpace = 1
+	server.vlans = make(map[int]TestVLAN)
+	server.nextVLAN = 1
+}
+
+// SetVersionJSON sets the JSON response (capabilities) returned from the
+// /version/ endpoint.
+func (server *TestServer) SetVersionJSON(json string) {
+	server.versionJSON = json
+}
+
+// NodesOperations returns the list of operations performed at the /nodes/
+// level.
+func (server *TestServer) NodesOperations() []string {
+	return server.nodesOperations
+}
+
+// NodeOperations returns the map containing the list of the operations
+// performed for each node.
+func (server *TestServer) NodeOperations() map[string][]string {
+	return server.nodeOperations
+}
+
+// NodesOperationRequestValues returns the list of url.Values extracted
+// from the request used when performing operations at the /nodes/ level.
+func (server *TestServer) NodesOperationRequestValues() []url.Values {
+	return server.nodesOperationRequestValues
+}
+
+// NodeOperationRequestValues returns the map containing the list of the
+// url.Values extracted from the request used when performing operations
+// on nodes.
+func (server *TestServer) NodeOperationRequestValues() map[string][]url.Values {
+	return server.nodeOperationRequestValues
+}
+
+func parseRequestValues(request *http.Request) url.Values {
+	var requestValues url.Values
+	if request.Header.Get("Content-Type") == "application/x-www-form-urlencoded" {
+		if request.PostForm == nil {
+			if err := request.ParseForm(); err != nil {
+				panic(err)
+			}
+		}
+		requestValues = request.PostForm
+	}
+	return requestValues
+}
+
+func (server *TestServer) addNodesOperation(operation string, request *http.Request) url.Values {
+	requestValues := parseRequestValues(request)
+	server.nodesOperations = append(server.nodesOperations, operation)
+	server.nodesOperationRequestValues = append(server.nodesOperationRequestValues, requestValues)
+	return requestValues
+}
+
+func (server *TestServer) addNodeOperation(systemId, operation string, request *http.Request) url.Values {
+	operations, present := server.nodeOperations[systemId]
+	operationRequestValues, present2 := server.nodeOperationRequestValues[systemId]
+	if present != present2 {
+		panic("inconsistent state: nodeOperations and nodeOperationRequestValues don't have the same keys.")
+	}
+	requestValues := parseRequestValues(request)
+	if !present {
+		operations = []string{operation}
+		operationRequestValues = []url.Values{requestValues}
+	} else {
+		operations = append(operations, operation)
+		operationRequestValues = append(operationRequestValues, requestValues)
+	}
+	server.nodeOperations[systemId] = operations
+	server.nodeOperationRequestValues[systemId] = operationRequestValues
+	return requestValues
+}
+
+// NewNode creates a MAAS node.  The provided string should be a valid json
+// string representing a map and contain a string value for the key
+// 'system_id'.  e.g. `{"system_id": "mysystemid"}`.
+// If one of these conditions is not met, NewNode panics.
+func (server *TestServer) NewNode(jsonText string) MAASObject {
+	var attrs map[string]interface{}
+	err := json.Unmarshal([]byte(jsonText), &attrs)
+	checkError(err)
+	systemIdEntry, hasSystemId := attrs["system_id"]
+	if !hasSystemId {
+		panic("The given map json string does not contain a 'system_id' value.")
+	}
+	systemId := systemIdEntry.(string)
+	attrs[resourceURI] = getNodeURL(server.version, systemId)
+	if _, hasStatus := attrs["status"]; !hasStatus {
+		attrs["status"] = NodeStatusDeployed
+	}
+	obj := newJSONMAASObject(attrs, server.client)
+	server.nodes[systemId] = obj
+	return obj
+}
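+
+// A minimal usage sketch for NewNode; the system_id and hostname values
+// below are illustrative only:
+//
+//	srv := NewTestServer("1.0")
+//	defer srv.Close()
+//	srv.NewNode(`{"system_id": "node-example", "hostname": "host1"}`)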
+
+// Nodes returns a map associating all the nodes' system ids with the nodes'
+// objects.
+func (server *TestServer) Nodes() map[string]MAASObject {
+	return server.nodes
+}
+
+// OwnedNodes returns a map whose keys represent the nodes that are currently
+// allocated.
+func (server *TestServer) OwnedNodes() map[string]bool {
+	return server.ownedNodes
+}
+
+// NewFile creates a file in the test MAAS server.
+func (server *TestServer) NewFile(filename string, filecontent []byte) MAASObject {
+	attrs := make(map[string]interface{})
+	attrs[resourceURI] = getFileURL(server.version, filename)
+	base64Content := base64.StdEncoding.EncodeToString(filecontent)
+	attrs["content"] = base64Content
+	attrs["filename"] = filename
+
+	// Allocate an arbitrary URL here.  It would be nice if the caller
+	// could do this, but that would change the API and require many
+	// changes.
+	escapedName := url.QueryEscape(filename)
+	attrs["anon_resource_uri"] = "/maas/1.0/files/?op=get_by_key&key=" + escapedName + "_key"
+
+	obj := newJSONMAASObject(attrs, server.client)
+	server.files[filename] = obj
+	return obj
+}
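+
+// Usage sketch for NewFile, assuming srv is a *TestServer from NewTestServer;
+// the filename and payload are arbitrary example values:
+//
+//	srv.NewFile("bootstrap-data", []byte("hello"))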
+
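+// Files returns the files stored in the test server, keyed by filename.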
+func (server *TestServer) Files() map[string]MAASObject {
+	return server.files
+}
+
+// ChangeNode updates a node with the given key/value.
+func (server *TestServer) ChangeNode(systemId, key, value string) {
+	node, found := server.nodes[systemId]
+	if !found {
+		panic("No node with such 'system_id'.")
+	}
+	node.GetMap()[key] = maasify(server.client, value)
+}
+
+// NewIPAddress creates a new static IP address reservation for the
+// given network/subnet and ipAddress. While networks are being deprecated,
+// try the given name as both a network and a subnet.
+func (server *TestServer) NewIPAddress(ipAddress, networkOrSubnet string) {
+	_, foundNetwork := server.networks[networkOrSubnet]
+	subnetID, foundSubnet := server.subnetNameToID[networkOrSubnet]
+
+	if !foundNetwork && !foundSubnet {
+		panic("No such network or subnet: " + networkOrSubnet)
+	}
+	if foundNetwork {
+		ips, found := server.ipAddressesPerNetwork[networkOrSubnet]
+		if found {
+			ips = append(ips, ipAddress)
+		} else {
+			ips = []string{ipAddress}
+		}
+		server.ipAddressesPerNetwork[networkOrSubnet] = ips
+	} else {
+		subnet := server.subnets[subnetID]
+		netIp := net.ParseIP(ipAddress)
+		if netIp == nil {
+			panic(ipAddress + " is invalid")
+		}
+		ip := IPFromNetIP(netIp)
+		ip.Purpose = []string{"assigned-ip"}
+		subnet.InUseIPAddresses = append(subnet.InUseIPAddresses, ip)
+		server.subnets[subnetID] = subnet
+	}
+}
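+
+// Usage sketch, assuming srv is a *TestServer and "net1" was previously
+// created with NewNetwork (the names and address are illustrative):
+//
+//	srv.NewIPAddress("192.168.1.10", "net1")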
+
+// RemoveIPAddress removes the given existing ipAddress and returns
+// whether it was actually removed.
+func (server *TestServer) RemoveIPAddress(ipAddress string) bool {
+	for network, ips := range server.ipAddressesPerNetwork {
+		for i, ip := range ips {
+			if ip == ipAddress {
+				ips = append(ips[:i], ips[i+1:]...)
+				server.ipAddressesPerNetwork[network] = ips
+				return true
+			}
+		}
+	}
+	for _, device := range server.devices {
+		for i, addr := range device.IPAddresses {
+			if addr == ipAddress {
+				device.IPAddresses = append(device.IPAddresses[:i], device.IPAddresses[i+1:]...)
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// IPAddresses returns the map with network names as keys and slices
+// of IP addresses belonging to each network as values.
+func (server *TestServer) IPAddresses() map[string][]string {
+	return server.ipAddressesPerNetwork
+}
+
+// NewNetwork creates a network in the test MAAS server
+func (server *TestServer) NewNetwork(jsonText string) MAASObject {
+	var attrs map[string]interface{}
+	err := json.Unmarshal([]byte(jsonText), &attrs)
+	checkError(err)
+	nameEntry, hasName := attrs["name"]
+	_, hasIP := attrs["ip"]
+	_, hasNetmask := attrs["netmask"]
+	if !hasName || !hasIP || !hasNetmask {
+		panic("The given map json string does not contain a 'name', 'ip', or 'netmask' value.")
+	}
+	// TODO(gz): Sanity checking done on other fields
+	name := nameEntry.(string)
+	attrs[resourceURI] = getNetworkURL(server.version, name)
+	obj := newJSONMAASObject(attrs, server.client)
+	server.networks[name] = obj
+	return obj
+}
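+
+// Usage sketch, assuming srv is a *TestServer; the three keys shown are
+// required and the values are illustrative:
+//
+//	srv.NewNetwork(`{"name": "net1", "ip": "192.168.1.0", "netmask": "255.255.255.0"}`)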
+
+// NewNodegroupInterface adds a nodegroup-interface for the specified
+// nodegroup in the test MAAS server.
+func (server *TestServer) NewNodegroupInterface(uuid, jsonText string) JSONObject {
+	_, ok := server.bootImages[uuid]
+	if !ok {
+		panic("no nodegroup with the given UUID")
+	}
+	var attrs map[string]interface{}
+	err := json.Unmarshal([]byte(jsonText), &attrs)
+	checkError(err)
+	requiredMembers := []string{"ip_range_high", "ip_range_low", "broadcast_ip", "static_ip_range_low", "static_ip_range_high", "name", "ip", "subnet_mask", "management", "interface"}
+	for _, member := range requiredMembers {
+		_, hasMember := attrs[member]
+		if !hasMember {
+			panic(fmt.Sprintf("The given map json string does not contain a required %q", member))
+		}
+	}
+	obj := maasify(server.client, attrs)
+	server.nodegroupsInterfaces[uuid] = append(server.nodegroupsInterfaces[uuid], obj)
+	return obj
+}
+
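+// ConnectNodeToNetwork records that the node with the given system id is
+// attached to the named network. Both must already exist or this panics.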
+func (server *TestServer) ConnectNodeToNetwork(systemId, name string) {
+	_, hasNode := server.nodes[systemId]
+	if !hasNode {
+		panic("no node with the given system id")
+	}
+	_, hasNetwork := server.networks[name]
+	if !hasNetwork {
+		panic("no network with the given name")
+	}
+	networkNames := server.networksPerNode[systemId]
+	server.networksPerNode[systemId] = append(networkNames, name)
+}
+
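+// ConnectNodeToNetworkWithMACAddress attaches the node to the named network
+// and records macAddress both in the node's macaddress_set and against the
+// network. The node and the network must already exist or this panics.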
+func (server *TestServer) ConnectNodeToNetworkWithMACAddress(systemId, networkName, macAddress string) {
+	node, hasNode := server.nodes[systemId]
+	if !hasNode {
+		panic("no node with the given system id")
+	}
+	if _, hasNetwork := server.networks[networkName]; !hasNetwork {
+		panic("no network with the given name")
+	}
+	networkNames := server.networksPerNode[systemId]
+	server.networksPerNode[systemId] = append(networkNames, networkName)
+	attrs := make(map[string]interface{})
+	attrs[resourceURI] = getMACAddressURL(server.version, systemId, macAddress)
+	attrs["mac_address"] = macAddress
+	array := []JSONObject{}
+	if set, ok := node.GetMap()["macaddress_set"]; ok {
+		var err error
+		array, err = set.GetArray()
+		if err != nil {
+			panic(err)
+		}
+	}
+	array = append(array, maasify(server.client, attrs))
+	node.GetMap()["macaddress_set"] = JSONObject{value: array, client: server.client}
+	if _, ok := server.macAddressesPerNetwork[networkName]; !ok {
+		server.macAddressesPerNetwork[networkName] = map[string]JSONObject{}
+	}
+	server.macAddressesPerNetwork[networkName][systemId] = maasify(server.client, attrs)
+}
+
+// AddBootImage adds a boot-image object to the specified nodegroup.
+func (server *TestServer) AddBootImage(nodegroupUUID string, jsonText string) {
+	var attrs map[string]interface{}
+	err := json.Unmarshal([]byte(jsonText), &attrs)
+	checkError(err)
+	if _, ok := attrs["architecture"]; !ok {
+		panic("The boot-image json string does not contain an 'architecture' value.")
+	}
+	if _, ok := attrs["release"]; !ok {
+		panic("The boot-image json string does not contain a 'release' value.")
+	}
+	obj := maasify(server.client, attrs)
+	server.bootImages[nodegroupUUID] = append(server.bootImages[nodegroupUUID], obj)
+}
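+
+// Usage sketch, assuming srv is a *TestServer; the nodegroup UUID and image
+// attributes are example values:
+//
+//	srv.AddBootImage("uuid-0", `{"architecture": "amd64", "release": "trusty"}`)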
+
+// AddZone adds a physical zone to the server.
+func (server *TestServer) AddZone(name, description string) {
+	attrs := map[string]interface{}{
+		"name":        name,
+		"description": description,
+	}
+	obj := maasify(server.client, attrs)
+	server.zones[name] = obj
+}
+
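+// AddDevice registers a pre-built TestDevice with the test server, keyed by
+// its SystemId.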
+func (server *TestServer) AddDevice(device *TestDevice) {
+	server.devices[device.SystemId] = device
+}
+
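+// Devices returns the devices known to the test server, keyed by system id.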
+func (server *TestServer) Devices() map[string]*TestDevice {
+	return server.devices
+}
+
+// NewTestServer starts and returns a new MAAS test server. The caller should call Close when finished, to shut it down.
+func NewTestServer(version string) *TestServer {
+	server := &TestServer{version: version}
+
+	serveMux := http.NewServeMux()
+	devicesURL := getDevicesEndpoint(server.version)
+	// Register handler for '/api/<version>/devices/*'.
+	serveMux.HandleFunc(devicesURL, func(w http.ResponseWriter, r *http.Request) {
+		devicesHandler(server, w, r)
+	})
+	nodesURL := getNodesEndpoint(server.version)
+	// Register handler for '/api/<version>/nodes/*'.
+	serveMux.HandleFunc(nodesURL, func(w http.ResponseWriter, r *http.Request) {
+		nodesHandler(server, w, r)
+	})
+	filesURL := getFilesEndpoint(server.version)
+	// Register handler for '/api/<version>/files/*'.
+	serveMux.HandleFunc(filesURL, func(w http.ResponseWriter, r *http.Request) {
+		filesHandler(server, w, r)
+	})
+	networksURL := getNetworksEndpoint(server.version)
+	// Register handler for '/api/<version>/networks/'.
+	serveMux.HandleFunc(networksURL, func(w http.ResponseWriter, r *http.Request) {
+		networksHandler(server, w, r)
+	})
+	ipAddressesURL := getIPAddressesEndpoint(server.version)
+	// Register handler for '/api/<version>/ipaddresses/'.
+	serveMux.HandleFunc(ipAddressesURL, func(w http.ResponseWriter, r *http.Request) {
+		ipAddressesHandler(server, w, r)
+	})
+	versionURL := getVersionURL(server.version)
+	// Register handler for '/api/<version>/version/'.
+	serveMux.HandleFunc(versionURL, func(w http.ResponseWriter, r *http.Request) {
+		versionHandler(server, w, r)
+	})
+	// Register handler for '/api/<version>/nodegroups/*'.
+	nodegroupsURL := getNodegroupsEndpoint(server.version)
+	serveMux.HandleFunc(nodegroupsURL, func(w http.ResponseWriter, r *http.Request) {
+		nodegroupsHandler(server, w, r)
+	})
+
+	// Register handler for '/api/<version>/zones/*'.
+	zonesURL := getZonesEndpoint(server.version)
+	serveMux.HandleFunc(zonesURL, func(w http.ResponseWriter, r *http.Request) {
+		zonesHandler(server, w, r)
+	})
+
+	subnetsURL := getSubnetsEndpoint(server.version)
+	serveMux.HandleFunc(subnetsURL, func(w http.ResponseWriter, r *http.Request) {
+		subnetsHandler(server, w, r)
+	})
+
+	spacesURL := getSpacesEndpoint(server.version)
+	serveMux.HandleFunc(spacesURL, func(w http.ResponseWriter, r *http.Request) {
+		spacesHandler(server, w, r)
+	})
+
+	vlansURL := getVLANsEndpoint(server.version)
+	serveMux.HandleFunc(vlansURL, func(w http.ResponseWriter, r *http.Request) {
+		vlansHandler(server, w, r)
+	})
+
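+	// Serialize all requests through a single mutex so that handlers can
+	// mutate the server's shared maps without racing.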
+	var mu sync.Mutex
+	singleFile := func(w http.ResponseWriter, req *http.Request) {
+		mu.Lock()
+		defer mu.Unlock()
+		serveMux.ServeHTTP(w, req)
+	}
+
+	newServer := httptest.NewServer(http.HandlerFunc(singleFile))
+	client, err := NewAnonymousClient(newServer.URL, "1.0")
+	checkError(err)
+	server.Server = newServer
+	server.serveMux = serveMux
+	server.client = *client
+	server.Clear()
+	return server
+}
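+
+// A minimal end-to-end sketch; the version, node JSON and request path are
+// example values, and the standard net/http client is assumed:
+//
+//	srv := NewTestServer("1.0")
+//	defer srv.Close()
+//	srv.NewNode(`{"system_id": "node-1"}`)
+//	resp, err := http.Get(srv.Server.URL + "/api/1.0/nodes/node-1/")
+//	// ... check err and decode resp.Body as needed ...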
+
+// devicesHandler handles requests for '/api/<version>/devices/*'.
+func devicesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	deviceURLRE := getDeviceURLRE(server.version)
+	deviceURLMatch := deviceURLRE.FindStringSubmatch(r.URL.Path)
+	devicesURL := getDevicesEndpoint(server.version)
+	switch {
+	case r.URL.Path == devicesURL:
+		devicesTopLevelHandler(server, w, r, op)
+	case deviceURLMatch != nil:
+		// Request for a single device.
+		deviceHandler(server, w, r, deviceURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// devicesTopLevelHandler handles a request for /api/<version>/devices/
+// (with no device id following as part of the path).
+func devicesTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
+	switch {
+	case r.Method == "GET" && op == "list":
+		// Device listing operation.
+		deviceListingHandler(server, w, r)
+	case r.Method == "POST" && op == "new":
+		newDeviceHandler(server, w, r)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+func macMatches(mac string, device *TestDevice) bool {
+	return contains(device.MACAddresses, mac)
+}
+
+// deviceListingHandler handles requests for '/devices/'.
+func deviceListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	// TODO(mfoord): support filtering by hostname and id
+	macs, hasMac := values["mac_address"]
+	var matchedDevices []*TestDevice
+	if !hasMac {
+		for _, device := range server.devices {
+			matchedDevices = append(matchedDevices, device)
+		}
+	} else {
+		for _, mac := range macs {
+			for _, device := range server.devices {
+				if macMatches(mac, device) {
+					matchedDevices = append(matchedDevices, device)
+				}
+			}
+		}
+	}
+	deviceChunks := make([]string, len(matchedDevices))
+	for i := range matchedDevices {
+		deviceChunks[i] = renderDevice(matchedDevices[i])
+	}
+	json := fmt.Sprintf("[%v]", strings.Join(deviceChunks, ", "))
+
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, json)
+}
+
+var templateFuncs = template.FuncMap{
+	"quotedList": func(items []string) string {
+		var pieces []string
+		for _, item := range items {
+			pieces = append(pieces, fmt.Sprintf("%q", item))
+		}
+		return strings.Join(pieces, ", ")
+	},
+	"last": func(items []string) []string {
+		if len(items) == 0 {
+			return []string{}
+		}
+		return items[len(items)-1:]
+	},
+	"allButLast": func(items []string) []string {
+		if len(items) < 2 {
+			return []string{}
+		}
+		return items[0 : len(items)-1]
+	},
+}
+
+const (
+	// The json template for generating new devices.
+	// TODO(mfoord): set resource_uri in MAC addresses
+	deviceTemplate = `{
+	"macaddress_set": [{{range .MACAddresses | allButLast}}
+	    {
+		"mac_address": "{{.}}"
+	    },{{end}}{{range .MACAddresses | last}}
+	    {
+		"mac_address": "{{.}}"
+	    }{{end}}
+	],
+	"zone": {
+	    "resource_uri": "/MAAS/api/{{.APIVersion}}/zones/default/",
+	    "name": "default",
+	    "description": ""
+	},
+	"parent": "{{.Parent}}",
+	"ip_addresses": [{{.IPAddresses | quotedList }}],
+	"hostname": "{{.Hostname}}",
+	"tag_names": [],
+	"owner": "maas-admin",
+	"system_id": "{{.SystemId}}",
+	"resource_uri": "/MAAS/api/{{.APIVersion}}/devices/{{.SystemId}}/"
+}`
+)
+
+func renderDevice(device *TestDevice) string {
+	t := template.New("Device template")
+	t = t.Funcs(templateFuncs)
+	t, err := t.Parse(deviceTemplate)
+	checkError(err)
+	var buf bytes.Buffer
+	err = t.Execute(&buf, device)
+	checkError(err)
+	return buf.String()
+}
+
+func getValue(values url.Values, value string) (string, bool) {
+	result, hasResult := values[value]
+	if !hasResult || len(result) != 1 || result[0] == "" {
+		return "", false
+	}
+	return result[0], true
+}
+
+func getValues(values url.Values, key string) ([]string, bool) {
+	result, hasResult := values[key]
+	if !hasResult {
+		return nil, false
+	}
+	var output []string
+	for _, val := range result {
+		if val != "" {
+			output = append(output, val)
+		}
+	}
+	if len(output) == 0 {
+		return nil, false
+	}
+	return output, true
+}
+
+// newDeviceHandler creates, stores and returns new devices.
+func newDeviceHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	err := r.ParseForm()
+	checkError(err)
+	values := r.PostForm
+
+	// TODO(mfoord): generate a "proper" uuid for the system id.
+	uuid, err := generateNonce()
+	checkError(err)
+	systemId := fmt.Sprintf("node-%v", uuid)
+	// At least one MAC address must be specified.
+	// TODO(mfoord) we only support a single MAC in the test server.
+	macs, hasMacs := getValues(values, "mac_addresses")
+
+	// hostname and parent are optional.
+	// TODO(mfoord): we require both to be set in the test server.
+	hostname, hasHostname := getValue(values, "hostname")
+	parent, hasParent := getValue(values, "parent")
+	if !hasHostname || !hasMacs || !hasParent {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	device := &TestDevice{
+		MACAddresses: macs,
+		APIVersion:   server.version,
+		Parent:       parent,
+		Hostname:     hostname,
+		SystemId:     systemId,
+	}
+
+	deviceJSON := renderDevice(device)
+	server.devices[systemId] = device
+
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, deviceJSON)
+	return
+}
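+
+// Sketch of the corresponding client call against a running test server srv;
+// the values are illustrative and the standard net/http and net/url packages
+// are assumed:
+//
+//	resp, err := http.PostForm(srv.Server.URL+"/api/1.0/devices/?op=new",
+//		url.Values{
+//			"mac_addresses": {"aa:bb:cc:dd:ee:ff"},
+//			"hostname":      {"device-1"},
+//			"parent":        {"node-1"},
+//		})
+//	// ... check err and decode resp.Body as needed ...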
+
+// deviceHandler handles requests for '/api/<version>/devices/<system_id>/'.
+func deviceHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string, operation string) {
+	device, ok := server.devices[systemId]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	if r.Method == "GET" {
+		deviceJSON := renderDevice(device)
+		if operation == "" {
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, deviceJSON)
+			return
+		} else {
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	}
+	if r.Method == "POST" {
+		if operation == "claim_sticky_ip_address" {
+			err := r.ParseForm()
+			checkError(err)
+			values := r.PostForm
+			// TODO(mfoord): support optional mac_address parameter
+			// TODO(mfoord): requested_address should be optional
+			// and we should generate one if it isn't provided.
+			address, hasAddress := getValue(values, "requested_address")
+			if !hasAddress {
+				w.WriteHeader(http.StatusBadRequest)
+				return
+			}
+			checkError(err)
+			device.IPAddresses = append(device.IPAddresses, address)
+			deviceJSON := renderDevice(device)
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, deviceJSON)
+			return
+		} else {
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	} else if r.Method == "DELETE" {
+		delete(server.devices, systemId)
+		w.WriteHeader(http.StatusNoContent)
+		return
+
+	}
+
+	// TODO(mfoord): support PUT method for updating device
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
+
+// nodesHandler handles requests for '/api/<version>/nodes/*'.
+func nodesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	nodeURLRE := getNodeURLRE(server.version)
+	nodeURLMatch := nodeURLRE.FindStringSubmatch(r.URL.Path)
+	nodesURL := getNodesEndpoint(server.version)
+	switch {
+	case r.URL.Path == nodesURL:
+		nodesTopLevelHandler(server, w, r, op)
+	case nodeURLMatch != nil:
+		// Request for a single node.
+		nodeHandler(server, w, r, nodeURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// nodeHandler handles requests for '/api/<version>/nodes/<system_id>/'.
+func nodeHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string, operation string) {
+	node, ok := server.nodes[systemId]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	UUID, UUIDError := node.values["system_id"].GetString()
+	if UUIDError == nil {
+		i, err := JSONObjectFromStruct(server.client, server.nodeMetadata[UUID].Interfaces)
+		checkError(err)
+		node.values["interface_set"] = i
+	}
+
+	if r.Method == "GET" {
+		if operation == "" {
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, marshalNode(node))
+			return
+		} else if operation == "details" {
+			nodeDetailsHandler(server, w, r, systemId)
+			return
+		} else {
+			w.WriteHeader(http.StatusBadRequest)
+			return
+		}
+	}
+	if r.Method == "POST" {
+		// The only operations supported are "start", "stop" and "release".
+		if operation == "start" || operation == "stop" || operation == "release" {
+			// Record operation on node.
+			server.addNodeOperation(systemId, operation, r)
+
+			if operation == "release" {
+				delete(server.OwnedNodes(), systemId)
+			}
+
+			w.WriteHeader(http.StatusOK)
+			fmt.Fprint(w, marshalNode(node))
+			return
+		}
+
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+	if r.Method == "DELETE" {
+		delete(server.nodes, systemId)
+		w.WriteHeader(http.StatusOK)
+		return
+	}
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
+
+func contains(slice []string, val string) bool {
+	for _, item := range slice {
+		if item == val {
+			return true
+		}
+	}
+	return false
+}
+
+// nodeListingHandler handles requests for '/nodes/'.
+func nodeListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	ids, hasId := values["id"]
+	var convertedNodes = []map[string]JSONObject{}
+	for systemId, node := range server.nodes {
+		if !hasId || contains(ids, systemId) {
+			convertedNodes = append(convertedNodes, node.GetMap())
+		}
+	}
+	res, err := json.MarshalIndent(convertedNodes, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// nodeDeploymentStatusHandler handles requests for '/nodes/?op=deployment_status'.
+func nodeDeploymentStatusHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	nodes := values["nodes"]
+	var nodeStatus = make(map[string]interface{})
+	for _, systemId := range nodes {
+		node := server.nodes[systemId]
+		field, err := node.GetField("status")
+		if err != nil {
+			continue
+		}
+		switch field {
+		case NodeStatusDeployed:
+			nodeStatus[systemId] = "Deployed"
+		case NodeStatusFailedDeployment:
+			nodeStatus[systemId] = "Failed deployment"
+		default:
+			nodeStatus[systemId] = "Not in Deployment"
+		}
+	}
+	obj := maasify(server.client, nodeStatus)
+	res, err := json.MarshalIndent(obj, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// findFreeNode looks for a node that is currently available, and
+// matches the specified filter.
+func findFreeNode(server *TestServer, filter url.Values) *MAASObject {
+	for systemID, node := range server.Nodes() {
+		_, present := server.OwnedNodes()[systemID]
+		if !present {
+			var agentName, nodeName, zoneName, mem, cpuCores, arch string
+			for k := range filter {
+				switch k {
+				case "agent_name":
+					agentName = filter.Get(k)
+				case "name":
+					nodeName = filter.Get(k)
+				case "zone":
+					zoneName = filter.Get(k)
+				case "mem":
+					mem = filter.Get(k)
+				case "arch":
+					arch = filter.Get(k)
+				case "cpu-cores":
+					cpuCores = filter.Get(k)
+				}
+			}
+			if nodeName != "" && !matchField(node, "hostname", nodeName) {
+				continue
+			}
+			if zoneName != "" && !matchField(node, "zone", zoneName) {
+				continue
+			}
+			if mem != "" && !matchNumericField(node, "memory", mem) {
+				continue
+			}
+			if arch != "" && !matchArchitecture(node, "architecture", arch) {
+				continue
+			}
+			if cpuCores != "" && !matchNumericField(node, "cpu_count", cpuCores) {
+				continue
+			}
+			if agentName != "" {
+				agentNameObj := maasify(server.client, agentName)
+				node.GetMap()["agent_name"] = agentNameObj
+			} else {
+				delete(node.GetMap(), "agent_name")
+			}
+			return &node
+		}
+	}
+	return nil
+}
+
+func matchArchitecture(node MAASObject, k, v string) bool {
+	field, err := node.GetField(k)
+	if err != nil {
+		return false
+	}
+	baseArch := strings.Split(field, "/")
+	return v == baseArch[0]
+}
+
+func matchNumericField(node MAASObject, k, v string) bool {
+	field, ok := node.GetMap()[k]
+	if !ok {
+		return false
+	}
+	nodeVal, err := field.GetFloat64()
+	if err != nil {
+		return false
+	}
+	constraintVal, err := strconv.ParseFloat(v, 64)
+	if err != nil {
+		return false
+	}
+	return constraintVal <= nodeVal
+}
+
+func matchField(node MAASObject, k, v string) bool {
+	field, err := node.GetField(k)
+	if err != nil {
+		return false
+	}
+	return field == v
+}
+
+// nodesAcquireHandler simulates acquiring a node.
+func nodesAcquireHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	requestValues := server.addNodesOperation("acquire", r)
+	node := findFreeNode(server, requestValues)
+	if node == nil {
+		w.WriteHeader(http.StatusConflict)
+	} else {
+		systemId, err := node.GetField("system_id")
+		checkError(err)
+		server.OwnedNodes()[systemId] = true
+		res, err := json.MarshalIndent(node, "", "  ")
+		checkError(err)
+		// Record operation.
+		server.addNodeOperation(systemId, "acquire", r)
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprint(w, string(res))
+	}
+}
+
+// nodesReleaseHandler simulates releasing multiple nodes.
+func nodesReleaseHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	server.addNodesOperation("release", r)
+	values := server.NodesOperationRequestValues()
+	systemIds := values[len(values)-1]["nodes"]
+	var unknown []string
+	for _, systemId := range systemIds {
+		if _, ok := server.Nodes()[systemId]; !ok {
+			unknown = append(unknown, systemId)
+		}
+	}
+	if len(unknown) > 0 {
+		w.WriteHeader(http.StatusBadRequest)
+		fmt.Fprintf(w, "Unknown node(s): %s.", strings.Join(unknown, ", "))
+		return
+	}
+	var releasedNodes = []map[string]JSONObject{}
+	for _, systemId := range systemIds {
+		if _, ok := server.OwnedNodes()[systemId]; !ok {
+			continue
+		}
+		delete(server.OwnedNodes(), systemId)
+		node := server.Nodes()[systemId]
+		releasedNodes = append(releasedNodes, node.GetMap())
+	}
+	res, err := json.MarshalIndent(releasedNodes, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// nodesTopLevelHandler handles a request for /api/<version>/nodes/
+// (with no node id following as part of the path).
+func nodesTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
+	switch {
+	case r.Method == "GET" && op == "list":
+		// Node listing operation.
+		nodeListingHandler(server, w, r)
+	case r.Method == "GET" && op == "deployment_status":
+		// Node deployment_status operation.
+		nodeDeploymentStatusHandler(server, w, r)
+	case r.Method == "POST" && op == "acquire":
+		nodesAcquireHandler(server, w, r)
+	case r.Method == "POST" && op == "release":
+		nodesReleaseHandler(server, w, r)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+// AddNodeDetails stores node details, expected in XML format.
+func (server *TestServer) AddNodeDetails(systemId, xmlText string) {
+	_, hasNode := server.nodes[systemId]
+	if !hasNode {
+		panic("no node with the given system id")
+	}
+	server.nodeDetails[systemId] = xmlText
+}
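+
+// Usage sketch, assuming srv is a *TestServer; the system id and XML payload
+// are example values, and the node must already have been created with NewNode:
+//
+//	srv.AddNodeDetails("node-1", "<list><lshw/></list>")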
+
+const lldpXML = `
+<?xml version="1.0" encoding="UTF-8"?>
+<lldp label="LLDP neighbors"/>`
+
+// nodeDetailsHandler handles requests for '/api/<version>/nodes/<system_id>/?op=details'.
+func nodeDetailsHandler(server *TestServer, w http.ResponseWriter, r *http.Request, systemId string) {
+	attrs := make(map[string]interface{})
+	attrs["lldp"] = lldpXML
+	xmlText := server.nodeDetails[systemId]
+	attrs["lshw"] = []byte(xmlText)
+	res, err := bson.Marshal(attrs)
+	checkError(err)
+	w.Header().Set("Content-Type", "application/bson")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// filesHandler handles requests for '/api/<version>/files/*'.
+func filesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	fileURLRE := getFileURLRE(server.version)
+	fileURLMatch := fileURLRE.FindStringSubmatch(r.URL.Path)
+	fileListingURL := getFilesEndpoint(server.version)
+	switch {
+	case r.Method == "GET" && op == "list" && r.URL.Path == fileListingURL:
+		// File listing operation.
+		fileListingHandler(server, w, r)
+	case op == "get" && r.Method == "GET" && r.URL.Path == fileListingURL:
+		getFileHandler(server, w, r)
+	case op == "add" && r.Method == "POST" && r.URL.Path == fileListingURL:
+		addFileHandler(server, w, r)
+	case fileURLMatch != nil:
+		// Request for a single file.
+		fileHandler(server, w, r, fileURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+
+}
+
+// listFilenames returns the names of those uploaded files whose names start
+// with the given prefix, sorted lexicographically.
+func listFilenames(server *TestServer, prefix string) []string {
+	var filenames = make([]string, 0)
+	for filename := range server.files {
+		if strings.HasPrefix(filename, prefix) {
+			filenames = append(filenames, filename)
+		}
+	}
+	sort.Strings(filenames)
+	return filenames
+}
+
+// stripContent copies a map of attributes representing an uploaded file,
+// but with the "content" attribute removed.
+func stripContent(original map[string]JSONObject) map[string]JSONObject {
+	newMap := make(map[string]JSONObject, len(original)-1)
+	for key, value := range original {
+		if key != "content" {
+			newMap[key] = value
+		}
+	}
+	return newMap
+}
+
+// fileListingHandler handles requests for '/api/<version>/files/?op=list'.
+func fileListingHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	prefix := values.Get("prefix")
+	filenames := listFilenames(server, prefix)
+
+	// Build a sorted list of the files as map[string]JSONObject objects.
+	convertedFiles := make([]map[string]JSONObject, 0)
+	for _, filename := range filenames {
+		// The "content" attribute is not in the listing.
+		fileMap := stripContent(server.files[filename].GetMap())
+		convertedFiles = append(convertedFiles, fileMap)
+	}
+	res, err := json.MarshalIndent(convertedFiles, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// fileHandler handles requests for '/api/<version>/files/<filename>/'.
+func fileHandler(server *TestServer, w http.ResponseWriter, r *http.Request, filename string, operation string) {
+	switch {
+	case r.Method == "DELETE":
+		delete(server.files, filename)
+		w.WriteHeader(http.StatusOK)
+	case r.Method == "GET":
+		// Retrieve a file's information (including content) as a JSON
+		// object.
+		file, ok := server.files[filename]
+		if !ok {
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+		jsonText, err := json.MarshalIndent(file, "", "  ")
+		if err != nil {
+			panic(err)
+		}
+		w.WriteHeader(http.StatusOK)
+		w.Write(jsonText)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// InternalError replies to the request with an HTTP 500 internal error.
+func InternalError(w http.ResponseWriter, r *http.Request, err error) {
+	http.Error(w, err.Error(), http.StatusInternalServerError)
+}
+
+// getFileHandler handles requests for
+// '/api/<version>/files/?op=get&filename=filename'.
+func getFileHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	filename := values.Get("filename")
+	file, found := server.files[filename]
+	if !found {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	base64Content, err := file.GetField("content")
+	if err != nil {
+		InternalError(w, r, err)
+		return
+	}
+	content, err := base64.StdEncoding.DecodeString(base64Content)
+	if err != nil {
+		InternalError(w, r, err)
+		return
+	}
+	w.Write(content)
+}
+
+func readMultipart(upload *multipart.FileHeader) ([]byte, error) {
+	file, err := upload.Open()
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	reader := bufio.NewReader(file)
+	return ioutil.ReadAll(reader)
+}
+
+// addFileHandler handles requests for '/api/<version>/files/?op=add&filename=filename'.
+func addFileHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	err := r.ParseMultipartForm(10000000)
+	checkError(err)
+
+	filename := r.Form.Get("filename")
+	if filename == "" {
+		panic("upload has no filename")
+	}
+
+	uploads := r.MultipartForm.File
+	if len(uploads) != 1 {
+		panic("the payload should contain one file and one file only")
+	}
+	var upload *multipart.FileHeader
+	for _, uploadContent := range uploads {
+		upload = uploadContent[0]
+	}
+	content, err := readMultipart(upload)
+	checkError(err)
+	server.NewFile(filename, content)
+	w.WriteHeader(http.StatusOK)
+}
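+
+// Sketch of the client-side upload this handler expects: a multipart form
+// with a "filename" field and exactly one file part. The values are
+// illustrative, srv is a running *TestServer, and bytes, mime/multipart and
+// net/http from the standard library are assumed:
+//
+//	var body bytes.Buffer
+//	mw := multipart.NewWriter(&body)
+//	mw.WriteField("filename", "myfile")
+//	part, _ := mw.CreateFormFile("file", "myfile")
+//	part.Write([]byte("contents"))
+//	mw.Close()
+//	http.Post(srv.Server.URL+"/api/1.0/files/?op=add",
+//		mw.FormDataContentType(), &body)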
+
+// networkListConnectedMACSHandler handles requests for '/api/<version>/networks/<network>/?op=list_connected_macs'
+func networkListConnectedMACSHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	networkURLRE := getNetworkURLRE(server.version)
+	networkURLREMatch := networkURLRE.FindStringSubmatch(r.URL.Path)
+	if networkURLREMatch == nil {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	networkName := networkURLREMatch[1]
+	convertedMacAddresses := []map[string]JSONObject{}
+	if macAddresses, ok := server.macAddressesPerNetwork[networkName]; ok {
+		for _, macAddress := range macAddresses {
+			m, err := macAddress.GetMap()
+			checkError(err)
+			convertedMacAddresses = append(convertedMacAddresses, m)
+		}
+	}
+	res, err := json.MarshalIndent(convertedMacAddresses, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// networksHandler handles requests for '/api/<version>/networks/?node=system_id'.
+func networksHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		panic("only networks GET operation implemented")
+	}
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	systemId := values.Get("node")
+	if op == "list_connected_macs" {
+		networkListConnectedMACSHandler(server, w, r)
+		return
+	}
+	if op != "" {
+		panic("only list_connected_macs and default operations implemented")
+	}
+	if systemId == "" {
+		panic("network missing associated node system id")
+	}
+	networks := []MAASObject{}
+	if networkNames, hasNetworks := server.networksPerNode[systemId]; hasNetworks {
+		networks = make([]MAASObject, len(networkNames))
+		for i, networkName := range networkNames {
+			networks[i] = server.networks[networkName]
+		}
+	}
+	res, err := json.MarshalIndent(networks, "", "  ")
+	checkError(err)
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// ipAddressesHandler handles requests for '/api/<version>/ipaddresses/'.
+func ipAddressesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	err := r.ParseForm()
+	checkError(err)
+	values := r.Form
+	op := values.Get("op")
+
+	switch r.Method {
+	case "GET":
+		if op != "" {
+			panic("expected empty op for GET, got " + op)
+		}
+		listIPAddressesHandler(server, w, r)
+		return
+	case "POST":
+		switch op {
+		case "reserve":
+			reserveIPAddressHandler(server, w, r, values.Get("network"), values.Get("requested_address"))
+			return
+		case "release":
+			releaseIPAddressHandler(server, w, r, values.Get("ip"))
+			return
+		default:
+			panic("expected op=release|reserve for POST, got " + op)
+		}
+	}
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
+
+func marshalIPAddress(server *TestServer, ipAddress string) (JSONObject, error) {
+	jsonTemplate := `{"alloc_type": 4, "ip": %q, "resource_uri": %q, "created": %q}`
+	uri := getIPAddressesEndpoint(server.version)
+	now := time.Now().UTC().Format(time.RFC3339)
+	bytes := []byte(fmt.Sprintf(jsonTemplate, ipAddress, uri, now))
+	return Parse(server.client, bytes)
+}
+
+func badRequestError(w http.ResponseWriter, err error) {
+	w.WriteHeader(http.StatusBadRequest)
+	fmt.Fprint(w, err.Error())
+}
+
+func listIPAddressesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	results := []MAASObject{}
+	for _, ips := range server.IPAddresses() {
+		for _, ip := range ips {
+			jsonObj, err := marshalIPAddress(server, ip)
+			if err != nil {
+				badRequestError(w, err)
+				return
+			}
+			maasObj, err := jsonObj.GetMAASObject()
+			if err != nil {
+				badRequestError(w, err)
+				return
+			}
+			results = append(results, maasObj)
+		}
+	}
+	res, err := json.MarshalIndent(results, "", "  ")
+	checkError(err)
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+func reserveIPAddressHandler(server *TestServer, w http.ResponseWriter, r *http.Request, network, reqAddress string) {
+	_, ipNet, err := net.ParseCIDR(network)
+	if err != nil {
+		badRequestError(w, fmt.Errorf("Invalid network parameter %s", network))
+		return
+	}
+	if reqAddress != "" {
+		// Validate "requested_address" parameter.
+		reqIP := net.ParseIP(reqAddress)
+		if reqIP == nil {
+			badRequestError(w, fmt.Errorf("failed to detect a valid IP address from u'%s'", reqAddress))
+			return
+		}
+		if !ipNet.Contains(reqIP) {
+			badRequestError(w, fmt.Errorf("%s is not inside the range %s", reqAddress, ipNet.String()))
+			return
+		}
+	}
+	// Find the network name matching the parsed CIDR.
+	foundNetworkName := ""
+	for netName, netObj := range server.networks {
+		// Get the "ip" and "netmask" attributes of the network.
+		netIP, err := netObj.GetField("ip")
+		checkError(err)
+		netMask, err := netObj.GetField("netmask")
+		checkError(err)
+
+		// Convert the netmask string to net.IPMask.
+		parts := strings.Split(netMask, ".")
+		ipMask := make(net.IPMask, len(parts))
+		for i, part := range parts {
+			intPart, err := strconv.Atoi(part)
+			checkError(err)
+			ipMask[i] = byte(intPart)
+		}
+		netNet := &net.IPNet{IP: net.ParseIP(netIP), Mask: ipMask}
+		if netNet.String() == network {
+			// Exact match found.
+			foundNetworkName = netName
+			break
+		}
+	}
+	if foundNetworkName == "" {
+		badRequestError(w, fmt.Errorf("No network found matching %s", network))
+		return
+	}
+	ips, found := server.ipAddressesPerNetwork[foundNetworkName]
+	if !found {
+		// This will be the first address.
+		ips = []string{}
+	}
+	reservedIP := ""
+	if reqAddress != "" {
+		// Use what the user provided. NOTE: Because this is testing
+		// code, no duplicates check is done.
+		reservedIP = reqAddress
+	} else {
+		// Generate an IP in the network range by incrementing the
+		// last byte of the network's IP.
+		firstIP := ipNet.IP
+		firstIP[len(firstIP)-1] += byte(len(ips) + 1)
+		reservedIP = firstIP.String()
+	}
+	ips = append(ips, reservedIP)
+	server.ipAddressesPerNetwork[foundNetworkName] = ips
+	jsonObj, err := marshalIPAddress(server, reservedIP)
+	checkError(err)
+	maasObj, err := jsonObj.GetMAASObject()
+	checkError(err)
+	res, err := json.MarshalIndent(maasObj, "", "  ")
+	checkError(err)
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+func releaseIPAddressHandler(server *TestServer, w http.ResponseWriter, r *http.Request, ip string) {
+	if netIP := net.ParseIP(ip); netIP == nil {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+	if server.RemoveIPAddress(ip) {
+		w.WriteHeader(http.StatusOK)
+		return
+	}
+	http.NotFoundHandler().ServeHTTP(w, r)
+}
+
+// versionHandler handles requests for '/api/<version>/version/'.
+func versionHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		panic("only version GET operation implemented")
+	}
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, server.versionJSON)
+}
+
+// nodegroupsHandler handles requests for '/api/<version>/nodegroups/*'.
+func nodegroupsHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	bootimagesURLRE := getBootimagesURLRE(server.version)
+	bootimagesURLMatch := bootimagesURLRE.FindStringSubmatch(r.URL.Path)
+	nodegroupsInterfacesURLRE := getNodegroupsInterfacesURLRE(server.version)
+	nodegroupsInterfacesURLMatch := nodegroupsInterfacesURLRE.FindStringSubmatch(r.URL.Path)
+	nodegroupsURL := getNodegroupsEndpoint(server.version)
+	switch {
+	case r.URL.Path == nodegroupsURL:
+		nodegroupsTopLevelHandler(server, w, r, op)
+	case bootimagesURLMatch != nil:
+		bootimagesHandler(server, w, r, bootimagesURLMatch[1], op)
+	case nodegroupsInterfacesURLMatch != nil:
+		nodegroupsInterfacesHandler(server, w, r, nodegroupsInterfacesURLMatch[1], op)
+	default:
+		// Default handler: not found.
+		http.NotFoundHandler().ServeHTTP(w, r)
+	}
+}
+
+// nodegroupsTopLevelHandler handles requests for '/api/<version>/nodegroups/'.
+func nodegroupsTopLevelHandler(server *TestServer, w http.ResponseWriter, r *http.Request, op string) {
+	if r.Method != "GET" || op != "list" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	nodegroups := []JSONObject{}
+	for uuid := range server.bootImages {
+		attrs := map[string]interface{}{
+			"uuid":      uuid,
+			resourceURI: getNodegroupURL(server.version, uuid),
+		}
+		obj := maasify(server.client, attrs)
+		nodegroups = append(nodegroups, obj)
+	}
+
+	res, err := json.MarshalIndent(nodegroups, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// bootimagesHandler handles requests for '/api/<version>/nodegroups/<uuid>/boot-images/'.
+func bootimagesHandler(server *TestServer, w http.ResponseWriter, r *http.Request, nodegroupUUID, op string) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	bootImages, ok := server.bootImages[nodegroupUUID]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+
+	res, err := json.MarshalIndent(bootImages, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// nodegroupsInterfacesHandler handles requests for '/api/<version>/nodegroups/<uuid>/interfaces/'
+func nodegroupsInterfacesHandler(server *TestServer, w http.ResponseWriter, r *http.Request, nodegroupUUID, op string) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+	_, ok := server.bootImages[nodegroupUUID]
+	if !ok {
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+
+	interfaces, ok := server.nodegroupsInterfaces[nodegroupUUID]
+	if !ok {
+		// we already checked the nodegroup exists, so return an empty list
+		interfaces = []JSONObject{}
+	}
+	res, err := json.MarshalIndent(interfaces, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
+
+// zonesHandler handles requests for '/api/<version>/zones/'.
+func zonesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	if r.Method != "GET" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	if len(server.zones) == 0 {
+		// Until a zone is registered, behave as if the endpoint
+		// does not exist. This way we can simulate older MAAS
+		// servers that do not support zones.
+		http.NotFoundHandler().ServeHTTP(w, r)
+		return
+	}
+
+	zones := make([]JSONObject, 0, len(server.zones))
+	for _, zone := range server.zones {
+		zones = append(zones, zone)
+	}
+	res, err := json.MarshalIndent(zones, "", "  ")
+	checkError(err)
+	w.WriteHeader(http.StatusOK)
+	fmt.Fprint(w, string(res))
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/testservice_spaces.go b/automation/vendor/github.com/juju/gomaasapi/testservice_spaces.go
new file mode 100644
index 0000000..c6c1617
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/testservice_spaces.go
@@ -0,0 +1,132 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"regexp"
+)
+
+func getSpacesEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/spaces/", version)
+}
+
+// TestSpace is the MAAS API space representation
+type TestSpace struct {
+	Name        string       `json:"name"`
+	Subnets     []TestSubnet `json:"subnets"`
+	ResourceURI string       `json:"resource_uri"`
+	ID          uint         `json:"id"`
+}
+
+// spacesHandler handles requests for '/api/<version>/spaces/'.
+func spacesHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	if op != "" {
+		w.WriteHeader(http.StatusBadRequest)
+		return
+	}
+
+	spacesURLRE := regexp.MustCompile(`/spaces/(.+?)/`)
+	spacesURLMatch := spacesURLRE.FindStringSubmatch(r.URL.Path)
+	spacesURL := getSpacesEndpoint(server.version)
+
+	var ID uint
+	var gotID bool
+	if spacesURLMatch != nil {
+		ID, err = NameOrIDToID(spacesURLMatch[1], server.spaceNameToID, 1, uint(len(server.spaces)))
+
+		if err != nil {
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		gotID = true
+	}
+
+	switch r.Method {
+	case "GET":
+		w.Header().Set("Content-Type", "application/vnd.api+json")
+		if len(server.spaces) == 0 {
+			// Until a space is registered, behave as if the endpoint
+			// does not exist. This way we can simulate older MAAS
+			// servers that do not support spaces.
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		if r.URL.Path == spacesURL {
+			var spaces []*TestSpace
+			// Iterating by id rather than ranging over the map
+			// preserves the order of the spaces in the result.
+			for i := uint(1); i < server.nextSpace; i++ {
+				s, ok := server.spaces[i]
+				if ok {
+					server.setSubnetsOnSpace(s)
+					spaces = append(spaces, s)
+				}
+			}
+			err = json.NewEncoder(w).Encode(spaces)
+		} else if !gotID {
+			w.WriteHeader(http.StatusBadRequest)
+		} else {
+			err = json.NewEncoder(w).Encode(server.spaces[ID])
+		}
+		checkError(err)
+	case "POST":
+		//server.NewSpace(r.Body)
+	case "PUT":
+		//server.UpdateSpace(r.Body)
+	case "DELETE":
+		delete(server.spaces, ID)
+		w.WriteHeader(http.StatusOK)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+// CreateSpace is used to create new spaces on the server.
+type CreateSpace struct {
+	Name string `json:"name"`
+}
+
+func decodePostedSpace(spaceJSON io.Reader) CreateSpace {
+	var postedSpace CreateSpace
+	decoder := json.NewDecoder(spaceJSON)
+	err := decoder.Decode(&postedSpace)
+	checkError(err)
+	return postedSpace
+}
+
+// NewSpace creates a space in the test server
+func (server *TestServer) NewSpace(spaceJSON io.Reader) *TestSpace {
+	postedSpace := decodePostedSpace(spaceJSON)
+	newSpace := &TestSpace{Name: postedSpace.Name}
+	newSpace.ID = server.nextSpace
+	newSpace.ResourceURI = fmt.Sprintf("/api/%s/spaces/%d/", server.version, int(server.nextSpace))
+	server.spaces[server.nextSpace] = newSpace
+	server.spaceNameToID[newSpace.Name] = newSpace.ID
+
+	server.nextSpace++
+	return newSpace
+}
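+
+// Usage sketch, assuming srv is a *TestServer; the space name is illustrative
+// and strings.NewReader from the standard library is assumed for the io.Reader:
+//
+//	space := srv.NewSpace(strings.NewReader(`{"name": "space-0"}`))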
+
+// setSubnetsOnSpace fetches the subnets for the specified space and adds them
+// to it.
+func (server *TestServer) setSubnetsOnSpace(space *TestSpace) {
+	subnets := []TestSubnet{}
+	for i := uint(1); i < server.nextSubnet; i++ {
+		subnet, ok := server.subnets[i]
+		if ok && subnet.Space == space.Name {
+			subnets = append(subnets, subnet)
+		}
+	}
+	space.Subnets = subnets
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/testservice_subnets.go b/automation/vendor/github.com/juju/gomaasapi/testservice_subnets.go
new file mode 100644
index 0000000..5438669
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/testservice_subnets.go
@@ -0,0 +1,396 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"sort"
+	"strings"
+)
+
+func getSubnetsEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/subnets/", version)
+}
+
+// CreateSubnet is used to receive new subnets via the MAAS API
+type CreateSubnet struct {
+	DNSServers []string `json:"dns_servers"`
+	Name       string   `json:"name"`
+	Space      string   `json:"space"`
+	GatewayIP  string   `json:"gateway_ip"`
+	CIDR       string   `json:"cidr"`
+
+	// VLAN this subnet belongs to. Currently ignored.
+	// TODO: Defaults to the default VLAN
+	// for the provided fabric or defaults to the default VLAN
+	// in the default fabric.
+	VLAN *uint `json:"vlan"`
+
+	// Fabric for the subnet. Currently ignored.
+	// TODO: Defaults to the fabric the provided
+	// VLAN belongs to or defaults to the default fabric.
+	Fabric *uint `json:"fabric"`
+
+	// VID of the VLAN this subnet belongs to. Currently ignored.
+	// TODO: Only used when vlan
+	// is not provided. Picks the VLAN with this VID in the provided
+	// fabric or the default fabric if one is not given.
+	VID *uint `json:"vid"`
+
+	// This is used for updates (PUT) and is ignored by create (POST)
+	ID uint `json:"id"`
+}
+
+// TestSubnet is the MAAS API subnet representation
+type TestSubnet struct {
+	DNSServers []string `json:"dns_servers"`
+	Name       string   `json:"name"`
+	Space      string   `json:"space"`
+	VLAN       TestVLAN `json:"vlan"`
+	GatewayIP  string   `json:"gateway_ip"`
+	CIDR       string   `json:"cidr"`
+
+	ResourceURI        string         `json:"resource_uri"`
+	ID                 uint           `json:"id"`
+	InUseIPAddresses   []IP           `json:"-"`
+	FixedAddressRanges []AddressRange `json:"-"`
+}
+
+// AddFixedAddressRange adds an AddressRange to the list of fixed address
+// ranges that the subnet stores.
+func (server *TestServer) AddFixedAddressRange(subnetID uint, ar AddressRange) {
+	subnet := server.subnets[subnetID]
+	ar.startUint = IPFromString(ar.Start).UInt64()
+	ar.endUint = IPFromString(ar.End).UInt64()
+	subnet.FixedAddressRanges = append(subnet.FixedAddressRanges, ar)
+	server.subnets[subnetID] = subnet
+}
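+
+// Usage sketch, assuming srv is a *TestServer and subnet 1 was created
+// beforehand via NewSubnet; the addresses and purpose are example values:
+//
+//	srv.AddFixedAddressRange(1, AddressRange{
+//		Start:   "192.168.1.100",
+//		End:     "192.168.1.200",
+//		Purpose: []string{"dynamic-range"},
+//	})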
+
+// subnetsHandler handles requests for '/api/<version>/subnets/'.
+func subnetsHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	var err error
+	values, err := url.ParseQuery(r.URL.RawQuery)
+	checkError(err)
+	op := values.Get("op")
+	includeRangesString := strings.ToLower(values.Get("include_ranges"))
+	subnetsURLRE := regexp.MustCompile(`/subnets/(.+?)/`)
+	subnetsURLMatch := subnetsURLRE.FindStringSubmatch(r.URL.Path)
+	subnetsURL := getSubnetsEndpoint(server.version)
+
+	var ID uint
+	var gotID bool
+	if subnetsURLMatch != nil {
+		ID, err = NameOrIDToID(subnetsURLMatch[1], server.subnetNameToID, 1, uint(len(server.subnets)))
+
+		if err != nil {
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		gotID = true
+	}
+
+	var includeRanges bool
+	switch includeRangesString {
+	case "true", "yes", "1":
+		includeRanges = true
+	}
+
+	switch r.Method {
+	case "GET":
+		w.Header().Set("Content-Type", "application/vnd.api+json")
+		if len(server.subnets) == 0 {
+			// Until a subnet is registered, behave as if the endpoint
+			// does not exist. This way we can simulate older MAAS
+			// servers that do not support subnets.
+			http.NotFoundHandler().ServeHTTP(w, r)
+			return
+		}
+
+		if r.URL.Path == subnetsURL {
+			var subnets []TestSubnet
+			for i := uint(1); i < server.nextSubnet; i++ {
+				s, ok := server.subnets[i]
+				if ok {
+					subnets = append(subnets, s)
+				}
+			}
+			PrettyJsonWriter(subnets, w)
+		} else if !gotID {
+			w.WriteHeader(http.StatusBadRequest)
+		} else {
+			switch op {
+			case "unreserved_ip_ranges":
+				PrettyJsonWriter(server.subnetUnreservedIPRanges(server.subnets[ID]), w)
+			case "reserved_ip_ranges":
+				PrettyJsonWriter(server.subnetReservedIPRanges(server.subnets[ID]), w)
+			case "statistics":
+				PrettyJsonWriter(server.subnetStatistics(server.subnets[ID], includeRanges), w)
+			default:
+				PrettyJsonWriter(server.subnets[ID], w)
+			}
+		}
+		checkError(err)
+	case "POST":
+		server.NewSubnet(r.Body)
+	case "PUT":
+		server.UpdateSubnet(r.Body)
+	case "DELETE":
+		delete(server.subnets, ID)
+		w.WriteHeader(http.StatusOK)
+	default:
+		w.WriteHeader(http.StatusBadRequest)
+	}
+}
+
+type addressList []IP
+
+func (a addressList) Len() int           { return len(a) }
+func (a addressList) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a addressList) Less(i, j int) bool { return a[i].UInt64() < a[j].UInt64() }
+
+// AddressRange is used to generate reserved IP address range lists
+type AddressRange struct {
+	Start        string `json:"start"`
+	startUint    uint64
+	End          string `json:"end"`
+	endUint      uint64
+	Purpose      []string `json:"purpose,omitempty"`
+	NumAddresses uint     `json:"num_addresses"`
+}
+
+// AddressRangeList is a list of AddressRange
+type AddressRangeList struct {
+	ar []AddressRange
+}
+
+// Append appends a new AddressRange to an AddressRangeList
+func (ranges *AddressRangeList) Append(startIP, endIP IP) {
+	var i AddressRange
+	i.Start, i.End = startIP.String(), endIP.String()
+	i.startUint, i.endUint = startIP.UInt64(), endIP.UInt64()
+	i.NumAddresses = uint(1 + endIP.UInt64() - startIP.UInt64())
+	i.Purpose = startIP.Purpose
+	ranges.ar = append(ranges.ar, i)
+}
+
+func appendRangesToIPList(subnet TestSubnet, ipAddresses *[]IP) {
+	for _, r := range subnet.FixedAddressRanges {
+		for v := r.startUint; v <= r.endUint; v++ {
+			ip := IPFromInt64(v)
+			ip.Purpose = r.Purpose
+			*ipAddresses = append(*ipAddresses, ip)
+		}
+	}
+}
+
+func (server *TestServer) subnetUnreservedIPRanges(subnet TestSubnet) []AddressRange {
+	// Make a sorted copy of subnet.InUseIPAddresses, including the fixed address ranges
+	ipAddresses := make([]IP, len(subnet.InUseIPAddresses))
+	copy(ipAddresses, subnet.InUseIPAddresses)
+	appendRangesToIPList(subnet, &ipAddresses)
+	sort.Sort(addressList(ipAddresses))
+
+	// We need the first and last address in the subnet
+	var ranges AddressRangeList
+	var startIP, endIP, lastUsableIP IP
+
+	_, ipNet, err := net.ParseCIDR(subnet.CIDR)
+	checkError(err)
+	startIP = IPFromNetIP(ipNet.IP)
+	// Start with the lowest usable address in the range, which is 1 above
+	// what net.ParseCIDR will give back.
+	startIP.SetUInt64(startIP.UInt64() + 1)
+
+	ones, bits := ipNet.Mask.Size()
+	set := ^((^uint64(0)) << uint(bits-ones))
+
+	// The last usable address is one below the broadcast address, which is
+	// what you get by bitwise ORing 'set' with any IP address in the subnet.
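+	// For example (illustrative), in 192.168.1.0/24 ones=24 and bits=32, so
+	// 'set' is 0xff, the broadcast address is 192.168.1.255 and the last
+	// usable address is 192.168.1.254.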
+	lastUsableIP.SetUInt64((startIP.UInt64() | set) - 1)
+
+	for _, endIP = range ipAddresses {
+		end := endIP.UInt64()
+
+		if endIP.UInt64() == startIP.UInt64() {
+			if endIP.UInt64() != lastUsableIP.UInt64() {
+				startIP.SetUInt64(end + 1)
+			}
+			continue
+		}
+
+		if end == lastUsableIP.UInt64() {
+			continue
+		}
+
+		ranges.Append(startIP, IPFromInt64(end-1))
+		startIP.SetUInt64(end + 1)
+	}
+
+	if startIP.UInt64() != lastUsableIP.UInt64() {
+		ranges.Append(startIP, lastUsableIP)
+	}
+
+	return ranges.ar
+}
+
+func (server *TestServer) subnetReservedIPRanges(subnet TestSubnet) []AddressRange {
+	var ranges AddressRangeList
+	var startIP, thisIP IP
+
+	// Make a sorted copy of subnet.InUseIPAddresses, including the fixed address ranges
+	ipAddresses := make([]IP, len(subnet.InUseIPAddresses))
+	copy(ipAddresses, subnet.InUseIPAddresses)
+	appendRangesToIPList(subnet, &ipAddresses)
+	sort.Sort(addressList(ipAddresses))
+	if len(ipAddresses) == 0 {
+		ar := ranges.ar
+		if ar == nil {
+			ar = []AddressRange{}
+		}
+		return ar
+	}
+
+	startIP = ipAddresses[0]
+	lastIP := ipAddresses[0]
+	for _, thisIP = range ipAddresses {
+		var purposeMismatch bool
+		for i, p := range thisIP.Purpose {
+			if startIP.Purpose[i] != p {
+				purposeMismatch = true
+			}
+		}
+		if (thisIP.UInt64() != lastIP.UInt64() && thisIP.UInt64() != lastIP.UInt64()+1) || purposeMismatch {
+			ranges.Append(startIP, lastIP)
+			startIP = thisIP
+		}
+		lastIP = thisIP
+	}
+
+	if len(ranges.ar) == 0 || ranges.ar[len(ranges.ar)-1].endUint != lastIP.UInt64() {
+		ranges.Append(startIP, lastIP)
+	}
+
+	return ranges.ar
+}
+
+// SubnetStats holds statistics about a subnet
+type SubnetStats struct {
+	NumAvailable     uint           `json:"num_available"`
+	LargestAvailable uint           `json:"largest_available"`
+	NumUnavailable   uint           `json:"num_unavailable"`
+	TotalAddresses   uint           `json:"total_addresses"`
+	Usage            float32        `json:"usage"`
+	UsageString      string         `json:"usage_string"`
+	Ranges           []AddressRange `json:"ranges"`
+}
+
+func (server *TestServer) subnetStatistics(subnet TestSubnet, includeRanges bool) SubnetStats {
+	var stats SubnetStats
+	_, ipNet, err := net.ParseCIDR(subnet.CIDR)
+	checkError(err)
+
+	ones, bits := ipNet.Mask.Size()
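+	// The total excludes the network and broadcast addresses; for example a
+	// /24 has 2^8 - 2 = 254 usable addresses.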
+	stats.TotalAddresses = (1 << uint(bits-ones)) - 2
+	stats.NumUnavailable = uint(len(subnet.InUseIPAddresses))
+	stats.NumAvailable = stats.TotalAddresses - stats.NumUnavailable
+	stats.Usage = float32(stats.NumUnavailable) / float32(stats.TotalAddresses)
+	stats.UsageString = fmt.Sprintf("%0.1f%%", stats.Usage*100)
+
+	// Calculate stats.LargestAvailable - the largest contiguous block of IP addresses available
+	unreserved := server.subnetUnreservedIPRanges(subnet)
+	for _, addressRange := range unreserved {
+		if addressRange.NumAddresses > stats.LargestAvailable {
+			stats.LargestAvailable = addressRange.NumAddresses
+		}
+	}
+
+	if includeRanges {
+		stats.Ranges = unreserved
+	}
+
+	return stats
+}
+
+func decodePostedSubnet(subnetJSON io.Reader) CreateSubnet {
+	var postedSubnet CreateSubnet
+	decoder := json.NewDecoder(subnetJSON)
+	err := decoder.Decode(&postedSubnet)
+	checkError(err)
+	if postedSubnet.DNSServers == nil {
+		postedSubnet.DNSServers = []string{}
+	}
+	return postedSubnet
+}
+
+// UpdateSubnet updates an existing subnet in the test server
+func (server *TestServer) UpdateSubnet(subnetJSON io.Reader) TestSubnet {
+	postedSubnet := decodePostedSubnet(subnetJSON)
+	updatedSubnet := subnetFromCreateSubnet(postedSubnet)
+	server.subnets[updatedSubnet.ID] = updatedSubnet
+	return updatedSubnet
+}
+
+// NewSubnet creates a subnet in the test server
+func (server *TestServer) NewSubnet(subnetJSON io.Reader) *TestSubnet {
+	postedSubnet := decodePostedSubnet(subnetJSON)
+	newSubnet := subnetFromCreateSubnet(postedSubnet)
+	newSubnet.ID = server.nextSubnet
+	server.subnets[server.nextSubnet] = newSubnet
+	server.subnetNameToID[newSubnet.Name] = newSubnet.ID
+
+	server.nextSubnet++
+	return &newSubnet
+}
+
+// NodeNetworkInterface represents a network interface attached to a node
+type NodeNetworkInterface struct {
+	Name  string        `json:"name"`
+	Links []NetworkLink `json:"links"`
+}
+
+// Node represents a node
+type Node struct {
+	SystemID   string                 `json:"system_id"`
+	Interfaces []NodeNetworkInterface `json:"interface_set"`
+}
+
+// NetworkLink represents a MAAS network link
+type NetworkLink struct {
+	ID     uint        `json:"id"`
+	Mode   string      `json:"mode"`
+	Subnet *TestSubnet `json:"subnet"`
+}
+
+// SetNodeNetworkLink records the given network interface (and its subnet links) for the node identified by SystemID
+func (server *TestServer) SetNodeNetworkLink(SystemID string, nodeNetworkInterface NodeNetworkInterface) {
+	for i, ni := range server.nodeMetadata[SystemID].Interfaces {
+		if ni.Name == nodeNetworkInterface.Name {
+			server.nodeMetadata[SystemID].Interfaces[i] = nodeNetworkInterface
+			return
+		}
+	}
+	n := server.nodeMetadata[SystemID]
+	n.Interfaces = append(n.Interfaces, nodeNetworkInterface)
+	server.nodeMetadata[SystemID] = n
+}
+
+// subnetFromCreateSubnet builds a TestSubnet from a posted CreateSubnet
+func subnetFromCreateSubnet(postedSubnet CreateSubnet) TestSubnet {
+	var newSubnet TestSubnet
+	newSubnet.DNSServers = postedSubnet.DNSServers
+	newSubnet.Name = postedSubnet.Name
+	newSubnet.Space = postedSubnet.Space
+	//TODO: newSubnet.VLAN = server.postedSubnetVLAN
+	newSubnet.GatewayIP = postedSubnet.GatewayIP
+	newSubnet.CIDR = postedSubnet.CIDR
+	newSubnet.ID = postedSubnet.ID
+	return newSubnet
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/testservice_utils.go b/automation/vendor/github.com/juju/gomaasapi/testservice_utils.go
new file mode 100644
index 0000000..8f941f1
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/testservice_utils.go
@@ -0,0 +1,119 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"bytes"
+	"encoding/binary"
+	"encoding/json"
+	"errors"
+	"net"
+	"net/http"
+	"strconv"
+)
+
+// NameOrIDToID takes a string that contains either an integer ID or the
+// name of a thing. It returns the integer ID contained or mapped to, or an error.
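+// For example (illustrative values): NameOrIDToID("2", map[string]uint{"foo": 1}, 1, 5)
+// returns 2, while NameOrIDToID("foo", map[string]uint{"foo": 1}, 1, 5) returns 1.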
+func NameOrIDToID(v string, nameToID map[string]uint, minID, maxID uint) (ID uint, err error) {
+	ID, ok := nameToID[v]
+	if !ok {
+		intID, err := strconv.Atoi(v)
+		if err != nil {
+			return 0, err
+		}
+		ID = uint(intID)
+	}
+
+	if ID < minID || ID > maxID {
+		return 0, errors.New("ID out of range")
+	}
+
+	return ID, nil
+}
+
+// IP is an enhanced net.IP
+type IP struct {
+	netIP   net.IP
+	Purpose []string
+}
+
+// IPFromNetIP creates an IP from a net.IP.
+func IPFromNetIP(netIP net.IP) IP {
+	var ip IP
+	ip.netIP = netIP
+	return ip
+}
+
+// IPFromString creates a new IP from a string IP address representation
+func IPFromString(v string) IP {
+	return IPFromNetIP(net.ParseIP(v))
+}
+
+// IPFromInt64 creates a new IP from a uint64 IP address representation
+func IPFromInt64(v uint64) IP {
+	var ip IP
+	ip.SetUInt64(v)
+	return ip
+}
+
+// To4 converts the IPv4 address ip to a 4-byte representation. If ip is not
+// an IPv4 address, To4 returns nil.
+func (ip IP) To4() net.IP {
+	return ip.netIP.To4()
+}
+
+// To16 converts the IP address ip to a 16-byte representation. If ip is not
+// an IP address (it is the wrong length), To16 returns nil.
+func (ip IP) To16() net.IP {
+	return ip.netIP.To16()
+}
+
+func (ip IP) String() string {
+	return ip.netIP.String()
+}
+
+// UInt64 returns a uint64 holding the IP address
+func (ip IP) UInt64() uint64 {
+	if len(ip.netIP) == 0 {
+		return uint64(0)
+	}
+
+	if ip.To4() != nil {
+		return uint64(binary.BigEndian.Uint32([]byte(ip.To4())))
+	}
+
+	return binary.BigEndian.Uint64([]byte(ip.To16()))
+}
+
+// SetUInt64 sets the IP value to v
+func (ip *IP) SetUInt64(v uint64) {
+	if len(ip.netIP) == 0 {
+		// If we don't have allocated storage, make an educated guess
+		// at whether the address we received is an IPv4 or IPv6 address.
+		if v == (v & 0x00000000ffffFFFF) {
+			// Guessing IPv4
+			ip.netIP = net.ParseIP("0.0.0.0")
+		} else {
+			ip.netIP = net.ParseIP("2001:4860:0:2001::68")
+		}
+	}
+
+	bb := new(bytes.Buffer)
+	var first int
+	if ip.To4() != nil {
+		binary.Write(bb, binary.BigEndian, uint32(v))
+		first = len(ip.netIP) - 4
+	} else {
+		binary.Write(bb, binary.BigEndian, v)
+	}
+	copy(ip.netIP[first:], bb.Bytes())
+}
+
+// PrettyJsonWriter writes thing to w as indented JSON.
+func PrettyJsonWriter(thing interface{}, w http.ResponseWriter) {
+	var out bytes.Buffer
+	b, err := json.MarshalIndent(thing, "", "  ")
+	checkError(err)
+	out.Write(b)
+	out.WriteTo(w)
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/testservice_vlan.go b/automation/vendor/github.com/juju/gomaasapi/testservice_vlan.go
new file mode 100644
index 0000000..e81eaaa
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/testservice_vlan.go
@@ -0,0 +1,33 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/http"
+)
+
+func getVLANsEndpoint(version string) string {
+	return fmt.Sprintf("/api/%s/vlans/", version)
+}
+
+// TestVLAN is the MAAS API VLAN representation
+type TestVLAN struct {
+	Name   string `json:"name"`
+	Fabric string `json:"fabric"`
+	VID    uint   `json:"vid"`
+
+	ResourceURI string `json:"resource_uri"`
+	ID          uint   `json:"id"`
+}
+
+// PostedVLAN is the MAAS API posted VLAN representation
+type PostedVLAN struct {
+	Name string `json:"name"`
+	VID  uint   `json:"vid"`
+}
+
+func vlansHandler(server *TestServer, w http.ResponseWriter, r *http.Request) {
+	//TODO
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/urlparams.go b/automation/vendor/github.com/juju/gomaasapi/urlparams.go
new file mode 100644
index 0000000..a6bab6e
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/urlparams.go
@@ -0,0 +1,48 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"fmt"
+	"net/url"
+)
+
+// URLParams wraps url.Values to make it easy to add values while skipping empty ones.
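+//
+// An illustrative sketch of intended use (the parameter names are hypothetical):
+//
+//	params := NewURLParams()
+//	params.MaybeAdd("zone", "default")
+//	params.MaybeAdd("hostname", "") // skipped: empty value
+//	params.MaybeAddInt("cpu_count", 0) // skipped: zero value
+//	// params.Values.Encode() == "zone=default"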
+type URLParams struct {
+	Values url.Values
+}
+
+// NewURLParams allocates a new URLParams type.
+func NewURLParams() *URLParams {
+	return &URLParams{Values: make(url.Values)}
+}
+
+// MaybeAdd adds the (name, value) pair iff value is not empty.
+func (p *URLParams) MaybeAdd(name, value string) {
+	if value != "" {
+		p.Values.Add(name, value)
+	}
+}
+
+// MaybeAddInt adds the (name, value) pair iff value is not zero.
+func (p *URLParams) MaybeAddInt(name string, value int) {
+	if value != 0 {
+		p.Values.Add(name, fmt.Sprint(value))
+	}
+}
+
+// MaybeAddBool adds the (name, value) pair iff value is true.
+func (p *URLParams) MaybeAddBool(name string, value bool) {
+	if value {
+		p.Values.Add(name, fmt.Sprint(value))
+	}
+}
+
+// MaybeAddMany adds the (name, value) for each value in values iff
+// value is not empty.
+func (p *URLParams) MaybeAddMany(name string, values []string) {
+	for _, value := range values {
+		p.MaybeAdd(name, value)
+	}
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/util.go b/automation/vendor/github.com/juju/gomaasapi/util.go
new file mode 100644
index 0000000..3f95ac9
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/util.go
@@ -0,0 +1,27 @@
+// Copyright 2012-2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"strings"
+)
+
+// JoinURLs joins a base URL and a subpath together.
+// Regardless of whether baseURL ends in a trailing slash (or even multiple
+// trailing slashes), or whether there are any leading slashes at the beginning
+// of path, the two will always be joined together by a single slash.
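+// For example (illustrative): JoinURLs("http://example.com/MAAS/", "/api/2.0/")
+// returns "http://example.com/MAAS/api/2.0/".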
+func JoinURLs(baseURL, path string) string {
+	return strings.TrimRight(baseURL, "/") + "/" + strings.TrimLeft(path, "/")
+}
+
+// EnsureTrailingSlash appends a slash at the end of the given string unless
+// there already is one.
+// This is used to create the kind of normalized URLs that Django expects.
+// (This avoids Django's redirection when a URL does not end with a slash.)
+func EnsureTrailingSlash(URL string) string {
+	if strings.HasSuffix(URL, "/") {
+		return URL
+	}
+	return URL + "/"
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/vlan.go b/automation/vendor/github.com/juju/gomaasapi/vlan.go
new file mode 100644
index 0000000..c509d42
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/vlan.go
@@ -0,0 +1,154 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type vlan struct {
+	// Add the controller in when we need to do things with the vlan.
+	// controller Controller
+
+	resourceURI string
+
+	id     int
+	name   string
+	fabric string
+
+	vid  int
+	mtu  int
+	dhcp bool
+
+	primaryRack   string
+	secondaryRack string
+}
+
+// ID implements VLAN.
+func (v *vlan) ID() int {
+	return v.id
+}
+
+// Name implements VLAN.
+func (v *vlan) Name() string {
+	return v.name
+}
+
+// Fabric implements VLAN.
+func (v *vlan) Fabric() string {
+	return v.fabric
+}
+
+// VID implements VLAN.
+func (v *vlan) VID() int {
+	return v.vid
+}
+
+// MTU implements VLAN.
+func (v *vlan) MTU() int {
+	return v.mtu
+}
+
+// DHCP implements VLAN.
+func (v *vlan) DHCP() bool {
+	return v.dhcp
+}
+
+// PrimaryRack implements VLAN.
+func (v *vlan) PrimaryRack() string {
+	return v.primaryRack
+}
+
+// SecondaryRack implements VLAN.
+func (v *vlan) SecondaryRack() string {
+	return v.secondaryRack
+}
+
+func readVLANs(controllerVersion version.Number, source interface{}) ([]*vlan, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "vlan base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range vlanDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no vlan read func for version %s", controllerVersion)
+	}
+	readFunc := vlanDeserializationFuncs[deserialisationVersion]
+	return readVLANList(valid, readFunc)
+}
+
+func readVLANList(sourceList []interface{}, readFunc vlanDeserializationFunc) ([]*vlan, error) {
+	result := make([]*vlan, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for vlan %d, %T", i, value)
+		}
+		vlan, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "vlan %d", i)
+		}
+		result = append(result, vlan)
+	}
+	return result, nil
+}
+
+type vlanDeserializationFunc func(map[string]interface{}) (*vlan, error)
+
+var vlanDeserializationFuncs = map[version.Number]vlanDeserializationFunc{
+	twoDotOh: vlan_2_0,
+}
+
+func vlan_2_0(source map[string]interface{}) (*vlan, error) {
+	fields := schema.Fields{
+		"id":           schema.ForceInt(),
+		"resource_uri": schema.String(),
+		"name":         schema.OneOf(schema.Nil(""), schema.String()),
+		"fabric":       schema.String(),
+		"vid":          schema.ForceInt(),
+		"mtu":          schema.ForceInt(),
+		"dhcp_on":      schema.Bool(),
+		// racks are not always set.
+		"primary_rack":   schema.OneOf(schema.Nil(""), schema.String()),
+		"secondary_rack": schema.OneOf(schema.Nil(""), schema.String()),
+	}
+	checker := schema.FieldMap(fields, nil)
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "vlan 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	// Since the primary and secondary racks are optional, we use the two
+	// part cast assignment. If the cast fails, then we get the default value
+	// we care about, which is the empty string.
+	primary_rack, _ := valid["primary_rack"].(string)
+	secondary_rack, _ := valid["secondary_rack"].(string)
+	name, _ := valid["name"].(string)
+
+	result := &vlan{
+		resourceURI:   valid["resource_uri"].(string),
+		id:            valid["id"].(int),
+		name:          name,
+		fabric:        valid["fabric"].(string),
+		vid:           valid["vid"].(int),
+		mtu:           valid["mtu"].(int),
+		dhcp:          valid["dhcp_on"].(bool),
+		primaryRack:   primary_rack,
+		secondaryRack: secondary_rack,
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/gomaasapi/zone.go b/automation/vendor/github.com/juju/gomaasapi/zone.go
new file mode 100644
index 0000000..6f10cb4
--- /dev/null
+++ b/automation/vendor/github.com/juju/gomaasapi/zone.go
@@ -0,0 +1,97 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package gomaasapi
+
+import (
+	"github.com/juju/errors"
+	"github.com/juju/schema"
+	"github.com/juju/version"
+)
+
+type zone struct {
+	// Add the controller in when we need to do things with the zone.
+	// controller Controller
+
+	resourceURI string
+
+	name        string
+	description string
+}
+
+// Name implements Zone.
+func (z *zone) Name() string {
+	return z.name
+}
+
+// Description implements Zone.
+func (z *zone) Description() string {
+	return z.description
+}
+
+func readZones(controllerVersion version.Number, source interface{}) ([]*zone, error) {
+	checker := schema.List(schema.StringMap(schema.Any()))
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "zone base schema check failed")
+	}
+	valid := coerced.([]interface{})
+
+	var deserialisationVersion version.Number
+	for v := range zoneDeserializationFuncs {
+		if v.Compare(deserialisationVersion) > 0 && v.Compare(controllerVersion) <= 0 {
+			deserialisationVersion = v
+		}
+	}
+	if deserialisationVersion == version.Zero {
+		return nil, errors.Errorf("no zone read func for version %s", controllerVersion)
+	}
+	readFunc := zoneDeserializationFuncs[deserialisationVersion]
+	return readZoneList(valid, readFunc)
+}
+
+// readZoneList expects the values of the sourceList to be string maps.
+func readZoneList(sourceList []interface{}, readFunc zoneDeserializationFunc) ([]*zone, error) {
+	result := make([]*zone, 0, len(sourceList))
+	for i, value := range sourceList {
+		source, ok := value.(map[string]interface{})
+		if !ok {
+			return nil, errors.Errorf("unexpected value for zone %d, %T", i, value)
+		}
+		zone, err := readFunc(source)
+		if err != nil {
+			return nil, errors.Annotatef(err, "zone %d", i)
+		}
+		result = append(result, zone)
+	}
+	return result, nil
+}
+
+type zoneDeserializationFunc func(map[string]interface{}) (*zone, error)
+
+var zoneDeserializationFuncs = map[version.Number]zoneDeserializationFunc{
+	twoDotOh: zone_2_0,
+}
+
+func zone_2_0(source map[string]interface{}) (*zone, error) {
+	fields := schema.Fields{
+		"name":         schema.String(),
+		"description":  schema.String(),
+		"resource_uri": schema.String(),
+	}
+	checker := schema.FieldMap(fields, nil) // no defaults
+	coerced, err := checker.Coerce(source, nil)
+	if err != nil {
+		return nil, errors.Annotatef(err, "zone 2.0 schema check failed")
+	}
+	valid := coerced.(map[string]interface{})
+	// From here we know that the map returned from the schema coercion
+	// contains fields of the right type.
+
+	result := &zone{
+		name:        valid["name"].(string),
+		description: valid["description"].(string),
+		resourceURI: valid["resource_uri"].(string),
+	}
+	return result, nil
+}
diff --git a/automation/vendor/github.com/juju/loggo/LICENSE b/automation/vendor/github.com/juju/loggo/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/loggo/Makefile b/automation/vendor/github.com/juju/loggo/Makefile
new file mode 100644
index 0000000..89afa49
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/Makefile
@@ -0,0 +1,11 @@
+default: check
+
+check:
+	go test
+
+docs:
+	godoc2md github.com/juju/loggo > README.md
+	sed -i 's|\[godoc-link-here\]|[![GoDoc](https://godoc.org/github.com/juju/loggo?status.svg)](https://godoc.org/github.com/juju/loggo)|' README.md 
+
+
+.PHONY: default check docs
diff --git a/automation/vendor/github.com/juju/loggo/README.md b/automation/vendor/github.com/juju/loggo/README.md
new file mode 100644
index 0000000..a73a9db
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/README.md
@@ -0,0 +1,711 @@
+
+# loggo
+    import "github.com/juju/loggo"
+
+[![GoDoc](https://godoc.org/github.com/juju/loggo?status.svg)](https://godoc.org/github.com/juju/loggo)
+
+### Module level logging for Go
+This package provides an alternative to the standard library log package.
+
+The actual logging functions never return errors.  If you are logging
+something, you really don't want to be worried about the logging
+having trouble.
+
+Modules have names that are defined by dotted strings.
+
+
+	"first.second.third"
+
+There is a root module that has the name `""`.  Each module
+(except the root module) has a parent, identified by the part of
+the name without the last dotted value.
+* the parent of "first.second.third" is "first.second"
+* the parent of "first.second" is "first"
+* the parent of "first" is "" (the root module)
+
+Each module can specify its own severity level.  Logging calls that are of
+a lower severity than the module's effective severity level are not written
+out.
+
+Loggers are created using the GetLogger function.
+
+
+	logger := loggo.GetLogger("foo.bar")
+
+By default there is one writer registered, which will write to Stderr,
+and the root module, which will only emit warnings and above.
+If you want to continue using the default
+logger, but have it emit all logging levels you need to do the following.
+
+
+	writer, err := loggo.RemoveWriter("default")
+	// err is non-nil if and only if the name isn't found.
+	loggo.RegisterWriter("default", loggo.NewMinimumLevelWriter(writer, loggo.TRACE))
+
+
+
+
+## Constants
+``` go
+const DefaultWriterName = "default"
+```
+DefaultWriterName is the name of the default writer for
+a Context.
+
+
+## Variables
+``` go
+var (
+    // SeverityColor defines the colors for the levels output by the ColorWriter.
+    SeverityColor = map[Level]*ansiterm.Context{
+        TRACE:   ansiterm.Foreground(ansiterm.Default),
+        DEBUG:   ansiterm.Foreground(ansiterm.Green),
+        INFO:    ansiterm.Foreground(ansiterm.BrightBlue),
+        WARNING: ansiterm.Foreground(ansiterm.Yellow),
+        ERROR:   ansiterm.Foreground(ansiterm.BrightRed),
+        CRITICAL: &ansiterm.Context{
+            Foreground: ansiterm.White,
+            Background: ansiterm.Red,
+        },
+    }
+    // LocationColor defines the colors for the location output by the ColorWriter.
+    LocationColor = ansiterm.Foreground(ansiterm.BrightBlue)
+)
+```
+``` go
+var TimeFormat = initTimeFormat()
+```
+TimeFormat is the time format used for the default writer.
+This can be set with the environment variable LOGGO_TIME_FORMAT.
+
+
+## func ConfigureLoggers
+``` go
+func ConfigureLoggers(specification string) error
+```
+ConfigureLoggers configures loggers according to the given string
+specification, which specifies a set of modules and their associated
+logging levels.  Loggers are colon- or semicolon-separated; each
+module is specified as <modulename>=<level>.  White space outside of
+module names and levels is ignored.  The root module is specified
+with the name "<root>".
+
+An example specification:
+
+
+	`<root>=ERROR; foo.bar=WARNING`
+
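+
+As an illustrative sketch, the specification above could be applied at
+startup like so:
+
+``` go
+if err := loggo.ConfigureLoggers("<root>=ERROR; foo.bar=WARNING"); err != nil {
+    fmt.Println("invalid logging specification:", err)
+}
+```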
+
+## func DefaultFormatter
+``` go
+func DefaultFormatter(entry Entry) string
+```
+DefaultFormatter returns the parameters separated by spaces except for
+filename and line which are separated by a colon.  The timestamp is shown
+to second resolution in UTC. For example:
+
+
+	2016-07-02 15:04:05
+
+
+## func LoggerInfo
+``` go
+func LoggerInfo() string
+```
+LoggerInfo returns information about the configured loggers and their
+logging levels. The information is returned in the format expected by
+ConfigureLoggers. Loggers with UNSPECIFIED level will not
+be included.
+
+
+## func RegisterWriter
+``` go
+func RegisterWriter(name string, writer Writer) error
+```
+RegisterWriter adds the writer to the list of writers in the DefaultContext
+that get notified when logging.  If there is already a registered writer
+with that name, an error is returned.
+
+
+## func ResetLogging
+``` go
+func ResetLogging()
+```
+ResetLogging iterates through the known modules and sets the levels of all
+to UNSPECIFIED, except for <root> which is set to WARNING. The call also
+removes all writers in the DefaultContext and puts the original default
+writer back as the only writer.
+
+
+## func ResetWriters
+``` go
+func ResetWriters()
+```
+ResetWriters puts the list of writers back into the initial state.
+
+
+
+## type Config
+``` go
+type Config map[string]Level
+```
+Config is a mapping of logger module names to logging severity levels.
+
+
+
+
+
+
+
+
+
+### func ParseConfigString
+``` go
+func ParseConfigString(specification string) (Config, error)
+```
+ParseConfigString parses a logger configuration string into a map of logger
+names and their associated log level. This method is provided to allow
+other programs to pre-validate a configuration string rather than just
+calling ConfigureLoggers.
+
+Logging modules are colon- or semicolon-separated; each module is specified
+as <modulename>=<level>.  White space outside of module names and levels is
+ignored.  The root module is specified with the name "<root>".
+
+As a special case, a log level may be specified on its own.
+This is equivalent to specifying the level of the root module,
+so "DEBUG" is equivalent to `<root>=DEBUG`
+
+An example specification:
+
+
+	`<root>=ERROR; foo.bar=WARNING`
+
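+
+An illustrative sketch that pre-validates a specification and then applies it
+to the default context:
+
+``` go
+cfg, err := loggo.ParseConfigString("<root>=ERROR; foo.bar=WARNING")
+if err != nil {
+    fmt.Println("invalid logging configuration:", err)
+} else {
+    loggo.DefaultContext().ApplyConfig(cfg)
+}
+```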
+
+
+
+### func (Config) String
+``` go
+func (c Config) String() string
+```
+String returns a logger configuration string that may be parsed
+using ParseConfigString.
+
+
+
+## type Context
+``` go
+type Context struct {
+    // contains filtered or unexported fields
+}
+```
+Context produces loggers for a hierarchy of modules. The context holds
+a collection of hierarchical loggers and their writers.
+
+
+
+
+
+
+
+
+
+### func DefaultContext
+``` go
+func DefaultContext() *Context
+```
+DefaultContext returns the global default logging context.
+
+
+### func NewContext
+``` go
+func NewContext(rootLevel Level) *Context
+```
+NewContext returns a new Context with no writers set.
+If the root level is UNSPECIFIED, WARNING is used.
+
+
+
+
+### func (\*Context) AddWriter
+``` go
+func (c *Context) AddWriter(name string, writer Writer) error
+```
+AddWriter adds a writer to the list to be called for each logging call.
+The name cannot be empty, and the writer cannot be nil. If an existing
+writer exists with the specified name, an error is returned.
+
+
+
+### func (\*Context) ApplyConfig
+``` go
+func (c *Context) ApplyConfig(config Config)
+```
+ApplyConfig configures the logging modules according to the provided config.
+
+
+
+### func (\*Context) CompleteConfig
+``` go
+func (c *Context) CompleteConfig() Config
+```
+CompleteConfig returns all the loggers and their defined levels,
+even if that level is UNSPECIFIED.
+
+
+
+### func (\*Context) Config
+``` go
+func (c *Context) Config() Config
+```
+Config returns the current configuration of the Loggers. Loggers
+with UNSPECIFIED level will not be included.
+
+
+
+### func (\*Context) GetLogger
+``` go
+func (c *Context) GetLogger(name string) Logger
+```
+GetLogger returns a Logger for the given module name, creating it and
+its parents if necessary.
+
+
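+
+A minimal sketch of building an isolated context (the module name and message
+below are illustrative):
+
+``` go
+ctx := loggo.NewContext(loggo.INFO)
+// Write entries to stderr using the default formatter.
+ctx.AddWriter("default", loggo.NewSimpleWriter(os.Stderr, loggo.DefaultFormatter))
+logger := ctx.GetLogger("myapp.db")
+logger.Infof("connected to %s", "the database")
+```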
+
+### func (\*Context) RemoveWriter
+``` go
+func (c *Context) RemoveWriter(name string) (Writer, error)
+```
+RemoveWriter removes the specified writer. If a writer is not found with
+the specified name an error is returned. The writer that was removed is also
+returned.
+
+
+
+### func (\*Context) ReplaceWriter
+``` go
+func (c *Context) ReplaceWriter(name string, writer Writer) (Writer, error)
+```
+ReplaceWriter is a convenience method that does the equivalent of RemoveWriter
+followed by AddWriter with the same name. The replaced writer is returned.
+
+
+
+### func (\*Context) ResetLoggerLevels
+``` go
+func (c *Context) ResetLoggerLevels()
+```
+ResetLoggerLevels iterates through the known logging modules and sets the
+levels of all to UNSPECIFIED, except for <root> which is set to WARNING.
+
+
+
+### func (\*Context) ResetWriters
+``` go
+func (c *Context) ResetWriters()
+```
+ResetWriters is generally only used in testing and removes all the writers.
+
+
+
+## type Entry
+``` go
+type Entry struct {
+    // Level is the severity of the log message.
+    Level Level
+    // Module is the dotted module name from the logger.
+    Module string
+    // Filename is the full path of the file that logged the message.
+    Filename string
+    // Line is the line number of the Filename.
+    Line int
+    // Timestamp is when the log message was created
+    Timestamp time.Time
+    // Message is the formatted string from teh log call.
+    Message string
+}
+```
+Entry represents a single log message.
+
+
+
+
+
+
+
+
+
+
+
+## type Level
+``` go
+type Level uint32
+```
+Level holds a severity level.
+
+
+
+``` go
+const (
+    UNSPECIFIED Level = iota
+    TRACE
+    DEBUG
+    INFO
+    WARNING
+    ERROR
+    CRITICAL
+)
+```
+The severity levels. Higher values are considered more
+important.
+
+
+
+
+
+
+
+### func ParseLevel
+``` go
+func ParseLevel(level string) (Level, bool)
+```
+ParseLevel converts a string representation of a logging level to a
+Level. It returns the level and whether it was valid or not.
+
+
+
+
+### func (Level) Short
+``` go
+func (level Level) Short() string
+```
+Short returns a five character string to use in
+aligned logging output.
+
+
+
+### func (Level) String
+``` go
+func (level Level) String() string
+```
+String implements Stringer.
+
+
+
+## type Logger
+``` go
+type Logger struct {
+    // contains filtered or unexported fields
+}
+```
+A Logger represents a logging module. It has an associated logging
+level which can be changed; messages of lesser severity will
+be dropped. Loggers have a hierarchical relationship - see
+the package documentation.
+
+The zero Logger value is usable - any messages logged
+to it will be sent to the root Logger.
+
+
+
+
+
+
+
+
+
+### func GetLogger
+``` go
+func GetLogger(name string) Logger
+```
+GetLogger returns a Logger for the given module name,
+creating it and its parents if necessary.
+
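+
+A short, illustrative sketch of typical module-level usage (the module name
+and function are hypothetical):
+
+``` go
+var logger = loggo.GetLogger("myapp.store")
+
+func save(name string) {
+    if logger.IsDebugEnabled() {
+        logger.Debugf("about to save %q", name)
+    }
+    logger.Infof("saved %s", name)
+}
+```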
+
+
+
+### func (Logger) Criticalf
+``` go
+func (logger Logger) Criticalf(message string, args ...interface{})
+```
+Criticalf logs the printf-formatted message at critical level.
+
+
+
+### func (Logger) Debugf
+``` go
+func (logger Logger) Debugf(message string, args ...interface{})
+```
+Debugf logs the printf-formatted message at debug level.
+
+
+
+### func (Logger) EffectiveLogLevel
+``` go
+func (logger Logger) EffectiveLogLevel() Level
+```
+EffectiveLogLevel returns the effective min log level of
+the receiver - that is, messages with a lesser severity
+level will be discarded.
+
+If the log level of the receiver is unspecified,
+it will be taken from the effective log level of its
+parent.
+
+
+
+### func (Logger) Errorf
+``` go
+func (logger Logger) Errorf(message string, args ...interface{})
+```
+Errorf logs the printf-formatted message at error level.
+
+
+
+### func (Logger) Infof
+``` go
+func (logger Logger) Infof(message string, args ...interface{})
+```
+Infof logs the printf-formatted message at info level.
+
+
+
+### func (Logger) IsDebugEnabled
+``` go
+func (logger Logger) IsDebugEnabled() bool
+```
+IsDebugEnabled returns whether debugging is enabled
+at debug level.
+
+
+
+### func (Logger) IsErrorEnabled
+``` go
+func (logger Logger) IsErrorEnabled() bool
+```
+IsErrorEnabled returns whether logging is enabled
+at error level.
+
+
+
+### func (Logger) IsInfoEnabled
+``` go
+func (logger Logger) IsInfoEnabled() bool
+```
+IsInfoEnabled returns whether logging is enabled
+at info level.
+
+
+
+### func (Logger) IsLevelEnabled
+``` go
+func (logger Logger) IsLevelEnabled(level Level) bool
+```
+IsLevelEnabled returns whether logging is enabled
+for the given log level.
+
+
+
+### func (Logger) IsTraceEnabled
+``` go
+func (logger Logger) IsTraceEnabled() bool
+```
+IsTraceEnabled returns whether logging is enabled
+at trace level.
+
+
+
+### func (Logger) IsWarningEnabled
+``` go
+func (logger Logger) IsWarningEnabled() bool
+```
+IsWarningEnabled returns whether logging is enabled
+at warning level.
+
+
+
+### func (Logger) LogCallf
+``` go
+func (logger Logger) LogCallf(calldepth int, level Level, message string, args ...interface{})
+```
+LogCallf logs a printf-formatted message at the given level.
+The location of the call is indicated by the calldepth argument.
+A calldepth of 1 means the function that called this function.
+A message will be discarded if level is less than the
+effective log level of the logger.
+Note that the writers may also filter out messages that
+are less than their registered minimum severity level.
+
+
+
+### func (Logger) LogLevel
+``` go
+func (logger Logger) LogLevel() Level
+```
+LogLevel returns the configured min log level of the logger.
+
+
+
+### func (Logger) Logf
+``` go
+func (logger Logger) Logf(level Level, message string, args ...interface{})
+```
+Logf logs a printf-formatted message at the given level.
+A message will be discarded if level is less than the
+effective log level of the logger.
+Note that the writers may also filter out messages that
+are less than their registered minimum severity level.
+
+
+
+### func (Logger) Name
+``` go
+func (logger Logger) Name() string
+```
+Name returns the logger's module name.
+
+
+
+### func (Logger) SetLogLevel
+``` go
+func (logger Logger) SetLogLevel(level Level)
+```
+SetLogLevel sets the severity level of the given logger.
+The root logger cannot be set to UNSPECIFIED level.
+See EffectiveLogLevel for how this affects the
+actual messages logged.
+
+
+
+### func (Logger) Tracef
+``` go
+func (logger Logger) Tracef(message string, args ...interface{})
+```
+Tracef logs the printf-formatted message at trace level.
+
+
+
+### func (Logger) Warningf
+``` go
+func (logger Logger) Warningf(message string, args ...interface{})
+```
+Warningf logs the printf-formatted message at warning level.
+
+
+
+## type TestWriter
+``` go
+type TestWriter struct {
+    // contains filtered or unexported fields
+}
+```
+TestWriter is a useful Writer for testing purposes.  Each component of the
+logging message is stored in the Log array.
+
+
+
+
+
+
+
+
+
+
+
+### func (\*TestWriter) Clear
+``` go
+func (writer *TestWriter) Clear()
+```
+Clear removes any saved log messages.
+
+
+
+### func (\*TestWriter) Log
+``` go
+func (writer *TestWriter) Log() []Entry
+```
+Log returns a copy of the current logged values.
+
+
+
+### func (\*TestWriter) Write
+``` go
+func (writer *TestWriter) Write(entry Entry)
+```
+Write saves the given entry by appending it to the Log array.
+
+
+
+## type Writer
+``` go
+type Writer interface {
+    // Write writes a message to the Writer with the given level and module
+    // name. The filename and line hold the file name and line number of the
+    // code that is generating the log message; the time stamp holds the time
+    // the log message was generated, and message holds the log message
+    // itself.
+    Write(entry Entry)
+}
+```
+Writer is implemented by any recipient of log messages.
+
+
+
+
+
+
+
+
+
+### func NewColorWriter
+``` go
+func NewColorWriter(writer io.Writer) Writer
+```
+NewColorWriter will write out colored severity levels if the writer is
+outputting to a terminal.
+
+
+### func NewMinimumLevelWriter
+``` go
+func NewMinimumLevelWriter(writer Writer, minLevel Level) Writer
+```
+NewMinimumLevelWriter returns a Writer that will only pass on the Write calls
+to the provided writer if the log level is at or above the specified
+minimum level.
+
+
+### func NewSimpleWriter
+``` go
+func NewSimpleWriter(writer io.Writer, formatter func(entry Entry) string) Writer
+```
+NewSimpleWriter returns a new writer that writes log messages to the given
+io.Writer formatting the messages with the given formatter.
+
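+
+These constructors compose; as an illustrative sketch, a colored writer that
+only passes INFO and above could be registered like this:
+
+``` go
+colored := loggo.NewColorWriter(os.Stderr)
+minInfo := loggo.NewMinimumLevelWriter(colored, loggo.INFO)
+if err := loggo.RegisterWriter("color", minInfo); err != nil {
+    fmt.Println("could not register writer:", err)
+}
+```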
+
+### func RemoveWriter
+``` go
+func RemoveWriter(name string) (Writer, error)
+```
+RemoveWriter removes the Writer identified by 'name' and returns it.
+If the Writer is not found, an error is returned.
+
+
+### func ReplaceDefaultWriter
+``` go
+func ReplaceDefaultWriter(writer Writer) (Writer, error)
+```
+ReplaceDefaultWriter is a convenience method that does the equivalent of
+RemoveWriter and then RegisterWriter with the name "default".  The previous
+default writer, if any, is returned.
+
+
+
+
+
+
+
+
+
+
+- - -
+Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md)
\ No newline at end of file
diff --git a/automation/vendor/github.com/juju/loggo/config.go b/automation/vendor/github.com/juju/loggo/config.go
new file mode 100644
index 0000000..1b3eaa5
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/config.go
@@ -0,0 +1,96 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// Config is a mapping of logger module names to logging severity levels.
+type Config map[string]Level
+
+// String returns a logger configuration string that may be parsed
+// using ParseConfigString.
+func (c Config) String() string {
+	if c == nil {
+		return ""
+	}
+	// output in alphabetical order.
+	names := []string{}
+	for name := range c {
+		names = append(names, name)
+	}
+	sort.Strings(names)
+
+	var entries []string
+	for _, name := range names {
+		level := c[name]
+		if name == "" {
+			name = rootString
+		}
+		entry := fmt.Sprintf("%s=%s", name, level)
+		entries = append(entries, entry)
+	}
+	return strings.Join(entries, ";")
+}
+
+func parseConfigValue(value string) (string, Level, error) {
+	pair := strings.SplitN(value, "=", 2)
+	if len(pair) < 2 {
+		return "", UNSPECIFIED, fmt.Errorf("config value expected '=', found %q", value)
+	}
+	name := strings.TrimSpace(pair[0])
+	if name == "" {
+		return "", UNSPECIFIED, fmt.Errorf("config value %q has missing module name", value)
+	}
+
+	levelStr := strings.TrimSpace(pair[1])
+	level, ok := ParseLevel(levelStr)
+	if !ok {
+		return "", UNSPECIFIED, fmt.Errorf("unknown severity level %q", levelStr)
+	}
+	if name == rootString {
+		name = ""
+	}
+	return name, level, nil
+}
+
+// ParseConfigString parses a logger configuration string into a map of logger
+// names and their associated log level. This method is provided to allow
+// other programs to pre-validate a configuration string rather than just
+// calling ConfigureLoggers.
+//
+// Logging modules are colon- or semicolon-separated; each module is specified
+// as <modulename>=<level>.  White space outside of module names and levels is
+// ignored.  The root module is specified with the name "<root>".
+//
+// As a special case, a log level may be specified on its own.
+// This is equivalent to specifying the level of the root module,
+// so "DEBUG" is equivalent to `<root>=DEBUG`
+//
+// An example specification:
+//	`<root>=ERROR; foo.bar=WARNING`
+func ParseConfigString(specification string) (Config, error) {
+	specification = strings.TrimSpace(specification)
+	if specification == "" {
+		return nil, nil
+	}
+	cfg := make(Config)
+	if level, ok := ParseLevel(specification); ok {
+		cfg[""] = level
+		return cfg, nil
+	}
+
+	values := strings.FieldsFunc(specification, func(r rune) bool { return r == ';' || r == ':' })
+	for _, value := range values {
+		name, level, err := parseConfigValue(value)
+		if err != nil {
+			return nil, err
+		}
+		cfg[name] = level
+	}
+	return cfg, nil
+}
diff --git a/automation/vendor/github.com/juju/loggo/context.go b/automation/vendor/github.com/juju/loggo/context.go
new file mode 100644
index 0000000..f5739d9
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/context.go
@@ -0,0 +1,198 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+)
+
+// Context produces loggers for a hierarchy of modules. The context holds
+// a collection of hierarchical loggers and their writers.
+type Context struct {
+	root *module
+
+	// Perhaps have one mutex?
+	modulesMutex sync.Mutex
+	modules      map[string]*module
+
+	writersMutex sync.Mutex
+	writers      map[string]Writer
+
+	// writeMutex is used to serialise write operations.
+	writeMutex sync.Mutex
+}
+
+// NewContext returns a new Context with no writers set.
+// If the root level is UNSPECIFIED, WARNING is used.
+func NewContext(rootLevel Level) *Context {
+	if rootLevel < TRACE || rootLevel > CRITICAL {
+		rootLevel = WARNING
+	}
+	context := &Context{
+		modules: make(map[string]*module),
+		writers: make(map[string]Writer),
+	}
+	context.root = &module{
+		level:   rootLevel,
+		context: context,
+	}
+	context.modules[""] = context.root
+	return context
+}
+
+// GetLogger returns a Logger for the given module name, creating it and
+// its parents if necessary.
+func (c *Context) GetLogger(name string) Logger {
+	name = strings.TrimSpace(strings.ToLower(name))
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+	return Logger{c.getLoggerModule(name)}
+}
+
+func (c *Context) getLoggerModule(name string) *module {
+	if name == rootString {
+		name = ""
+	}
+	impl, found := c.modules[name]
+	if found {
+		return impl
+	}
+	parentName := ""
+	if i := strings.LastIndex(name, "."); i >= 0 {
+		parentName = name[0:i]
+	}
+	parent := c.getLoggerModule(parentName)
+	impl = &module{name, UNSPECIFIED, parent, c}
+	c.modules[name] = impl
+	return impl
+}
+
+// Config returns the current configuration of the Loggers. Loggers
+// with UNSPECIFIED level will not be included.
+func (c *Context) Config() Config {
+	result := make(Config)
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+
+	for name, module := range c.modules {
+		if module.level != UNSPECIFIED {
+			result[name] = module.level
+		}
+	}
+	return result
+}
+
+// CompleteConfig returns all the loggers and their defined levels,
+// even if that level is UNSPECIFIED.
+func (c *Context) CompleteConfig() Config {
+	result := make(Config)
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+
+	for name, module := range c.modules {
+		result[name] = module.level
+	}
+	return result
+}
+
+// ApplyConfig configures the logging modules according to the provided config.
+func (c *Context) ApplyConfig(config Config) {
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+	for name, level := range config {
+		module := c.getLoggerModule(name)
+		module.setLevel(level)
+	}
+}
+
+// ResetLoggerLevels iterates through the known logging modules and sets the
+// levels of all to UNSPECIFIED, except for <root> which is set to WARNING.
+func (c *Context) ResetLoggerLevels() {
+	c.modulesMutex.Lock()
+	defer c.modulesMutex.Unlock()
+	// Setting the root module to UNSPECIFIED will set it to WARNING.
+	for _, module := range c.modules {
+		module.setLevel(UNSPECIFIED)
+	}
+}
+
+func (c *Context) write(entry Entry) {
+	c.writeMutex.Lock()
+	defer c.writeMutex.Unlock()
+	for _, writer := range c.getWriters() {
+		writer.Write(entry)
+	}
+}
+
+func (c *Context) getWriters() []Writer {
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	var result []Writer
+	for _, writer := range c.writers {
+		result = append(result, writer)
+	}
+	return result
+}
+
+// AddWriter adds a writer to the list to be called for each logging call.
+// The name cannot be empty, and the writer cannot be nil. If an existing
+// writer exists with the specified name, an error is returned.
+func (c *Context) AddWriter(name string, writer Writer) error {
+	if name == "" {
+		return fmt.Errorf("name cannot be empty")
+	}
+	if writer == nil {
+		return fmt.Errorf("writer cannot be nil")
+	}
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	if _, found := c.writers[name]; found {
+		return fmt.Errorf("context already has a writer named %q", name)
+	}
+	c.writers[name] = writer
+	return nil
+}
+
+// RemoveWriter removes the specified writer. If a writer is not found with
+// the specified name an error is returned. The writer that was removed is also
+// returned.
+func (c *Context) RemoveWriter(name string) (Writer, error) {
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	reg, found := c.writers[name]
+	if !found {
+		return nil, fmt.Errorf("context has no writer named %q", name)
+	}
+	delete(c.writers, name)
+	return reg, nil
+}
+
+// ReplaceWriter is a convenience method that does the equivalent of RemoveWriter
+// followed by AddWriter with the same name. The replaced writer is returned.
+func (c *Context) ReplaceWriter(name string, writer Writer) (Writer, error) {
+	if name == "" {
+		return nil, fmt.Errorf("name cannot be empty")
+	}
+	if writer == nil {
+		return nil, fmt.Errorf("writer cannot be nil")
+	}
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	reg, found := c.writers[name]
+	if !found {
+		return nil, fmt.Errorf("context has no writer named %q", name)
+	}
+	oldWriter := reg
+	c.writers[name] = writer
+	return oldWriter, nil
+}
+
+// ResetWriters is generally only used in testing and removes all the writers.
+func (c *Context) ResetWriters() {
+	c.writersMutex.Lock()
+	defer c.writersMutex.Unlock()
+	c.writers = make(map[string]Writer)
+}
diff --git a/automation/vendor/github.com/juju/loggo/dependencies.tsv b/automation/vendor/github.com/juju/loggo/dependencies.tsv
new file mode 100644
index 0000000..20daf32
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/dependencies.tsv
@@ -0,0 +1,5 @@
+github.com/juju/ansiterm	git	c368f42cb4b32a70389cded05c7345d9ccdce889	2016-08-17T02:52:20Z
+github.com/lunixbochs/vtclean	git	4fbf7632a2c6d3fbdb9931439bdbbeded02cbe36	2016-01-25T03:51:06Z
+github.com/mattn/go-colorable	git	ed8eb9e318d7a84ce5915b495b7d35e0cfe7b5a8	2016-07-31T23:54:17Z
+github.com/mattn/go-isatty	git	66b8e73f3f5cda9f96b69efd03dd3d7fc4a5cdb8	2016-08-06T12:27:52Z
+gopkg.in/check.v1	git	4f90aeace3a26ad7021961c297b22c42160c7b25	2016-01-05T16:49:36Z
diff --git a/automation/vendor/github.com/juju/loggo/doc.go b/automation/vendor/github.com/juju/loggo/doc.go
new file mode 100644
index 0000000..bb19cd5
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/doc.go
@@ -0,0 +1,42 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+/*
+[godoc-link-here]
+
+Module level logging for Go
+
+This package provides an alternative to the standard library log package.
+
+The actual logging functions never return errors.  If you are logging
+something, you really don't want to be worried about the logging
+having trouble.
+
+Modules have names that are defined by dotted strings.
+	"first.second.third"
+
+There is a root module that has the name `""`.  Each module
+(except the root module) has a parent, identified by the part of
+the name without the last dotted value.
+* the parent of "first.second.third" is "first.second"
+* the parent of "first.second" is "first"
+* the parent of "first" is "" (the root module)
+
+Each module can specify its own severity level.  Logging calls that are of
+a lower severity than the module's effective severity level are not written
+out.
+
+Loggers are created using the GetLogger function.
+	logger := loggo.GetLogger("foo.bar")
+
+By default there is one writer registered, which will write to Stderr,
+and the root module is set to only emit warnings and above.
+If you want to continue using the default writer, but have it emit all
+logging levels, raise the root module's level with ConfigureLoggers:
+
+	loggo.ConfigureLoggers("<root>=TRACE")
+
+*/
+package loggo
diff --git a/automation/vendor/github.com/juju/loggo/entry.go b/automation/vendor/github.com/juju/loggo/entry.go
new file mode 100644
index 0000000..b61aa54
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/entry.go
@@ -0,0 +1,22 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import "time"
+
+// Entry represents a single log message.
+type Entry struct {
+	// Level is the severity of the log message.
+	Level Level
+	// Module is the dotted module name from the logger.
+	Module string
+	// Filename is the full path of the file that logged the message.
+	Filename string
+	// Line is the line number of the Filename.
+	Line int
+	// Timestamp is when the log message was created.
+	Timestamp time.Time
+	// Message is the formatted string from the log call.
+	Message string
+}
diff --git a/automation/vendor/github.com/juju/loggo/formatter.go b/automation/vendor/github.com/juju/loggo/formatter.go
new file mode 100644
index 0000000..ef8aa7a
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/formatter.go
@@ -0,0 +1,38 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"time"
+)
+
+// DefaultFormatter returns the parameters separated by spaces except for
+// filename and line which are separated by a colon.  The timestamp is shown
+// to second resolution in UTC. For example:
+//   2016-07-02 15:04:05
+func DefaultFormatter(entry Entry) string {
+	ts := entry.Timestamp.In(time.UTC).Format("2006-01-02 15:04:05")
+	// Just get the basename from the filename
+	filename := filepath.Base(entry.Filename)
+	return fmt.Sprintf("%s %s %s %s:%d %s", ts, entry.Level, entry.Module, filename, entry.Line, entry.Message)
+}
+
+// TimeFormat is the time format used for the default writer.
+// This can be set with the environment variable LOGGO_TIME_FORMAT.
+var TimeFormat = initTimeFormat()
+
+func initTimeFormat() string {
+	format := os.Getenv("LOGGO_TIME_FORMAT")
+	if format != "" {
+		return format
+	}
+	return "15:04:05"
+}
+
+func formatTime(ts time.Time) string {
+	return ts.Format(TimeFormat)
+}
diff --git a/automation/vendor/github.com/juju/loggo/global.go b/automation/vendor/github.com/juju/loggo/global.go
new file mode 100644
index 0000000..7cf95ca
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/global.go
@@ -0,0 +1,85 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+var (
+	defaultContext = newDefaultContext()
+)
+
+func newDefaultContext() *Context {
+	ctx := NewContext(WARNING)
+	ctx.AddWriter(DefaultWriterName, defaultWriter())
+	return ctx
+}
+
+// DefaultContext returns the global default logging context.
+func DefaultContext() *Context {
+	return defaultContext
+}
+
+// LoggerInfo returns information about the configured loggers and their
+// logging levels. The information is returned in the format expected by
+// ConfigureLoggers. Loggers with UNSPECIFIED level will not
+// be included.
+func LoggerInfo() string {
+	return defaultContext.Config().String()
+}
+
+// GetLogger returns a Logger for the given module name,
+// creating it and its parents if necessary.
+func GetLogger(name string) Logger {
+	return defaultContext.GetLogger(name)
+}
+
+// ResetLogging iterates through the known modules and sets the levels of all
+// to UNSPECIFIED, except for <root> which is set to WARNING. The call also
+// removes all writers from the DefaultContext.
+func ResetLogging() {
+	defaultContext.ResetLoggerLevels()
+	defaultContext.ResetWriters()
+}
+
+// ResetWriters removes all the writers from the DefaultContext.
+func ResetWriters() {
+	defaultContext.ResetWriters()
+}
+
+// ReplaceDefaultWriter is a convenience method that does the equivalent of
+// RemoveWriter and then RegisterWriter with the name "default".  The previous
+// default writer, if any, is returned.
+func ReplaceDefaultWriter(writer Writer) (Writer, error) {
+	return defaultContext.ReplaceWriter(DefaultWriterName, writer)
+}
+
+// RegisterWriter adds the writer to the list of writers in the DefaultContext
+// that get notified when logging.  If there is already a registered writer
+// with that name, an error is returned.
+func RegisterWriter(name string, writer Writer) error {
+	return defaultContext.AddWriter(name, writer)
+}
+
+// RemoveWriter removes the Writer identified by 'name' and returns it.
+// If the Writer is not found, an error is returned.
+func RemoveWriter(name string) (Writer, error) {
+	return defaultContext.RemoveWriter(name)
+}
+
+// ConfigureLoggers configures loggers according to the given string
+// specification, which specifies a set of modules and their associated
+// logging levels.  Loggers are colon- or semicolon-separated; each
+// module is specified as <modulename>=<level>.  White space outside of
+// module names and levels is ignored.  The root module is specified
+// with the name "<root>".
+//
+// An example specification:
+//	`<root>=ERROR; foo.bar=WARNING`
+func ConfigureLoggers(specification string) error {
+	config, err := ParseConfigString(specification)
+	if err != nil {
+		return err
+	}
+	defaultContext.ApplyConfig(config)
+	return nil
+}
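
As a rough usage sketch (not vendored code), the package-level helpers above combine with the specification format documented for ConfigureLoggers as follows; ParseConfigString itself lives in a file outside this hunk, so the spec string simply follows the example given in the comment above:

```go
package main

import (
	"fmt"

	"github.com/juju/loggo"
)

var logger = loggo.GetLogger("example.main")

func main() {
	// Raise one module to DEBUG while the root stays at WARNING.
	if err := loggo.ConfigureLoggers("<root>=WARNING; example.main=DEBUG"); err != nil {
		panic(err)
	}

	logger.Debugf("emitted: example.main is at DEBUG")
	loggo.GetLogger("other").Debugf("dropped: 'other' inherits WARNING from <root>")

	// LoggerInfo reports the effective configuration in the same format.
	fmt.Println(loggo.LoggerInfo())
}
```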
diff --git a/automation/vendor/github.com/juju/loggo/level.go b/automation/vendor/github.com/juju/loggo/level.go
new file mode 100644
index 0000000..f6a5c4f
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/level.go
@@ -0,0 +1,102 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"strings"
+	"sync/atomic"
+)
+
+// The severity levels. Higher values are considered more important.
+const (
+	UNSPECIFIED Level = iota
+	TRACE
+	DEBUG
+	INFO
+	WARNING
+	ERROR
+	CRITICAL
+)
+
+// Level holds a severity level.
+type Level uint32
+
+// ParseLevel converts a string representation of a logging level to a
+// Level. It returns the level and whether it was valid or not.
+func ParseLevel(level string) (Level, bool) {
+	level = strings.ToUpper(level)
+	switch level {
+	case "UNSPECIFIED":
+		return UNSPECIFIED, true
+	case "TRACE":
+		return TRACE, true
+	case "DEBUG":
+		return DEBUG, true
+	case "INFO":
+		return INFO, true
+	case "WARN", "WARNING":
+		return WARNING, true
+	case "ERROR":
+		return ERROR, true
+	case "CRITICAL":
+		return CRITICAL, true
+	default:
+		return UNSPECIFIED, false
+	}
+}
+
+// String implements Stringer.
+func (level Level) String() string {
+	switch level {
+	case UNSPECIFIED:
+		return "UNSPECIFIED"
+	case TRACE:
+		return "TRACE"
+	case DEBUG:
+		return "DEBUG"
+	case INFO:
+		return "INFO"
+	case WARNING:
+		return "WARNING"
+	case ERROR:
+		return "ERROR"
+	case CRITICAL:
+		return "CRITICAL"
+	default:
+		return "<unknown>"
+	}
+}
+
+// Short returns a five character string to use in
+// aligned logging output.
+func (level Level) Short() string {
+	switch level {
+	case TRACE:
+		return "TRACE"
+	case DEBUG:
+		return "DEBUG"
+	case INFO:
+		return "INFO "
+	case WARNING:
+		return "WARN "
+	case ERROR:
+		return "ERROR"
+	case CRITICAL:
+		return "CRITC"
+	default:
+		return "     "
+	}
+}
+
+// get atomically gets the value of the given level.
+func (level *Level) get() Level {
+	return Level(atomic.LoadUint32((*uint32)(level)))
+}
+
+// set atomically sets the value of the receiver
+// to the given level.
+func (level *Level) set(newLevel Level) {
+	atomic.StoreUint32((*uint32)(level), uint32(newLevel))
+}
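
A short, illustrative sketch (not part of the library) of the level helpers defined above:

```go
package main

import (
	"fmt"

	"github.com/juju/loggo"
)

func main() {
	// ParseLevel is case-insensitive and accepts both "WARN" and "WARNING".
	level, ok := loggo.ParseLevel("warn")
	fmt.Println(level, ok)            // WARNING true
	fmt.Printf("%q\n", level.Short()) // "WARN " (padded to five characters)

	if _, ok := loggo.ParseLevel("noisy"); !ok {
		fmt.Println("unknown names report ok=false and UNSPECIFIED")
	}
}
```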
diff --git a/automation/vendor/github.com/juju/loggo/logger.go b/automation/vendor/github.com/juju/loggo/logger.go
new file mode 100644
index 0000000..fbdfd9e
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/logger.go
@@ -0,0 +1,176 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"runtime"
+	"time"
+)
+
+// A Logger represents a logging module. It has an associated logging
+// level which can be changed; messages of lesser severity will
+// be dropped. Loggers have a hierarchical relationship - see
+// the package documentation.
+//
+// The zero Logger value is usable - any messages logged
+// to it will be sent to the root Logger.
+type Logger struct {
+	impl *module
+}
+
+func (logger Logger) getModule() *module {
+	if logger.impl == nil {
+		return defaultContext.root
+	}
+	return logger.impl
+}
+
+// Name returns the logger's module name.
+func (logger Logger) Name() string {
+	return logger.getModule().Name()
+}
+
+// LogLevel returns the configured min log level of the logger.
+func (logger Logger) LogLevel() Level {
+	return logger.getModule().level
+}
+
+// EffectiveLogLevel returns the effective min log level of
+// the receiver - that is, messages with a lesser severity
+// level will be discarded.
+//
+// If the log level of the receiver is unspecified,
+// it will be taken from the effective log level of its
+// parent.
+func (logger Logger) EffectiveLogLevel() Level {
+	return logger.getModule().getEffectiveLogLevel()
+}
+
+// SetLogLevel sets the severity level of the given logger.
+// The root logger cannot be set to UNSPECIFIED level.
+// See EffectiveLogLevel for how this affects the
+// actual messages logged.
+func (logger Logger) SetLogLevel(level Level) {
+	logger.getModule().setLevel(level)
+}
+
+// Logf logs a printf-formatted message at the given level.
+// A message will be discarded if level is less than the
+// effective log level of the logger.
+// Note that the writers may also filter out messages that
+// are less than their registered minimum severity level.
+func (logger Logger) Logf(level Level, message string, args ...interface{}) {
+	logger.LogCallf(2, level, message, args...)
+}
+
+// LogCallf logs a printf-formatted message at the given level.
+// The location of the call is indicated by the calldepth argument.
+// A calldepth of 1 means the function that called this function.
+// A message will be discarded if level is less than the
+// effective log level of the logger.
+// Note that the writers may also filter out messages that
+// are less than their registered minimum severity level.
+func (logger Logger) LogCallf(calldepth int, level Level, message string, args ...interface{}) {
+	module := logger.getModule()
+	if !module.willWrite(level) {
+		return
+	}
+	// Gather time, and filename, line number.
+	now := time.Now() // get this early.
+	// Param to Caller is the call depth.  Since this method is called from
+	// the Logger methods, we want the place that those were called from.
+	_, file, line, ok := runtime.Caller(calldepth + 1)
+	if !ok {
+		file = "???"
+		line = 0
+	}
+	// Trim newline off format string, following usual
+	// Go logging conventions.
+	if len(message) > 0 && message[len(message)-1] == '\n' {
+		message = message[0 : len(message)-1]
+	}
+
+	// To avoid having a proliferation of Info/Infof methods,
+	// only use Sprintf if there are any args, and rely on the
+	// `go vet` tool for the obvious cases where someone has forgotten
+	// to provide an arg.
+	formattedMessage := message
+	if len(args) > 0 {
+		formattedMessage = fmt.Sprintf(message, args...)
+	}
+	module.write(Entry{
+		Level:     level,
+		Filename:  file,
+		Line:      line,
+		Timestamp: now,
+		Message:   formattedMessage,
+	})
+}
+
+// Criticalf logs the printf-formatted message at critical level.
+func (logger Logger) Criticalf(message string, args ...interface{}) {
+	logger.Logf(CRITICAL, message, args...)
+}
+
+// Errorf logs the printf-formatted message at error level.
+func (logger Logger) Errorf(message string, args ...interface{}) {
+	logger.Logf(ERROR, message, args...)
+}
+
+// Warningf logs the printf-formatted message at warning level.
+func (logger Logger) Warningf(message string, args ...interface{}) {
+	logger.Logf(WARNING, message, args...)
+}
+
+// Infof logs the printf-formatted message at info level.
+func (logger Logger) Infof(message string, args ...interface{}) {
+	logger.Logf(INFO, message, args...)
+}
+
+// Debugf logs the printf-formatted message at debug level.
+func (logger Logger) Debugf(message string, args ...interface{}) {
+	logger.Logf(DEBUG, message, args...)
+}
+
+// Tracef logs the printf-formatted message at trace level.
+func (logger Logger) Tracef(message string, args ...interface{}) {
+	logger.Logf(TRACE, message, args...)
+}
+
+// IsLevelEnabled returns whether debugging is enabled
+// for the given log level.
+func (logger Logger) IsLevelEnabled(level Level) bool {
+	return logger.getModule().willWrite(level)
+}
+
+// IsErrorEnabled returns whether debugging is enabled
+// at error level.
+func (logger Logger) IsErrorEnabled() bool {
+	return logger.IsLevelEnabled(ERROR)
+}
+
+// IsWarningEnabled returns whether debugging is enabled
+// at warning level.
+func (logger Logger) IsWarningEnabled() bool {
+	return logger.IsLevelEnabled(WARNING)
+}
+
+// IsInfoEnabled returns whether debugging is enabled
+// at info level.
+func (logger Logger) IsInfoEnabled() bool {
+	return logger.IsLevelEnabled(INFO)
+}
+
+// IsDebugEnabled returns whether debugging is enabled
+// at debug level.
+func (logger Logger) IsDebugEnabled() bool {
+	return logger.IsLevelEnabled(DEBUG)
+}
+
+// IsTraceEnabled returns whether debugging is enabled
+// at trace level.
+func (logger Logger) IsTraceEnabled() bool {
+	return logger.IsLevelEnabled(TRACE)
+}
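
The Logger methods above are typically used as in the following sketch (illustrative only; the expensiveDump helper is hypothetical):

```go
package main

import "github.com/juju/loggo"

var logger = loggo.GetLogger("example.worker")

// expensiveDump is a hypothetical stand-in for costly debug formatting.
func expensiveDump() string { return "lots of state" }

func main() {
	logger.SetLogLevel(loggo.INFO)

	logger.Infof("starting %d workers", 4)

	// Guard costly formatting with the Is*Enabled helpers so the call
	// is skipped entirely when DEBUG is filtered out.
	if logger.IsDebugEnabled() {
		logger.Debugf("state: %s", expensiveDump())
	}

	// Logf takes the level as a value, useful when severity is
	// decided at runtime.
	logger.Logf(loggo.WARNING, "queue depth %d", 17)
}
```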
diff --git a/automation/vendor/github.com/juju/loggo/module.go b/automation/vendor/github.com/juju/loggo/module.go
new file mode 100644
index 0000000..8153be5
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/module.go
@@ -0,0 +1,61 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+// The root module's name is the empty string; module lookup and level
+// resolution in this package rely on that.
+const (
+	rootString       = "<root>"
+	defaultRootLevel = WARNING
+	defaultLevel     = UNSPECIFIED
+)
+
+type module struct {
+	name    string
+	level   Level
+	parent  *module
+	context *Context
+}
+
+// Name returns the module's name.
+func (module *module) Name() string {
+	if module.name == "" {
+		return rootString
+	}
+	return module.name
+}
+
+func (m *module) willWrite(level Level) bool {
+	if level < TRACE || level > CRITICAL {
+		return false
+	}
+	return level >= m.getEffectiveLogLevel()
+}
+
+func (module *module) getEffectiveLogLevel() Level {
+	// Note: the root module is guaranteed to have a
+	// specified logging level, so acts as a suitable sentinel
+	// for this loop.
+	for {
+		if level := module.level.get(); level != UNSPECIFIED {
+			return level
+		}
+		module = module.parent
+	}
+	panic("unreachable")
+}
+
+// setLevel sets the severity level of the given module.
+// The root module cannot be set to UNSPECIFIED level.
+func (module *module) setLevel(level Level) {
+	// The root module can't be unspecified.
+	if module.name == "" && level == UNSPECIFIED {
+		level = WARNING
+	}
+	module.level.set(level)
+}
+
+func (m *module) write(entry Entry) {
+	entry.Module = m.name
+	m.context.write(entry)
+}
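
To make the effective-level inheritance above concrete, a small sketch (not vendored code) using the package-level GetLogger:

```go
package main

import (
	"fmt"

	"github.com/juju/loggo"
)

func main() {
	parent := loggo.GetLogger("svc")
	child := loggo.GetLogger("svc.db")

	// The child's own level is UNSPECIFIED, so it inherits the parent's.
	parent.SetLogLevel(loggo.DEBUG)
	fmt.Println(child.LogLevel())          // UNSPECIFIED
	fmt.Println(child.EffectiveLogLevel()) // DEBUG

	// An explicit level on the child overrides the inherited one.
	child.SetLogLevel(loggo.ERROR)
	fmt.Println(child.EffectiveLogLevel()) // ERROR
}
```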
diff --git a/automation/vendor/github.com/juju/loggo/testwriter.go b/automation/vendor/github.com/juju/loggo/testwriter.go
new file mode 100644
index 0000000..b20e470
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/testwriter.go
@@ -0,0 +1,40 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"path"
+	"sync"
+)
+
+// TestWriter is a useful Writer for testing purposes.  Each component of the
+// logging message is stored in the Log array.
+type TestWriter struct {
+	mu  sync.Mutex
+	log []Entry
+}
+
+// Write appends the entry to the writer's log, trimming Filename to its base name.
+func (writer *TestWriter) Write(entry Entry) {
+	writer.mu.Lock()
+	defer writer.mu.Unlock()
+	entry.Filename = path.Base(entry.Filename)
+	writer.log = append(writer.log, entry)
+}
+
+// Clear removes any saved log messages.
+func (writer *TestWriter) Clear() {
+	writer.mu.Lock()
+	defer writer.mu.Unlock()
+	writer.log = nil
+}
+
+// Log returns a copy of the current logged values.
+func (writer *TestWriter) Log() []Entry {
+	writer.mu.Lock()
+	defer writer.mu.Unlock()
+	v := make([]Entry, len(writer.log))
+	copy(v, writer.log)
+	return v
+}
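
A plausible way to use TestWriter in a test (a sketch, not part of the vendored package):

```go
package worker_test

import (
	"testing"

	"github.com/juju/loggo"
)

func TestLogsWarning(t *testing.T) {
	// Capture log entries in memory alongside the default writer.
	var tw loggo.TestWriter
	if err := loggo.RegisterWriter("test", &tw); err != nil {
		t.Fatal(err)
	}
	defer loggo.RemoveWriter("test")

	loggo.GetLogger("worker").Warningf("disk almost full")

	entries := tw.Log()
	if len(entries) != 1 || entries[0].Level != loggo.WARNING {
		t.Fatalf("unexpected log entries: %#v", entries)
	}
}
```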
diff --git a/automation/vendor/github.com/juju/loggo/writer.go b/automation/vendor/github.com/juju/loggo/writer.go
new file mode 100644
index 0000000..b3fe3e5
--- /dev/null
+++ b/automation/vendor/github.com/juju/loggo/writer.go
@@ -0,0 +1,113 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package loggo
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/juju/ansiterm"
+)
+
+// DefaultWriterName is the name of the default writer for
+// a Context.
+const DefaultWriterName = "default"
+
+// Writer is implemented by any recipient of log messages.
+type Writer interface {
+	// Write writes a message to the Writer with the given level and module
+	// name. The filename and line hold the file name and line number of the
+	// code that is generating the log message; the time stamp holds the time
+	// the log message was generated, and message holds the log message
+	// itself.
+	Write(entry Entry)
+}
+
+// NewMinimumLevelWriter returns a Writer that will only pass on the Write calls
+// to the provided writer if the log level is at or above the specified
+// minimum level.
+func NewMinimumLevelWriter(writer Writer, minLevel Level) Writer {
+	return &minLevelWriter{
+		writer: writer,
+		level:  minLevel,
+	}
+}
+
+type minLevelWriter struct {
+	writer Writer
+	level  Level
+}
+
+// Write writes the log record.
+func (w minLevelWriter) Write(entry Entry) {
+	if entry.Level < w.level {
+		return
+	}
+	w.writer.Write(entry)
+}
+
+type simpleWriter struct {
+	writer    io.Writer
+	formatter func(entry Entry) string
+}
+
+// NewSimpleWriter returns a new writer that writes log messages to the given
+// io.Writer formatting the messages with the given formatter.
+func NewSimpleWriter(writer io.Writer, formatter func(entry Entry) string) Writer {
+	if formatter == nil {
+		formatter = DefaultFormatter
+	}
+	return &simpleWriter{writer, formatter}
+}
+
+func (simple *simpleWriter) Write(entry Entry) {
+	logLine := simple.formatter(entry)
+	fmt.Fprintln(simple.writer, logLine)
+}
+
+func defaultWriter() Writer {
+	return NewColorWriter(os.Stderr)
+}
+
+type colorWriter struct {
+	writer *ansiterm.Writer
+}
+
+var (
+	// SeverityColor defines the colors for the levels output by the ColorWriter.
+	SeverityColor = map[Level]*ansiterm.Context{
+		TRACE:   ansiterm.Foreground(ansiterm.Default),
+		DEBUG:   ansiterm.Foreground(ansiterm.Green),
+		INFO:    ansiterm.Foreground(ansiterm.BrightBlue),
+		WARNING: ansiterm.Foreground(ansiterm.Yellow),
+		ERROR:   ansiterm.Foreground(ansiterm.BrightRed),
+		CRITICAL: &ansiterm.Context{
+			Foreground: ansiterm.White,
+			Background: ansiterm.Red,
+		},
+	}
+	// LocationColor defines the colors for the location output by the ColorWriter.
+	LocationColor = ansiterm.Foreground(ansiterm.BrightBlue)
+)
+
+// NewColorWriter returns a Writer that writes out colored severity
+// levels if the underlying writer is a terminal.
+func NewColorWriter(writer io.Writer) Writer {
+	return &colorWriter{ansiterm.NewWriter(writer)}
+}
+
+// Write implements Writer.
+func (w *colorWriter) Write(entry Entry) {
+	ts := formatTime(entry.Timestamp)
+	// Just get the basename from the filename
+	filename := filepath.Base(entry.Filename)
+
+	fmt.Fprintf(w.writer, "%s ", ts)
+	SeverityColor[entry.Level].Fprintf(w.writer, entry.Level.Short())
+	fmt.Fprintf(w.writer, " %s ", entry.Module)
+	LocationColor.Fprintf(w.writer, "%s:%d ", filename, entry.Line)
+	fmt.Fprintln(w.writer, entry.Message)
+}
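
The writer constructors above compose as in this sketch (illustrative; terseFormatter is a made-up formatter matching the NewSimpleWriter signature):

```go
package main

import (
	"fmt"
	"os"

	"github.com/juju/loggo"
)

// terseFormatter drops the timestamp and location, keeping level,
// module and message.
func terseFormatter(entry loggo.Entry) string {
	return fmt.Sprintf("%s %s: %s", entry.Level.Short(), entry.Module, entry.Message)
}

func main() {
	// Send only WARNING and above to stderr, tersely formatted.
	w := loggo.NewMinimumLevelWriter(loggo.NewSimpleWriter(os.Stderr, terseFormatter), loggo.WARNING)
	if _, err := loggo.ReplaceDefaultWriter(w); err != nil {
		panic(err)
	}

	logger := loggo.GetLogger("example")
	logger.SetLogLevel(loggo.TRACE)
	logger.Infof("filtered out by the minimum-level writer")
	logger.Errorf("this one reaches stderr")
}
```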
diff --git a/automation/vendor/github.com/juju/schema/LICENSE b/automation/vendor/github.com/juju/schema/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/schema/README.md b/automation/vendor/github.com/juju/schema/README.md
new file mode 100644
index 0000000..78ad8a1
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/README.md
@@ -0,0 +1,5 @@
+juju/schema
+===========
+
+This package provides helpers for coercing dynamically typed data structures
+into known forms.
diff --git a/automation/vendor/github.com/juju/schema/checker.go b/automation/vendor/github.com/juju/schema/checker.go
new file mode 100644
index 0000000..8682136
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/checker.go
@@ -0,0 +1,71 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"strings"
+)
+
+// The Coerce method of the Checker interface is called recursively when
+// v is being validated.  If err is nil, newv is used as the new value
+// at the recursion point.  If err is non-nil, v is taken as invalid and
+// may either be ignored or cause an error, depending on where in the schema
+// checking process the error happened. Checkers like OneOf may continue
+// with an alternative, for instance.
+type Checker interface {
+	Coerce(v interface{}, path []string) (newv interface{}, err error)
+}
+
+// Any returns a Checker that succeeds with any input value and
+// results in the value itself unprocessed.
+func Any() Checker {
+	return anyC{}
+}
+
+type anyC struct{}
+
+func (c anyC) Coerce(v interface{}, path []string) (interface{}, error) {
+	return v, nil
+}
+
+// OneOf returns a Checker that attempts to Coerce the value with each
+// of the provided checkers. The value returned by the first checker
+// that succeeds will be returned by the OneOf checker itself.  If no
+// checker succeeds, OneOf will return an error on coercion.
+func OneOf(options ...Checker) Checker {
+	return oneOfC{options}
+}
+
+type oneOfC struct {
+	options []Checker
+}
+
+func (c oneOfC) Coerce(v interface{}, path []string) (interface{}, error) {
+	for _, o := range c.options {
+		newv, err := o.Coerce(v, path)
+		if err == nil {
+			return newv, nil
+		}
+	}
+	return nil, error_{"", v, path}
+}
+
+// pathAsPrefix returns a string consisting of the path elements
+// suitable for using as the prefix of an error message. If path
+// starts with a ".", the dot is omitted.
+func pathAsPrefix(path []string) string {
+	if len(path) == 0 {
+		return ""
+	}
+	var s string
+	if path[0] == "." {
+		s = strings.Join(path[1:], "")
+	} else {
+		s = strings.Join(path, "")
+	}
+	if s == "" {
+		return ""
+	}
+	return s + ": "
+}
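
A brief sketch (not vendored code) of how Checker and OneOf behave, based on the numeric and bool checkers defined later in this package:

```go
package main

import (
	"fmt"

	"github.com/juju/schema"
)

func main() {
	// Try each option in order; the first successful coercion wins.
	checker := schema.OneOf(schema.Int(), schema.Bool())

	v, err := checker.Coerce("42", nil)
	fmt.Printf("%T %v %v\n", v, v, err) // int64 42 <nil>

	v, err = checker.Coerce("true", nil)
	fmt.Printf("%T %v %v\n", v, v, err) // bool true <nil>

	_, err = checker.Coerce([]int{1}, nil)
	fmt.Println(err) // unexpected value []int{1}
}
```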
diff --git a/automation/vendor/github.com/juju/schema/const.go b/automation/vendor/github.com/juju/schema/const.go
new file mode 100644
index 0000000..cbd03b8
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/const.go
@@ -0,0 +1,57 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Const returns a Checker that only succeeds if the input matches
+// value exactly.  The value is compared with reflect.DeepEqual.
+func Const(value interface{}) Checker {
+	return constC{value}
+}
+
+type constC struct {
+	value interface{}
+}
+
+func (c constC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if reflect.DeepEqual(v, c.value) {
+		return v, nil
+	}
+	return nil, error_{fmt.Sprintf("%#v", c.value), v, path}
+}
+
+// Nil returns a Checker that only succeeds if the input is nil. To tweak the
+// error message, valueLabel can contain a label of the value being checked to
+// be empty, e.g. "my special name". If valueLabel is "", "value" will be used
+// as a label instead.
+//
+// Example 1:
+// schema.Nil("widget").Coerce(42, nil) will return an error message
+// like `expected empty widget, got int(42)`.
+//
+// Example 2:
+// schema.Nil("").Coerce("", nil) will return an error message like
+// `expected empty value, got string("")`.
+func Nil(valueLabel string) Checker {
+	if valueLabel == "" {
+		valueLabel = "value"
+	}
+	return nilC{valueLabel}
+}
+
+type nilC struct {
+	valueLabel string
+}
+
+func (c nilC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if reflect.DeepEqual(v, nil) {
+		return v, nil
+	}
+	label := fmt.Sprintf("empty %s", c.valueLabel)
+	return nil, error_{label, v, path}
+}
diff --git a/automation/vendor/github.com/juju/schema/errors.go b/automation/vendor/github.com/juju/schema/errors.go
new file mode 100644
index 0000000..f62f58e
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/errors.go
@@ -0,0 +1,25 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+)
+
+type error_ struct {
+	want string
+	got  interface{}
+	path []string
+}
+
+func (e error_) Error() string {
+	path := pathAsPrefix(e.path)
+	if e.want == "" {
+		return fmt.Sprintf("%sunexpected value %#v", path, e.got)
+	}
+	if e.got == nil {
+		return fmt.Sprintf("%sexpected %s, got nothing", path, e.want)
+	}
+	return fmt.Sprintf("%sexpected %s, got %T(%#v)", path, e.want, e.got, e.got)
+}
diff --git a/automation/vendor/github.com/juju/schema/fieldmap.go b/automation/vendor/github.com/juju/schema/fieldmap.go
new file mode 100644
index 0000000..765f7e3
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/fieldmap.go
@@ -0,0 +1,170 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Omit is a marker for the FieldMap and StrictFieldMap defaults parameter.
+// If a field is not present in the map and defaults to Omit, the missing
+// field will be omitted from the coerced map as well.
+var Omit omit
+
+type omit struct{}
+
+type Fields map[string]Checker
+type Defaults map[string]interface{}
+
+// FieldMap returns a Checker that accepts a map value with defined
+// string keys. Every key has an independent checker associated,
+// and processing will only succeed if all the values succeed
+// individually. If a field fails to be processed, processing stops
+// and returns with the underlying error.
+//
+// Fields in defaults will be set to the provided value if not present
+// in the coerced map. If the default value is schema.Omit, the
+// missing field will be omitted from the coerced map.
+//
+// The coerced output value has type map[string]interface{}.
+func FieldMap(fields Fields, defaults Defaults) Checker {
+	return fieldMapC{fields, defaults, false}
+}
+
+// StrictFieldMap returns a Checker that acts as the one returned by FieldMap,
+// but the Checker returns an error if it encounters an unknown key.
+func StrictFieldMap(fields Fields, defaults Defaults) Checker {
+	return fieldMapC{fields, defaults, true}
+}
+
+type fieldMapC struct {
+	fields   Fields
+	defaults Defaults
+	strict   bool
+}
+
+var stringType = reflect.TypeOf("")
+
+func hasStrictStringKeys(rv reflect.Value) bool {
+	if rv.Type().Key() == stringType {
+		return true
+	}
+	if rv.Type().Key().Kind() != reflect.Interface {
+		return false
+	}
+	for _, k := range rv.MapKeys() {
+		if k.Elem().Type() != stringType {
+			return false
+		}
+	}
+	return true
+}
+
+func (c fieldMapC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Map {
+		return nil, error_{"map", v, path}
+	}
+	if !hasStrictStringKeys(rv) {
+		return nil, error_{"map[string]", v, path}
+	}
+
+	if c.strict {
+		for _, k := range rv.MapKeys() {
+			ks := k.String()
+			if _, ok := c.fields[ks]; !ok {
+				return nil, fmt.Errorf("%sunknown key %q (value %#v)", pathAsPrefix(path), ks, rv.MapIndex(k).Interface())
+			}
+		}
+	}
+
+	vpath := append(path, ".", "?")
+
+	out := make(map[string]interface{}, rv.Len())
+	for k, checker := range c.fields {
+		valuev := rv.MapIndex(reflect.ValueOf(k))
+		var value interface{}
+		if valuev.IsValid() {
+			value = valuev.Interface()
+		} else if dflt, ok := c.defaults[k]; ok {
+			if dflt == Omit {
+				continue
+			}
+			value = dflt
+		}
+		vpath[len(vpath)-1] = k
+		newv, err := checker.Coerce(value, vpath)
+		if err != nil {
+			return nil, err
+		}
+		out[k] = newv
+	}
+	for k, v := range c.defaults {
+		if v == Omit {
+			continue
+		}
+		if _, ok := out[k]; !ok {
+			checker, ok := c.fields[k]
+			if !ok {
+				return nil, fmt.Errorf("got default value for unknown field %q", k)
+			}
+			vpath[len(vpath)-1] = k
+			newv, err := checker.Coerce(v, vpath)
+			if err != nil {
+				return nil, err
+			}
+			out[k] = newv
+		}
+	}
+	return out, nil
+}
+
+// FieldMapSet returns a Checker that accepts a map value checked
+// against one of several FieldMap checkers.  The actual checker
+// used is the first one whose checker associated with the selector
+// field processes the map correctly. If no checker processes
+// the selector value correctly, an error is returned.
+//
+// The coerced output value has type map[string]interface{}.
+func FieldMapSet(selector string, maps []Checker) Checker {
+	fmaps := make([]fieldMapC, len(maps))
+	for i, m := range maps {
+		if fmap, ok := m.(fieldMapC); ok {
+			if checker, _ := fmap.fields[selector]; checker == nil {
+				panic("FieldMapSet has a FieldMap with a missing selector")
+			}
+			fmaps[i] = fmap
+		} else {
+			panic("FieldMapSet got a non-FieldMap checker")
+		}
+	}
+	return mapSetC{selector, fmaps}
+}
+
+type mapSetC struct {
+	selector string
+	fmaps    []fieldMapC
+}
+
+func (c mapSetC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Map {
+		return nil, error_{"map", v, path}
+	}
+
+	var selector interface{}
+	selectorv := rv.MapIndex(reflect.ValueOf(c.selector))
+	if selectorv.IsValid() {
+		selector = selectorv.Interface()
+		for _, fmap := range c.fmaps {
+			_, err := fmap.fields[c.selector].Coerce(selector, path)
+			if err != nil {
+				continue
+			}
+			return fmap.Coerce(v, path)
+		}
+	}
+	return nil, error_{"supported selector", selector, append(path, ".", c.selector)}
+}
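
A sketch of FieldMap with defaults and Omit (illustrative only; the field names are invented):

```go
package main

import (
	"fmt"

	"github.com/juju/schema"
)

func main() {
	checker := schema.FieldMap(
		schema.Fields{
			"name":    schema.NonEmptyString("name"),
			"port":    schema.ForceInt(),
			"debug":   schema.Bool(),
			"comment": schema.String(),
		},
		schema.Defaults{
			"port":    8080,
			"debug":   false,
			"comment": schema.Omit, // absent in input means absent in output
		},
	)

	raw := map[string]interface{}{"name": "api", "port": "9090"}
	coerced, err := checker.Coerce(raw, nil)
	if err != nil {
		panic(err)
	}
	cfg := coerced.(map[string]interface{})
	fmt.Println(cfg["name"], cfg["port"], cfg["debug"]) // api 9090 false
	_, present := cfg["comment"]
	fmt.Println(present) // false, because its default is Omit
}
```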
diff --git a/automation/vendor/github.com/juju/schema/lists.go b/automation/vendor/github.com/juju/schema/lists.go
new file mode 100644
index 0000000..635425a
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/lists.go
@@ -0,0 +1,44 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"reflect"
+	"strconv"
+)
+
+// List returns a Checker that accepts a slice value with values
+// that are processed with the elem checker.  If any element of the
+// provided slice value fails to be processed, processing will stop
+// and return with the obtained error.
+//
+// The coerced output value has type []interface{}.
+func List(elem Checker) Checker {
+	return listC{elem}
+}
+
+type listC struct {
+	elem Checker
+}
+
+func (c listC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Slice {
+		return nil, error_{"list", v, path}
+	}
+
+	path = append(path, "[", "?", "]")
+
+	l := rv.Len()
+	out := make([]interface{}, 0, l)
+	for i := 0; i != l; i++ {
+		path[len(path)-2] = strconv.Itoa(i)
+		elem, err := c.elem.Coerce(rv.Index(i).Interface(), path)
+		if err != nil {
+			return nil, err
+		}
+		out = append(out, elem)
+	}
+	return out, nil
+}
diff --git a/automation/vendor/github.com/juju/schema/maps.go b/automation/vendor/github.com/juju/schema/maps.go
new file mode 100644
index 0000000..31242a0
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/maps.go
@@ -0,0 +1,93 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// Map returns a Checker that accepts a map value. Every key and value
+// in the map are processed with the respective checker, and if any
+// value fails to be coerced, processing stops and returns with the
+// underlying error.
+//
+// The coerced output value has type map[interface{}]interface{}.
+func Map(key Checker, value Checker) Checker {
+	return mapC{key, value}
+}
+
+type mapC struct {
+	key   Checker
+	value Checker
+}
+
+func (c mapC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Map {
+		return nil, error_{"map", v, path}
+	}
+
+	vpath := append(path, ".", "?")
+
+	l := rv.Len()
+	out := make(map[interface{}]interface{}, l)
+	keys := rv.MapKeys()
+	for i := 0; i != l; i++ {
+		k := keys[i]
+		newk, err := c.key.Coerce(k.Interface(), path)
+		if err != nil {
+			return nil, err
+		}
+		vpath[len(vpath)-1] = fmt.Sprint(k.Interface())
+		newv, err := c.value.Coerce(rv.MapIndex(k).Interface(), vpath)
+		if err != nil {
+			return nil, err
+		}
+		out[newk] = newv
+	}
+	return out, nil
+}
+
+// StringMap returns a Checker that accepts a map value. Every key in
+// the map must be a string, and every value in the map are processed
+// with the provided checker. If any value fails to be coerced,
+// processing stops and returns with the underlying error.
+//
+// The coerced output value has type map[string]interface{}.
+func StringMap(value Checker) Checker {
+	return stringMapC{value}
+}
+
+type stringMapC struct {
+	value Checker
+}
+
+func (c stringMapC) Coerce(v interface{}, path []string) (interface{}, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Map {
+		return nil, error_{"map", v, path}
+	}
+
+	vpath := append(path, ".", "?")
+	key := String()
+
+	l := rv.Len()
+	out := make(map[string]interface{}, l)
+	keys := rv.MapKeys()
+	for i := 0; i != l; i++ {
+		k := keys[i]
+		newk, err := key.Coerce(k.Interface(), path)
+		if err != nil {
+			return nil, err
+		}
+		vpath[len(vpath)-1] = fmt.Sprint(k.Interface())
+		newv, err := c.value.Coerce(rv.MapIndex(k).Interface(), vpath)
+		if err != nil {
+			return nil, err
+		}
+		out[newk.(string)] = newv
+	}
+	return out, nil
+}
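
List and the map checkers nest naturally; a sketch (not vendored code) that also shows the element path carried in errors:

```go
package main

import (
	"fmt"

	"github.com/juju/schema"
)

func main() {
	// A list of string-keyed maps with integer values, as such data
	// often arrives from a generic YAML or JSON decoder.
	checker := schema.List(schema.StringMap(schema.Int()))

	raw := []interface{}{
		map[interface{}]interface{}{"a": 1, "b": 2},
		map[interface{}]interface{}{"c": "3"},
	}
	coerced, err := checker.Coerce(raw, nil)
	fmt.Println(err == nil)                   // true
	fmt.Println(len(coerced.([]interface{}))) // 2, with values coerced to int64

	// Errors name the offending element, e.g. `[0].c: expected int, got bool(true)`.
	_, err = checker.Coerce([]interface{}{map[interface{}]interface{}{"c": true}}, nil)
	fmt.Println(err)
}
```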
diff --git a/automation/vendor/github.com/juju/schema/numeric.go b/automation/vendor/github.com/juju/schema/numeric.go
new file mode 100644
index 0000000..ec88e56
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/numeric.go
@@ -0,0 +1,197 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"reflect"
+	"strconv"
+)
+
+// Bool returns a Checker that accepts boolean values only.
+func Bool() Checker {
+	return boolC{}
+}
+
+type boolC struct{}
+
+func (c boolC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil {
+		switch reflect.TypeOf(v).Kind() {
+		case reflect.Bool:
+			return v, nil
+		case reflect.String:
+			val, err := strconv.ParseBool(reflect.ValueOf(v).String())
+			if err == nil {
+				return val, nil
+			}
+		}
+	}
+	return nil, error_{"bool", v, path}
+}
+
+// Int returns a Checker that accepts any integer value, and returns
+// the same value consistently typed as an int64.
+func Int() Checker {
+	return intC{}
+}
+
+type intC struct{}
+
+func (c intC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"int", v, path}
+	}
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.Int:
+	case reflect.Int8:
+	case reflect.Int16:
+	case reflect.Int32:
+	case reflect.Int64:
+	case reflect.String:
+		val, err := strconv.ParseInt(reflect.ValueOf(v).String(), 0, 64)
+		if err == nil {
+			return val, nil
+		} else {
+			return nil, error_{"int", v, path}
+		}
+	default:
+		return nil, error_{"int", v, path}
+	}
+	return reflect.ValueOf(v).Int(), nil
+}
+
+// Uint returns a Checker that accepts any integer or unsigned value, and
+// returns the same value consistently typed as an uint64. If the integer
+// value is negative an error is raised.
+func Uint() Checker {
+	return uintC{}
+}
+
+type uintC struct{}
+
+func (c uintC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"uint", v, path}
+	}
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return reflect.ValueOf(v).Uint(), nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		val := reflect.ValueOf(v).Int()
+		if val < 0 {
+			return nil, error_{"uint", v, path}
+		}
+		// All positive int64 values fit into uint64.
+		return uint64(val), nil
+	case reflect.String:
+		val, err := strconv.ParseUint(reflect.ValueOf(v).String(), 0, 64)
+		if err == nil {
+			return val, nil
+		} else {
+			return nil, error_{"uint", v, path}
+		}
+	default:
+		return nil, error_{"uint", v, path}
+	}
+}
+
+// ForceInt returns a Checker that accepts any integer or float value, and
+// returns the same value consistently typed as an int. This is required
+// in order to handle the interface{}/float64 type conversion performed by
+// the JSON serializer used as part of the API infrastructure.
+func ForceInt() Checker {
+	return forceIntC{}
+}
+
+type forceIntC struct{}
+
+func (c forceIntC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil {
+		switch vv := reflect.TypeOf(v); vv.Kind() {
+		case reflect.String:
+			vstr := reflect.ValueOf(v).String()
+			intValue, err := strconv.ParseInt(vstr, 0, 64)
+			if err == nil {
+				return int(intValue), nil
+			}
+			floatValue, err := strconv.ParseFloat(vstr, 64)
+			if err == nil {
+				return int(floatValue), nil
+			}
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			return int(reflect.ValueOf(v).Int()), nil
+		case reflect.Float32, reflect.Float64:
+			return int(reflect.ValueOf(v).Float()), nil
+		}
+	}
+	return nil, error_{"number", v, path}
+}
+
+// ForceUint returns a Checker that accepts any integer or float value, and
+// returns the same value consistently typed as an uint64. This is required
+// in order to handle the interface{}/float64 type conversion performed by
+// the JSON serializer used as part of the API infrastructure. If the integer
+// value is negative an error is raised.
+func ForceUint() Checker {
+	return forceUintC{}
+}
+
+type forceUintC struct{}
+
+func (c forceUintC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil {
+		switch vv := reflect.TypeOf(v); vv.Kind() {
+		case reflect.String:
+			vstr := reflect.ValueOf(v).String()
+			intValue, err := strconv.ParseUint(vstr, 0, 64)
+			if err == nil {
+				return intValue, nil
+			}
+			floatValue, err := strconv.ParseFloat(vstr, 64)
+			if err == nil {
+				if floatValue < 0 {
+					return nil, error_{"uint", v, path}
+				}
+				return uint64(floatValue), nil
+			}
+		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+			return reflect.ValueOf(v).Uint(), nil
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+			val := reflect.ValueOf(v).Int()
+			if val < 0 {
+				return nil, error_{"uint", v, path}
+			}
+			// All positive int64 values fit into uint64.
+			return uint64(val), nil
+		case reflect.Float32, reflect.Float64:
+			val := reflect.ValueOf(v).Float()
+			if val < 0 {
+				return nil, error_{"uint", v, path}
+			}
+			return uint64(val), nil
+		}
+	}
+	return nil, error_{"uint", v, path}
+}
+
+// Float returns a Checker that accepts any float value, and returns
+// the same value consistently typed as a float64.
+func Float() Checker {
+	return floatC{}
+}
+
+type floatC struct{}
+
+func (c floatC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"float", v, path}
+	}
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.Float32:
+	case reflect.Float64:
+	default:
+		return nil, error_{"float", v, path}
+	}
+	return reflect.ValueOf(v).Float(), nil
+}
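
The ForceInt/ForceUint rationale above (JSON numbers arriving as float64) looks like this in practice; a sketch, not vendored code:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/juju/schema"
)

func main() {
	// encoding/json decodes every number into float64, the exact case
	// ForceInt is documented to handle.
	var decoded map[string]interface{}
	if err := json.Unmarshal([]byte(`{"port": 8080}`), &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", decoded["port"]) // float64

	v, err := schema.ForceInt().Coerce(decoded["port"], nil)
	fmt.Printf("%T %v %v\n", v, v, err) // int 8080 <nil>

	// Uint rejects negative integers outright.
	_, err = schema.Uint().Coerce(-1, nil)
	fmt.Println(err) // expected uint, got int(-1)
}
```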
diff --git a/automation/vendor/github.com/juju/schema/size.go b/automation/vendor/github.com/juju/schema/size.go
new file mode 100644
index 0000000..1ad0caa
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/size.go
@@ -0,0 +1,42 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"github.com/juju/utils"
+	"reflect"
+)
+
+// Size returns a Checker that accepts a string value, and returns
+// the parsed string as a size in mebibytes. See https://godoc.org/github.com/juju/utils#ParseSize
+func Size() Checker {
+	return sizeC{}
+}
+
+type sizeC struct{}
+
+// Coerce implements Checker Coerce method.
+func (c sizeC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"string", v, path}
+	}
+
+	typeOf := reflect.TypeOf(v).Kind()
+	if typeOf != reflect.String {
+		return nil, error_{"string", v, path}
+	}
+
+	value := reflect.ValueOf(v).String()
+	if value == "" {
+		return nil, error_{"empty string", v, path}
+	}
+
+	v, err := utils.ParseSize(value)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return v, nil
+}
diff --git a/automation/vendor/github.com/juju/schema/strings.go b/automation/vendor/github.com/juju/schema/strings.go
new file mode 100644
index 0000000..75f6120
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/strings.go
@@ -0,0 +1,155 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"fmt"
+	"net/url"
+	"reflect"
+	"regexp"
+)
+
+// String returns a Checker that accepts a string value only and returns
+// it unprocessed.
+func String() Checker {
+	return stringC{}
+}
+
+type stringC struct{}
+
+func (c stringC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil && reflect.TypeOf(v).Kind() == reflect.String {
+		return reflect.ValueOf(v).String(), nil
+	}
+	return nil, error_{"string", v, path}
+}
+
+// URL returns a Checker that accepts a string value that must be parseable as a
+// URL, and returns a *url.URL.
+func URL() Checker {
+	return urlC{}
+}
+
+type urlC struct{}
+
+func (c urlC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil && reflect.TypeOf(v).Kind() == reflect.String {
+		s := reflect.ValueOf(v).String()
+		u, err := url.Parse(s)
+		if err != nil {
+			return nil, error_{"valid url", s, path}
+		}
+		return u, nil
+	}
+	return nil, error_{"url string", v, path}
+}
+
+// SimpleRegexp returns a checker that accepts a string value that is
+// a valid regular expression and returns it unprocessed.
+func SimpleRegexp() Checker {
+	return sregexpC{}
+}
+
+type sregexpC struct{}
+
+func (c sregexpC) Coerce(v interface{}, path []string) (interface{}, error) {
+	// XXX The regexp package happens to be extremely simple right now.
+	//     Once exp/regexp goes mainstream, we'll have to update this
+	//     logic to use a more widely accepted regexp subset.
+	if v != nil && reflect.TypeOf(v).Kind() == reflect.String {
+		s := reflect.ValueOf(v).String()
+		_, err := regexp.Compile(s)
+		if err != nil {
+			return nil, error_{"valid regexp", s, path}
+		}
+		return v, nil
+	}
+	return nil, error_{"regexp string", v, path}
+}
+
+// UUID returns a Checker that accepts a string value that looks like a
+// valid UUID, and returns it unprocessed.
+func UUID() Checker {
+	return uuidC{}
+}
+
+type uuidC struct{}
+
+var uuidregex = regexp.MustCompile(`[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}`)
+
+func (c uuidC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v != nil && reflect.TypeOf(v).Kind() == reflect.String {
+		uuid := reflect.ValueOf(v).String()
+		if uuidregex.MatchString(uuid) {
+			return uuid, nil
+		}
+	}
+	return nil, error_{"uuid", v, path}
+}
+
+// Stringified returns a checker that accepts a bool/int/float/string
+// value and returns its string. Other value types may be supported by
+// passing in their checkers.
+func Stringified(checkers ...Checker) Checker {
+	return stringifiedC{
+		checkers: checkers,
+	}
+}
+
+type stringifiedC struct {
+	checkers []Checker
+}
+
+func (c stringifiedC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if newStr, err := String().Coerce(v, path); err == nil {
+		return newStr, nil
+	}
+	_, err := OneOf(append(c.checkers,
+		Bool(),
+		Int(),
+		Float(),
+		String(),
+		URL(),
+	)...).Coerce(v, path)
+	if err != nil {
+		return nil, err
+	}
+	return fmt.Sprintf("%#v", v), nil
+}
+
+// NonEmptyString returns a Checker that only accepts non-empty strings. To
+// tweak the error message, valueLabel can contain a label of the value being
+// checked, e.g. "my special name". If valueLabel is "", "string" will be used
+// as a label instead.
+//
+// Example 1:
+// schema.NonEmptyString("widget").Coerce("", nil) will return an error message
+// like `expected non-empty widget, got string("")`.
+//
+// Example 2:
+// schema.NonEmptyString("").Coerce("", nil) will return an error message like
+// `expected non-empty string, got string("")`.
+func NonEmptyString(valueLabel string) Checker {
+	if valueLabel == "" {
+		valueLabel = "string"
+	}
+	return nonEmptyStringC{valueLabel}
+}
+
+type nonEmptyStringC struct {
+	valueLabel string
+}
+
+func (c nonEmptyStringC) Coerce(v interface{}, path []string) (interface{}, error) {
+	label := fmt.Sprintf("non-empty %s", c.valueLabel)
+	invalidError := error_{label, v, path}
+
+	if v == nil || reflect.TypeOf(v).Kind() != reflect.String {
+		return nil, invalidError
+	}
+	if stringValue := reflect.ValueOf(v).String(); stringValue != "" {
+		return stringValue, nil
+	}
+	return nil, invalidError
+}
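
Usage sketch (not part of the vendored package): how the URL and NonEmptyString checkers above might be driven, assuming the vendored import path github.com/juju/schema resolves in this tree; the printed error text follows the doc comment above.

package main

import (
	"fmt"

	"github.com/juju/schema"
)

func main() {
	// Coerce a raw string into a *url.URL via the URL checker.
	v, err := schema.URL().Coerce("https://example.com/api", nil)
	if err != nil {
		fmt.Println("coerce failed:", err)
		return
	}
	fmt.Println(v) // https://example.com/api

	// NonEmptyString rejects empty input with a labelled error,
	// e.g. `expected non-empty widget, got string("")`.
	_, err = schema.NonEmptyString("widget").Coerce("", nil)
	fmt.Println(err)
}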
diff --git a/automation/vendor/github.com/juju/schema/time.go b/automation/vendor/github.com/juju/schema/time.go
new file mode 100644
index 0000000..9521a2a
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/time.go
@@ -0,0 +1,41 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"reflect"
+	"time"
+)
+
+// Time returns a Checker that accepts a string or time.Time value, and returns
+// the parsed time.Time value. Empty strings are considered empty times.
+func Time() Checker {
+	return timeC{}
+}
+
+type timeC struct{}
+
+// Coerce implements Checker Coerce method.
+func (c timeC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"string or time.Time", v, path}
+	}
+	var empty time.Time
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.TypeOf(empty).Kind():
+		return v, nil
+	case reflect.String:
+		vstr := reflect.ValueOf(v).String()
+		if vstr == "" {
+			return empty, nil
+		}
+		v, err := time.Parse(time.RFC3339Nano, vstr)
+		if err != nil {
+			return nil, err
+		}
+		return v, nil
+	default:
+		return nil, error_{"string or time.Time", v, path}
+	}
+}
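
A minimal sketch (not part of the vendored file) of the Time checker above; it assumes the vendored import path github.com/juju/schema.

package main

import (
	"fmt"
	"time"

	"github.com/juju/schema"
)

func main() {
	v, err := schema.Time().Coerce("2016-11-01T15:04:05Z", nil)
	if err != nil {
		fmt.Println("coerce failed:", err)
		return
	}
	fmt.Println(v.(time.Time).Year()) // 2016

	// An empty string coerces to the zero time.Time.
	v, _ = schema.Time().Coerce("", nil)
	fmt.Println(v.(time.Time).IsZero()) // true
}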
diff --git a/automation/vendor/github.com/juju/schema/time_duration.go b/automation/vendor/github.com/juju/schema/time_duration.go
new file mode 100644
index 0000000..71e75fe
--- /dev/null
+++ b/automation/vendor/github.com/juju/schema/time_duration.go
@@ -0,0 +1,42 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package schema
+
+import (
+	"reflect"
+	"time"
+)
+
+// TimeDuration returns a Checker that accepts a string or time.Duration value, and
+// returns the parsed time.Duration value. Empty strings are considered empty durations.
+func TimeDuration() Checker {
+	return timeDurationC{}
+}
+
+type timeDurationC struct{}
+
+// Coerce implements Checker Coerce method.
+func (c timeDurationC) Coerce(v interface{}, path []string) (interface{}, error) {
+	if v == nil {
+		return nil, error_{"string or time.Duration", v, path}
+	}
+
+	var empty time.Duration
+	switch reflect.TypeOf(v).Kind() {
+	case reflect.TypeOf(empty).Kind():
+		return v, nil
+	case reflect.String:
+		vstr := reflect.ValueOf(v).String()
+		if vstr == "" {
+			return empty, nil
+		}
+		v, err := time.ParseDuration(vstr)
+		if err != nil {
+			return nil, err
+		}
+		return v, nil
+	default:
+		return nil, error_{"string or time.Duration", v, path}
+	}
+}
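
Same idea for the TimeDuration checker above; an illustrative sketch only, not part of the package.

package main

import (
	"fmt"
	"time"

	"github.com/juju/schema"
)

func main() {
	v, err := schema.TimeDuration().Coerce("1500ms", nil)
	if err != nil {
		fmt.Println("coerce failed:", err)
		return
	}
	fmt.Println(v.(time.Duration)) // 1.5s
}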
diff --git a/automation/vendor/github.com/juju/utils/LICENSE b/automation/vendor/github.com/juju/utils/LICENSE
new file mode 100644
index 0000000..ade9307
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply.
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/utils/LICENSE.golang b/automation/vendor/github.com/juju/utils/LICENSE.golang
new file mode 100644
index 0000000..953076d
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/LICENSE.golang
@@ -0,0 +1,32 @@
+This licence applies to the following files:
+
+* filepath/stdlib.go
+* filepath/stdlibmatch.go
+
+Copyright (c) 2010 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/automation/vendor/github.com/juju/utils/Makefile b/automation/vendor/github.com/juju/utils/Makefile
new file mode 100644
index 0000000..9c69f32
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/Makefile
@@ -0,0 +1,10 @@
+PROJECT := github.com/juju/utils
+
+check-licence:
+	@(fgrep -rl "Licensed under the LGPLv3" .;\
+		fgrep -rl "MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT" .;\
+		find . -name "*.go") | sed -e 's,\./,,' | sort | uniq -u | \
+		xargs -I {} echo FAIL: licence missed: {}
+
+check: check-licence
+	go test $(PROJECT)/...
\ No newline at end of file
diff --git a/automation/vendor/github.com/juju/utils/README.md b/automation/vendor/github.com/juju/utils/README.md
new file mode 100644
index 0000000..ef08948
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/README.md
@@ -0,0 +1,4 @@
+juju/utils
+============
+
+This package provides general utility packages and functions.
diff --git a/automation/vendor/github.com/juju/utils/attempt.go b/automation/vendor/github.com/juju/utils/attempt.go
new file mode 100644
index 0000000..3becab2
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/attempt.go
@@ -0,0 +1,80 @@
+// Copyright 2011, 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"time"
+)
+
+// The Attempt and AttemptStrategy types are copied from those in launchpad.net/goamz/aws.
+
+// AttemptStrategy represents a strategy for waiting for an action
+// to complete successfully.
+type AttemptStrategy struct {
+	Total time.Duration // total duration of attempt.
+	Delay time.Duration // interval between each try in the burst.
+	Min   int           // minimum number of retries; overrides Total
+}
+
+type Attempt struct {
+	strategy AttemptStrategy
+	last     time.Time
+	end      time.Time
+	force    bool
+	count    int
+}
+
+// Start begins a new sequence of attempts for the given strategy.
+func (s AttemptStrategy) Start() *Attempt {
+	now := time.Now()
+	return &Attempt{
+		strategy: s,
+		last:     now,
+		end:      now.Add(s.Total),
+		force:    true,
+	}
+}
+
+// Next waits until it is time to perform the next attempt or returns
+// false if it is time to stop trying.
+// It always returns true the first time it is called - we are guaranteed to
+// make at least one attempt.
+func (a *Attempt) Next() bool {
+	now := time.Now()
+	sleep := a.nextSleep(now)
+	if !a.force && !now.Add(sleep).Before(a.end) && a.strategy.Min <= a.count {
+		return false
+	}
+	a.force = false
+	if sleep > 0 && a.count > 0 {
+		time.Sleep(sleep)
+		now = time.Now()
+	}
+	a.count++
+	a.last = now
+	return true
+}
+
+func (a *Attempt) nextSleep(now time.Time) time.Duration {
+	sleep := a.strategy.Delay - now.Sub(a.last)
+	if sleep < 0 {
+		return 0
+	}
+	return sleep
+}
+
+// HasNext returns whether another attempt will be made if the current
+// one fails. If it returns true, the following call to Next is
+// guaranteed to return true.
+func (a *Attempt) HasNext() bool {
+	if a.force || a.strategy.Min > a.count {
+		return true
+	}
+	now := time.Now()
+	if now.Add(a.nextSleep(now)).Before(a.end) {
+		a.force = true
+		return true
+	}
+	return false
+}
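
A hedged usage sketch (not part of the vendored file) of the retry loop the AttemptStrategy and Attempt types above are designed for; flakyOp and the timing values are illustrative.

package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/juju/utils"
)

var calls int

// flakyOp stands in for an operation that succeeds on the third try.
func flakyOp() error {
	calls++
	if calls < 3 {
		return errors.New("not ready yet")
	}
	return nil
}

func main() {
	strategy := utils.AttemptStrategy{
		Total: time.Second,            // give up after roughly a second
		Delay: 100 * time.Millisecond, // pause between tries
	}
	var err error
	for a := strategy.Start(); a.Next(); {
		if err = flakyOp(); err == nil {
			break
		}
		if !a.HasNext() {
			break
		}
	}
	fmt.Println("result:", err, "after", calls, "calls")
}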
diff --git a/automation/vendor/github.com/juju/utils/clock/clock.go b/automation/vendor/github.com/juju/utils/clock/clock.go
new file mode 100644
index 0000000..59a511d
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/clock/clock.go
@@ -0,0 +1,53 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package clock
+
+import "time"
+
+// Clock provides an interface for dealing with clocks.
+type Clock interface {
+	// Now returns the current clock time.
+	Now() time.Time
+
+	// After waits for the duration to elapse and then sends the
+	// current time on the returned channel.
+	After(time.Duration) <-chan time.Time
+
+	// AfterFunc waits for the duration to elapse and then calls f in its own goroutine.
+	// It returns a Timer that can be used to cancel the call using its Stop method.
+	AfterFunc(d time.Duration, f func()) Timer
+
+	// NewTimer creates a new Timer that will send the current time
+	// on its channel after at least duration d.
+	NewTimer(d time.Duration) Timer
+}
+
+// Alarm returns a channel that will have the time sent on it at some point
+// after the supplied time occurs.
+//
+// This is short for c.After(t.Sub(c.Now())).
+func Alarm(c Clock, t time.Time) <-chan time.Time {
+	return c.After(t.Sub(c.Now()))
+}
+
+// The Timer type represents a single event.
+// A Timer must be created with AfterFunc.
+// This interface follows time.Timer's methods but provides easier mocking.
+type Timer interface {
+	// When the Timer expires, the current time will be sent on the
+	// channel returned from Chan, unless the Timer was created by
+	// AfterFunc.
+	Chan() <-chan time.Time
+
+	// Reset changes the timer to expire after duration d.
+	// It returns true if the timer had been active, false if
+	// the timer had expired or been stopped.
+	Reset(time.Duration) bool
+
+	// Stop prevents the Timer from firing. It returns true if
+	// the call stops the timer, false if the timer has already expired or been stopped.
+	// Stop does not close the channel, to prevent a read
+	// from the channel succeeding incorrectly.
+	Stop() bool
+}
diff --git a/automation/vendor/github.com/juju/utils/clock/wall.go b/automation/vendor/github.com/juju/utils/clock/wall.go
new file mode 100644
index 0000000..9bfc351
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/clock/wall.go
@@ -0,0 +1,47 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package clock
+
+import (
+	"time"
+)
+
+// WallClock exposes wall-clock time via the Clock interface.
+var WallClock wallClock
+
+// ensure that WallClock does actually implement the Clock interface.
+var _ Clock = WallClock
+
+// WallClock exposes wall-clock time as returned by time.Now.
+type wallClock struct{}
+
+// Now is part of the Clock interface.
+func (wallClock) Now() time.Time {
+	return time.Now()
+}
+
+// After implements Clock.After.
+func (wallClock) After(d time.Duration) <-chan time.Time {
+	return time.After(d)
+}
+
+// AfterFunc implements Clock.AfterFunc.
+func (wallClock) AfterFunc(d time.Duration, f func()) Timer {
+	return wallTimer{time.AfterFunc(d, f)}
+}
+
+// NewTimer implements Clock.NewTimer.
+func (wallClock) NewTimer(d time.Duration) Timer {
+	return wallTimer{time.NewTimer(d)}
+}
+
+// wallTimer implements the Timer interface.
+type wallTimer struct {
+	*time.Timer
+}
+
+// Chan implements Timer.Chan.
+func (t wallTimer) Chan() <-chan time.Time {
+	return t.C
+}
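
An illustrative sketch (not part of the vendored files) showing how code can depend on the Clock interface while production code passes clock.WallClock; the timings are arbitrary.

package main

import (
	"fmt"
	"time"

	"github.com/juju/utils/clock"
)

// waitUntil blocks until the supplied time using any Clock, which keeps
// the caller testable with a fake clock implementation.
func waitUntil(c clock.Clock, t time.Time) {
	<-clock.Alarm(c, t)
}

func main() {
	c := clock.WallClock
	start := c.Now()
	waitUntil(c, start.Add(50*time.Millisecond))
	fmt.Println("elapsed:", c.Now().Sub(start))
}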
diff --git a/automation/vendor/github.com/juju/utils/command.go b/automation/vendor/github.com/juju/utils/command.go
new file mode 100644
index 0000000..4bd51cd
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/command.go
@@ -0,0 +1,19 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"os/exec"
+)
+
+// RunCommand executes the command and returns the combined output.
+func RunCommand(command string, args ...string) (output string, err error) {
+	cmd := exec.Command(command, args...)
+	out, err := cmd.CombinedOutput()
+	output = string(out)
+	if err != nil {
+		return output, err
+	}
+	return output, nil
+}
diff --git a/automation/vendor/github.com/juju/utils/dependencies.tsv b/automation/vendor/github.com/juju/utils/dependencies.tsv
new file mode 100644
index 0000000..76d098e
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/dependencies.tsv
@@ -0,0 +1,24 @@
+github.com/juju/cmd	git	7c57a7d5a20602e4563a83f2d530283ca0e6f481	2016-08-10T12:53:08Z
+github.com/juju/errors	git	1b5e39b83d1835fa480e0c2ddefb040ee82d58b3	2015-09-16T12:56:42Z
+github.com/juju/gnuflag	git	4e76c56581859c14d9d87e1ddbe29e1c0f10195f	2016-08-09T16:52:14Z
+github.com/juju/httpprof	git	14bf14c307672fd2456bdbf35d19cf0ccd3cf565	2014-12-17T16:00:36Z
+github.com/juju/httprequest	git	89d547093c45e293599088cc63e805c6f1205dc0	2016-03-02T10:09:58Z
+github.com/juju/loggo	git	15901ae4de786d05edae84a27c93d3fbef66c91e	2016-08-04T22:15:26Z
+github.com/juju/retry	git	62c62032529169c7ec02fa48f93349604c345e1f	2015-10-29T02:48:21Z
+github.com/juju/testing	git	7177264a582e2c00d08277eaa91d88f8eb0fd869	2016-09-26T12:59:16Z
+github.com/juju/version	git	4ae6172c00626779a5a462c3e3d22fc0e889431a	2016-06-03T19:49:58Z
+github.com/julienschmidt/httprouter	git	77a895ad01ebc98a4dc95d8355bc825ce80a56f6	2015-10-13T22:55:20Z
+github.com/masterzen/azure-sdk-for-go	git	ee4f0065d00cd12b542f18f5bc45799e88163b12	2016-07-20T05:16:58Z
+github.com/masterzen/simplexml	git	4572e39b1ab9fe03ee513ce6fc7e289e98482190	2016-06-08T18:30:07Z
+github.com/masterzen/winrm	git	7a535cd943fccaeed196718896beec3fb51aff41	2016-08-04T09:38:27Z
+github.com/masterzen/xmlpath	git	13f4951698adc0fa9c1dda3e275d489a24201161	2014-02-18T18:59:01Z
+github.com/nu7hatch/gouuid	git	179d4d0c4d8d407a32af483c2354df1d2c91e6c3	2016-02-18t18:59:01Z
+golang.org/x/crypto	git	aedad9a179ec1ea11b7064c57cbc6dc30d7724ec	2015-08-30T18:06:42Z
+golang.org/x/net	git	ea47fc708ee3e20177f3ca3716217c4ab75942cb	2015-08-29T23:03:18Z
+golang.org/x/text	git	b01949dc0793a9af5e4cb3fce4d42999e76e8ca1	2016-05-25T23:07:23Z
+gopkg.in/check.v1	git	4f90aeace3a26ad7021961c297b22c42160c7b25	2016-01-05T16:49:36Z
+gopkg.in/errgo.v1	git	66cb46252b94c1f3d65646f54ee8043ab38d766c	2015-10-07T15:31:57Z
+gopkg.in/juju/names.v2	git	e38bc90539f22af61a9c656d35068bd5f0a5b30a	2016-05-25T23:07:23Z
+gopkg.in/mgo.v2	git	4d04138ffef2791c479c0c8bbffc30b34081b8d9	2015-10-26T16:34:53Z
+gopkg.in/tomb.v1	git	dd632973f1e7218eb1089048e0798ec9ae7dceb8	2014-10-24T13:56:13Z
+gopkg.in/yaml.v2	git	a83829b6f1293c91addabc89d0571c246397bbf4	2016-03-01T20:40:22Z
diff --git a/automation/vendor/github.com/juju/utils/file.go b/automation/vendor/github.com/juju/utils/file.go
new file mode 100644
index 0000000..75fe5a6
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/file.go
@@ -0,0 +1,149 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"path/filepath"
+	"regexp"
+)
+
+// UserHomeDir returns the home directory for the specified user, or the
+// home directory for the current user if the specified user is empty.
+func UserHomeDir(userName string) (hDir string, err error) {
+	if userName == "" {
+		// TODO (wallyworld) - fix tests on Windows
+		// Ordinarily, we'd always use user.Current() to get the current user
+		// and then get the HomeDir from that. But our tests rely on poking
+		// a value into $HOME in order to override the normal home dir for the
+		// current user. So we're forced to use Home() to make the tests pass.
+		// All of our tests currently construct paths with the default user in
+		// mind eg "~/foo".
+		return Home(), nil
+	}
+	hDir, err = homeDir(userName)
+	if err != nil {
+		return "", err
+	}
+	return hDir, nil
+}
+
+// Only match paths starting with ~ (~user/test, ~/test). This will prevent
+// accidental expansion on Windows when short form paths are present (C:\users\ADMINI~1\test)
+var userHomePathRegexp = regexp.MustCompile("(^~(?P<user>[^/]*))(?P<path>.*)")
+
+// NormalizePath expands a path containing ~ to its absolute form,
+// and removes any .. or . path elements.
+func NormalizePath(dir string) (string, error) {
+	if userHomePathRegexp.MatchString(dir) {
+		user := userHomePathRegexp.ReplaceAllString(dir, "$user")
+		userHomeDir, err := UserHomeDir(user)
+		if err != nil {
+			return "", err
+		}
+		dir = userHomePathRegexp.ReplaceAllString(dir, fmt.Sprintf("%s$path", userHomeDir))
+	}
+	return filepath.Clean(dir), nil
+}
+
+// EnsureBaseDir ensures that path is always prefixed by baseDir,
+// allowing for the fact that path might have a Windows drive letter in
+// it.
+func EnsureBaseDir(baseDir, path string) string {
+	if baseDir == "" {
+		return path
+	}
+	volume := filepath.VolumeName(path)
+	return filepath.Join(baseDir, path[len(volume):])
+}
+
+// JoinServerPath joins any number of path elements into a single path, adding
+// a path separator (based on the current juju server OS) if necessary. The
+// result is Cleaned; in particular, all empty strings are ignored.
+func JoinServerPath(elem ...string) string {
+	return path.Join(elem...)
+}
+
+// UniqueDirectory returns "path/name" if that directory doesn't exist.  If it
+// does, the method starts appending .1, .2, etc until a unique name is found.
+func UniqueDirectory(path, name string) (string, error) {
+	dir := filepath.Join(path, name)
+	_, err := os.Stat(dir)
+	if os.IsNotExist(err) {
+		return dir, nil
+	}
+	for i := 1; ; i++ {
+		dir := filepath.Join(path, fmt.Sprintf("%s.%d", name, i))
+		_, err := os.Stat(dir)
+		if os.IsNotExist(err) {
+			return dir, nil
+		} else if err != nil {
+			return "", err
+		}
+	}
+}
+
+// CopyFile writes the contents of the given source file to dest.
+func CopyFile(dest, source string) error {
+	df, err := os.Create(dest)
+	if err != nil {
+		return err
+	}
+	f, err := os.Open(source)
+	if err != nil {
+		return err
+	}
+	defer f.Close()
+	_, err = io.Copy(df, f)
+	return err
+}
+
+// AtomicWriteFileAndChange atomically writes the filename with the
+// given contents and calls the given function after the contents were
+// written, but before the file is renamed.
+func AtomicWriteFileAndChange(filename string, contents []byte, change func(*os.File) error) (err error) {
+	dir, file := filepath.Split(filename)
+	f, err := ioutil.TempFile(dir, file)
+	if err != nil {
+		return fmt.Errorf("cannot create temp file: %v", err)
+	}
+	defer f.Close()
+	defer func() {
+		if err != nil {
+			// Don't leave the temp file lying around on error.
+			// Close the file before removing. Trying to remove an open file on
+			// Windows will fail.
+			f.Close()
+			os.Remove(f.Name())
+		}
+	}()
+	if _, err := f.Write(contents); err != nil {
+		return fmt.Errorf("cannot write %q contents: %v", filename, err)
+	}
+	if err := change(f); err != nil {
+		return err
+	}
+	f.Close()
+	if err := ReplaceFile(f.Name(), filename); err != nil {
+		return fmt.Errorf("cannot replace %q with %q: %v", f.Name(), filename, err)
+	}
+	return nil
+}
+
+// AtomicWriteFile atomically writes the filename with the given
+// contents and permissions, replacing any existing file at the same
+// path.
+func AtomicWriteFile(filename string, contents []byte, perms os.FileMode) (err error) {
+	return AtomicWriteFileAndChange(filename, contents, func(f *os.File) error {
+		// (*os.File).Chmod() is not implemented on Windows; os.Chmod(), however, is.
+		if err := os.Chmod(f.Name(), perms); err != nil {
+			return fmt.Errorf("cannot set permissions: %v", err)
+		}
+		return nil
+	})
+}
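
A minimal sketch (not part of the vendored file) of AtomicWriteFile above; the path and contents are illustrative. The temp-file-then-rename approach means readers never observe a partially written file.

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/juju/utils"
)

func main() {
	path := "/tmp/example-atomic.conf" // illustrative path
	if err := utils.AtomicWriteFile(path, []byte("key: value\n"), 0644); err != nil {
		fmt.Println("write failed:", err)
		return
	}
	data, _ := ioutil.ReadFile(path)
	fmt.Printf("%s", data)
}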
diff --git a/automation/vendor/github.com/juju/utils/file_unix.go b/automation/vendor/github.com/juju/utils/file_unix.go
new file mode 100644
index 0000000..4380290
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/file_unix.go
@@ -0,0 +1,75 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// +build !windows
+
+package utils
+
+import (
+	"fmt"
+	"os"
+	"os/user"
+	"strconv"
+	"strings"
+
+	"github.com/juju/errors"
+)
+
+func homeDir(userName string) (string, error) {
+	u, err := user.Lookup(userName)
+	if err != nil {
+		return "", errors.NewUserNotFound(err, "no such user")
+	}
+	return u.HomeDir, nil
+}
+
+// MoveFile atomically moves the source file to the destination, returning
+// whether the file was moved successfully. If the destination already exists,
+// it returns an error rather than overwrite it.
+//
+// On unix systems, the move may succeed and still return an error if the
+// source file cannot be unlinked afterwards.
+func MoveFile(source, destination string) (bool, error) {
+	err := os.Link(source, destination)
+	if err != nil {
+		return false, err
+	}
+	err = os.Remove(source)
+	if err != nil {
+		return true, err
+	}
+	return true, nil
+}
+
+// ReplaceFile atomically replaces the destination file or directory
+// with the source. The errors that are returned are identical to
+// those returned by os.Rename.
+func ReplaceFile(source, destination string) error {
+	return os.Rename(source, destination)
+}
+
+// MakeFileURL returns a file:// URL if an absolute path is passed in;
+// otherwise it returns the input unchanged.
+func MakeFileURL(in string) string {
+	if strings.HasPrefix(in, "/") {
+		return "file://" + in
+	}
+	return in
+}
+
+// ChownPath sets the uid and gid of path to match that of the user
+// specified.
+func ChownPath(path, username string) error {
+	u, err := user.Lookup(username)
+	if err != nil {
+		return fmt.Errorf("cannot lookup %q user id: %v", username, err)
+	}
+	uid, err := strconv.Atoi(u.Uid)
+	if err != nil {
+		return fmt.Errorf("invalid user id %q: %v", u.Uid, err)
+	}
+	gid, err := strconv.Atoi(u.Gid)
+	if err != nil {
+		return fmt.Errorf("invalid group id %q: %v", u.Gid, err)
+	}
+	return os.Chown(path, uid, gid)
+}
diff --git a/automation/vendor/github.com/juju/utils/file_windows.go b/automation/vendor/github.com/juju/utils/file_windows.go
new file mode 100644
index 0000000..9bb3936
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/file_windows.go
@@ -0,0 +1,142 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// +build windows
+
+package utils
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"syscall"
+	"unsafe"
+
+	"github.com/juju/errors"
+)
+
+const (
+	movefile_replace_existing = 0x1
+	movefile_write_through    = 0x8
+)
+
+//sys moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) = MoveFileExW
+
+// MoveFile atomically moves the source file to the destination, returning
+// whether the file was moved successfully. If the destination already exists,
+// it returns an error rather than overwrite it.
+func MoveFile(source, destination string) (bool, error) {
+	src, err := syscall.UTF16PtrFromString(source)
+	if err != nil {
+		return false, &os.LinkError{"move", source, destination, err}
+	}
+	dest, err := syscall.UTF16PtrFromString(destination)
+	if err != nil {
+		return false, &os.LinkError{"move", source, destination, err}
+	}
+
+	// see http://msdn.microsoft.com/en-us/library/windows/desktop/aa365240(v=vs.85).aspx
+	if err := moveFileEx(src, dest, movefile_write_through); err != nil {
+		return false, &os.LinkError{"move", source, destination, err}
+	}
+	return true, nil
+
+}
+
+// ReplaceFile atomically replaces the destination file or directory with the source.
+// The errors that are returned are identical to those returned by os.Rename.
+func ReplaceFile(source, destination string) error {
+	src, err := syscall.UTF16PtrFromString(source)
+	if err != nil {
+		return &os.LinkError{"replace", source, destination, err}
+	}
+	dest, err := syscall.UTF16PtrFromString(destination)
+	if err != nil {
+		return &os.LinkError{"replace", source, destination, err}
+	}
+
+	// see http://msdn.microsoft.com/en-us/library/windows/desktop/aa365240(v=vs.85).aspx
+	if err := moveFileEx(src, dest, movefile_replace_existing|movefile_write_through); err != nil {
+		return &os.LinkError{"replace", source, destination, err}
+	}
+	return nil
+}
+
+// MakeFileURL returns a proper file URL for the given path/directory
+func MakeFileURL(in string) string {
+	in = filepath.ToSlash(in)
+	// On Windows a path must start with <letter>: to be considered valid,
+	// so we can't do anything with less than that.
+	if len(in) < 2 {
+		return in
+	}
+	if string(in[1]) != ":" {
+		return in
+	}
+	// since go 1.6 http client will only take this format.
+	return "file://" + in
+}
+
+func getUserSID(username string) (string, error) {
+	sid, _, _, e := syscall.LookupSID("", username)
+	if e != nil {
+		return "", e
+	}
+	sidStr, err := sid.String()
+	return sidStr, err
+}
+
+func readRegString(h syscall.Handle, key string) (value string, err error) {
+	var typ uint32
+	var buf uint32
+
+	// Get size of registry key
+	err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(key), nil, &typ, nil, &buf)
+	if err != nil {
+		return value, err
+	}
+
+	n := make([]uint16, buf/2+1)
+	err = syscall.RegQueryValueEx(h, syscall.StringToUTF16Ptr(key), nil, &typ, (*byte)(unsafe.Pointer(&n[0])), &buf)
+	if err != nil {
+		return value, err
+	}
+	return syscall.UTF16ToString(n[:]), err
+}
+
+func homeFromRegistry(sid string) (string, error) {
+	var h syscall.Handle
+	// This key will exist on all platforms we support the agent on (windows server 2008 and above)
+	keyPath := fmt.Sprintf("Software\\Microsoft\\Windows NT\\CurrentVersion\\ProfileList\\%s", sid)
+	err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
+		syscall.StringToUTF16Ptr(keyPath),
+		0, syscall.KEY_READ, &h)
+	if err != nil {
+		return "", err
+	}
+	defer syscall.RegCloseKey(h)
+	str, err := readRegString(h, "ProfileImagePath")
+	if err != nil {
+		return "", err
+	}
+	return str, nil
+}
+
+// homeDir returns a local user's home dir on Windows.
+// user.Lookup() does not populate Gid and HomeDir on Windows,
+// so we get it from the registry.
+func homeDir(user string) (string, error) {
+	u, err := getUserSID(user)
+	if err != nil {
+		return "", errors.NewUserNotFound(err, "no such user")
+	}
+	return homeFromRegistry(u)
+}
+
+// ChownPath is not implemented for Windows.
+func ChownPath(path, username string) error {
+	// This only exists to allow building on Windows. User lookup and
+	// file ownership needs to be handled in a completely different
+	// way and hasn't yet been implemented.
+	return nil
+}
diff --git a/automation/vendor/github.com/juju/utils/gomaxprocs.go b/automation/vendor/github.com/juju/utils/gomaxprocs.go
new file mode 100644
index 0000000..5977a86
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/gomaxprocs.go
@@ -0,0 +1,26 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"os"
+	"runtime"
+)
+
+var gomaxprocs = runtime.GOMAXPROCS
+var numCPU = runtime.NumCPU
+
+// UseMultipleCPUs sets GOMAXPROCS to the number of CPU cores unless it has
+// already been overridden by the GOMAXPROCS environment variable.
+func UseMultipleCPUs() {
+	if envGOMAXPROCS := os.Getenv("GOMAXPROCS"); envGOMAXPROCS != "" {
+		n := gomaxprocs(0)
+		logger.Debugf("GOMAXPROCS already set in environment to %q, %d internally",
+			envGOMAXPROCS, n)
+		return
+	}
+	n := numCPU()
+	logger.Debugf("setting GOMAXPROCS to %d", n)
+	gomaxprocs(n)
+}
diff --git a/automation/vendor/github.com/juju/utils/home_unix.go b/automation/vendor/github.com/juju/utils/home_unix.go
new file mode 100644
index 0000000..6b450be
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/home_unix.go
@@ -0,0 +1,19 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+// +build !windows
+
+package utils
+
+import (
+	"os"
+)
+
+// Home returns the os-specific home path as specified in the environment.
+func Home() string {
+	return os.Getenv("HOME")
+}
+
+// SetHome sets the os-specific home path in the environment.
+func SetHome(s string) error {
+	return os.Setenv("HOME", s)
+}
diff --git a/automation/vendor/github.com/juju/utils/home_windows.go b/automation/vendor/github.com/juju/utils/home_windows.go
new file mode 100644
index 0000000..e61225c
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/home_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"os"
+	"path/filepath"
+)
+
+// Home returns the os-specific home path as specified in the environment.
+func Home() string {
+	return filepath.Join(os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"))
+}
+
+// SetHome sets the os-specific home path in the environment.
+func SetHome(s string) error {
+	v := filepath.VolumeName(s)
+	if v != "" {
+		if err := os.Setenv("HOMEDRIVE", v); err != nil {
+			return err
+		}
+	}
+	return os.Setenv("HOMEPATH", s[len(v):])
+}
diff --git a/automation/vendor/github.com/juju/utils/http-1_4.go b/automation/vendor/github.com/juju/utils/http-1_4.go
new file mode 100644
index 0000000..87bd5b1
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/http-1_4.go
@@ -0,0 +1,24 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+//+build !go1.7
+
+package utils
+
+import (
+	"fmt"
+	"net"
+	"net/http"
+)
+
+// installHTTPDialShim patches the default HTTP transport so
+// that it fails when an attempt is made to dial a non-local
+// host.
+func installHTTPDialShim(t *http.Transport) {
+	t.Dial = func(network, addr string) (net.Conn, error) {
+		if !OutgoingAccessAllowed && !isLocalAddr(addr) {
+			return nil, fmt.Errorf("access to address %q not allowed", addr)
+		}
+		return net.Dial(network, addr)
+	}
+}
diff --git a/automation/vendor/github.com/juju/utils/http-1_7.go b/automation/vendor/github.com/juju/utils/http-1_7.go
new file mode 100644
index 0000000..d676dcb
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/http-1_7.go
@@ -0,0 +1,35 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+//+build go1.7
+
+package utils
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/http"
+	"time"
+)
+
+var ctxtDialer = &net.Dialer{
+	Timeout:   30 * time.Second,
+	KeepAlive: 30 * time.Second,
+}
+
+// installHTTPDialShim patches the default HTTP transport so
+// that it fails when an attempt is made to dial a non-local
+// host.
+//
+// Note that this is Go version dependent because in Go 1.7 and above,
+// the DialContext field was introduced (and set in http.DefaultTransport)
+// which overrides the Dial field.
+func installHTTPDialShim(t *http.Transport) {
+	t.DialContext = func(ctxt context.Context, network, addr string) (net.Conn, error) {
+		if !OutgoingAccessAllowed && !isLocalAddr(addr) {
+			return nil, fmt.Errorf("access to address %q not allowed", addr)
+		}
+		return ctxtDialer.DialContext(ctxt, network, addr)
+	}
+}
diff --git a/automation/vendor/github.com/juju/utils/http.go b/automation/vendor/github.com/juju/utils/http.go
new file mode 100644
index 0000000..2f5ddf3
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/http.go
@@ -0,0 +1,117 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"crypto/tls"
+	"encoding/base64"
+	"fmt"
+	"net"
+	"net/http"
+	"strings"
+	"sync"
+)
+
+var insecureClient = (*http.Client)(nil)
+var insecureClientMutex = sync.Mutex{}
+
+func init() {
+	defaultTransport := http.DefaultTransport.(*http.Transport)
+	installHTTPDialShim(defaultTransport)
+	registerFileProtocol(defaultTransport)
+}
+
+// registerFileProtocol registers support for file:// URLs on the given transport.
+func registerFileProtocol(transport *http.Transport) {
+	transport.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
+}
+
+// SSLHostnameVerification is used as a switch for when a given provider might
+// use self-signed credentials and we should not try to verify the hostname on
+// the TLS/SSL certificates.
+type SSLHostnameVerification bool
+
+const (
+	// VerifySSLHostnames ensures we verify the hostname on the certificate
+	// matches the host we are connecting and is signed
+	VerifySSLHostnames = SSLHostnameVerification(true)
+	// NoVerifySSLHostnames informs us to skip verifying the hostname
+	// matches a valid certificate
+	NoVerifySSLHostnames = SSLHostnameVerification(false)
+)
+
+// GetHTTPClient returns either a standard http client or
+// non validating client depending on the value of verify.
+func GetHTTPClient(verify SSLHostnameVerification) *http.Client {
+	if verify == VerifySSLHostnames {
+		return GetValidatingHTTPClient()
+	}
+	return GetNonValidatingHTTPClient()
+}
+
+// GetValidatingHTTPClient returns a new http.Client that
+// verifies the server's certificate chain and hostname.
+func GetValidatingHTTPClient() *http.Client {
+	return &http.Client{}
+}
+
+// GetNonValidatingHTTPClient returns a new http.Client that
+// does not verify the server's certificate chain and hostname.
+func GetNonValidatingHTTPClient() *http.Client {
+	return &http.Client{
+		Transport: NewHttpTLSTransport(&tls.Config{
+			InsecureSkipVerify: true,
+		}),
+	}
+}
+
+// BasicAuthHeader creates a header that contains just the "Authorization"
+// entry. The implementation was originally taken from net/http, but it is
+// needed outside the http request object in order to use it with
+// our websockets. See section 2 (end of page 4) of http://www.ietf.org/rfc/rfc2617.txt
+// "To receive authorization, the client sends the userid and password,
+// separated by a single colon (":") character, within a base64 encoded string
+// in the credentials."
+func BasicAuthHeader(username, password string) http.Header {
+	auth := username + ":" + password
+	encoded := "Basic " + base64.StdEncoding.EncodeToString([]byte(auth))
+	return http.Header{
+		"Authorization": {encoded},
+	}
+}
+
+// ParseBasicAuthHeader attempts to find an Authorization header in the supplied
+// http.Header and if found parses it as a Basic header. See 2 (end of page 4)
+// http://www.ietf.org/rfc/rfc2617.txt "To receive authorization, the client
+// sends the userid and password, separated by a single colon (":") character,
+// within a base64 encoded string in the credentials."
+func ParseBasicAuthHeader(h http.Header) (userid, password string, err error) {
+	parts := strings.Fields(h.Get("Authorization"))
+	if len(parts) != 2 || parts[0] != "Basic" {
+		return "", "", fmt.Errorf("invalid or missing HTTP auth header")
+	}
+	// Challenge is a base64-encoded "tag:pass" string.
+	// See RFC 2617, Section 2.
+	challenge, err := base64.StdEncoding.DecodeString(parts[1])
+	if err != nil {
+		return "", "", fmt.Errorf("invalid HTTP auth encoding")
+	}
+	tokens := strings.SplitN(string(challenge), ":", 2)
+	if len(tokens) != 2 {
+		return "", "", fmt.Errorf("invalid HTTP auth contents")
+	}
+	return tokens[0], tokens[1], nil
+}
+
+// OutgoingAccessAllowed determines whether connections other than
+// localhost can be dialled.
+var OutgoingAccessAllowed = true
+
+func isLocalAddr(addr string) bool {
+	host, _, err := net.SplitHostPort(addr)
+	if err != nil {
+		return false
+	}
+	return host == "localhost" || net.ParseIP(host).IsLoopback()
+}
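
An illustrative round trip (not part of the vendored file) through BasicAuthHeader and ParseBasicAuthHeader above.

package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	// Build an Authorization header and parse it back again.
	h := utils.BasicAuthHeader("admin", "s3cret")
	fmt.Println(h.Get("Authorization")) // Basic YWRtaW46czNjcmV0

	user, pass, err := utils.ParseBasicAuthHeader(h)
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	fmt.Println(user, pass) // admin s3cret
}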
diff --git a/automation/vendor/github.com/juju/utils/isubuntu.go b/automation/vendor/github.com/juju/utils/isubuntu.go
new file mode 100644
index 0000000..d85ed9a
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/isubuntu.go
@@ -0,0 +1,17 @@
+// Copyright 2011, 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"strings"
+)
+
+// IsUbuntu executes lsb_release to see if the host OS is Ubuntu.
+func IsUbuntu() bool {
+	out, err := RunCommand("lsb_release", "-i", "-s")
+	if err != nil {
+		return false
+	}
+	return strings.TrimSpace(out) == "Ubuntu"
+}
diff --git a/automation/vendor/github.com/juju/utils/limiter.go b/automation/vendor/github.com/juju/utils/limiter.go
new file mode 100644
index 0000000..60bd066
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/limiter.go
@@ -0,0 +1,59 @@
+// Copyright 2011, 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+)
+
+type empty struct{}
+type limiter chan empty
+
+// Limiter represents a limited resource (eg a semaphore).
+type Limiter interface {
+	// Acquire another unit of the resource.
+	// Acquire returns false to indicate there is no more availability,
+	// until another entity calls Release.
+	Acquire() bool
+	// AcquireWait requests a unit of resource, but blocks until one is
+	// available.
+	AcquireWait()
+	// Release returns a unit of the resource. Calling Release when there
+	// are no units Acquired is an error.
+	Release() error
+}
+
+func NewLimiter(max int) Limiter {
+	return make(limiter, max)
+}
+
+// Acquire requests a unit of the resource that can be returned later.
+// It returns true if a unit is available and false if it is not. Callers
+// are responsible for calling Release if this returns true, but should
+// not release if this returns false.
+func (l limiter) Acquire() bool {
+	e := empty{}
+	select {
+	case l <- e:
+		return true
+	default:
+		return false
+	}
+}
+
+// AcquireWait waits for the resource to become available before returning.
+func (l limiter) AcquireWait() {
+	e := empty{}
+	l <- e
+}
+
+// Release returns the resource to the available pool.
+func (l limiter) Release() error {
+	select {
+	case <-l:
+		return nil
+	default:
+		return fmt.Errorf("Release without an associated Acquire")
+	}
+}
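
A small concurrency sketch (not part of the vendored file) using the Limiter above to cap how many goroutines run at once; the worker body is illustrative.

package main

import (
	"fmt"
	"sync"

	"github.com/juju/utils"
)

func main() {
	limit := utils.NewLimiter(2) // at most two workers at a time
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			limit.AcquireWait()
			defer limit.Release()
			fmt.Println("worker", n, "running")
		}(i)
	}
	wg.Wait()
}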
diff --git a/automation/vendor/github.com/juju/utils/multireader.go b/automation/vendor/github.com/juju/utils/multireader.go
new file mode 100644
index 0000000..b8431f9
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/multireader.go
@@ -0,0 +1,189 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"io"
+	"sort"
+
+	"github.com/juju/errors"
+)
+
+// SizeReaderAt combines io.ReaderAt with a Size method.
+type SizeReaderAt interface {
+	// Size returns the size of the data readable
+	// from the reader.
+	Size() int64
+	io.ReaderAt
+}
+
+// NewMultiReaderAt is like io.MultiReader but produces a ReaderAt
+// (and Size), instead of just a reader.
+//
+// Note: this implementation was taken from a talk given
+// by Brad Fitzpatrick at OSCON 2013.
+//
+// http://talks.golang.org/2013/oscon-dl.slide#49
+// https://github.com/golang/talks/blob/master/2013/oscon-dl/server-compose.go
+func NewMultiReaderAt(parts ...SizeReaderAt) SizeReaderAt {
+	m := &multiReaderAt{
+		parts: make([]offsetAndSource, 0, len(parts)),
+	}
+	var off int64
+	for _, p := range parts {
+		m.parts = append(m.parts, offsetAndSource{off, p})
+		off += p.Size()
+	}
+	m.size = off
+	return m
+}
+
+type offsetAndSource struct {
+	off int64
+	SizeReaderAt
+}
+
+type multiReaderAt struct {
+	parts []offsetAndSource
+	size  int64
+}
+
+func (m *multiReaderAt) Size() int64 {
+	return m.size
+}
+
+func (m *multiReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
+	wantN := len(p)
+
+	// Skip past the requested offset.
+	skipParts := sort.Search(len(m.parts), func(i int) bool {
+		// This function returns whether parts[i] will
+		// contribute any bytes to our output.
+		part := m.parts[i]
+		return part.off+part.Size() > off
+	})
+	parts := m.parts[skipParts:]
+
+	// How far to skip in the first part.
+	needSkip := off
+	if len(parts) > 0 {
+		needSkip -= parts[0].off
+	}
+
+	for len(parts) > 0 && len(p) > 0 {
+		readP := p
+		partSize := parts[0].Size()
+		if int64(len(readP)) > partSize-needSkip {
+			readP = readP[:partSize-needSkip]
+		}
+		pn, err0 := parts[0].ReadAt(readP, needSkip)
+		if err0 != nil {
+			return n, err0
+		}
+		n += pn
+		p = p[pn:]
+		if int64(pn)+needSkip == partSize {
+			parts = parts[1:]
+		}
+		needSkip = 0
+	}
+
+	if n != wantN {
+		err = io.ErrUnexpectedEOF
+	}
+	return
+}
+
+// NewMultiReaderSeeker returns an io.ReadSeeker that combines
+// all the given readers into a single one. It assumes that
+// all the seekers are initially positioned at the start.
+func NewMultiReaderSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
+	sreaders := make([]SizeReaderAt, len(readers))
+	for i, r := range readers {
+		r1, err := newSizeReaderAt(r)
+		if err != nil {
+			panic(err)
+		}
+		sreaders[i] = r1
+	}
+	return &readSeeker{
+		r: NewMultiReaderAt(sreaders...),
+	}
+}
+
+// newSizeReaderAt adapts an io.ReadSeeker to a SizeReaderAt.
+// Note that it doesn't strictly adhere to the ReaderAt
+// contract because it's not safe to call ReadAt concurrently.
+// This doesn't matter because io.ReadSeeker doesn't
+// need to be thread-safe and this is only used in that
+// context.
+func newSizeReaderAt(r io.ReadSeeker) (SizeReaderAt, error) {
+	size, err := r.Seek(0, 2)
+	if err != nil {
+		return nil, err
+	}
+	return &sizeReaderAt{
+		r:    r,
+		size: size,
+		off:  size,
+	}, nil
+}
+
+// sizeReaderAt adapts an io.ReadSeeker to a SizeReaderAt.
+type sizeReaderAt struct {
+	r    io.ReadSeeker
+	size int64
+	off  int64
+}
+
+// ReadAt implements SizeReaderAt.ReadAt.
+func (r *sizeReaderAt) ReadAt(buf []byte, off int64) (n int, err error) {
+	if off != r.off {
+		_, err = r.r.Seek(off, 0)
+		if err != nil {
+			return 0, err
+		}
+		r.off = off
+	}
+	n, err = io.ReadFull(r.r, buf)
+	r.off += int64(n)
+	return n, err
+}
+
+// Size implements SizeReaderAt.Size.
+func (r *sizeReaderAt) Size() int64 {
+	return r.size
+}
+
+// readSeeker adapts a SizeReaderAt to an io.ReadSeeker.
+type readSeeker struct {
+	r   SizeReaderAt
+	off int64
+}
+
+// Seek implements io.Seeker.Seek.
+func (r *readSeeker) Seek(off int64, whence int) (int64, error) {
+	switch whence {
+	case 0:
+	case 1:
+		off += r.off
+	case 2:
+		off = r.r.Size() + off
+	}
+	if off < 0 {
+		return 0, errors.New("negative position")
+	}
+	r.off = off
+	return off, nil
+}
+
+// Read implements io.Reader.Read.
+func (r *readSeeker) Read(buf []byte) (int, error) {
+	n, err := r.r.ReadAt(buf, r.off)
+	r.off += int64(n)
+	if err == io.ErrUnexpectedEOF {
+		err = io.EOF
+	}
+	return n, err
+}
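
An illustrative use (not part of the vendored file) of NewMultiReaderSeeker above, combining two in-memory readers and seeking across the part boundary.

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/juju/utils"
)

func main() {
	r := utils.NewMultiReaderSeeker(
		strings.NewReader("hello, "),
		strings.NewReader("world"),
	)
	// Seek past the first part and read the remainder.
	if _, err := r.Seek(7, 0); err != nil {
		fmt.Println("seek failed:", err)
		return
	}
	rest, _ := ioutil.ReadAll(r)
	fmt.Printf("%s\n", rest) // world
}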
diff --git a/automation/vendor/github.com/juju/utils/naturalsort.go b/automation/vendor/github.com/juju/utils/naturalsort.go
new file mode 100644
index 0000000..d337093
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/naturalsort.go
@@ -0,0 +1,95 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"unicode"
+)
+
+// SortStringsNaturally sorts strings according to their natural sort order.
+func SortStringsNaturally(s []string) []string {
+	sort.Sort(naturally(s))
+	return s
+}
+
+type naturally []string
+
+func (n naturally) Len() int {
+	return len(n)
+}
+
+func (n naturally) Swap(a, b int) {
+	n[a], n[b] = n[b], n[a]
+}
+
+// Less sorts by non-numeric prefix and numeric suffix
+// when one exists.
+func (n naturally) Less(a, b int) bool {
+	aVal := n[a]
+	bVal := n[b]
+
+	for {
+		// If bVal is empty, then aVal can't be less than it.
+		if bVal == "" {
+			return false
+		}
+		// If aVal is empty here, then it must be less than bVal.
+		if aVal == "" {
+			return true
+		}
+
+		aPrefix, aNumber, aRemainder := splitAtNumber(aVal)
+		bPrefix, bNumber, bRemainder := splitAtNumber(bVal)
+		if aPrefix != bPrefix {
+			return aPrefix < bPrefix
+		}
+		if aNumber != bNumber {
+			return aNumber < bNumber
+		}
+
+		// Everything is the same so far, try again with the remainder.
+		aVal = aRemainder
+		bVal = bRemainder
+	}
+}
+
+// splitAtNumber splits given string at the first digit, returning the
+// prefix before the number, the integer represented by the first
+// series of digits, and the remainder of the string after the first
+// series of digits. If no digits are present, the number is returned
+// as -1 and the remainder is empty.
+func splitAtNumber(str string) (string, int, string) {
+	i := indexOfDigit(str)
+	if i == -1 {
+		// no numbers
+		return str, -1, ""
+	}
+	j := i + indexOfNonDigit(str[i:])
+	n, err := strconv.Atoi(str[i:j])
+	if err != nil {
+		panic(fmt.Sprintf("parsing number %v: %v", str[i:j], err)) // should never happen
+	}
+	return str[:i], n, str[j:]
+}
+
+func indexOfDigit(str string) int {
+	for i, rune := range str {
+		if unicode.IsDigit(rune) {
+			return i
+		}
+	}
+	return -1
+}
+
+func indexOfNonDigit(str string) int {
+	for i, rune := range str {
+		if !unicode.IsDigit(rune) {
+			return i
+		}
+	}
+	return len(str)
+}
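
A quick illustration (not part of the vendored file) of the natural ordering implemented above.

package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	names := []string{"node-10", "node-2", "node-1"}
	utils.SortStringsNaturally(names)
	fmt.Println(names) // [node-1 node-2 node-10], not the lexical [node-1 node-10 node-2]
}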
diff --git a/automation/vendor/github.com/juju/utils/network.go b/automation/vendor/github.com/juju/utils/network.go
new file mode 100644
index 0000000..505a6e9
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/network.go
@@ -0,0 +1,46 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+	"net"
+
+	"github.com/juju/loggo"
+)
+
+var logger = loggo.GetLogger("juju.utils")
+
+// GetIPv4Address iterates through the addresses, expecting the format returned by
+// func (ifi *net.Interface) Addrs() ([]net.Addr, error), and returns the first IPv4 address.
+func GetIPv4Address(addresses []net.Addr) (string, error) {
+	for _, addr := range addresses {
+		ip, _, err := net.ParseCIDR(addr.String())
+		if err != nil {
+			return "", err
+		}
+		ipv4 := ip.To4()
+		if ipv4 == nil {
+			continue
+		}
+		return ipv4.String(), nil
+	}
+	return "", fmt.Errorf("no addresses match")
+}
+
+// GetAddressForInterface looks for the network interface
+// and returns the IPv4 address from the possible addresses.
+func GetAddressForInterface(interfaceName string) (string, error) {
+	iface, err := net.InterfaceByName(interfaceName)
+	if err != nil {
+		logger.Errorf("cannot find network interface %q: %v", interfaceName, err)
+		return "", err
+	}
+	addrs, err := iface.Addrs()
+	if err != nil {
+		logger.Errorf("cannot get addresses for network interface %q: %v", interfaceName, err)
+		return "", err
+	}
+	return GetIPv4Address(addrs)
+}
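
An illustrative call (not part of the vendored file) to GetAddressForInterface above; "eth0" is a placeholder for whatever interface exists on the host.

package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	addr, err := utils.GetAddressForInterface("eth0") // interface name is illustrative
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("IPv4 address:", addr)
}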
diff --git a/automation/vendor/github.com/juju/utils/os.go b/automation/vendor/github.com/juju/utils/os.go
new file mode 100644
index 0000000..146f793
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/os.go
@@ -0,0 +1,41 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+// These are the names of the operating systems recognized by Go.
+const (
+	OSWindows   = "windows"
+	OSDarwin    = "darwin"
+	OSDragonfly = "dragonfly"
+	OSFreebsd   = "freebsd"
+	OSLinux     = "linux"
+	OSNacl      = "nacl"
+	OSNetbsd    = "netbsd"
+	OSOpenbsd   = "openbsd"
+	OSSolaris   = "solaris"
+)
+
+// OSUnix is the list of unix-like operating systems recognized by Go.
+// See http://golang.org/src/path/filepath/path_unix.go.
+var OSUnix = []string{
+	OSDarwin,
+	OSDragonfly,
+	OSFreebsd,
+	OSLinux,
+	OSNacl,
+	OSNetbsd,
+	OSOpenbsd,
+	OSSolaris,
+}
+
+// OSIsUnix determines whether or not the given OS name is one of the
+// unix-like operating systems recognized by Go.
+func OSIsUnix(os string) bool {
+	for _, goos := range OSUnix {
+		if os == goos {
+			return true
+		}
+	}
+	return false
+}
diff --git a/automation/vendor/github.com/juju/utils/password.go b/automation/vendor/github.com/juju/utils/password.go
new file mode 100644
index 0000000..914e8e4
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/password.go
@@ -0,0 +1,92 @@
+// Copyright 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"crypto/rand"
+	"crypto/sha512"
+	"encoding/base64"
+	"fmt"
+	"io"
+
+	"golang.org/x/crypto/pbkdf2"
+)
+
+// CompatSalt exists because Juju 1.16 and older used a hard-coded salt to
+// compute the password hash for all users and agents.
+var CompatSalt = string([]byte{0x75, 0x82, 0x81, 0xca})
+
+const randomPasswordBytes = 18
+
+// MinAgentPasswordLength describes how long agent passwords should be. We
+// require this length because we assume enough entropy in the Agent password
+// that it is safe to not do extra rounds of iterated hashing.
+var MinAgentPasswordLength = base64.StdEncoding.EncodedLen(randomPasswordBytes)
+
+// RandomBytes returns n random bytes.
+func RandomBytes(n int) ([]byte, error) {
+	buf := make([]byte, n)
+	_, err := io.ReadFull(rand.Reader, buf)
+	if err != nil {
+		return nil, fmt.Errorf("cannot read random bytes: %v", err)
+	}
+	return buf, nil
+}
+
+// RandomPassword generates a random base64-encoded password.
+func RandomPassword() (string, error) {
+	b, err := RandomBytes(randomPasswordBytes)
+	if err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(b), nil
+}
+
+// RandomSalt generates random base64 data suitable for use as a password
+// salt. The pbkdf2 guideline is to use 8 bytes of salt, so we do 12 raw bytes
+// into 16 base64 bytes. (The alternative is 6 raw into 8 base64).
+func RandomSalt() (string, error) {
+	b, err := RandomBytes(12)
+	if err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(b), nil
+}
+
+// FastInsecureHash specifies whether a fast, insecure version of the hash
+// algorithm will be used.  Changing this will cause PasswordHash to
+// produce incompatible passwords.  It should only be changed for
+// testing purposes - to make tests run faster.
+var FastInsecureHash = false
+
+// UserPasswordHash returns base64-encoded one-way hash password that is
+// computationally hard to crack by iterating through possible passwords.
+func UserPasswordHash(password string, salt string) string {
+	if salt == "" {
+		panic("salt is not allowed to be empty")
+	}
+	iter := 8192
+	if FastInsecureHash {
+		iter = 1
+	}
+	// Generate 18 byte passwords because we know that MongoDB
+	// uses the MD5 sum of the password anyway, so there's
+	// no point in using more bytes. (18 so we don't get base 64
+	// padding characters).
+	h := pbkdf2.Key([]byte(password), []byte(salt), iter, 18, sha512.New)
+	return base64.StdEncoding.EncodeToString(h)
+}
+
+// AgentPasswordHash returns base64-encoded one-way hash of password. This is
+// not suitable for User passwords because those will have limited entropy (see
+// UserPasswordHash). However, since we generate long random passwords for
+// agents, we can trust that there is sufficient entropy to prevent brute force
+// search. And using a faster hash allows us to restart the state machines and
+// have 1000s of agents log in in a reasonable amount of time.
+func AgentPasswordHash(password string) string {
+	sum := sha512.New()
+	sum.Write([]byte(password))
+	h := sum.Sum(nil)
+	return base64.StdEncoding.EncodeToString(h[:18])
+}
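A minimal sketch of how the password helpers compose: generate a password and salt, then store the appropriate hash. The choice of which hash to store is illustrative, not mandated by the package.

```go
package main

import (
	"fmt"
	"log"

	"github.com/juju/utils"
)

func main() {
	password, err := utils.RandomPassword()
	if err != nil {
		log.Fatal(err)
	}
	salt, err := utils.RandomSalt()
	if err != nil {
		log.Fatal(err)
	}
	// For human users, store the salted, iterated PBKDF2 hash.
	fmt.Println("user hash: ", utils.UserPasswordHash(password, salt))
	// For agents with long random passwords, the single-pass hash suffices.
	fmt.Println("agent hash:", utils.AgentPasswordHash(password))
}
```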
diff --git a/automation/vendor/github.com/juju/utils/randomstring.go b/automation/vendor/github.com/juju/utils/randomstring.go
new file mode 100644
index 0000000..662f514
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/randomstring.go
@@ -0,0 +1,42 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// These rune sets can be used as sane default arguments for RandomString.
+var (
+	LowerAlpha = []rune("abcdefghijklmnopqrstuvwxyz")
+	UpperAlpha = []rune("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
+	Digits     = []rune("0123456789")
+)
+
+var (
+	randomStringMu   sync.Mutex
+	randomStringRand *rand.Rand
+)
+
+func init() {
+	randomStringRand = rand.New(
+		rand.NewSource(time.Now().UnixNano()),
+	)
+}
+
+// RandomString will return a string of length n that will only
+// contain runes inside validRunes.
+func RandomString(n int, validRunes []rune) string {
+	randomStringMu.Lock()
+	defer randomStringMu.Unlock()
+
+	runes := make([]rune, n)
+	for i := range runes {
+		runes[i] = validRunes[randomStringRand.Intn(len(validRunes))]
+	}
+
+	return string(runes)
+}
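A minimal sketch combining the exported rune sets into one alphabet for RandomString; the token length of 16 is an arbitrary choice.

```go
package main

import (
	"fmt"

	"github.com/juju/utils"
)

func main() {
	// Build the allowed alphabet from the exported default rune sets.
	alphabet := make([]rune, 0, len(utils.LowerAlpha)+len(utils.UpperAlpha)+len(utils.Digits))
	alphabet = append(alphabet, utils.LowerAlpha...)
	alphabet = append(alphabet, utils.UpperAlpha...)
	alphabet = append(alphabet, utils.Digits...)

	// Note: RandomString uses math/rand, so it is not suitable for secrets;
	// prefer RandomPassword (crypto/rand) for anything security-sensitive.
	fmt.Println(utils.RandomString(16, alphabet))
}
```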
diff --git a/automation/vendor/github.com/juju/utils/relativeurl.go b/automation/vendor/github.com/juju/utils/relativeurl.go
new file mode 100644
index 0000000..b70f1d5
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/relativeurl.go
@@ -0,0 +1,62 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"strings"
+
+	"github.com/juju/errors"
+)
+
+// RelativeURLPath returns a relative URL path that is lexically
+// equivalent to targpath when interpreted by url.URL.ResolveReference.
+// On success, the returned path will always be non-empty and relative
+// to basePath, even if basePath and targPath share no elements.
+//
+// It is assumed that both basePath and targPath are normalized
+// (have no . or .. elements).
+//
+// An error is returned if basePath or targPath are not absolute paths.
+func RelativeURLPath(basePath, targPath string) (string, error) {
+	if !strings.HasPrefix(basePath, "/") {
+		return "", errors.New("non-absolute base URL")
+	}
+	if !strings.HasPrefix(targPath, "/") {
+		return "", errors.New("non-absolute target URL")
+	}
+	baseParts := strings.Split(basePath, "/")
+	targParts := strings.Split(targPath, "/")
+
+	// For the purposes of dotdot, the last element of
+	// the paths are irrelevant. We save the last part
+	// of the target path for later.
+	lastElem := targParts[len(targParts)-1]
+	baseParts = baseParts[0 : len(baseParts)-1]
+	targParts = targParts[0 : len(targParts)-1]
+
+	// Find the common prefix between the two paths:
+	var i int
+	for ; i < len(baseParts); i++ {
+		if i >= len(targParts) || baseParts[i] != targParts[i] {
+			break
+		}
+	}
+	dotdotCount := len(baseParts) - i
+	targOnly := targParts[i:]
+	result := make([]string, 0, dotdotCount+len(targOnly)+1)
+	for i := 0; i < dotdotCount; i++ {
+		result = append(result, "..")
+	}
+	result = append(result, targOnly...)
+	result = append(result, lastElem)
+	final := strings.Join(result, "/")
+	if final == "" {
+		// If the final result is empty, the last element must
+		// have been empty, so the target was slash terminated
+		// and there were no previous elements, so "."
+		// is appropriate.
+		final = "."
+	}
+	return final, nil
+}
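A minimal sketch, with made-up paths, showing that resolving the returned relative path against the base reproduces the target, as the doc comment promises.

```go
package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/juju/utils"
)

func main() {
	base := "/v2/models/abc/status" // illustrative path
	targ := "/v2/charms/wordpress"  // illustrative path

	rel, err := utils.RelativeURLPath(base, targ)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(rel) // "../../charms/wordpress"

	// Round trip: resolving rel against base yields the target again.
	baseURL := &url.URL{Path: base}
	fmt.Println(baseURL.ResolveReference(&url.URL{Path: rel}).Path)
}
```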
diff --git a/automation/vendor/github.com/juju/utils/set/ints.go b/automation/vendor/github.com/juju/utils/set/ints.go
new file mode 100644
index 0000000..02009ea
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/set/ints.go
@@ -0,0 +1,112 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package set
+
+import (
+	"sort"
+)
+
+// Ints represents the classic "set" data structure, and contains ints.
+type Ints map[int]bool
+
+// NewInts creates and initializes an Ints and populates it with
+// initial values as specified in the parameters.
+func NewInts(initial ...int) Ints {
+	result := make(Ints)
+	for _, value := range initial {
+		result.Add(value)
+	}
+	return result
+}
+
+// Size returns the number of elements in the set.
+func (is Ints) Size() int {
+	return len(is)
+}
+
+// IsEmpty is true for empty or uninitialized sets.
+func (is Ints) IsEmpty() bool {
+	return len(is) == 0
+}
+
+// Add puts a value into the set.
+func (is Ints) Add(value int) {
+	if is == nil {
+		panic("uninitialised set")
+	}
+	is[value] = true
+}
+
+// Remove takes a value out of the set. If value wasn't in the set to start
+// with, this method silently succeeds.
+func (is Ints) Remove(value int) {
+	delete(is, value)
+}
+
+// Contains returns true if the value is in the set, and false otherwise.
+func (is Ints) Contains(value int) bool {
+	_, exists := is[value]
+	return exists
+}
+
+// Values returns an unordered slice containing all the values in the set.
+func (is Ints) Values() []int {
+	result := make([]int, len(is))
+	i := 0
+	for key := range is {
+		result[i] = key
+		i++
+	}
+	return result
+}
+
+// SortedValues returns an ordered slice containing all the values in the set.
+func (is Ints) SortedValues() []int {
+	values := is.Values()
+	sort.Ints(values)
+	return values
+}
+
+// Union returns a new Ints representing a union of the elements in the
+// method target and the parameter.
+func (is Ints) Union(other Ints) Ints {
+	result := make(Ints)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range is {
+		result[value] = true
+	}
+	for value := range other {
+		result[value] = true
+	}
+	return result
+}
+
+// Intersection returns a new Ints representing an intersection of the elements in the
+// method target and the parameter.
+func (is Ints) Intersection(other Ints) Ints {
+	result := make(Ints)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range is {
+		if other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
+
+// Difference returns a new Ints representing all the values in the
+// target that are not in the parameter.
+func (is Ints) Difference(other Ints) Ints {
+	result := make(Ints)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range is {
+		if !other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
diff --git a/automation/vendor/github.com/juju/utils/set/strings.go b/automation/vendor/github.com/juju/utils/set/strings.go
new file mode 100644
index 0000000..b89f932
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/set/strings.go
@@ -0,0 +1,112 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package set
+
+import (
+	"sort"
+)
+
+// Strings represents the classic "set" data structure, and contains strings.
+type Strings map[string]bool
+
+// NewStrings creates and initializes a Strings and populates it with
+// initial values as specified in the parameters.
+func NewStrings(initial ...string) Strings {
+	result := make(Strings)
+	for _, value := range initial {
+		result.Add(value)
+	}
+	return result
+}
+
+// Size returns the number of elements in the set.
+func (s Strings) Size() int {
+	return len(s)
+}
+
+// IsEmpty is true for empty or uninitialized sets.
+func (s Strings) IsEmpty() bool {
+	return len(s) == 0
+}
+
+// Add puts a value into the set.
+func (s Strings) Add(value string) {
+	if s == nil {
+		panic("uninitialised set")
+	}
+	s[value] = true
+}
+
+// Remove takes a value out of the set. If value wasn't in the set to start
+// with, this method silently succeeds.
+func (s Strings) Remove(value string) {
+	delete(s, value)
+}
+
+// Contains returns true if the value is in the set, and false otherwise.
+func (s Strings) Contains(value string) bool {
+	_, exists := s[value]
+	return exists
+}
+
+// Values returns an unordered slice containing all the values in the set.
+func (s Strings) Values() []string {
+	result := make([]string, len(s))
+	i := 0
+	for key := range s {
+		result[i] = key
+		i++
+	}
+	return result
+}
+
+// SortedValues returns an ordered slice containing all the values in the set.
+func (s Strings) SortedValues() []string {
+	values := s.Values()
+	sort.Strings(values)
+	return values
+}
+
+// Union returns a new Strings representing a union of the elements in the
+// method target and the parameter.
+func (s Strings) Union(other Strings) Strings {
+	result := make(Strings)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range s {
+		result[value] = true
+	}
+	for value := range other {
+		result[value] = true
+	}
+	return result
+}
+
+// Intersection returns a new Strings representing an intersection of the elements in the
+// method target and the parameter.
+func (s Strings) Intersection(other Strings) Strings {
+	result := make(Strings)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range s {
+		if other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
+
+// Difference returns a new Strings representing all the values in the
+// target that are not in the parameter.
+func (s Strings) Difference(other Strings) Strings {
+	result := make(Strings)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range s {
+		if !other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
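A minimal sketch of typical set operations with made-up service names; the Ints set above and the Tags set below expose the same method set.

```go
package main

import (
	"fmt"

	"github.com/juju/utils/set"
)

func main() {
	running := set.NewStrings("harvester", "provisioner", "allocator")
	wanted := set.NewStrings("provisioner", "allocator", "notifier")

	// Difference gives what to stop; the reverse difference what to start.
	fmt.Println("stop: ", running.Difference(wanted).SortedValues())
	fmt.Println("start:", wanted.Difference(running).SortedValues())
	fmt.Println("keep: ", running.Intersection(wanted).SortedValues())
}
```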
diff --git a/automation/vendor/github.com/juju/utils/set/tags.go b/automation/vendor/github.com/juju/utils/set/tags.go
new file mode 100644
index 0000000..0fb6f87
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/set/tags.go
@@ -0,0 +1,150 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package set
+
+import (
+	"sort"
+
+	"github.com/juju/errors"
+	"gopkg.in/juju/names.v2"
+)
+
+// Tags represents the Set data structure; it implements tagSet
+// and contains names.Tags.
+type Tags map[names.Tag]bool
+
+// NewTags creates and initializes a Tags and populates it with
+// initial values as specified in the parameters.
+func NewTags(initial ...names.Tag) Tags {
+	result := make(Tags)
+	for _, value := range initial {
+		result.Add(value)
+	}
+	return result
+}
+
+// NewTagsFromStrings creates and initializes a Tags and populates it
+// by using names.ParseTag on the initial values specified in the parameters.
+func NewTagsFromStrings(initial ...string) (Tags, error) {
+	result := make(Tags)
+	for _, value := range initial {
+		tag, err := names.ParseTag(value)
+		if err != nil {
+			return result, errors.Trace(err)
+		}
+		result.Add(tag)
+	}
+	return result, nil
+}
+
+// Size returns the number of elements in the set.
+func (t Tags) Size() int {
+	return len(t)
+}
+
+// IsEmpty is true for empty or uninitialized sets.
+func (t Tags) IsEmpty() bool {
+	return len(t) == 0
+}
+
+// Add puts a value into the set.
+func (t Tags) Add(value names.Tag) {
+	if t == nil {
+		panic("uninitialised set")
+	}
+	t[value] = true
+}
+
+// Remove takes a value out of the set.  If value wasn't in the set to start
+// with, this method silently succeeds.
+func (t Tags) Remove(value names.Tag) {
+	delete(t, value)
+}
+
+// Contains returns true if the value is in the set, and false otherwise.
+func (t Tags) Contains(value names.Tag) bool {
+	_, exists := t[value]
+	return exists
+}
+
+// Values returns an unordered slice containing all the values in the set.
+func (t Tags) Values() []names.Tag {
+	result := make([]names.Tag, len(t))
+	i := 0
+	for key := range t {
+		result[i] = key
+		i++
+	}
+	return result
+}
+
+// stringValues returns a list of strings that represent a names.Tag
+// Used internally by the SortedValues method.
+func (t Tags) stringValues() []string {
+	result := make([]string, t.Size())
+	i := 0
+	for key := range t {
+		result[i] = key.String()
+		i++
+	}
+	return result
+}
+
+// SortedValues returns an ordered slice containing all the values in the set.
+func (t Tags) SortedValues() []names.Tag {
+	values := t.stringValues()
+	sort.Strings(values)
+
+	result := make([]names.Tag, len(values))
+	for i, value := range values {
+		// We already know only good strings can live in the Tags set
+		// so we can safely ignore the error here.
+		tag, _ := names.ParseTag(value)
+		result[i] = tag
+	}
+	return result
+}
+
+// Union returns a new Tags representing a union of the elements in the
+// method target and the parameter.
+func (t Tags) Union(other Tags) Tags {
+	result := make(Tags)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range t {
+		result[value] = true
+	}
+	for value := range other {
+		result[value] = true
+	}
+	return result
+}
+
+// Intersection returns a new Tags representing an intersection of the elements in the
+// method target and the parameter.
+func (t Tags) Intersection(other Tags) Tags {
+	result := make(Tags)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range t {
+		if other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
+
+// Difference returns a new Tags representing all the values in the
+// target that are not in the parameter.
+func (t Tags) Difference(other Tags) Tags {
+	result := make(Tags)
+	// Use the internal map rather than going through the friendlier functions
+	// to avoid extra allocation of slices.
+	for value := range t {
+		if !other.Contains(value) {
+			result[value] = true
+		}
+	}
+	return result
+}
diff --git a/automation/vendor/github.com/juju/utils/size.go b/automation/vendor/github.com/juju/utils/size.go
new file mode 100644
index 0000000..8f6f88d
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/size.go
@@ -0,0 +1,78 @@
+// Copyright 2014 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"math"
+	"strconv"
+	"strings"
+	"unicode"
+
+	"github.com/juju/errors"
+)
+
+// ParseSize parses the string as a size, in mebibytes.
+//
+// The string must be a non-negative number with
+// an optional multiplier suffix (M, G, T, P, E, Z, or Y).
+// If the suffix is not specified, "M" is implied.
+func ParseSize(str string) (MB uint64, err error) {
+	// Find the first non-digit/period:
+	i := strings.IndexFunc(str, func(r rune) bool {
+		return r != '.' && !unicode.IsDigit(r)
+	})
+	var multiplier float64 = 1
+	if i > 0 {
+		suffix := str[i:]
+		multiplier = 0
+		for j := 0; j < len(sizeSuffixes); j++ {
+			base := string(sizeSuffixes[j])
+			// M, MB, or MiB are all valid.
+			switch suffix {
+			case base, base + "B", base + "iB":
+				multiplier = float64(sizeSuffixMultiplier(j))
+				break
+			}
+		}
+		if multiplier == 0 {
+			return 0, errors.Errorf("invalid multiplier suffix %q, expected one of %s", suffix, []byte(sizeSuffixes))
+		}
+		str = str[:i]
+	}
+
+	val, err := strconv.ParseFloat(str, 64)
+	if err != nil || val < 0 {
+		return 0, errors.Errorf("expected a non-negative number, got %q", str)
+	}
+	val *= multiplier
+	return uint64(math.Ceil(val)), nil
+}
+
+var sizeSuffixes = "MGTPEZY"
+
+func sizeSuffixMultiplier(i int) int {
+	return 1 << uint(i*10)
+}
+
+// SizeTracker tracks the number of bytes passing through
+// its Write method (which is otherwise a no-op).
+//
+// Use SizeTracker with io.MultiWriter() to track number of bytes
+// written. Use with io.TeeReader() to track number of bytes read.
+type SizeTracker struct {
+	// size is the number of bytes written so far.
+	size int64
+}
+
+// Size returns the number of bytes written so far.
+func (st SizeTracker) Size() int64 {
+	return st.size
+}
+
+// Write implements io.Writer.
+func (st *SizeTracker) Write(data []byte) (n int, err error) {
+	n = len(data)
+	st.size += int64(n)
+	return n, nil
+}
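A minimal sketch of ParseSize's accepted suffix forms and of SizeTracker combined with io.MultiWriter; the inputs are illustrative.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"log"

	"github.com/juju/utils"
)

func main() {
	// "M", "MB" and "MiB" style suffixes are all accepted; results are in MiB.
	for _, s := range []string{"512", "1.5G", "2GiB"} {
		mb, err := utils.ParseSize(s)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s -> %d MiB\n", s, mb)
	}

	// Count bytes written alongside a real destination.
	var dst bytes.Buffer
	var tracker utils.SizeTracker
	if _, err := io.Copy(io.MultiWriter(&dst, &tracker), bytes.NewBufferString("payload")); err != nil {
		log.Fatal(err)
	}
	fmt.Println("bytes written:", tracker.Size())
}
```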
diff --git a/automation/vendor/github.com/juju/utils/systemerrmessages_unix.go b/automation/vendor/github.com/juju/utils/systemerrmessages_unix.go
new file mode 100644
index 0000000..7a0edd4
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/systemerrmessages_unix.go
@@ -0,0 +1,16 @@
+// Copyright 2014 Canonical Ltd.
+// Copyright 2014 Cloudbase Solutions SRL
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// +build !windows
+
+package utils
+
+// The following are strings/regex-es which match common Unix error messages
+// that may be returned in case of failed calls to the system.
+// Any extra leading/trailing regex-es are left to be added by the developer.
+const (
+	NoSuchUserErrRegexp = `user: unknown user [a-z0-9_-]*`
+	NoSuchFileErrRegexp = `no such file or directory`
+	MkdirFailErrRegexp  = `.* not a directory`
+)
diff --git a/automation/vendor/github.com/juju/utils/systemerrmessages_windows.go b/automation/vendor/github.com/juju/utils/systemerrmessages_windows.go
new file mode 100644
index 0000000..b24d453
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/systemerrmessages_windows.go
@@ -0,0 +1,14 @@
+// Copyright 2014 Canonical Ltd.
+// Copyright 2014 Cloudbase Solutions SRL
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+// The following are strings/regex-es which match common Windows error messages
+// that may be returned in case of failed calls to the system.
+// Any extra leading/trailing regex-es are left to be added by the developer.
+const (
+	NoSuchUserErrRegexp = `No mapping between account names and security IDs was done\.`
+	NoSuchFileErrRegexp = `The system cannot find the (file|path) specified\.`
+	MkdirFailErrRegexp  = `mkdir .*` + NoSuchFileErrRegexp
+)
diff --git a/automation/vendor/github.com/juju/utils/timeit.go b/automation/vendor/github.com/juju/utils/timeit.go
new file mode 100644
index 0000000..172b593
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/timeit.go
@@ -0,0 +1,57 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"fmt"
+	"os"
+	"time"
+)
+
+type timer struct {
+	action     string
+	start      time.Time
+	depth      int
+	duration   time.Duration
+	subActions []*timer
+}
+
+func (t *timer) String() string {
+	this := fmt.Sprintf("%.3fs %*s%s\n", t.duration.Seconds(), t.depth, "", t.action)
+	for _, sub := range t.subActions {
+		this += sub.String()
+	}
+	return this
+}
+
+var stack []*timer
+
+// Timeit starts a timer, used for tracking time spent.
+// Generally used with defer, as in:
+//  defer utils.Timeit("my func")()
+// which will track how much time is spent in your function. Or,
+// if you want to track the time spent in a function you are calling,
+// then you would use:
+//  toc := utils.Timeit("anotherFunc()")
+//  anotherFunc()
+//  toc()
+// This tracks nested calls by indenting the output, and will print out the
+// full stack of timing when we reach the top of the stack.
+func Timeit(action string) func() {
+	cur := &timer{action: action, start: time.Now(), depth: len(stack)}
+	if len(stack) != 0 {
+		tip := stack[len(stack)-1]
+		tip.subActions = append(tip.subActions, cur)
+	}
+	stack = append(stack, cur)
+	return func() {
+		cur.duration = time.Since(cur.start)
+		if len(stack) == 0 || cur == stack[0] {
+			fmt.Fprint(os.Stderr, cur)
+			stack = nil
+		} else {
+			stack = stack[0 : len(stack)-1]
+		}
+	}
+}
diff --git a/automation/vendor/github.com/juju/utils/timer.go b/automation/vendor/github.com/juju/utils/timer.go
new file mode 100644
index 0000000..6b32f09
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/timer.go
@@ -0,0 +1,124 @@
+// Copyright 2015 Canonical Ltd.
+// Copyright 2015 Cloudbase Solutions SRL
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"math/rand"
+	"time"
+
+	"github.com/juju/utils/clock"
+)
+
+// Countdown implements a timer that will call a provided function
+// after an internally stored duration. The steps as well as min and max
+// durations are declared upon initialization and depend on
+// the particular implementation.
+//
+// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427
+type Countdown interface {
+	// Reset stops the timer and resets its duration to the minimum one.
+	// Start must be called to start the timer again.
+	Reset()
+
+	// Start starts the internal timer.
+	// At the end of the timer, if Reset hasn't been called in the mean time
+	// Func will be called and the duration is increased for the next call.
+	Start()
+}
+
+// NewBackoffTimer creates and initializes a new BackoffTimer.
+// A backoff timer starts at min and gets multiplied by factor
+// until it reaches max. Jitter determines whether a small
+// randomization is added to the duration.
+//
+// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427
+func NewBackoffTimer(config BackoffTimerConfig) *BackoffTimer {
+	return &BackoffTimer{
+		config:          config,
+		currentDuration: config.Min,
+	}
+}
+
+// BackoffTimer implements Countdown.
+// A backoff timer starts at min and gets multiplied by factor
+// until it reaches max. Jitter determines whether a small
+// randomization is added to the duration.
+//
+// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427
+type BackoffTimer struct {
+	config BackoffTimerConfig
+
+	timer           clock.Timer
+	currentDuration time.Duration
+}
+
+// BackoffTimerConfig is a helper struct for backoff timer
+// that encapsulates config information.
+//
+// TODO(katco): 2016-08-09: This type is deprecated: lp:1611427
+type BackoffTimerConfig struct {
+	// The minimum duration after which Func is called.
+	Min time.Duration
+
+	// The maximum duration after which Func is called.
+	Max time.Duration
+
+	// Determines whether a small randomization is applied to
+	// the duration.
+	Jitter bool
+
+	// The factor by which you want the duration to increase
+	// every time.
+	Factor int64
+
+	// Func is the function that will be called when the countdown reaches 0.
+	Func func()
+
+	// Clock provides the AfterFunc function used to call func.
+	// It is exposed here so it's easier to mock it in tests.
+	Clock clock.Clock
+}
+
+// Start implements the Countdown interface.
+// Any existing timer execution is stopped before
+// a new one is created.
+func (t *BackoffTimer) Start() {
+	if t.timer != nil {
+		t.timer.Stop()
+	}
+	t.timer = t.config.Clock.AfterFunc(t.currentDuration, t.config.Func)
+
+	// Since it's a backoff timer we will increase
+	// the duration after each signal.
+	t.increaseDuration()
+}
+
+// Reset implements the Countdown interface.
+func (t *BackoffTimer) Reset() {
+	if t.timer != nil {
+		t.timer.Stop()
+	}
+	if t.currentDuration > t.config.Min {
+		t.currentDuration = t.config.Min
+	}
+}
+
+// increaseDuration will increase the duration based on
+// the current value and the factor. If jitter is true
+// it will add up to ±3% jitter to the final value.
+func (t *BackoffTimer) increaseDuration() {
+	current := int64(t.currentDuration)
+	nextDuration := time.Duration(current * t.config.Factor)
+	if t.config.Jitter {
+		// Get a factor in [-1; 1].
+		randFactor := (rand.Float64() * 2) - 1
+		jitter := float64(nextDuration) * randFactor * 0.03
+		nextDuration = nextDuration + time.Duration(jitter)
+	}
+	if nextDuration > t.config.Max {
+		nextDuration = t.config.Max
+	}
+	t.currentDuration = nextDuration
+}
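A minimal retry-loop sketch for BackoffTimer. It assumes clock.WallClock from github.com/juju/utils/clock as the Clock implementation and an arbitrary three-attempt policy; neither is prescribed by the vendored code.

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/utils"
	"github.com/juju/utils/clock"
)

func main() {
	done := make(chan struct{})
	attempts := 0
	var timer *utils.BackoffTimer

	timer = utils.NewBackoffTimer(utils.BackoffTimerConfig{
		Min:    time.Second,
		Max:    30 * time.Second,
		Jitter: true,
		Factor: 2,
		Clock:  clock.WallClock, // assumed wall-clock implementation
		Func: func() {
			attempts++
			fmt.Println("retrying, attempt", attempts)
			if attempts < 3 {
				timer.Start() // schedule the next, longer wait
			} else {
				close(done)
			}
		},
	})
	timer.Start()
	<-done
}
```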
diff --git a/automation/vendor/github.com/juju/utils/tls.go b/automation/vendor/github.com/juju/utils/tls.go
new file mode 100644
index 0000000..b805af8
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/tls.go
@@ -0,0 +1,72 @@
+// Copyright 2016 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"crypto/tls"
+	"net/http"
+	"time"
+)
+
+// NewHttpTLSTransport returns a new http.Transport constructed with the TLS config
+// and the necessary parameters for Juju.
+func NewHttpTLSTransport(tlsConfig *tls.Config) *http.Transport {
+	// See https://code.google.com/p/go/issues/detail?id=4677
+	// We need to force the connection to close each time so that we don't
+	// hit the above Go bug.
+	transport := &http.Transport{
+		Proxy:               http.ProxyFromEnvironment,
+		TLSClientConfig:     tlsConfig,
+		DisableKeepAlives:   true,
+		TLSHandshakeTimeout: 10 * time.Second,
+	}
+	installHTTPDialShim(transport)
+	registerFileProtocol(transport)
+	return transport
+}
+
+// knownGoodCipherSuites contains the list of secure cipher suites to use
+// with tls.Config. This list matches those that Go 1.6 implements from
+// https://wiki.mozilla.org/Security/Server_Side_TLS#Recommended_configurations.
+//
+// https://tools.ietf.org/html/rfc7525#section-4.2 excludes RSA exchange completely
+// so we could be more strict if all our clients will support
+// TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256/384. Unfortunately Go's crypto library
+// is limited and doesn't support DHE-RSA-AES256-GCM-SHA384 and
+// DHE-RSA-AES256-SHA256, which are part of the recommended set.
+//
+// Unfortunately we can't drop the RSA algorithms because our servers aren't
+// generating ECDHE keys.
+var knownGoodCipherSuites = []uint16{
+	// These are technically useless for Juju, since we use an RSA certificate,
+	// but they also don't hurt anything, and supporting an ECDSA certificate
+	// could be useful in the future.
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
+
+	tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+
+	// Windows doesn't support GCM currently, so we need these for RSA support.
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+
+	// We need this so that we have at least one suite in common
+	// with the default gnutls installed for precise and trusty.
+	tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+}
+
+// SecureTLSConfig returns a tls.Config that conforms to Juju's security
+// standards, so as to avoid known security vulnerabilities in certain
+// configurations.
+//
+// Currently it excludes RC4 implementations from the available ciphersuites,
+// requires ciphersuites that provide forward secrecy, and sets the minimum TLS
+// version to 1.2.
+func SecureTLSConfig() *tls.Config {
+	return &tls.Config{
+		CipherSuites: knownGoodCipherSuites,
+		MinVersion:   tls.VersionTLS12,
+	}
+}
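A minimal sketch wiring SecureTLSConfig and NewHttpTLSTransport into an http.Client; the URL and timeout are placeholders.

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/juju/utils"
)

func main() {
	// The transport enforces the hardened cipher suites and TLS 1.2 minimum.
	client := &http.Client{
		Transport: utils.NewHttpTLSTransport(utils.SecureTLSConfig()),
		Timeout:   30 * time.Second,
	}
	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```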
diff --git a/automation/vendor/github.com/juju/utils/trivial.go b/automation/vendor/github.com/juju/utils/trivial.go
new file mode 100644
index 0000000..642e213
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/trivial.go
@@ -0,0 +1,147 @@
+// Copyright 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"bytes"
+	"compress/gzip"
+	"crypto/sha256"
+	"encoding/hex"
+	"io"
+	"io/ioutil"
+	"os"
+	"strings"
+	"unicode"
+)
+
+// TODO(ericsnow) Move the quoting helpers into the shell package?
+
+// ShQuote quotes s so that when read by bash, no metacharacters
+// within s will be interpreted as such.
+func ShQuote(s string) string {
+	// single-quote becomes single-quote, double-quote, single-quote, double-quote, single-quote
+	return `'` + strings.Replace(s, `'`, `'"'"'`, -1) + `'`
+}
+
+// WinPSQuote quotes s so that when read by powershell, no metacharacters
+// within s will be interpreted as such.
+func WinPSQuote(s string) string {
+	// See http://ss64.com/ps/syntax-esc.html#quotes.
+// Double quotes inside single quotes are fine; doubled single quotes inside
+// single quotes are not. Keeping double-quoted strings inside single-quoted
+// strings ensures no expansion happens.
+	return `'` + strings.Replace(s, `'`, `"`, -1) + `'`
+}
+
+// WinCmdQuote quotes s so that when read by cmd.exe, no metacharacters
+// within s will be interpreted as such.
+func WinCmdQuote(s string) string {
+	// See http://blogs.msdn.com/b/twistylittlepassagesallalike/archive/2011/04/23/everyone-quotes-arguments-the-wrong-way.aspx.
+	quoted := winCmdQuote(s)
+	return winCmdEscapeMeta(quoted)
+}
+
+func winCmdQuote(s string) string {
+	var escaped string
+	for _, c := range s {
+		switch c {
+		case '\\', '"':
+			escaped += `\`
+		}
+		escaped += string(c)
+	}
+	return `"` + escaped + `"`
+}
+
+func winCmdEscapeMeta(str string) string {
+	const meta = `()%!^"<>&|`
+	var newStr string
+	for _, c := range str {
+		if strings.Contains(meta, string(c)) {
+			newStr += "^"
+		}
+		newStr += string(c)
+	}
+	return newStr
+}
+
+// CommandString flattens a sequence of command arguments into a
+// string suitable for executing in a shell, escaping slashes,
+// variables and quotes as necessary; each argument is double-quoted
+// if and only if necessary.
+func CommandString(args ...string) string {
+	var buf bytes.Buffer
+	for i, arg := range args {
+		needsQuotes := false
+		var argBuf bytes.Buffer
+		for _, r := range arg {
+			if unicode.IsSpace(r) {
+				needsQuotes = true
+			} else if r == '"' || r == '$' || r == '\\' {
+				needsQuotes = true
+				argBuf.WriteByte('\\')
+			}
+			argBuf.WriteRune(r)
+		}
+		if i > 0 {
+			buf.WriteByte(' ')
+		}
+		if needsQuotes {
+			buf.WriteByte('"')
+			argBuf.WriteTo(&buf)
+			buf.WriteByte('"')
+		} else {
+			argBuf.WriteTo(&buf)
+		}
+	}
+	return buf.String()
+}
+
+// Gzip compresses the given data.
+func Gzip(data []byte) []byte {
+	var buf bytes.Buffer
+	w := gzip.NewWriter(&buf)
+	if _, err := w.Write(data); err != nil {
+		// Compression should never fail unless it fails
+		// to write to the underlying writer, which is a bytes.Buffer
+		// that never fails.
+		panic(err)
+	}
+	if err := w.Close(); err != nil {
+		panic(err)
+	}
+	return buf.Bytes()
+}
+
+// Gunzip uncompresses the given data.
+func Gunzip(data []byte) ([]byte, error) {
+	r, err := gzip.NewReader(bytes.NewReader(data))
+	if err != nil {
+		return nil, err
+	}
+	return ioutil.ReadAll(r)
+}
+
+// ReadSHA256 returns the SHA256 hash of the contents read from source
+// (hex encoded) and the size of the source in bytes.
+func ReadSHA256(source io.Reader) (string, int64, error) {
+	hash := sha256.New()
+	size, err := io.Copy(hash, source)
+	if err != nil {
+		return "", 0, err
+	}
+	digest := hex.EncodeToString(hash.Sum(nil))
+	return digest, size, nil
+}
+
+// ReadFileSHA256 is like ReadSHA256 but reads the contents of the
+// given file.
+func ReadFileSHA256(filename string) (string, int64, error) {
+	f, err := os.Open(filename)
+	if err != nil {
+		return "", 0, err
+	}
+	defer f.Close()
+	return ReadSHA256(f)
+}
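A minimal sketch of the quoting and compression helpers; the strings are illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/juju/utils"
)

func main() {
	// Quote arguments so a POSIX shell treats each as a single literal token.
	fmt.Println(utils.ShQuote(`it's "quoted"`))
	fmt.Println(utils.CommandString("echo", "hello world", `$HOME`))

	// Gzip/Gunzip round trip.
	compressed := utils.Gzip([]byte("some payload"))
	original, err := utils.Gunzip(compressed)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(original))
}
```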
diff --git a/automation/vendor/github.com/juju/utils/username.go b/automation/vendor/github.com/juju/utils/username.go
new file mode 100644
index 0000000..5107e3b
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/username.go
@@ -0,0 +1,77 @@
+// Copyright 2015 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"os"
+	"os/user"
+
+	"github.com/juju/errors"
+)
+
+// ResolveSudo returns the original username if sudo was used. The
+// original username is extracted from the OS environment.
+func ResolveSudo(username string) string {
+	return resolveSudo(username, os.Getenv)
+}
+
+func resolveSudo(username string, getenvFunc func(string) string) string {
+	if username != "root" {
+		return username
+	}
+	// sudo was probably called, get the original user.
+	if username := getenvFunc("SUDO_USER"); username != "" {
+		return username
+	}
+	return username
+}
+
+// EnvUsername returns the username from the OS environment.
+func EnvUsername() (string, error) {
+	return os.Getenv("USER"), nil
+}
+
+// OSUsername returns the username of the current OS user (based on UID).
+func OSUsername() (string, error) {
+	u, err := user.Current()
+	if err != nil {
+		return "", errors.Trace(err)
+	}
+	return u.Username, nil
+}
+
+// ResolveUsername returns the username determined by the provided
+// functions. The functions are tried in the same order in which they
+// were passed in. An error returned from any of them is immediately
+// returned. If an empty string is returned then that signals that the
+// function did not find the username and the next function is tried.
+// Once a username is found, the provided resolveSudo func (if any) is
+// called with that username and the result is returned. If no username
+// is found then errors.NotFound is returned.
+func ResolveUsername(resolveSudo func(string) string, usernameFuncs ...func() (string, error)) (string, error) {
+	for _, usernameFunc := range usernameFuncs {
+		username, err := usernameFunc()
+		if err != nil {
+			return "", errors.Trace(err)
+		}
+		if username != "" {
+			if resolveSudo != nil {
+				if original := resolveSudo(username); original != "" {
+					username = original
+				}
+			}
+			return username, nil
+		}
+	}
+	return "", errors.NotFoundf("username")
+}
+
+// LocalUsername determines the current username on the local host.
+func LocalUsername() (string, error) {
+	username, err := ResolveUsername(ResolveSudo, EnvUsername, OSUsername)
+	if err != nil {
+		return "", errors.Annotatef(err, "cannot get current user from the environment: %v", os.Environ())
+	}
+	return username, nil
+}
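A minimal sketch of LocalUsername, which composes the helpers above.

```go
package main

import (
	"fmt"
	"log"

	"github.com/juju/utils"
)

func main() {
	// LocalUsername tries EnvUsername, then OSUsername, and maps a "root"
	// result back to SUDO_USER when the command was run under sudo.
	username, err := utils.LocalUsername()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("running as:", username)
}
```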
diff --git a/automation/vendor/github.com/juju/utils/uuid.go b/automation/vendor/github.com/juju/utils/uuid.go
new file mode 100644
index 0000000..2404efc
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/uuid.go
@@ -0,0 +1,90 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"crypto/rand"
+	"encoding/hex"
+	"fmt"
+	"io"
+	"regexp"
+	"strings"
+)
+
+// UUID represents a universal identifier with 16 octets.
+type UUID [16]byte
+
+// regex for validating that the UUID matches RFC 4122.
+// This package generates version 4 UUIDs but
+// accepts any UUID version.
+// http://www.ietf.org/rfc/rfc4122.txt
+var (
+	block1 = "[0-9a-f]{8}"
+	block2 = "[0-9a-f]{4}"
+	block3 = "[0-9a-f]{4}"
+	block4 = "[0-9a-f]{4}"
+	block5 = "[0-9a-f]{12}"
+
+	UUIDSnippet = block1 + "-" + block2 + "-" + block3 + "-" + block4 + "-" + block5
+	validUUID   = regexp.MustCompile("^" + UUIDSnippet + "$")
+)
+
+func UUIDFromString(s string) (UUID, error) {
+	if !IsValidUUIDString(s) {
+		return UUID{}, fmt.Errorf("invalid UUID: %q", s)
+	}
+	s = strings.Replace(s, "-", "", 4)
+	raw, err := hex.DecodeString(s)
+	if err != nil {
+		return UUID{}, err
+	}
+	var uuid UUID
+	copy(uuid[:], raw)
+	return uuid, nil
+}
+
+// IsValidUUIDString returns true if the given string matches a valid UUID (any version).
+func IsValidUUIDString(s string) bool {
+	return validUUID.MatchString(s)
+}
+
+// MustNewUUID returns a new uuid, if an error occurs it panics.
+func MustNewUUID() UUID {
+	uuid, err := NewUUID()
+	if err != nil {
+		panic(err)
+	}
+	return uuid
+}
+
+// NewUUID generates a new version 4 UUID relying only on random numbers.
+func NewUUID() (UUID, error) {
+	uuid := UUID{}
+	if _, err := io.ReadFull(rand.Reader, []byte(uuid[0:16])); err != nil {
+		return UUID{}, err
+	}
+	// Set version (4) and variant (2) according to RfC 4122.
+	var version byte = 4 << 4
+	var variant byte = 8 << 4
+	uuid[6] = version | (uuid[6] & 15)
+	uuid[8] = variant | (uuid[8] & 15)
+	return uuid, nil
+}
+
+// Copy returns a copy of the UUID.
+func (uuid UUID) Copy() UUID {
+	uuidCopy := uuid
+	return uuidCopy
+}
+
+// Raw returns a copy of the UUID bytes.
+func (uuid UUID) Raw() [16]byte {
+	return [16]byte(uuid)
+}
+
+// String returns a hexadecimal string representation with
+// standardized separators.
+func (uuid UUID) String() string {
+	return fmt.Sprintf("%x-%x-%x-%x-%x", uuid[0:4], uuid[4:6], uuid[6:8], uuid[8:10], uuid[10:16])
+}
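A minimal sketch generating a version 4 UUID, validating it, and parsing it back.

```go
package main

import (
	"fmt"
	"log"

	"github.com/juju/utils"
)

func main() {
	id, err := utils.NewUUID()
	if err != nil {
		log.Fatal(err)
	}
	s := id.String()
	fmt.Println("generated:", s, "valid:", utils.IsValidUUIDString(s))

	parsed, err := utils.UUIDFromString(s)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("round trip equal:", parsed == id) // UUID is a [16]byte, so == works
}
```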
diff --git a/automation/vendor/github.com/juju/utils/yaml.go b/automation/vendor/github.com/juju/utils/yaml.go
new file mode 100644
index 0000000..2d443a0
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/yaml.go
@@ -0,0 +1,107 @@
+// Copyright 2012, 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+package utils
+
+import (
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
+	"github.com/juju/errors"
+
+	"gopkg.in/yaml.v2"
+)
+
+// WriteYaml marshals obj as yaml to a temporary file in the same directory
+// as path, then atomically replaces path with the temporary file.
+func WriteYaml(path string, obj interface{}) error {
+	data, err := yaml.Marshal(obj)
+	if err != nil {
+		return errors.Trace(err)
+	}
+	dir := filepath.Dir(path)
+	f, err := ioutil.TempFile(dir, "juju")
+	if err != nil {
+		return errors.Trace(err)
+	}
+	tmp := f.Name()
+	if _, err := f.Write(data); err != nil {
+		f.Close()      // don't leak file handle
+		os.Remove(tmp) // don't leak half written files on disk
+		return errors.Trace(err)
+	}
+	// Explicitly close the file before moving it. This is needed on Windows
+	// where the OS will not allow us to move a file that still has an open
+	// file handle. Must check the error on close because filesystems can delay
+	// reporting errors until the file is closed.
+	if err := f.Close(); err != nil {
+		os.Remove(tmp) // don't leak half written files on disk
+		return errors.Trace(err)
+	}
+
+	// ioutils.TempFile creates files 0600, but this function has a contract
+	// that files will be world readable, 0644 after replacement.
+	if err := os.Chmod(tmp, 0644); err != nil {
+		os.Remove(tmp) // remove file with incorrect permissions.
+		return errors.Trace(err)
+	}
+
+	return ReplaceFile(tmp, path)
+}
+
+// ReadYaml unmarshals the yaml contained in the file at path into obj. See
+// goyaml.Unmarshal. If path is not found, the error returned will be compatible
+// with os.IsNotExist.
+func ReadYaml(path string, obj interface{}) error {
+	data, err := ioutil.ReadFile(path)
+	if err != nil {
+		return err // cannot wrap here because callers check for NotFound.
+	}
+	return yaml.Unmarshal(data, obj)
+}
+
+// ConformYAML ensures all keys of any nested maps are strings.  This is
+// necessary because YAML unmarshals map[interface{}]interface{} in nested
+// maps, which cannot be serialized by json or bson. It also handles
+// []interface{} values. Cf. gopkg.in/juju/charm.v4/actions.go cleanse.
+func ConformYAML(input interface{}) (interface{}, error) {
+	switch typedInput := input.(type) {
+
+	case map[string]interface{}:
+		newMap := make(map[string]interface{})
+		for key, value := range typedInput {
+			newValue, err := ConformYAML(value)
+			if err != nil {
+				return nil, err
+			}
+			newMap[key] = newValue
+		}
+		return newMap, nil
+
+	case map[interface{}]interface{}:
+		newMap := make(map[string]interface{})
+		for key, value := range typedInput {
+			typedKey, ok := key.(string)
+			if !ok {
+				return nil, errors.New("map keyed with non-string value")
+			}
+			newMap[typedKey] = value
+		}
+		return ConformYAML(newMap)
+
+	case []interface{}:
+		newSlice := make([]interface{}, len(typedInput))
+		for i, sliceValue := range typedInput {
+			newSliceValue, err := ConformYAML(sliceValue)
+			if err != nil {
+				return nil, errors.New("map keyed with non-string value")
+			}
+			newSlice[i] = newSliceValue
+		}
+		return newSlice, nil
+
+	default:
+		return input, nil
+	}
+}
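A minimal round-trip sketch for WriteYaml/ReadYaml; the config struct and file name are illustrative assumptions.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	"github.com/juju/utils"
)

// config is a hypothetical document type for the example.
type config struct {
	Name  string `yaml:"name"`
	Count int    `yaml:"count"`
}

func main() {
	dir, err := ioutil.TempDir("", "yaml-example")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "config.yaml")
	if err := utils.WriteYaml(path, config{Name: "automation", Count: 3}); err != nil {
		log.Fatal(err)
	}

	var loaded config
	if err := utils.ReadYaml(path, &loaded); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", loaded)
}
```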
diff --git a/automation/vendor/github.com/juju/utils/zfile_windows.go b/automation/vendor/github.com/juju/utils/zfile_windows.go
new file mode 100644
index 0000000..b1a50f1
--- /dev/null
+++ b/automation/vendor/github.com/juju/utils/zfile_windows.go
@@ -0,0 +1,28 @@
+// Copyright 2013 Canonical Ltd.
+// Licensed under the LGPLv3, see LICENCE file for details.
+
+// mksyscall_windows.pl -l32 file_windows.go
+// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
+
+package utils
+
+import "unsafe"
+import "syscall"
+
+var (
+	modkernel32 = syscall.NewLazyDLL("kernel32.dll")
+
+	procMoveFileExW = modkernel32.NewProc("MoveFileExW")
+)
+
+func moveFileEx(lpExistingFileName *uint16, lpNewFileName *uint16, dwFlags uint32) (err error) {
+	r1, _, e1 := syscall.Syscall(procMoveFileExW.Addr(), 3, uintptr(unsafe.Pointer(lpExistingFileName)), uintptr(unsafe.Pointer(lpNewFileName)), uintptr(dwFlags))
+	if r1 == 0 {
+		if e1 != 0 {
+			err = error(e1)
+		} else {
+			err = syscall.EINVAL
+		}
+	}
+	return
+}
diff --git a/automation/vendor/github.com/juju/version/LICENSE b/automation/vendor/github.com/juju/version/LICENSE
new file mode 100644
index 0000000..5cec73f
--- /dev/null
+++ b/automation/vendor/github.com/juju/version/LICENSE
@@ -0,0 +1,191 @@
+All files in this repository are licensed as follows. If you contribute
+to this repository, it is assumed that you license your contribution
+under the same license unless you state otherwise.
+
+All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
+
+This software is licensed under the LGPLv3, included below.
+
+As a special exception to the GNU Lesser General Public License version 3
+("LGPL3"), the copyright holders of this Library give you permission to
+convey to a third party a Combined Work that links statically or dynamically
+to this Library without providing any Minimal Corresponding Source or
+Minimal Application Code as set out in 4d or providing the installation
+information set out in section 4e, provided that you comply with the other
+provisions of LGPL3 and provided that you meet, for the Application the
+terms and conditions of the license(s) which apply to the Application.
+
+Except as stated in this special exception, the provisions of LGPL3 will
+continue to comply in full to this Library. If you modify this Library, you
+may apply this exception to your version of this Library, but you are not
+obliged to do so. If you do not wish to do so, delete this exception
+statement from your version. This exception does not (and cannot) modify any
+license terms which apply to the Application, with which you must still
+comply. 
+
+
+                   GNU LESSER GENERAL PUBLIC LICENSE
+                       Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+
+  This version of the GNU Lesser General Public License incorporates
+the terms and conditions of version 3 of the GNU General Public
+License, supplemented by the additional permissions listed below.
+
+  0. Additional Definitions.
+
+  As used herein, "this License" refers to version 3 of the GNU Lesser
+General Public License, and the "GNU GPL" refers to version 3 of the GNU
+General Public License.
+
+  "The Library" refers to a covered work governed by this License,
+other than an Application or a Combined Work as defined below.
+
+  An "Application" is any work that makes use of an interface provided
+by the Library, but which is not otherwise based on the Library.
+Defining a subclass of a class defined by the Library is deemed a mode
+of using an interface provided by the Library.
+
+  A "Combined Work" is a work produced by combining or linking an
+Application with the Library.  The particular version of the Library
+with which the Combined Work was made is also called the "Linked
+Version".
+
+  The "Minimal Corresponding Source" for a Combined Work means the
+Corresponding Source for the Combined Work, excluding any source code
+for portions of the Combined Work that, considered in isolation, are
+based on the Application, and not on the Linked Version.
+
+  The "Corresponding Application Code" for a Combined Work means the
+object code and/or source code for the Application, including any data
+and utility programs needed for reproducing the Combined Work from the
+Application, but excluding the System Libraries of the Combined Work.
+
+  1. Exception to Section 3 of the GNU GPL.
+
+  You may convey a covered work under sections 3 and 4 of this License
+without being bound by section 3 of the GNU GPL.
+
+  2. Conveying Modified Versions.
+
+  If you modify a copy of the Library, and, in your modifications, a
+facility refers to a function or data to be supplied by an Application
+that uses the facility (other than as an argument passed when the
+facility is invoked), then you may convey a copy of the modified
+version:
+
+   a) under this License, provided that you make a good faith effort to
+   ensure that, in the event an Application does not supply the
+   function or data, the facility still operates, and performs
+   whatever part of its purpose remains meaningful, or
+
+   b) under the GNU GPL, with none of the additional permissions of
+   this License applicable to that copy.
+
+  3. Object Code Incorporating Material from Library Header Files.
+
+  The object code form of an Application may incorporate material from
+a header file that is part of the Library.  You may convey such object
+code under terms of your choice, provided that, if the incorporated
+material is not limited to numerical parameters, data structure
+layouts and accessors, or small macros, inline functions and templates
+(ten or fewer lines in length), you do both of the following:
+
+   a) Give prominent notice with each copy of the object code that the
+   Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the object code with a copy of the GNU GPL and this license
+   document.
+
+  4. Combined Works.
+
+  You may convey a Combined Work under terms of your choice that,
+taken together, effectively do not restrict modification of the
+portions of the Library contained in the Combined Work and reverse
+engineering for debugging such modifications, if you also do each of
+the following:
+
+   a) Give prominent notice with each copy of the Combined Work that
+   the Library is used in it and that the Library and its use are
+   covered by this License.
+
+   b) Accompany the Combined Work with a copy of the GNU GPL and this license
+   document.
+
+   c) For a Combined Work that displays copyright notices during
+   execution, include the copyright notice for the Library among
+   these notices, as well as a reference directing the user to the
+   copies of the GNU GPL and this license document.
+
+   d) Do one of the following:
+
+       0) Convey the Minimal Corresponding Source under the terms of this
+       License, and the Corresponding Application Code in a form
+       suitable for, and under terms that permit, the user to
+       recombine or relink the Application with a modified version of
+       the Linked Version to produce a modified Combined Work, in the
+       manner specified by section 6 of the GNU GPL for conveying
+       Corresponding Source.
+
+       1) Use a suitable shared library mechanism for linking with the
+       Library.  A suitable mechanism is one that (a) uses at run time
+       a copy of the Library already present on the user's computer
+       system, and (b) will operate properly with a modified version
+       of the Library that is interface-compatible with the Linked
+       Version.
+
+   e) Provide Installation Information, but only if you would otherwise
+   be required to provide such information under section 6 of the
+   GNU GPL, and only to the extent that such information is
+   necessary to install and execute a modified version of the
+   Combined Work produced by recombining or relinking the
+   Application with a modified version of the Linked Version. (If
+   you use option 4d0, the Installation Information must accompany
+   the Minimal Corresponding Source and Corresponding Application
+   Code. If you use option 4d1, you must provide the Installation
+   Information in the manner specified by section 6 of the GNU GPL
+   for conveying Corresponding Source.)
+
+  5. Combined Libraries.
+
+  You may place library facilities that are a work based on the
+Library side by side in a single library together with other library
+facilities that are not Applications and are not covered by this
+License, and convey such a combined library under terms of your
+choice, if you do both of the following:
+
+   a) Accompany the combined library with a copy of the same work based
+   on the Library, uncombined with any other library facilities,
+   conveyed under the terms of this License.
+
+   b) Give prominent notice with the combined library that part of it
+   is a work based on the Library, and explaining where to find the
+   accompanying uncombined form of the same work.
+
+  6. Revised Versions of the GNU Lesser General Public License.
+
+  The Free Software Foundation may publish revised and/or new versions
+of the GNU Lesser General Public License from time to time. Such new
+versions will be similar in spirit to the present version, but may
+differ in detail to address new problems or concerns.
+
+  Each version is given a distinguishing version number. If the
+Library as you received it specifies that a certain numbered version
+of the GNU Lesser General Public License "or any later version"
+applies to it, you have the option of following the terms and
+conditions either of that published version or of any later version
+published by the Free Software Foundation. If the Library as you
+received it does not specify a version number of the GNU Lesser
+General Public License, you may choose any version of the GNU Lesser
+General Public License ever published by the Free Software Foundation.
+
+  If the Library as you received it specifies that a proxy can decide
+whether future versions of the GNU Lesser General Public License shall
+apply, that proxy's public statement of acceptance of any version is
+permanent authorization for you to choose that version for the
+Library.
diff --git a/automation/vendor/github.com/juju/version/README.md b/automation/vendor/github.com/juju/version/README.md
new file mode 100644
index 0000000..2d1b48b
--- /dev/null
+++ b/automation/vendor/github.com/juju/version/README.md
@@ -0,0 +1,3 @@
+# Version [![GoDoc](https://godoc.org/github.com/juju/version?status.svg)](https://godoc.org/github.com/juju/version) 
+version is a Go package for intelligent version comparisons.
+
diff --git a/automation/vendor/github.com/juju/version/version.go b/automation/vendor/github.com/juju/version/version.go
new file mode 100644
index 0000000..026e99f
--- /dev/null
+++ b/automation/vendor/github.com/juju/version/version.go
@@ -0,0 +1,297 @@
+// Copyright 2012, 2013 Canonical Ltd.
+// Licensed under the AGPLv3, see LICENCE file for details.
+
+// Package version implements version parsing.
+package version
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"gopkg.in/mgo.v2/bson"
+)
+
+// Number represents a version number.
+type Number struct {
+	Major int
+	Minor int
+	Tag   string
+	Patch int
+	Build int
+}
+
+// Zero is occasionally convenient and readable.
+// Please don't change its value.
+var Zero = Number{}
+
+// Binary specifies a binary version of juju.
+type Binary struct {
+	Number
+	Series string
+	Arch   string
+}
+
+// String returns the string representation of the binary version.
+func (b Binary) String() string {
+	return fmt.Sprintf("%v-%s-%s", b.Number, b.Series, b.Arch)
+}
+
+// GetBSON implements bson.Getter.
+func (b Binary) GetBSON() (interface{}, error) {
+	return b.String(), nil
+}
+
+// SetBSON implements bson.Setter.
+func (b *Binary) SetBSON(raw bson.Raw) error {
+	var s string
+	err := raw.Unmarshal(&s)
+	if err != nil {
+		return err
+	}
+	v, err := ParseBinary(s)
+	if err != nil {
+		return err
+	}
+	*b = v
+	return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (b Binary) MarshalJSON() ([]byte, error) {
+	return json.Marshal(b.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (b *Binary) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	v, err := ParseBinary(s)
+	if err != nil {
+		return err
+	}
+	*b = v
+	return nil
+}
+
+// MarshalYAML implements yaml.v2.Marshaller interface.
+func (b Binary) MarshalYAML() (interface{}, error) {
+	return b.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaller interface.
+func (b *Binary) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var vstr string
+	err := unmarshal(&vstr)
+	if err != nil {
+		return err
+	}
+	v, err := ParseBinary(vstr)
+	if err != nil {
+		return err
+	}
+	*b = v
+	return nil
+}
+
+var (
+	binaryPat = regexp.MustCompile(`^(\d{1,9})\.(\d{1,9})(?:\.|-([a-z]+))(\d{1,9})(\.\d{1,9})?-([^-]+)-([^-]+)$`)
+	numberPat = regexp.MustCompile(`^(\d{1,9})\.(\d{1,9})(?:\.|-([a-z]+))(\d{1,9})(\.\d{1,9})?$`)
+)
+
+// MustParse parses a version and panics if it does
+// not parse correctly.
+func MustParse(s string) Number {
+	v, err := Parse(s)
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// MustParseBinary parses a binary version and panics if it does
+// not parse correctly.
+func MustParseBinary(s string) Binary {
+	b, err := ParseBinary(s)
+	if err != nil {
+		panic(err)
+	}
+	return b
+}
+
+// ParseBinary parses a binary version of the form "1.2.3-series-arch".
+func ParseBinary(s string) (Binary, error) {
+	m := binaryPat.FindStringSubmatch(s)
+	if m == nil {
+		return Binary{}, fmt.Errorf("invalid binary version %q", s)
+	}
+	var b Binary
+	b.Major = atoi(m[1])
+	b.Minor = atoi(m[2])
+	b.Tag = m[3]
+	b.Patch = atoi(m[4])
+	if m[5] != "" {
+		b.Build = atoi(m[5][1:])
+	}
+	b.Series = m[6]
+	b.Arch = m[7]
+	return b, nil
+}
+
+// Parse parses the version, which is of the form 1.2.3
+// giving the major, minor and patch versions
+// respectively.
+func Parse(s string) (Number, error) {
+	m := numberPat.FindStringSubmatch(s)
+	if m == nil {
+		return Number{}, fmt.Errorf("invalid version %q", s)
+	}
+	var n Number
+	n.Major = atoi(m[1])
+	n.Minor = atoi(m[2])
+	n.Tag = m[3]
+	n.Patch = atoi(m[4])
+	if m[5] != "" {
+		n.Build = atoi(m[5][1:])
+	}
+	return n, nil
+}
+
+// atoi is the same as strconv.Atoi but assumes that
+// the string has been verified to be a valid integer.
+func atoi(s string) int {
+	n, err := strconv.Atoi(s)
+	if err != nil {
+		panic(err)
+	}
+	return n
+}
+
+// String returns the string representation of this Number.
+func (n Number) String() string {
+	var s string
+	if n.Tag == "" {
+		s = fmt.Sprintf("%d.%d.%d", n.Major, n.Minor, n.Patch)
+	} else {
+		s = fmt.Sprintf("%d.%d-%s%d", n.Major, n.Minor, n.Tag, n.Patch)
+	}
+	if n.Build > 0 {
+		s += fmt.Sprintf(".%d", n.Build)
+	}
+	return s
+}
+
+// Compare returns -1, 0 or 1 depending on whether
+// n is less than, equal to or greater than other.
+// The comparison checks Major, then Minor, then Tag, then Patch, then Build,
+// and uses the first difference as the result.
+func (n Number) Compare(other Number) int {
+	if n == other {
+		return 0
+	}
+	less := false
+	switch {
+	case n.Major != other.Major:
+		less = n.Major < other.Major
+	case n.Minor != other.Minor:
+		less = n.Minor < other.Minor
+	case n.Tag != other.Tag:
+		switch {
+		case n.Tag == "":
+			less = false
+		case other.Tag == "":
+			less = true
+		default:
+			less = n.Tag < other.Tag
+		}
+	case n.Patch != other.Patch:
+		less = n.Patch < other.Patch
+	case n.Build != other.Build:
+		less = n.Build < other.Build
+	}
+	if less {
+		return -1
+	}
+	return 1
+}
+
+// GetBSON implements bson.Getter.
+func (n Number) GetBSON() (interface{}, error) {
+	return n.String(), nil
+}
+
+// SetBSON implements bson.Setter.
+func (n *Number) SetBSON(raw bson.Raw) error {
+	var s string
+	err := raw.Unmarshal(&s)
+	if err != nil {
+		return err
+	}
+	v, err := Parse(s)
+	if err != nil {
+		return err
+	}
+	*n = v
+	return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (n Number) MarshalJSON() ([]byte, error) {
+	return json.Marshal(n.String())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (n *Number) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	v, err := Parse(s)
+	if err != nil {
+		return err
+	}
+	*n = v
+	return nil
+}
+
+// MarshalYAML implements the yaml.v2.Marshaller interface.
+func (n Number) MarshalYAML() (interface{}, error) {
+	return n.String(), nil
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaller interface.
+func (n *Number) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var vstr string
+	err := unmarshal(&vstr)
+	if err != nil {
+		return err
+	}
+	v, err := Parse(vstr)
+	if err != nil {
+		return err
+	}
+	*n = v
+	return nil
+}
+
+// ParseMajorMinor takes an argument of the form "major.minor" and returns ints major and minor.
+func ParseMajorMinor(vers string) (int, int, error) {
+	parts := strings.Split(vers, ".")
+	major, err := strconv.Atoi(parts[0])
+	minor := -1
+	if err != nil {
+		return -1, -1, fmt.Errorf("invalid major version number %s: %v", parts[0], err)
+	}
+	if len(parts) == 2 {
+		minor, err = strconv.Atoi(parts[1])
+		if err != nil {
+			return -1, -1, fmt.Errorf("invalid minor version number %s: %v", parts[1], err)
+		}
+	} else if len(parts) > 2 {
+		return -1, -1, fmt.Errorf("invalid major.minor version number %s", vers)
+	}
+	return major, minor, nil
+}
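+
+// exampleCompare is an illustrative sketch added alongside this vendored copy
+// for documentation purposes; it is not part of the upstream juju/version
+// package. It shows how MustParse, Compare and ParseMajorMinor fit together.
+func exampleCompare() {
+	release := MustParse("2.0.1")
+	candidate := MustParse("2.0-beta3")
+	// Tagged versions sort before untagged ones, so this prints 1.
+	fmt.Println(release.Compare(candidate))
+
+	if major, minor, err := ParseMajorMinor("2.0"); err == nil {
+		fmt.Println(major, minor) // prints: 2 0
+	}
+}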
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/LICENSE b/automation/vendor/github.com/kelseyhightower/envconfig/LICENSE
new file mode 100644
index 0000000..4bfa7a8
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2013 Kelsey Hightower
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS b/automation/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
new file mode 100644
index 0000000..6527a9f
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/MAINTAINERS
@@ -0,0 +1,2 @@
+Kelsey Hightower kelsey.hightower@gmail.com github.com/kelseyhightower
+Travis Parker    travis.parker@gmail.com    github.com/teepark
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/README.md b/automation/vendor/github.com/kelseyhightower/envconfig/README.md
new file mode 100644
index 0000000..09de74b
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/README.md
@@ -0,0 +1,175 @@
+# envconfig
+
+[![Build Status](https://travis-ci.org/kelseyhightower/envconfig.png)](https://travis-ci.org/kelseyhightower/envconfig)
+
+```Go
+import "github.com/kelseyhightower/envconfig"
+```
+
+## Documentation
+
+See [godoc](http://godoc.org/github.com/kelseyhightower/envconfig)
+
+## Usage
+
+Set some environment variables:
+
+```Bash
+export MYAPP_DEBUG=false
+export MYAPP_PORT=8080
+export MYAPP_USER=Kelsey
+export MYAPP_RATE="0.5"
+export MYAPP_TIMEOUT="3m"
+export MYAPP_USERS="rob,ken,robert"
+```
+
+Write some code:
+
+```Go
+package main
+
+import (
+    "fmt"
+    "log"
+    "time"
+
+    "github.com/kelseyhightower/envconfig"
+)
+
+type Specification struct {
+    Debug   bool
+    Port    int
+    User    string
+    Users   []string
+    Rate    float32
+    Timeout time.Duration
+}
+
+func main() {
+    var s Specification
+    err := envconfig.Process("myapp", &s)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+    format := "Debug: %v\nPort: %d\nUser: %s\nRate: %f\nTimeout: %s\n"
+    _, err = fmt.Printf(format, s.Debug, s.Port, s.User, s.Rate, s.Timeout)
+    if err != nil {
+        log.Fatal(err.Error())
+    }
+
+    fmt.Println("Users:")
+    for _, u := range s.Users {
+        fmt.Printf("  %s\n", u)
+    }
+}
+```
+
+Results:
+
+```Bash
+Debug: false
+Port: 8080
+User: Kelsey
+Rate: 0.500000
+Timeout: 3m0s
+Users:
+  rob
+  ken
+  robert
+```
+
+## Struct Tag Support
+
+Envconfig supports the use of struct tags to specify alternate, default, and required
+environment variables.
+
+For example, consider the following struct:
+
+```Go
+type Specification struct {
+    ManualOverride1 string `envconfig:"manual_override_1"`
+    DefaultVar      string `default:"foobar"`
+    RequiredVar     string `required:"true"`
+    IgnoredVar      string `ignored:"true"`
+    AutoSplitVar    string `split_words:"true"`
+}
+```
+
+Envconfig has automatic support for CamelCased struct elements when the
+`split_words:"true"` tag is supplied. Without this tag, `AutoSplitVar` above
+would look for an environment variable called `MYAPP_AUTOSPLITVAR`. With the
+setting applied it will look for `MYAPP_AUTO_SPLIT_VAR`. Note that numbers
+will get globbed into the previous word. If the setting does not do the
+right thing, you may use a manual override.
+
+Envconfig will process the value for `ManualOverride1` by populating it with the
+value for `MYAPP_MANUAL_OVERRIDE_1`. Without this struct tag, it would have
+instead looked up `MYAPP_MANUALOVERRIDE1`. With the `split_words:"true"` tag
+it would have looked up `MYAPP_MANUAL_OVERRIDE1`.
+
+```Bash
+export MYAPP_MANUAL_OVERRIDE_1="this will be the value"
+
+# export MYAPP_MANUALOVERRIDE1="and this will not"
+```
+
+If envconfig can't find an environment variable value for `MYAPP_DEFAULTVAR`,
+it will populate it with "foobar" as a default value.
+
+If envconfig can't find an environment variable value for `MYAPP_REQUIREDVAR`,
+it will return an error when asked to process the struct.
+
+If envconfig can't find an environment variable in the form `PREFIX_MYVAR`, and there
+is a struct tag defined, it will try to populate your variable with an environment
+variable that directly matches the envconfig tag in your struct definition:
+
+```shell
+export SERVICE_HOST=127.0.0.1
+export MYAPP_DEBUG=true
+```
+```Go
+type Specification struct {
+    ServiceHost string `envconfig:"SERVICE_HOST"`
+    Debug       bool
+}
+```
+
+Envconfig won't process a field with the "ignored" tag set to "true", even if a corresponding
+environment variable is set.
+
+## Supported Struct Field Types
+
+envconfig supports these struct field types:
+
+  * string
+  * int8, int16, int32, int64
+  * bool
+  * float32, float64
+  * [encoding.TextUnmarshaler](https://golang.org/pkg/encoding/#TextUnmarshaler)
+
+Embedded structs using these fields are also supported.
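+
+For instance (an illustrative sketch, not taken from the upstream README; the
+`DBConfig` name is made up), fields of an embedded struct are looked up as if
+they were declared on the outer struct:
+
+```Go
+type DBConfig struct {
+    Host string
+    Port int
+}
+
+type Specification struct {
+    DBConfig // resolves to MYAPP_HOST and MYAPP_PORT
+    Debug    bool
+}
+```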
+
+## Custom Decoders
+
+Any field whose type (or pointer-to-type) implements `envconfig.Decoder` can
+control its own deserialization:
+
+```Bash
+export DNS_SERVER=8.8.8.8
+```
+
+```Go
+type IPDecoder net.IP
+
+func (ipd *IPDecoder) Decode(value string) error {
+    *ipd = IPDecoder(net.ParseIP(value))
+    return nil
+}
+
+type DNSConfig struct {
+    Address IPDecoder `envconfig:"DNS_SERVER"`
+}
+```
+
+Also, envconfig will use a `Set(string) error` method, such as the one from the
+[flag.Value](https://godoc.org/flag#Value) interface, if it is implemented.
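+
+As an illustrative sketch (not taken from the upstream README; the `Token` and
+`AuthConfig` names are made up), a pointer-receiver `Set` method is enough:
+
+```Go
+type Token string
+
+func (t *Token) Set(value string) error {
+    *t = Token("Bearer " + value)
+    return nil
+}
+
+type AuthConfig struct {
+    Token Token `envconfig:"AUTH_TOKEN"`
+}
+```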
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/doc.go b/automation/vendor/github.com/kelseyhightower/envconfig/doc.go
new file mode 100644
index 0000000..f28561c
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/doc.go
@@ -0,0 +1,8 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+// Package envconfig implements decoding of environment variables based on a user
+// defined specification. A typical use is using environment variables for
+// configuration settings.
+package envconfig
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/env_os.go b/automation/vendor/github.com/kelseyhightower/envconfig/env_os.go
new file mode 100644
index 0000000..a6a014a
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/env_os.go
@@ -0,0 +1,7 @@
+// +build appengine
+
+package envconfig
+
+import "os"
+
+var lookupEnv = os.LookupEnv
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/env_syscall.go b/automation/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
new file mode 100644
index 0000000..9d98085
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/env_syscall.go
@@ -0,0 +1,7 @@
+// +build !appengine
+
+package envconfig
+
+import "syscall"
+
+var lookupEnv = syscall.Getenv
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/envconfig.go b/automation/vendor/github.com/kelseyhightower/envconfig/envconfig.go
new file mode 100644
index 0000000..3ad5e7d
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/envconfig.go
@@ -0,0 +1,298 @@
+// Copyright (c) 2013 Kelsey Hightower. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// ErrInvalidSpecification indicates that a specification is of the wrong type.
+var ErrInvalidSpecification = errors.New("specification must be a struct pointer")
+
+// A ParseError occurs when an environment variable cannot be converted to
+// the type required by a struct field during assignment.
+type ParseError struct {
+	KeyName   string
+	FieldName string
+	TypeName  string
+	Value     string
+	Err       error
+}
+
+// Decoder has the same semantics as Setter, but takes higher precedence.
+// It is provided for historical compatibility.
+type Decoder interface {
+	Decode(value string) error
+}
+
+// Setter is implemented by types that can self-deserialize values.
+// Any type that implements flag.Value also implements Setter.
+type Setter interface {
+	Set(value string) error
+}
+
+func (e *ParseError) Error() string {
+	return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s. details: %[5]s", e.KeyName, e.FieldName, e.Value, e.TypeName, e.Err)
+}
+
+// varInfo maintains information about the configuration variable
+type varInfo struct {
+	Name  string
+	Alt   string
+	Key   string
+	Field reflect.Value
+	Tags  reflect.StructTag
+}
+
+// gatherInfo gathers information about the specified struct.
+func gatherInfo(prefix string, spec interface{}) ([]varInfo, error) {
+	expr := regexp.MustCompile("([^A-Z]+|[A-Z][^A-Z]+|[A-Z]+)")
+	s := reflect.ValueOf(spec)
+
+	if s.Kind() != reflect.Ptr {
+		return nil, ErrInvalidSpecification
+	}
+	s = s.Elem()
+	if s.Kind() != reflect.Struct {
+		return nil, ErrInvalidSpecification
+	}
+	typeOfSpec := s.Type()
+
+	// over allocate an info array, we will extend if needed later
+	infos := make([]varInfo, 0, s.NumField())
+	for i := 0; i < s.NumField(); i++ {
+		f := s.Field(i)
+		ftype := typeOfSpec.Field(i)
+		if !f.CanSet() || ftype.Tag.Get("ignored") == "true" {
+			continue
+		}
+
+		for f.Kind() == reflect.Ptr {
+			if f.IsNil() {
+				if f.Type().Elem().Kind() != reflect.Struct {
+					// nil pointer to a non-struct: leave it alone
+					break
+				}
+				// nil pointer to struct: create a zero instance
+				f.Set(reflect.New(f.Type().Elem()))
+			}
+			f = f.Elem()
+		}
+
+		// Capture information about the config variable
+		info := varInfo{
+			Name:  ftype.Name,
+			Field: f,
+			Tags:  ftype.Tag,
+			Alt:   strings.ToUpper(ftype.Tag.Get("envconfig")),
+		}
+
+		// Default to the field name as the env var name (will be upcased)
+		info.Key = info.Name
+
+		// Best effort to un-pick camel casing as separate words
+		if ftype.Tag.Get("split_words") == "true" {
+			words := expr.FindAllStringSubmatch(ftype.Name, -1)
+			if len(words) > 0 {
+				var name []string
+				for _, words := range words {
+					name = append(name, words[0])
+				}
+
+				info.Key = strings.Join(name, "_")
+			}
+		}
+		if info.Alt != "" {
+			info.Key = info.Alt
+		}
+		if prefix != "" {
+			info.Key = fmt.Sprintf("%s_%s", prefix, info.Key)
+		}
+		info.Key = strings.ToUpper(info.Key)
+		infos = append(infos, info)
+
+		if f.Kind() == reflect.Struct {
+			// honor Decode if present
+			if decoderFrom(f) == nil && setterFrom(f) == nil && textUnmarshaler(f) == nil {
+				innerPrefix := prefix
+				if !ftype.Anonymous {
+					innerPrefix = info.Key
+				}
+
+				embeddedPtr := f.Addr().Interface()
+				embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr)
+				if err != nil {
+					return nil, err
+				}
+				infos = append(infos[:len(infos)-1], embeddedInfos...)
+
+				continue
+			}
+		}
+	}
+	return infos, nil
+}
+
+// Process populates the specified struct based on environment variables
+func Process(prefix string, spec interface{}) error {
+	infos, err := gatherInfo(prefix, spec)
+
+	for _, info := range infos {
+
+		// `os.Getenv` cannot differentiate between an explicitly set empty value
+		// and an unset value. `os.LookupEnv` is preferred to `syscall.Getenv`,
+		// but it is only available in go1.5 or newer. We're using Go build tags
+		// here to use os.LookupEnv for >=go1.5
+		value, ok := lookupEnv(info.Key)
+		if !ok && info.Alt != "" {
+			value, ok = lookupEnv(info.Alt)
+		}
+
+		def := info.Tags.Get("default")
+		if def != "" && !ok {
+			value = def
+		}
+
+		req := info.Tags.Get("required")
+		if !ok && def == "" {
+			if req == "true" {
+				return fmt.Errorf("required key %s missing value", info.Key)
+			}
+			continue
+		}
+
+		err := processField(value, info.Field)
+		if err != nil {
+			return &ParseError{
+				KeyName:   info.Key,
+				FieldName: info.Name,
+				TypeName:  info.Field.Type().String(),
+				Value:     value,
+				Err:       err,
+			}
+		}
+	}
+
+	return err
+}
+
+// MustProcess is the same as Process but panics if an error occurs
+func MustProcess(prefix string, spec interface{}) {
+	if err := Process(prefix, spec); err != nil {
+		panic(err)
+	}
+}
+
+func processField(value string, field reflect.Value) error {
+	typ := field.Type()
+
+	decoder := decoderFrom(field)
+	if decoder != nil {
+		return decoder.Decode(value)
+	}
+	// look for Set method if Decode not defined
+	setter := setterFrom(field)
+	if setter != nil {
+		return setter.Set(value)
+	}
+
+	if t := textUnmarshaler(field); t != nil {
+		return t.UnmarshalText([]byte(value))
+	}
+
+	if typ.Kind() == reflect.Ptr {
+		typ = typ.Elem()
+		if field.IsNil() {
+			field.Set(reflect.New(typ))
+		}
+		field = field.Elem()
+	}
+
+	switch typ.Kind() {
+	case reflect.String:
+		field.SetString(value)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		var (
+			val int64
+			err error
+		)
+		if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" {
+			var d time.Duration
+			d, err = time.ParseDuration(value)
+			val = int64(d)
+		} else {
+			val, err = strconv.ParseInt(value, 0, typ.Bits())
+		}
+		if err != nil {
+			return err
+		}
+
+		field.SetInt(val)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		val, err := strconv.ParseUint(value, 0, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetUint(val)
+	case reflect.Bool:
+		val, err := strconv.ParseBool(value)
+		if err != nil {
+			return err
+		}
+		field.SetBool(val)
+	case reflect.Float32, reflect.Float64:
+		val, err := strconv.ParseFloat(value, typ.Bits())
+		if err != nil {
+			return err
+		}
+		field.SetFloat(val)
+	case reflect.Slice:
+		vals := strings.Split(value, ",")
+		sl := reflect.MakeSlice(typ, len(vals), len(vals))
+		for i, val := range vals {
+			err := processField(val, sl.Index(i))
+			if err != nil {
+				return err
+			}
+		}
+		field.Set(sl)
+	}
+
+	return nil
+}
+
+func interfaceFrom(field reflect.Value, fn func(interface{}, *bool)) {
+	// it may be impossible for a struct field to fail this check
+	if !field.CanInterface() {
+		return
+	}
+	var ok bool
+	fn(field.Interface(), &ok)
+	if !ok && field.CanAddr() {
+		fn(field.Addr().Interface(), &ok)
+	}
+}
+
+func decoderFrom(field reflect.Value) (d Decoder) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { d, *ok = v.(Decoder) })
+	return d
+}
+
+func setterFrom(field reflect.Value) (s Setter) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { s, *ok = v.(Setter) })
+	return s
+}
+
+func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) {
+	interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) })
+	return t
+}
diff --git a/automation/vendor/github.com/kelseyhightower/envconfig/usage.go b/automation/vendor/github.com/kelseyhightower/envconfig/usage.go
new file mode 100644
index 0000000..4870237
--- /dev/null
+++ b/automation/vendor/github.com/kelseyhightower/envconfig/usage.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2016 Kelsey Hightower and others. All rights reserved.
+// Use of this source code is governed by the MIT License that can be found in
+// the LICENSE file.
+
+package envconfig
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"strconv"
+	"strings"
+	"text/tabwriter"
+	"text/template"
+)
+
+const (
+	// DefaultListFormat constant to use to display usage in a list format
+	DefaultListFormat = `This application is configured via the environment. The following environment
+variables can be used:
+{{range .}}
+{{usage_key .}}
+  [description] {{usage_description .}}
+  [type]        {{usage_type .}}
+  [default]     {{usage_default .}}
+  [required]    {{usage_required .}}{{end}}
+`
+	// DefaultTableFormat constant to use to display usage in a tabular format
+	DefaultTableFormat = `This application is configured via the environment. The following environment
+variables can be used:
+
+KEY	TYPE	DEFAULT	REQUIRED	DESCRIPTION
+{{range .}}{{usage_key .}}	{{usage_type .}}	{{usage_default .}}	{{usage_required .}}	{{usage_description .}}
+{{end}}`
+)
+
+var (
+	decoderType     = reflect.TypeOf((*Decoder)(nil)).Elem()
+	setterType      = reflect.TypeOf((*Setter)(nil)).Elem()
+	unmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+)
+
+func implementsInterface(t reflect.Type) bool {
+	return t.Implements(decoderType) ||
+		reflect.PtrTo(t).Implements(decoderType) ||
+		t.Implements(setterType) ||
+		reflect.PtrTo(t).Implements(setterType) ||
+		t.Implements(unmarshalerType) ||
+		reflect.PtrTo(t).Implements(unmarshalerType)
+}
+
+// toTypeDescription converts Go types into a human readable description
+func toTypeDescription(t reflect.Type) string {
+	switch t.Kind() {
+	case reflect.Array, reflect.Slice:
+		return fmt.Sprintf("Comma-separated list of %s", toTypeDescription(t.Elem()))
+	case reflect.Ptr:
+		return toTypeDescription(t.Elem())
+	case reflect.Struct:
+		if implementsInterface(t) && t.Name() != "" {
+			return t.Name()
+		}
+		return ""
+	case reflect.String:
+		name := t.Name()
+		if name != "" && name != "string" {
+			return name
+		}
+		return "String"
+	case reflect.Bool:
+		name := t.Name()
+		if name != "" && name != "bool" {
+			return name
+		}
+		return "True or False"
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "int") {
+			return name
+		}
+		return "Integer"
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "uint") {
+			return name
+		}
+		return "Unsigned Integer"
+	case reflect.Float32, reflect.Float64:
+		name := t.Name()
+		if name != "" && !strings.HasPrefix(name, "float") {
+			return name
+		}
+		return "Float"
+	}
+	return fmt.Sprintf("%+v", t)
+}
+
+// Usage writes usage information to stdout using the default header and table format
+func Usage(prefix string, spec interface{}) error {
+	// The default is to output the usage information as a table
+	// Create tabwriter instance to support table output
+	tabs := tabwriter.NewWriter(os.Stdout, 1, 0, 4, ' ', 0)
+
+	err := Usagef(prefix, spec, tabs, DefaultTableFormat)
+	tabs.Flush()
+	return err
+}
+
+// Usagef writes usage information to the specified io.Writer using the specified template specification
+func Usagef(prefix string, spec interface{}, out io.Writer, format string) error {
+
+	// Specify the default usage template functions
+	functions := template.FuncMap{
+		"usage_key":         func(v varInfo) string { return v.Key },
+		"usage_description": func(v varInfo) string { return v.Tags.Get("desc") },
+		"usage_type":        func(v varInfo) string { return toTypeDescription(v.Field.Type()) },
+		"usage_default":     func(v varInfo) string { return v.Tags.Get("default") },
+		"usage_required": func(v varInfo) (string, error) {
+			req := v.Tags.Get("required")
+			if req != "" {
+				reqB, err := strconv.ParseBool(req)
+				if err != nil {
+					return "", err
+				}
+				if reqB {
+					req = "true"
+				}
+			}
+			return req, nil
+		},
+	}
+
+	tmpl, err := template.New("envconfig").Funcs(functions).Parse(format)
+	if err != nil {
+		return err
+	}
+
+	return Usaget(prefix, spec, out, tmpl)
+}
+
+// Usaget writes usage information to the specified io.Writer using the specified template
+func Usaget(prefix string, spec interface{}, out io.Writer, tmpl *template.Template) error {
+	// gather first
+	infos, err := gatherInfo(prefix, spec)
+	if err != nil {
+		return err
+	}
+
+	return tmpl.Execute(out, infos)
+}
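+
+// exampleUsage is an illustrative sketch added alongside this vendored copy for
+// documentation purposes; it is not part of the upstream envconfig package. It
+// prints the table-formatted usage text for a hypothetical "myapp" specification.
+func exampleUsage() {
+	type Specification struct {
+		Debug bool `default:"false" desc:"enable verbose logging"`
+		Port  int  `required:"true" desc:"listen port"`
+	}
+	var s Specification
+	// Ignoring the error keeps the sketch short; real callers should check it.
+	_ = Usage("myapp", &s)
+}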
diff --git a/automation/vendor/github.com/lunixbochs/vtclean/LICENSE b/automation/vendor/github.com/lunixbochs/vtclean/LICENSE
new file mode 100644
index 0000000..42e8263
--- /dev/null
+++ b/automation/vendor/github.com/lunixbochs/vtclean/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2015 Ryan Hileman
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/automation/vendor/github.com/lunixbochs/vtclean/README.md b/automation/vendor/github.com/lunixbochs/vtclean/README.md
new file mode 100644
index 0000000..ddd1372
--- /dev/null
+++ b/automation/vendor/github.com/lunixbochs/vtclean/README.md
@@ -0,0 +1,44 @@
+vtclean
+----
+
+Clean up raw terminal output by stripping escape sequences, optionally preserving color.
+
+Get it: `go get github.com/lunixbochs/vtclean/vtclean`
+
+API:
+
+    import "github.com/lunixbochs/vtclean"
+    vtclean.Clean(line string, color bool) string
+
+Command line example:
+
+    $ echo -e '\x1b[1;32mcolor example
+    color forced to stop at end of line
+    backspace is ba\b\bgood
+    no beeps!\x07\x07' | ./vtclean -color
+
+    color example
+    color forced to stop at end of line
+    backspace is good
+    no beeps!
+
+Go example:
+
+    package main
+
+    import (
+        "fmt"
+        "github.com/lunixbochs/vtclean"
+    )
+
+    func main() {
+        line := vtclean.Clean(
+            "\033[1;32mcolor, " +
+            "curs\033[Aor, " +
+            "backspace\b\b\b\b\b\b\b\b\b\b\b\033[K", false)
+        fmt.Println(line)
+    }
+
+Output:
+
+    color, cursor
diff --git a/automation/vendor/github.com/lunixbochs/vtclean/io.go b/automation/vendor/github.com/lunixbochs/vtclean/io.go
new file mode 100644
index 0000000..31be007
--- /dev/null
+++ b/automation/vendor/github.com/lunixbochs/vtclean/io.go
@@ -0,0 +1,93 @@
+package vtclean
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+)
+
+type reader struct {
+	io.Reader
+	scanner *bufio.Scanner
+	buf     []byte
+
+	color bool
+}
+
+func NewReader(r io.Reader, color bool) io.Reader {
+	return &reader{Reader: r, color: color}
+}
+
+func (r *reader) scan() bool {
+	if r.scanner == nil {
+		r.scanner = bufio.NewScanner(r.Reader)
+	}
+	if len(r.buf) > 0 {
+		return true
+	}
+	if r.scanner.Scan() {
+		r.buf = []byte(Clean(r.scanner.Text(), r.color) + "\n")
+		return true
+	}
+	return false
+}
+
+func (r *reader) fill(p []byte) int {
+	n := len(r.buf)
+	copy(p, r.buf)
+	if len(p) < len(r.buf) {
+		r.buf = r.buf[len(p):]
+		n = len(p)
+	} else {
+		r.buf = nil
+	}
+	return n
+}
+
+func (r *reader) Read(p []byte) (int, error) {
+	n := r.fill(p)
+	if n < len(p) {
+		if !r.scan() {
+			if n == 0 {
+				return 0, io.EOF
+			}
+			return n, nil
+		}
+		n += r.fill(p[n:])
+	}
+	return n, nil
+}
+
+type writer struct {
+	io.Writer
+	buf   []byte
+	color bool
+}
+
+func NewWriter(w io.Writer, color bool) io.WriteCloser {
+	return &writer{Writer: w, color: color}
+}
+
+func (w *writer) Write(p []byte) (int, error) {
+	buf := append(w.buf, p...)
+	lines := bytes.Split(buf, []byte("\n"))
+	if len(lines) > 0 {
+		last := len(lines) - 1
+		w.buf = lines[last]
+		count := 0
+		for _, line := range lines[:last] {
+			n, err := w.Writer.Write([]byte(Clean(string(line), w.color) + "\n"))
+			count += n
+			if err != nil {
+				return count, err
+			}
+		}
+	}
+	return len(p), nil
+}
+
+func (w *writer) Close() error {
+	cl := Clean(string(w.buf), w.color)
+	_, err := w.Writer.Write([]byte(cl))
+	return err
+}
diff --git a/automation/vendor/github.com/lunixbochs/vtclean/line.go b/automation/vendor/github.com/lunixbochs/vtclean/line.go
new file mode 100644
index 0000000..7272d91
--- /dev/null
+++ b/automation/vendor/github.com/lunixbochs/vtclean/line.go
@@ -0,0 +1,107 @@
+package vtclean
+
+type char struct {
+	char  byte
+	vt100 []byte
+}
+
+func chars(p []byte) []char {
+	tmp := make([]char, len(p))
+	for i, v := range p {
+		tmp[i].char = v
+	}
+	return tmp
+}
+
+type lineEdit struct {
+	buf       []char
+	pos, size int
+	vt100     []byte
+}
+
+func newLineEdit(length int) *lineEdit {
+	return &lineEdit{buf: make([]char, length)}
+}
+
+func (l *lineEdit) Vt100(p []byte) {
+	l.vt100 = p
+}
+
+func (l *lineEdit) Move(x int) {
+	if x < 0 && l.pos <= -x {
+		l.pos = 0
+	} else if x > 0 && l.pos+x > l.size {
+		l.pos = l.size
+	} else {
+		l.pos += x
+	}
+}
+
+func (l *lineEdit) Write(p []byte) {
+	c := chars(p)
+	if len(c) > 0 {
+		c[0].vt100 = l.vt100
+		l.vt100 = nil
+	}
+	if len(l.buf)-l.pos < len(c) {
+		l.buf = append(l.buf[:l.pos], c...)
+	} else {
+		copy(l.buf[l.pos:], c)
+	}
+	l.pos += len(c)
+	if l.pos > l.size {
+		l.size = l.pos
+	}
+}
+
+func (l *lineEdit) Insert(p []byte) {
+	c := chars(p)
+	if len(c) > 0 {
+		c[0].vt100 = l.vt100
+		l.vt100 = nil
+	}
+	l.size += len(c)
+	c = append(c, l.buf[l.pos:]...)
+	l.buf = append(l.buf[:l.pos], c...)
+}
+
+func (l *lineEdit) Delete(n int) {
+	most := l.size - l.pos
+	if n > most {
+		n = most
+	}
+	copy(l.buf[l.pos:], l.buf[l.pos+n:])
+	l.size -= n
+}
+
+func (l *lineEdit) Clear() {
+	for i := 0; i < len(l.buf); i++ {
+		l.buf[i].char = ' '
+	}
+}
+func (l *lineEdit) ClearLeft() {
+	for i := 0; i < l.pos+1; i++ {
+		l.buf[i].char = ' '
+	}
+}
+func (l *lineEdit) ClearRight() {
+	l.size = l.pos
+}
+
+func (l *lineEdit) Bytes() []byte {
+	length := 0
+	buf := l.buf[:l.size]
+	for _, v := range buf {
+		length += 1 + len(v.vt100)
+	}
+	tmp := make([]byte, 0, length)
+	for _, v := range buf {
+		tmp = append(tmp, v.vt100...)
+		tmp = append(tmp, v.char)
+	}
+	return tmp
+}
+
+func (l *lineEdit) String() string {
+	return string(l.Bytes())
+}
diff --git a/automation/vendor/github.com/lunixbochs/vtclean/regex.txt b/automation/vendor/github.com/lunixbochs/vtclean/regex.txt
new file mode 100644
index 0000000..e55e7f2
--- /dev/null
+++ b/automation/vendor/github.com/lunixbochs/vtclean/regex.txt
@@ -0,0 +1,14 @@
+this is the source definitions for the scary escape code regex
+
+# from tests in Terminal.app, this regex should cover all basic \e[ and \e] cases
+^([\[\]]([\d\?]+)?(;[\d\?]+)*)?.
+
+# this catches any case the above does not
+# make sure to not include any special characters the main regex finds (like ?)
+\[[^a-zA-Z0-9@\?]+.
+
+# esc + paren + any single char
+[\(\)].
+
+# didn't re-check this one (not included)
+[\[K]\d+;\d+
diff --git a/automation/vendor/github.com/lunixbochs/vtclean/vtclean.go b/automation/vendor/github.com/lunixbochs/vtclean/vtclean.go
new file mode 100644
index 0000000..1e74237
--- /dev/null
+++ b/automation/vendor/github.com/lunixbochs/vtclean/vtclean.go
@@ -0,0 +1,81 @@
+package vtclean
+
+import (
+	"bytes"
+	"regexp"
+	"strconv"
+)
+
+// see regex.txt for a slightly separated version of this regex
+var vt100re = regexp.MustCompile(`^\033([\[\]]([\d\?]+)?(;[\d\?]+)*)?(.)`)
+var vt100exc = regexp.MustCompile(`^\033(\[[^a-zA-Z0-9@\?]+|[\(\)]).`)
+
+func Clean(line string, color bool) string {
+	var edit = newLineEdit(len(line))
+	lineb := []byte(line)
+
+	hadColor := false
+	for i := 0; i < len(lineb); {
+		c := lineb[i]
+		switch c {
+		case '\b':
+			edit.Move(-1)
+		case '\033':
+			// set terminal title
+			if bytes.HasPrefix(lineb[i:], []byte("\x1b]0;")) {
+				pos := bytes.Index(lineb[i:], []byte("\a"))
+				if pos != -1 {
+					i += pos + 1
+					continue
+				}
+			}
+			if m := vt100exc.Find(lineb[i:]); m != nil {
+				i += len(m)
+			} else if m := vt100re.FindSubmatch(lineb[i:]); m != nil {
+				i += len(m[0])
+				num := string(m[2])
+				n, err := strconv.Atoi(num)
+				if err != nil || n > 10000 {
+					n = 1
+				}
+				switch m[4][0] {
+				case 'm':
+					if color {
+						hadColor = true
+						edit.Vt100(m[0])
+					}
+				case '@':
+					edit.Insert(bytes.Repeat([]byte{' '}, n))
+				case 'C':
+					edit.Move(n)
+				case 'D':
+					edit.Move(-n)
+				case 'P':
+					edit.Delete(n)
+				case 'K':
+					switch num {
+					case "", "0":
+						edit.ClearRight()
+					case "1":
+						edit.ClearLeft()
+					case "2":
+						edit.Clear()
+					}
+				}
+			} else {
+				i += 1
+			}
+			continue
+		default:
+			if c == '\n' || c >= ' ' {
+				edit.Write([]byte{c})
+			}
+		}
+		i += 1
+	}
+	out := edit.Bytes()
+	if hadColor {
+		out = append(out, []byte("\033[0m")...)
+	}
+	return string(out)
+}
diff --git a/automation/vendor/github.com/mattn/go-colorable/LICENSE b/automation/vendor/github.com/mattn/go-colorable/LICENSE
new file mode 100644
index 0000000..91b5cef
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-colorable/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/automation/vendor/github.com/mattn/go-colorable/README.md b/automation/vendor/github.com/mattn/go-colorable/README.md
new file mode 100644
index 0000000..e84226a
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-colorable/README.md
@@ -0,0 +1,43 @@
+# go-colorable
+
+Colorable writer for Windows.
+
+For example, most logger packages don't show colors on Windows. (I know this can
+be done with ansicon, but I don't want to rely on it.) This package makes it
+possible to handle ANSI color escape sequences on Windows.
+
+## Too Bad!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png)
+
+
+## So Good!
+
+![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png)
+
+## Usage
+
+```go
+logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true})
+logrus.SetOutput(colorable.NewColorableStdout())
+
+logrus.Info("succeeded")
+logrus.Warn("not correct")
+logrus.Error("something error")
+logrus.Fatal("panic")
+```
+
+You can compile the above code on non-Windows OSes as well.
+
+## Installation
+
+```
+$ go get github.com/mattn/go-colorable
+```
+
+# License
+
+MIT
+
+# Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/automation/vendor/github.com/mattn/go-colorable/colorable_others.go b/automation/vendor/github.com/mattn/go-colorable/colorable_others.go
new file mode 100644
index 0000000..a7fe19a
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-colorable/colorable_others.go
@@ -0,0 +1,27 @@
+// +build !windows
+
+package colorable
+
+import (
+	"io"
+	"os"
+)
+
+// NewColorable returns a new instance of Writer which handles escape sequences.
+func NewColorable(file *os.File) io.Writer {
+	if file == nil {
+		panic("nil passed instead of *os.File to NewColorable()")
+	}
+
+	return file
+}
+
+// NewColorableStdout returns a new instance of Writer which handles escape sequences for stdout.
+func NewColorableStdout() io.Writer {
+	return os.Stdout
+}
+
+// NewColorableStderr returns a new instance of Writer which handles escape sequences for stderr.
+func NewColorableStderr() io.Writer {
+	return os.Stderr
+}
diff --git a/automation/vendor/github.com/mattn/go-colorable/colorable_windows.go b/automation/vendor/github.com/mattn/go-colorable/colorable_windows.go
new file mode 100644
index 0000000..628ad90
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-colorable/colorable_windows.go
@@ -0,0 +1,820 @@
+package colorable
+
+import (
+	"bytes"
+	"io"
+	"math"
+	"os"
+	"strconv"
+	"strings"
+	"syscall"
+	"unsafe"
+
+	"github.com/mattn/go-isatty"
+)
+
+const (
+	foregroundBlue      = 0x1
+	foregroundGreen     = 0x2
+	foregroundRed       = 0x4
+	foregroundIntensity = 0x8
+	foregroundMask      = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity)
+	backgroundBlue      = 0x10
+	backgroundGreen     = 0x20
+	backgroundRed       = 0x40
+	backgroundIntensity = 0x80
+	backgroundMask      = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity)
+)
+
+type wchar uint16
+type short int16
+type dword uint32
+type word uint16
+
+type coord struct {
+	x short
+	y short
+}
+
+type smallRect struct {
+	left   short
+	top    short
+	right  short
+	bottom short
+}
+
+type consoleScreenBufferInfo struct {
+	size              coord
+	cursorPosition    coord
+	attributes        word
+	window            smallRect
+	maximumWindowSize coord
+}
+
+type consoleCursorInfo struct {
+	size    dword
+	visible int32
+}
+
+var (
+	kernel32                       = syscall.NewLazyDLL("kernel32.dll")
+	procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+	procSetConsoleTextAttribute    = kernel32.NewProc("SetConsoleTextAttribute")
+	procSetConsoleCursorPosition   = kernel32.NewProc("SetConsoleCursorPosition")
+	procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW")
+	procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute")
+	procGetConsoleCursorInfo       = kernel32.NewProc("GetConsoleCursorInfo")
+	procSetConsoleCursorInfo       = kernel32.NewProc("SetConsoleCursorInfo")
+)
+
+type Writer struct {
+	out     io.Writer
+	handle  syscall.Handle
+	lastbuf bytes.Buffer
+	oldattr word
+	oldpos  coord
+}
+
+// NewColorable returns a new instance of Writer which handles escape sequences from the given File.
+func NewColorable(file *os.File) io.Writer {
+	if file == nil {
+		panic("nil passed instead of *os.File to NewColorable()")
+	}
+
+	if isatty.IsTerminal(file.Fd()) {
+		var csbi consoleScreenBufferInfo
+		handle := syscall.Handle(file.Fd())
+		procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi)))
+		return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}}
+	} else {
+		return file
+	}
+}
+
+// NewColorableStdout returns a new instance of Writer which handles escape sequences for stdout.
+func NewColorableStdout() io.Writer {
+	return NewColorable(os.Stdout)
+}
+
+// NewColorableStderr returns a new instance of Writer which handles escape sequences for stderr.
+func NewColorableStderr() io.Writer {
+	return NewColorable(os.Stderr)
+}
+
+var color256 = map[int]int{
+	0:   0x000000,
+	1:   0x800000,
+	2:   0x008000,
+	3:   0x808000,
+	4:   0x000080,
+	5:   0x800080,
+	6:   0x008080,
+	7:   0xc0c0c0,
+	8:   0x808080,
+	9:   0xff0000,
+	10:  0x00ff00,
+	11:  0xffff00,
+	12:  0x0000ff,
+	13:  0xff00ff,
+	14:  0x00ffff,
+	15:  0xffffff,
+	16:  0x000000,
+	17:  0x00005f,
+	18:  0x000087,
+	19:  0x0000af,
+	20:  0x0000d7,
+	21:  0x0000ff,
+	22:  0x005f00,
+	23:  0x005f5f,
+	24:  0x005f87,
+	25:  0x005faf,
+	26:  0x005fd7,
+	27:  0x005fff,
+	28:  0x008700,
+	29:  0x00875f,
+	30:  0x008787,
+	31:  0x0087af,
+	32:  0x0087d7,
+	33:  0x0087ff,
+	34:  0x00af00,
+	35:  0x00af5f,
+	36:  0x00af87,
+	37:  0x00afaf,
+	38:  0x00afd7,
+	39:  0x00afff,
+	40:  0x00d700,
+	41:  0x00d75f,
+	42:  0x00d787,
+	43:  0x00d7af,
+	44:  0x00d7d7,
+	45:  0x00d7ff,
+	46:  0x00ff00,
+	47:  0x00ff5f,
+	48:  0x00ff87,
+	49:  0x00ffaf,
+	50:  0x00ffd7,
+	51:  0x00ffff,
+	52:  0x5f0000,
+	53:  0x5f005f,
+	54:  0x5f0087,
+	55:  0x5f00af,
+	56:  0x5f00d7,
+	57:  0x5f00ff,
+	58:  0x5f5f00,
+	59:  0x5f5f5f,
+	60:  0x5f5f87,
+	61:  0x5f5faf,
+	62:  0x5f5fd7,
+	63:  0x5f5fff,
+	64:  0x5f8700,
+	65:  0x5f875f,
+	66:  0x5f8787,
+	67:  0x5f87af,
+	68:  0x5f87d7,
+	69:  0x5f87ff,
+	70:  0x5faf00,
+	71:  0x5faf5f,
+	72:  0x5faf87,
+	73:  0x5fafaf,
+	74:  0x5fafd7,
+	75:  0x5fafff,
+	76:  0x5fd700,
+	77:  0x5fd75f,
+	78:  0x5fd787,
+	79:  0x5fd7af,
+	80:  0x5fd7d7,
+	81:  0x5fd7ff,
+	82:  0x5fff00,
+	83:  0x5fff5f,
+	84:  0x5fff87,
+	85:  0x5fffaf,
+	86:  0x5fffd7,
+	87:  0x5fffff,
+	88:  0x870000,
+	89:  0x87005f,
+	90:  0x870087,
+	91:  0x8700af,
+	92:  0x8700d7,
+	93:  0x8700ff,
+	94:  0x875f00,
+	95:  0x875f5f,
+	96:  0x875f87,
+	97:  0x875faf,
+	98:  0x875fd7,
+	99:  0x875fff,
+	100: 0x878700,
+	101: 0x87875f,
+	102: 0x878787,
+	103: 0x8787af,
+	104: 0x8787d7,
+	105: 0x8787ff,
+	106: 0x87af00,
+	107: 0x87af5f,
+	108: 0x87af87,
+	109: 0x87afaf,
+	110: 0x87afd7,
+	111: 0x87afff,
+	112: 0x87d700,
+	113: 0x87d75f,
+	114: 0x87d787,
+	115: 0x87d7af,
+	116: 0x87d7d7,
+	117: 0x87d7ff,
+	118: 0x87ff00,
+	119: 0x87ff5f,
+	120: 0x87ff87,
+	121: 0x87ffaf,
+	122: 0x87ffd7,
+	123: 0x87ffff,
+	124: 0xaf0000,
+	125: 0xaf005f,
+	126: 0xaf0087,
+	127: 0xaf00af,
+	128: 0xaf00d7,
+	129: 0xaf00ff,
+	130: 0xaf5f00,
+	131: 0xaf5f5f,
+	132: 0xaf5f87,
+	133: 0xaf5faf,
+	134: 0xaf5fd7,
+	135: 0xaf5fff,
+	136: 0xaf8700,
+	137: 0xaf875f,
+	138: 0xaf8787,
+	139: 0xaf87af,
+	140: 0xaf87d7,
+	141: 0xaf87ff,
+	142: 0xafaf00,
+	143: 0xafaf5f,
+	144: 0xafaf87,
+	145: 0xafafaf,
+	146: 0xafafd7,
+	147: 0xafafff,
+	148: 0xafd700,
+	149: 0xafd75f,
+	150: 0xafd787,
+	151: 0xafd7af,
+	152: 0xafd7d7,
+	153: 0xafd7ff,
+	154: 0xafff00,
+	155: 0xafff5f,
+	156: 0xafff87,
+	157: 0xafffaf,
+	158: 0xafffd7,
+	159: 0xafffff,
+	160: 0xd70000,
+	161: 0xd7005f,
+	162: 0xd70087,
+	163: 0xd700af,
+	164: 0xd700d7,
+	165: 0xd700ff,
+	166: 0xd75f00,
+	167: 0xd75f5f,
+	168: 0xd75f87,
+	169: 0xd75faf,
+	170: 0xd75fd7,
+	171: 0xd75fff,
+	172: 0xd78700,
+	173: 0xd7875f,
+	174: 0xd78787,
+	175: 0xd787af,
+	176: 0xd787d7,
+	177: 0xd787ff,
+	178: 0xd7af00,
+	179: 0xd7af5f,
+	180: 0xd7af87,
+	181: 0xd7afaf,
+	182: 0xd7afd7,
+	183: 0xd7afff,
+	184: 0xd7d700,
+	185: 0xd7d75f,
+	186: 0xd7d787,
+	187: 0xd7d7af,
+	188: 0xd7d7d7,
+	189: 0xd7d7ff,
+	190: 0xd7ff00,
+	191: 0xd7ff5f,
+	192: 0xd7ff87,
+	193: 0xd7ffaf,
+	194: 0xd7ffd7,
+	195: 0xd7ffff,
+	196: 0xff0000,
+	197: 0xff005f,
+	198: 0xff0087,
+	199: 0xff00af,
+	200: 0xff00d7,
+	201: 0xff00ff,
+	202: 0xff5f00,
+	203: 0xff5f5f,
+	204: 0xff5f87,
+	205: 0xff5faf,
+	206: 0xff5fd7,
+	207: 0xff5fff,
+	208: 0xff8700,
+	209: 0xff875f,
+	210: 0xff8787,
+	211: 0xff87af,
+	212: 0xff87d7,
+	213: 0xff87ff,
+	214: 0xffaf00,
+	215: 0xffaf5f,
+	216: 0xffaf87,
+	217: 0xffafaf,
+	218: 0xffafd7,
+	219: 0xffafff,
+	220: 0xffd700,
+	221: 0xffd75f,
+	222: 0xffd787,
+	223: 0xffd7af,
+	224: 0xffd7d7,
+	225: 0xffd7ff,
+	226: 0xffff00,
+	227: 0xffff5f,
+	228: 0xffff87,
+	229: 0xffffaf,
+	230: 0xffffd7,
+	231: 0xffffff,
+	232: 0x080808,
+	233: 0x121212,
+	234: 0x1c1c1c,
+	235: 0x262626,
+	236: 0x303030,
+	237: 0x3a3a3a,
+	238: 0x444444,
+	239: 0x4e4e4e,
+	240: 0x585858,
+	241: 0x626262,
+	242: 0x6c6c6c,
+	243: 0x767676,
+	244: 0x808080,
+	245: 0x8a8a8a,
+	246: 0x949494,
+	247: 0x9e9e9e,
+	248: 0xa8a8a8,
+	249: 0xb2b2b2,
+	250: 0xbcbcbc,
+	251: 0xc6c6c6,
+	252: 0xd0d0d0,
+	253: 0xdadada,
+	254: 0xe4e4e4,
+	255: 0xeeeeee,
+}
+
+// Write writes data to the console, interpreting ANSI escape sequences.
+func (w *Writer) Write(data []byte) (n int, err error) {
+	var csbi consoleScreenBufferInfo
+	procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+
+	er := bytes.NewReader(data)
+	var bw [1]byte
+loop:
+	for {
+		r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+		if r1 == 0 {
+			break loop
+		}
+
+		c1, err := er.ReadByte()
+		if err != nil {
+			break loop
+		}
+		if c1 != 0x1b {
+			bw[0] = c1
+			w.out.Write(bw[:])
+			continue
+		}
+		c2, err := er.ReadByte()
+		if err != nil {
+			w.lastbuf.WriteByte(c1)
+			break loop
+		}
+		if c2 != 0x5b {
+			w.lastbuf.WriteByte(c1)
+			w.lastbuf.WriteByte(c2)
+			continue
+		}
+
+		var buf bytes.Buffer
+		var m byte
+		for {
+			c, err := er.ReadByte()
+			if err != nil {
+				w.lastbuf.WriteByte(c1)
+				w.lastbuf.WriteByte(c2)
+				w.lastbuf.Write(buf.Bytes())
+				break loop
+			}
+			if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+				m = c
+				break
+			}
+			buf.Write([]byte(string(c)))
+		}
+
+		var csbi consoleScreenBufferInfo
+		switch m {
+		case 'A':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.y -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'B':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.y += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'C':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'D':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'E':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = 0
+			csbi.cursorPosition.y += short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'F':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = 0
+			csbi.cursorPosition.y -= short(n)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'G':
+			n, err = strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			csbi.cursorPosition.x = short(n - 1)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'H':
+			token := strings.Split(buf.String(), ";")
+			if len(token) != 2 {
+				continue
+			}
+			n1, err := strconv.Atoi(token[0])
+			if err != nil {
+				continue
+			}
+			n2, err := strconv.Atoi(token[1])
+			if err != nil {
+				continue
+			}
+			csbi.cursorPosition.x = short(n2 - 1)
+			csbi.cursorPosition.y = short(n1 - 1)
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition)))
+		case 'J':
+			n, err := strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			var cursor coord
+			switch n {
+			case 0:
+				cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+			case 1:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top}
+			case 2:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top}
+			}
+			var count, written dword
+			count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x)
+			procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+		case 'K':
+			n, err := strconv.Atoi(buf.String())
+			if err != nil {
+				continue
+			}
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			var cursor coord
+			switch n {
+			case 0:
+				cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y}
+			case 1:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+			case 2:
+				cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y}
+			}
+			var count, written dword
+			count = dword(csbi.size.x - csbi.cursorPosition.x)
+			procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+			procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written)))
+		case 'm':
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			attr := csbi.attributes
+			cs := buf.String()
+			if cs == "" {
+				procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr))
+				continue
+			}
+			token := strings.Split(cs, ";")
+			for i := 0; i < len(token); i++ {
+				ns := token[i]
+				if n, err = strconv.Atoi(ns); err == nil {
+					switch {
+					case n == 0 || n == 100:
+						attr = w.oldattr
+					case 1 <= n && n <= 5:
+						attr |= foregroundIntensity
+					case n == 7:
+						attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+					case n == 22 || n == 25:
+						attr |= foregroundIntensity
+					case n == 27:
+						attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4)
+					case 30 <= n && n <= 37:
+						attr &= backgroundMask
+						if (n-30)&1 != 0 {
+							attr |= foregroundRed
+						}
+						if (n-30)&2 != 0 {
+							attr |= foregroundGreen
+						}
+						if (n-30)&4 != 0 {
+							attr |= foregroundBlue
+						}
+					case n == 38: // set foreground color.
+						if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") {
+							if n256, err := strconv.Atoi(token[i+2]); err == nil {
+								if n256foreAttr == nil {
+									n256setup()
+								}
+								attr &= backgroundMask
+								attr |= n256foreAttr[n256]
+								i += 2
+							}
+						} else {
+							attr = attr & (w.oldattr & backgroundMask)
+						}
+					case n == 39: // reset foreground color.
+						attr &= backgroundMask
+						attr |= w.oldattr & foregroundMask
+					case 40 <= n && n <= 47:
+						attr &= foregroundMask
+						if (n-40)&1 != 0 {
+							attr |= backgroundRed
+						}
+						if (n-40)&2 != 0 {
+							attr |= backgroundGreen
+						}
+						if (n-40)&4 != 0 {
+							attr |= backgroundBlue
+						}
+					case n == 48: // set background color.
+						if i < len(token)-2 && token[i+1] == "5" {
+							if n256, err := strconv.Atoi(token[i+2]); err == nil {
+								if n256backAttr == nil {
+									n256setup()
+								}
+								attr &= foregroundMask
+								attr |= n256backAttr[n256]
+								i += 2
+							}
+						} else {
+							attr = attr & (w.oldattr & foregroundMask)
+						}
+					case n == 49: // reset background color.
+						attr &= foregroundMask
+						attr |= w.oldattr & backgroundMask
+					case 90 <= n && n <= 97:
+						attr = (attr & backgroundMask)
+						attr |= foregroundIntensity
+						if (n-90)&1 != 0 {
+							attr |= foregroundRed
+						}
+						if (n-90)&2 != 0 {
+							attr |= foregroundGreen
+						}
+						if (n-90)&4 != 0 {
+							attr |= foregroundBlue
+						}
+					case 100 <= n && n <= 107:
+						attr = (attr & foregroundMask)
+						attr |= backgroundIntensity
+						if (n-100)&1 != 0 {
+							attr |= backgroundRed
+						}
+						if (n-100)&2 != 0 {
+							attr |= backgroundGreen
+						}
+						if (n-100)&4 != 0 {
+							attr |= backgroundBlue
+						}
+					}
+					procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr))
+				}
+			}
+		case 'h':
+			cs := buf.String()
+			if cs == "?25" {
+				var ci consoleCursorInfo
+				procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+				ci.visible = 1
+				procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+			}
+		case 'l':
+			cs := buf.String()
+			if cs == "?25" {
+				var ci consoleCursorInfo
+				procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+				ci.visible = 0
+				procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci)))
+			}
+		case 's':
+			procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi)))
+			w.oldpos = csbi.cursorPosition
+		case 'u':
+			procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos)))
+		}
+	}
+	return len(data) - w.lastbuf.Len(), nil
+}
+
+type consoleColor struct {
+	rgb       int
+	red       bool
+	green     bool
+	blue      bool
+	intensity bool
+}
+
+func (c consoleColor) foregroundAttr() (attr word) {
+	if c.red {
+		attr |= foregroundRed
+	}
+	if c.green {
+		attr |= foregroundGreen
+	}
+	if c.blue {
+		attr |= foregroundBlue
+	}
+	if c.intensity {
+		attr |= foregroundIntensity
+	}
+	return
+}
+
+func (c consoleColor) backgroundAttr() (attr word) {
+	if c.red {
+		attr |= backgroundRed
+	}
+	if c.green {
+		attr |= backgroundGreen
+	}
+	if c.blue {
+		attr |= backgroundBlue
+	}
+	if c.intensity {
+		attr |= backgroundIntensity
+	}
+	return
+}
+
+var color16 = []consoleColor{
+	consoleColor{0x000000, false, false, false, false},
+	consoleColor{0x000080, false, false, true, false},
+	consoleColor{0x008000, false, true, false, false},
+	consoleColor{0x008080, false, true, true, false},
+	consoleColor{0x800000, true, false, false, false},
+	consoleColor{0x800080, true, false, true, false},
+	consoleColor{0x808000, true, true, false, false},
+	consoleColor{0xc0c0c0, true, true, true, false},
+	consoleColor{0x808080, false, false, false, true},
+	consoleColor{0x0000ff, false, false, true, true},
+	consoleColor{0x00ff00, false, true, false, true},
+	consoleColor{0x00ffff, false, true, true, true},
+	consoleColor{0xff0000, true, false, false, true},
+	consoleColor{0xff00ff, true, false, true, true},
+	consoleColor{0xffff00, true, true, false, true},
+	consoleColor{0xffffff, true, true, true, true},
+}
+
+type hsv struct {
+	h, s, v float32
+}
+
+func (a hsv) dist(b hsv) float32 {
+	dh := a.h - b.h
+	switch {
+	case dh > 0.5:
+		dh = 1 - dh
+	case dh < -0.5:
+		dh = -1 - dh
+	}
+	ds := a.s - b.s
+	dv := a.v - b.v
+	return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv)))
+}
+
+func toHSV(rgb int) hsv {
+	r, g, b := float32((rgb&0xFF0000)>>16)/256.0,
+		float32((rgb&0x00FF00)>>8)/256.0,
+		float32(rgb&0x0000FF)/256.0
+	min, max := minmax3f(r, g, b)
+	h := max - min
+	if h > 0 {
+		if max == r {
+			h = (g - b) / h
+			if h < 0 {
+				h += 6
+			}
+		} else if max == g {
+			h = 2 + (b-r)/h
+		} else {
+			h = 4 + (r-g)/h
+		}
+	}
+	h /= 6.0
+	s := max - min
+	if max != 0 {
+		s /= max
+	}
+	v := max
+	return hsv{h: h, s: s, v: v}
+}
+
+type hsvTable []hsv
+
+func toHSVTable(rgbTable []consoleColor) hsvTable {
+	t := make(hsvTable, len(rgbTable))
+	for i, c := range rgbTable {
+		t[i] = toHSV(c.rgb)
+	}
+	return t
+}
+
+func (t hsvTable) find(rgb int) consoleColor {
+	hsv := toHSV(rgb)
+	n := 7
+	l := float32(5.0)
+	for i, p := range t {
+		d := hsv.dist(p)
+		if d < l {
+			l, n = d, i
+		}
+	}
+	return color16[n]
+}
+
+func minmax3f(a, b, c float32) (min, max float32) {
+	if a < b {
+		if b < c {
+			return a, c
+		} else if a < c {
+			return a, b
+		} else {
+			return c, b
+		}
+	} else {
+		if a < c {
+			return b, c
+		} else if b < c {
+			return b, a
+		} else {
+			return c, a
+		}
+	}
+}
+
+var n256foreAttr []word
+var n256backAttr []word
+
+func n256setup() {
+	n256foreAttr = make([]word, 256)
+	n256backAttr = make([]word, 256)
+	t := toHSVTable(color16)
+	for i, rgb := range color256 {
+		c := t.find(rgb)
+		n256foreAttr[i] = c.foregroundAttr()
+		n256backAttr[i] = c.backgroundAttr()
+	}
+}
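
The table above drives a simple nearest-color quantization: `n256setup` converts each entry of the 256-color xterm palette to HSV and picks the closest of the 16 Windows console colors, caching the resulting foreground/background attributes. Below is a minimal standalone sketch of the same idea; `win16` and `nearest16` are hypothetical names for this illustration only, and it uses plain Euclidean RGB distance instead of the HSV distance the vendored writer uses.

```go
package main

import (
	"fmt"
	"math"
)

// win16 mirrors the 16 console RGB values from the color16 table above
// (a hypothetical standalone copy for this sketch).
var win16 = []int{
	0x000000, 0x000080, 0x008000, 0x008080,
	0x800000, 0x800080, 0x808000, 0xc0c0c0,
	0x808080, 0x0000ff, 0x00ff00, 0x00ffff,
	0xff0000, 0xff00ff, 0xffff00, 0xffffff,
}

// nearest16 returns the index of the console color closest to rgb.
// For brevity this uses Euclidean distance in RGB space; the vendored
// code converts both sides to HSV first, which handles hue wrap-around.
func nearest16(rgb int) int {
	best, bestDist := 0, math.MaxFloat64
	for i, c := range win16 {
		dr := float64((rgb>>16)&0xFF - (c>>16)&0xFF)
		dg := float64((rgb>>8)&0xFF - (c>>8)&0xFF)
		db := float64(rgb&0xFF - c&0xFF)
		if d := dr*dr + dg*dg + db*db; d < bestDist {
			best, bestDist = i, d
		}
	}
	return best
}

func main() {
	// xterm-256color index 208 is roughly 0xff8700 (orange); with this
	// metric it lands on entry 14 (bright yellow) of the 16-color table.
	fmt.Println(nearest16(0xff8700))
}
```

Swapping in the HSV metric from `toHSV`/`dist` would change only the distance computation; the lookup structure stays the same.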
diff --git a/automation/vendor/github.com/mattn/go-colorable/noncolorable.go b/automation/vendor/github.com/mattn/go-colorable/noncolorable.go

new file mode 100644
index 0000000..ca588c7
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-colorable/noncolorable.go
@@ -0,0 +1,61 @@
+package colorable
+
+import (
+	"bytes"
+	"io"
+)
+
+// NonColorable wraps a writer and removes ANSI escape sequences from anything written to it.
+type NonColorable struct {
+	out     io.Writer
+	lastbuf bytes.Buffer
+}
+
+// NewNonColorable returns a new io.Writer that strips ANSI escape sequences before writing to w.
+func NewNonColorable(w io.Writer) io.Writer {
+	return &NonColorable{out: w}
+}
+
+// Write writes data to the underlying writer, dropping any ANSI escape sequences.
+func (w *NonColorable) Write(data []byte) (n int, err error) {
+	er := bytes.NewReader(data)
+	var bw [1]byte
+loop:
+	for {
+		c1, err := er.ReadByte()
+		if err != nil {
+			break loop
+		}
+		if c1 != 0x1b {
+			bw[0] = c1
+			w.out.Write(bw[:])
+			continue
+		}
+		c2, err := er.ReadByte()
+		if err != nil {
+			w.lastbuf.WriteByte(c1)
+			break loop
+		}
+		if c2 != 0x5b {
+			w.lastbuf.WriteByte(c1)
+			w.lastbuf.WriteByte(c2)
+			continue
+		}
+
+		var buf bytes.Buffer
+		for {
+			c, err := er.ReadByte()
+			if err != nil {
+				w.lastbuf.WriteByte(c1)
+				w.lastbuf.WriteByte(c2)
+				w.lastbuf.Write(buf.Bytes())
+				break loop
+			}
+			if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' {
+				break
+			}
+			buf.WriteByte(c)
+		}
+	}
+	return len(data) - w.lastbuf.Len(), nil
+}
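
A quick usage sketch (assuming the vendored import path resolves as laid out above): wrapping any `io.Writer` with `NewNonColorable` forwards plain text and silently drops ANSI escape sequences.

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/mattn/go-colorable"
)

func main() {
	var out bytes.Buffer
	w := colorable.NewNonColorable(&out)

	// "\x1b[31m" and "\x1b[0m" are the red and reset sequences; the
	// wrapper forwards only the plain text in between.
	fmt.Fprint(w, "\x1b[31mred text\x1b[0m\n")
	fmt.Print(out.String()) // prints: red text
}
```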
diff --git a/automation/vendor/github.com/mattn/go-isatty/LICENSE b/automation/vendor/github.com/mattn/go-isatty/LICENSE
new file mode 100644
index 0000000..65dc692
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/LICENSE
@@ -0,0 +1,9 @@
+Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
+
+MIT License (Expat)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/automation/vendor/github.com/mattn/go-isatty/README.md b/automation/vendor/github.com/mattn/go-isatty/README.md
new file mode 100644
index 0000000..74845de
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/README.md
@@ -0,0 +1,37 @@
+# go-isatty
+
+isatty for golang
+
+## Usage
+
+```go
+package main
+
+import (
+	"fmt"
+	"github.com/mattn/go-isatty"
+	"os"
+)
+
+func main() {
+	if isatty.IsTerminal(os.Stdout.Fd()) {
+		fmt.Println("Is Terminal")
+	} else {
+		fmt.Println("Is Not Terminal")
+	}
+}
+```
+
+## Installation
+
+```
+$ go get github.com/mattn/go-isatty
+```
+
+## License
+
+MIT
+
+## Author
+
+Yasuhiro Matsumoto (a.k.a mattn)
diff --git a/automation/vendor/github.com/mattn/go-isatty/doc.go b/automation/vendor/github.com/mattn/go-isatty/doc.go
new file mode 100644
index 0000000..17d4f90
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/doc.go
@@ -0,0 +1,2 @@
+// Package isatty implements an interface to isatty.
+package isatty
diff --git a/automation/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/automation/vendor/github.com/mattn/go-isatty/isatty_appengine.go
new file mode 100644
index 0000000..83c5887
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/isatty_appengine.go
@@ -0,0 +1,9 @@
+// +build appengine
+
+package isatty
+
+// IsTerminal returns true if the file descriptor is a terminal, which
+// is always false on App Engine classic, a sandboxed PaaS.
+func IsTerminal(fd uintptr) bool {
+	return false
+}
diff --git a/automation/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/automation/vendor/github.com/mattn/go-isatty/isatty_bsd.go
new file mode 100644
index 0000000..42f2514
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/isatty_bsd.go
@@ -0,0 +1,18 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+// IsTerminal returns true if the file descriptor refers to a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/automation/vendor/github.com/mattn/go-isatty/isatty_linux.go b/automation/vendor/github.com/mattn/go-isatty/isatty_linux.go
new file mode 100644
index 0000000..9d24bac
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/isatty_linux.go
@@ -0,0 +1,18 @@
+// +build linux
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+const ioctlReadTermios = syscall.TCGETS
+
+// IsTerminal returns true if the file descriptor refers to a terminal.
+func IsTerminal(fd uintptr) bool {
+	var termios syscall.Termios
+	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+	return err == 0
+}
diff --git a/automation/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/automation/vendor/github.com/mattn/go-isatty/isatty_solaris.go
new file mode 100644
index 0000000..1f0c6bf
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/isatty_solaris.go
@@ -0,0 +1,16 @@
+// +build solaris
+// +build !appengine
+
+package isatty
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c
+func IsTerminal(fd uintptr) bool {
+	var termio unix.Termio
+	err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio)
+	return err == nil
+}
diff --git a/automation/vendor/github.com/mattn/go-isatty/isatty_windows.go b/automation/vendor/github.com/mattn/go-isatty/isatty_windows.go
new file mode 100644
index 0000000..83c398b
--- /dev/null
+++ b/automation/vendor/github.com/mattn/go-isatty/isatty_windows.go
@@ -0,0 +1,19 @@
+// +build windows
+// +build !appengine
+
+package isatty
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+var procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+
+// IsTerminal returns true if the file descriptor refers to a terminal.
+func IsTerminal(fd uintptr) bool {
+	var st uint32
+	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0)
+	return r != 0 && e == 0
+}
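
Taken together, the two vendored packages are typically combined along these lines (this pairing is an illustration, not something this change wires up itself): probe the descriptor with `isatty.IsTerminal` and, when output is not a TTY, wrap it with `NewNonColorable` so escape sequences never reach files or log collectors.

```go
package main

import (
	"fmt"
	"io"
	"os"

	"github.com/mattn/go-colorable"
	"github.com/mattn/go-isatty"
)

func main() {
	var out io.Writer = os.Stdout
	if !isatty.IsTerminal(os.Stdout.Fd()) {
		// stdout is piped or redirected: strip ANSI escape sequences
		// instead of leaking them into a file or log collector.
		out = colorable.NewNonColorable(os.Stdout)
	}
	fmt.Fprint(out, "\x1b[32mok\x1b[0m\n")
}
```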