VOL-2496 Add "event listen" command to voltctl

Change-Id: I8f1fb34b55f56c8125142ac289e2f19fc170d804
diff --git a/vendor/github.com/rcrowley/go-metrics/.gitignore b/vendor/github.com/rcrowley/go-metrics/.gitignore
new file mode 100644
index 0000000..83c8f82
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.gitignore
@@ -0,0 +1,9 @@
+*.[68]
+*.a
+*.out
+*.swp
+_obj
+_testmain.go
+cmd/metrics-bench/metrics-bench
+cmd/metrics-example/metrics-example
+cmd/never-read/never-read
diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml
new file mode 100644
index 0000000..117763e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/.travis.yml
@@ -0,0 +1,17 @@
+language: go
+
+go:
+    - 1.3
+    - 1.4
+    - 1.5
+    - 1.6
+    - 1.7
+    - 1.8
+    - 1.9
+
+script:
+    - ./validate.sh
+
+# this should give us faster builds according to 
+# http://docs.travis-ci.com/user/migrating-from-legacy/
+sudo: false
diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE
new file mode 100644
index 0000000..363fa9e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/LICENSE
@@ -0,0 +1,29 @@
+Copyright 2012 Richard Crowley. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    1.  Redistributions of source code must retain the above copyright
+        notice, this list of conditions and the following disclaimer.
+
+    2.  Redistributions in binary form must reproduce the above
+        copyright notice, this list of conditions and the following
+        disclaimer in the documentation and/or other materials provided
+        with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation
+are those of the authors and should not be interpreted as representing
+official policies, either expressed or implied, of Richard Crowley.
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
new file mode 100644
index 0000000..b7356b5
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/README.md
@@ -0,0 +1,168 @@
+go-metrics
+==========
+
+![travis build status](https://travis-ci.org/rcrowley/go-metrics.svg?branch=master)
+
+Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.
+
+Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
+
+Usage
+-----
+
+Create and update metrics:
+
+```go
+c := metrics.NewCounter()
+metrics.Register("foo", c)
+c.Inc(47)
+
+g := metrics.NewGauge()
+metrics.Register("bar", g)
+g.Update(47)
+
+r := metrics.NewRegistry()
+fg := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
+
+s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
+h := metrics.NewHistogram(s)
+metrics.Register("baz", h)
+h.Update(47)
+
+m := metrics.NewMeter()
+metrics.Register("quux", m)
+m.Mark(47)
+
+t := metrics.NewTimer()
+metrics.Register("bang", t)
+t.Time(func() {})
+t.Update(47)
+```
+
+Register() is not thread-safe. For thread-safe metric registration use
+GetOrRegister:
+
+```go
+t := metrics.GetOrRegisterTimer("account.create.latency", nil)
+t.Time(func() {})
+t.Update(47)
+```
+
+**NOTE:** Be sure to unregister short-lived meters and timers, otherwise they
+will leak memory:
+
+```go
+// Will call Stop() on the Meter to allow for garbage collection
+metrics.Unregister("quux")
+// Or similarly for a Timer that embeds a Meter
+metrics.Unregister("bang")
+```
+
+Periodically log every metric in human-readable form to standard error:
+
+```go
+go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
+```
+
+Periodically log every metric in slightly-more-parseable form to syslog:
+
+```go
+w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
+go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
+```
+
+Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):
+
+```go
+
+import "github.com/cyberdelia/go-metrics-graphite"
+
+addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
+go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
+```
+
+Periodically emit every metric into InfluxDB:
+
+**NOTE:** This has been pulled out of the library due to constant fluctuations
+in the InfluxDB API. In fact, all client libraries are on their way out. See
+issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
+[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
+
+```go
+import "github.com/vrischmann/go-metrics-influxdb"
+
+go influxdb.InfluxDB(metrics.DefaultRegistry,
+  10e9, 
+  "127.0.0.1:8086", 
+  "database-name", 
+  "username", 
+  "password"
+)
+```
+
+Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
+
+**Note**: the client included with this repository under the `librato` package
+has been deprecated and moved to the repository linked above.
+
+```go
+import "github.com/mihasya/go-metrics-librato"
+
+go librato.Librato(metrics.DefaultRegistry,
+    10e9,                  // interval
+    "example@example.com", // account owner email address
+    "token",               // Librato API token
+    "hostname",            // source
+    []float64{0.95},       // percentiles to send
+    time.Millisecond,      // time unit
+)
+```
+
+Periodically emit every metric to StatHat:
+
+```go
+import "github.com/rcrowley/go-metrics/stathat"
+
+go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
+```
+
+Maintain all metrics along with expvars at `/debug/metrics`:
+
+This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/)
+but is exposed under `/debug/metrics`, which shows a JSON representation of all your
+usual expvars as well as all your go-metrics.
+
+
+```go
+import "github.com/rcrowley/go-metrics/exp"
+
+exp.Exp(metrics.DefaultRegistry)
+```
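+
+`exp.Exp` only registers the HTTP handler. As a minimal sketch (assuming the handler
+lands on `http.DefaultServeMux` and that port `:8080` is free to use), you still need
+to start an HTTP server yourself before `/debug/metrics` becomes reachable:
+
+```go
+package main
+
+import (
+	"log"
+	"net/http"
+
+	"github.com/rcrowley/go-metrics"
+	"github.com/rcrowley/go-metrics/exp"
+)
+
+func main() {
+	// Expose the default registry (plus the usual expvars) at /debug/metrics.
+	exp.Exp(metrics.DefaultRegistry)
+
+	// Serve the default mux; then visit http://localhost:8080/debug/metrics.
+	log.Fatal(http.ListenAndServe(":8080", nil))
+}
+```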
+
+Installation
+------------
+
+```sh
+go get github.com/rcrowley/go-metrics
+```
+
+StatHat support additionally requires their Go client:
+
+```sh
+go get github.com/stathat/go
+```
+
+Publishing Metrics
+------------------
+
+Clients are available for the following destinations:
+
+* Librato - https://github.com/mihasya/go-metrics-librato
+* Graphite - https://github.com/cyberdelia/go-metrics-graphite
+* InfluxDB - https://github.com/vrischmann/go-metrics-influxdb
+* Ganglia - https://github.com/appscode/metlia
+* Prometheus - https://github.com/deathowl/go-metrics-prometheus
+* DataDog - https://github.com/syntaqx/go-metrics-datadog
+* SignalFX - https://github.com/pascallouisperez/go-metrics-signalfx
+* Honeycomb - https://github.com/getspine/go-metrics-honeycomb
+* Wavefront - https://github.com/wavefrontHQ/go-metrics-wavefront
diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go
new file mode 100644
index 0000000..bb7b039
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/counter.go
@@ -0,0 +1,112 @@
+package metrics
+
+import "sync/atomic"
+
+// Counters hold an int64 value that can be incremented and decremented.
+type Counter interface {
+	Clear()
+	Count() int64
+	Dec(int64)
+	Inc(int64)
+	Snapshot() Counter
+}
+
+// GetOrRegisterCounter returns an existing Counter or constructs and registers
+// a new StandardCounter.
+func GetOrRegisterCounter(name string, r Registry) Counter {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewCounter).(Counter)
+}
+
+// NewCounter constructs a new StandardCounter.
+func NewCounter() Counter {
+	if UseNilMetrics {
+		return NilCounter{}
+	}
+	return &StandardCounter{0}
+}
+
+// NewRegisteredCounter constructs and registers a new StandardCounter.
+func NewRegisteredCounter(name string, r Registry) Counter {
+	c := NewCounter()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// CounterSnapshot is a read-only copy of another Counter.
+type CounterSnapshot int64
+
+// Clear panics.
+func (CounterSnapshot) Clear() {
+	panic("Clear called on a CounterSnapshot")
+}
+
+// Count returns the count at the time the snapshot was taken.
+func (c CounterSnapshot) Count() int64 { return int64(c) }
+
+// Dec panics.
+func (CounterSnapshot) Dec(int64) {
+	panic("Dec called on a CounterSnapshot")
+}
+
+// Inc panics.
+func (CounterSnapshot) Inc(int64) {
+	panic("Inc called on a CounterSnapshot")
+}
+
+// Snapshot returns the snapshot.
+func (c CounterSnapshot) Snapshot() Counter { return c }
+
+// NilCounter is a no-op Counter.
+type NilCounter struct{}
+
+// Clear is a no-op.
+func (NilCounter) Clear() {}
+
+// Count is a no-op.
+func (NilCounter) Count() int64 { return 0 }
+
+// Dec is a no-op.
+func (NilCounter) Dec(i int64) {}
+
+// Inc is a no-op.
+func (NilCounter) Inc(i int64) {}
+
+// Snapshot is a no-op.
+func (NilCounter) Snapshot() Counter { return NilCounter{} }
+
+// StandardCounter is the standard implementation of a Counter and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardCounter struct {
+	count int64
+}
+
+// Clear sets the counter to zero.
+func (c *StandardCounter) Clear() {
+	atomic.StoreInt64(&c.count, 0)
+}
+
+// Count returns the current count.
+func (c *StandardCounter) Count() int64 {
+	return atomic.LoadInt64(&c.count)
+}
+
+// Dec decrements the counter by the given amount.
+func (c *StandardCounter) Dec(i int64) {
+	atomic.AddInt64(&c.count, -i)
+}
+
+// Inc increments the counter by the given amount.
+func (c *StandardCounter) Inc(i int64) {
+	atomic.AddInt64(&c.count, i)
+}
+
+// Snapshot returns a read-only copy of the counter.
+func (c *StandardCounter) Snapshot() Counter {
+	return CounterSnapshot(c.Count())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go
new file mode 100644
index 0000000..043ccef
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/debug.go
@@ -0,0 +1,76 @@
+package metrics
+
+import (
+	"runtime/debug"
+	"time"
+)
+
+var (
+	debugMetrics struct {
+		GCStats struct {
+			LastGC Gauge
+			NumGC  Gauge
+			Pause  Histogram
+			//PauseQuantiles Histogram
+			PauseTotal Gauge
+		}
+		ReadGCStats Timer
+	}
+	gcStats debug.GCStats
+)
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats.  This is designed to be called as a goroutine.
+func CaptureDebugGCStats(r Registry, d time.Duration) {
+	for _ = range time.Tick(d) {
+		CaptureDebugGCStatsOnce(r)
+	}
+}
+
+// Capture new values for the Go garbage collector statistics exported in
+// debug.GCStats.  This is designed to be called in a background goroutine.
+// Giving a registry which has not been given to RegisterDebugGCStats will
+// panic.
+//
+// Be careful (but much less so) with this because debug.ReadGCStats calls
+// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world
+// operation, isn't something you want to be doing all the time.
+func CaptureDebugGCStatsOnce(r Registry) {
+	lastGC := gcStats.LastGC
+	t := time.Now()
+	debug.ReadGCStats(&gcStats)
+	debugMetrics.ReadGCStats.UpdateSince(t)
+
+	debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
+	debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
+	if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
+		debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
+	}
+	//debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
+	debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
+}
+
+// Register metrics for the Go garbage collector statistics exported in
+// debug.GCStats.  The metrics are named by their fully-qualified Go symbols,
+// e.g. debug.GCStats.PauseTotal.
+func RegisterDebugGCStats(r Registry) {
+	debugMetrics.GCStats.LastGC = NewGauge()
+	debugMetrics.GCStats.NumGC = NewGauge()
+	debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
+	//debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
+	debugMetrics.GCStats.PauseTotal = NewGauge()
+	debugMetrics.ReadGCStats = NewTimer()
+
+	r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
+	r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
+	r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
+	//r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
+	r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
+	r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
+}
+
+// Allocate an initial slice for gcStats.Pause to avoid allocations during
+// normal operation.
+func init() {
+	gcStats.Pause = make([]time.Duration, 11)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go
new file mode 100644
index 0000000..a8183dd
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/ewma.go
@@ -0,0 +1,138 @@
+package metrics
+
+import (
+	"math"
+	"sync"
+	"sync/atomic"
+)
+
+// EWMAs continuously calculate an exponentially-weighted moving average
+// based on an outside source of clock ticks.
+type EWMA interface {
+	Rate() float64
+	Snapshot() EWMA
+	Tick()
+	Update(int64)
+}
+
+// NewEWMA constructs a new EWMA with the given alpha.
+func NewEWMA(alpha float64) EWMA {
+	if UseNilMetrics {
+		return NilEWMA{}
+	}
+	return &StandardEWMA{alpha: alpha}
+}
+
+// NewEWMA1 constructs a new EWMA for a one-minute moving average.
+func NewEWMA1() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/1))
+}
+
+// NewEWMA5 constructs a new EWMA for a five-minute moving average.
+func NewEWMA5() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/5))
+}
+
+// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
+func NewEWMA15() EWMA {
+	return NewEWMA(1 - math.Exp(-5.0/60.0/15))
+}
+
+// EWMASnapshot is a read-only copy of another EWMA.
+type EWMASnapshot float64
+
+// Rate returns the rate of events per second at the time the snapshot was
+// taken.
+func (a EWMASnapshot) Rate() float64 { return float64(a) }
+
+// Snapshot returns the snapshot.
+func (a EWMASnapshot) Snapshot() EWMA { return a }
+
+// Tick panics.
+func (EWMASnapshot) Tick() {
+	panic("Tick called on an EWMASnapshot")
+}
+
+// Update panics.
+func (EWMASnapshot) Update(int64) {
+	panic("Update called on an EWMASnapshot")
+}
+
+// NilEWMA is a no-op EWMA.
+type NilEWMA struct{}
+
+// Rate is a no-op.
+func (NilEWMA) Rate() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
+
+// Tick is a no-op.
+func (NilEWMA) Tick() {}
+
+// Update is a no-op.
+func (NilEWMA) Update(n int64) {}
+
+// StandardEWMA is the standard implementation of an EWMA and tracks the number
+// of uncounted events and processes them on each tick.  It uses the
+// sync/atomic package to manage uncounted events.
+type StandardEWMA struct {
+	uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
+	alpha     float64
+	rate      uint64
+	init      uint32
+	mutex     sync.Mutex
+}
+
+// Rate returns the moving average rate of events per second.
+func (a *StandardEWMA) Rate() float64 {
+	currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate)) * float64(1e9)
+	return currentRate
+}
+
+// Snapshot returns a read-only copy of the EWMA.
+func (a *StandardEWMA) Snapshot() EWMA {
+	return EWMASnapshot(a.Rate())
+}
+
+// Tick ticks the clock to update the moving average.  It assumes it is called
+// every five seconds.
+func (a *StandardEWMA) Tick() {
+	// Optimization to avoid mutex locking in the hot-path.
+	if atomic.LoadUint32(&a.init) == 1 {
+		a.updateRate(a.fetchInstantRate())
+	} else {
+		// Slow-path: this is only needed on the first Tick() and preserves transactional updating
+		// of init and rate in the else block. The first conditional is needed below because
+		// a different thread could have set a.init = 1 between the time of the first atomic load and when
+		// the lock was acquired.
+		a.mutex.Lock()
+		if atomic.LoadUint32(&a.init) == 1 {
+			// The fetchInstantRate() uses atomic loading, which is unnecessary in this critical section
+			// but again, this section is only invoked on the first successful Tick() operation.
+			a.updateRate(a.fetchInstantRate())
+		} else {
+			atomic.StoreUint32(&a.init, 1)
+			atomic.StoreUint64(&a.rate, math.Float64bits(a.fetchInstantRate()))
+		}
+		a.mutex.Unlock()
+	}
+}
+
+func (a *StandardEWMA) fetchInstantRate() float64 {
+	count := atomic.LoadInt64(&a.uncounted)
+	atomic.AddInt64(&a.uncounted, -count)
+	instantRate := float64(count) / float64(5e9)
+	return instantRate
+}
+
+func (a *StandardEWMA) updateRate(instantRate float64) {
+	currentRate := math.Float64frombits(atomic.LoadUint64(&a.rate))
+	currentRate += a.alpha * (instantRate - currentRate)
+	atomic.StoreUint64(&a.rate, math.Float64bits(currentRate))
+}
+
+// Update adds n uncounted events.
+func (a *StandardEWMA) Update(n int64) {
+	atomic.AddInt64(&a.uncounted, n)
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go
new file mode 100644
index 0000000..cb57a93
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge.go
@@ -0,0 +1,120 @@
+package metrics
+
+import "sync/atomic"
+
+// Gauges hold an int64 value that can be set arbitrarily.
+type Gauge interface {
+	Snapshot() Gauge
+	Update(int64)
+	Value() int64
+}
+
+// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
+// new StandardGauge.
+func GetOrRegisterGauge(name string, r Registry) Gauge {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewGauge).(Gauge)
+}
+
+// NewGauge constructs a new StandardGauge.
+func NewGauge() Gauge {
+	if UseNilMetrics {
+		return NilGauge{}
+	}
+	return &StandardGauge{0}
+}
+
+// NewRegisteredGauge constructs and registers a new StandardGauge.
+func NewRegisteredGauge(name string, r Registry) Gauge {
+	c := NewGauge()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewFunctionalGauge constructs a new FunctionalGauge.
+func NewFunctionalGauge(f func() int64) Gauge {
+	if UseNilMetrics {
+		return NilGauge{}
+	}
+	return &FunctionalGauge{value: f}
+}
+
+// NewRegisteredFunctionalGauge constructs and registers a new FunctionalGauge.
+func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
+	c := NewFunctionalGauge(f)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// GaugeSnapshot is a read-only copy of another Gauge.
+type GaugeSnapshot int64
+
+// Snapshot returns the snapshot.
+func (g GaugeSnapshot) Snapshot() Gauge { return g }
+
+// Update panics.
+func (GaugeSnapshot) Update(int64) {
+	panic("Update called on a GaugeSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeSnapshot) Value() int64 { return int64(g) }
+
+// NilGauge is a no-op Gauge.
+type NilGauge struct{}
+
+// Snapshot is a no-op.
+func (NilGauge) Snapshot() Gauge { return NilGauge{} }
+
+// Update is a no-op.
+func (NilGauge) Update(v int64) {}
+
+// Value is a no-op.
+func (NilGauge) Value() int64 { return 0 }
+
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardGauge struct {
+	value int64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Gauge {
+	return GaugeSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGauge) Update(v int64) {
+	atomic.StoreInt64(&g.value, v)
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGauge) Value() int64 {
+	return atomic.LoadInt64(&g.value)
+}
+
+// FunctionalGauge returns value from given function
+type FunctionalGauge struct {
+	value func() int64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGauge) Value() int64 {
+	return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGauge) Update(int64) {
+	panic("Update called on a FunctionalGauge")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
new file mode 100644
index 0000000..3962e6d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
@@ -0,0 +1,125 @@
+package metrics
+
+import (
+	"math"
+	"sync/atomic"
+)
+
+// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64 interface {
+	Snapshot() GaugeFloat64
+	Update(float64)
+	Value() float64
+}
+
+// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
+// new StandardGaugeFloat64.
+func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
+}
+
+// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
+func NewGaugeFloat64() GaugeFloat64 {
+	if UseNilMetrics {
+		return NilGaugeFloat64{}
+	}
+	return &StandardGaugeFloat64{
+		value: 0.0,
+	}
+}
+
+// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
+func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
+	c := NewGaugeFloat64()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64.
+func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
+	if UseNilMetrics {
+		return NilGaugeFloat64{}
+	}
+	return &FunctionalGaugeFloat64{value: f}
+}
+
+// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new FunctionalGaugeFloat64.
+func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
+	c := NewFunctionalGaugeFloat64(f)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type GaugeFloat64Snapshot float64
+
+// Snapshot returns the snapshot.
+func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
+
+// Update panics.
+func (GaugeFloat64Snapshot) Update(float64) {
+	panic("Update called on a GaugeFloat64Snapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+
+// NilGaugeFloat64 is a no-op GaugeFloat64.
+type NilGaugeFloat64 struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
+
+// Update is a no-op.
+func (NilGaugeFloat64) Update(v float64) {}
+
+// Value is a no-op.
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
+
+// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
+// the sync/atomic package to manage a single float64 value (stored as its uint64 bit pattern).
+type StandardGaugeFloat64 struct {
+	value uint64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
+	return GaugeFloat64Snapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeFloat64) Update(v float64) {
+	atomic.StoreUint64(&g.value, math.Float64bits(v))
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeFloat64) Value() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&g.value))
+}
+
+// FunctionalGaugeFloat64 returns value from given function
+type FunctionalGaugeFloat64 struct {
+	value func() float64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGaugeFloat64) Value() float64 {
+	return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGaugeFloat64) Update(float64) {
+	panic("Update called on a FunctionalGaugeFloat64")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go
new file mode 100644
index 0000000..abd0a7d
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/graphite.go
@@ -0,0 +1,113 @@
+package metrics
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// GraphiteConfig provides a container with configuration parameters for
+// the Graphite exporter
+type GraphiteConfig struct {
+	Addr          *net.TCPAddr  // Network address to connect to
+	Registry      Registry      // Registry to be exported
+	FlushInterval time.Duration // Flush interval
+	DurationUnit  time.Duration // Time conversion unit for durations
+	Prefix        string        // Prefix to be prepended to metric names
+	Percentiles   []float64     // Percentiles to export from timers and histograms
+}
+
+// Graphite is a blocking exporter function which reports metrics in r
+// to a graphite server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+	GraphiteWithConfig(GraphiteConfig{
+		Addr:          addr,
+		Registry:      r,
+		FlushInterval: d,
+		DurationUnit:  time.Nanosecond,
+		Prefix:        prefix,
+		Percentiles:   []float64{0.5, 0.75, 0.95, 0.99, 0.999},
+	})
+}
+
+// GraphiteWithConfig is a blocking exporter function just like Graphite,
+// but it takes a GraphiteConfig instead.
+func GraphiteWithConfig(c GraphiteConfig) {
+	log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+	for _ = range time.Tick(c.FlushInterval) {
+		if err := graphite(&c); nil != err {
+			log.Println(err)
+		}
+	}
+}
+
+// GraphiteOnce performs a single submission to Graphite, returning a
+// non-nil error on failed connections. This can be used in a loop
+// similar to GraphiteWithConfig for custom error handling.
+func GraphiteOnce(c GraphiteConfig) error {
+	log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
+	return graphite(&c)
+}
+
+func graphite(c *GraphiteConfig) error {
+	now := time.Now().Unix()
+	du := float64(c.DurationUnit)
+	conn, err := net.DialTCP("tcp", nil, c.Addr)
+	if nil != err {
+		return err
+	}
+	defer conn.Close()
+	w := bufio.NewWriter(conn)
+	c.Registry.Each(func(name string, i interface{}) {
+		switch metric := i.(type) {
+		case Counter:
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
+		case Gauge:
+			fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
+		case GaugeFloat64:
+			fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles(c.Percentiles)
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
+			fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
+			fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
+			fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
+			for psIdx, psKey := range c.Percentiles {
+				key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+				fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+			}
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
+			fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
+			fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
+			fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles(c.Percentiles)
+			fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
+			fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
+			fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
+			fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
+			fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
+			for psIdx, psKey := range c.Percentiles {
+				key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
+				fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
+			}
+			fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
+			fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
+			fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
+			fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
+		}
+		w.Flush()
+	})
+	return nil
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
new file mode 100644
index 0000000..445131c
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
@@ -0,0 +1,61 @@
+package metrics
+
+// Healthchecks hold an error value describing an arbitrary up/down status.
+type Healthcheck interface {
+	Check()
+	Error() error
+	Healthy()
+	Unhealthy(error)
+}
+
+// NewHealthcheck constructs a new Healthcheck which will use the given
+// function to update its status.
+func NewHealthcheck(f func(Healthcheck)) Healthcheck {
+	if UseNilMetrics {
+		return NilHealthcheck{}
+	}
+	return &StandardHealthcheck{nil, f}
+}
+
+// NilHealthcheck is a no-op.
+type NilHealthcheck struct{}
+
+// Check is a no-op.
+func (NilHealthcheck) Check() {}
+
+// Error is a no-op.
+func (NilHealthcheck) Error() error { return nil }
+
+// Healthy is a no-op.
+func (NilHealthcheck) Healthy() {}
+
+// Unhealthy is a no-op.
+func (NilHealthcheck) Unhealthy(error) {}
+
+// StandardHealthcheck is the standard implementation of a Healthcheck and
+// stores the status and a function to call to update the status.
+type StandardHealthcheck struct {
+	err error
+	f   func(Healthcheck)
+}
+
+// Check runs the healthcheck function to update the healthcheck's status.
+func (h *StandardHealthcheck) Check() {
+	h.f(h)
+}
+
+// Error returns the healthcheck's status, which will be nil if it is healthy.
+func (h *StandardHealthcheck) Error() error {
+	return h.err
+}
+
+// Healthy marks the healthcheck as healthy.
+func (h *StandardHealthcheck) Healthy() {
+	h.err = nil
+}
+
+// Unhealthy marks the healthcheck as unhealthy.  The error is stored and
+// may be retrieved by the Error method.
+func (h *StandardHealthcheck) Unhealthy(err error) {
+	h.err = err
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go
new file mode 100644
index 0000000..dbc837f
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/histogram.go
@@ -0,0 +1,202 @@
+package metrics
+
+// Histograms calculate distribution statistics from a series of int64 values.
+type Histogram interface {
+	Clear()
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Sample() Sample
+	Snapshot() Histogram
+	StdDev() float64
+	Sum() int64
+	Update(int64)
+	Variance() float64
+}
+
+// GetOrRegisterHistogram returns an existing Histogram or constructs and
+// registers a new StandardHistogram.
+func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
+}
+
+// NewHistogram constructs a new StandardHistogram from a Sample.
+func NewHistogram(s Sample) Histogram {
+	if UseNilMetrics {
+		return NilHistogram{}
+	}
+	return &StandardHistogram{sample: s}
+}
+
+// NewRegisteredHistogram constructs and registers a new StandardHistogram from
+// a Sample.
+func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
+	c := NewHistogram(s)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// HistogramSnapshot is a read-only copy of another Histogram.
+type HistogramSnapshot struct {
+	sample *SampleSnapshot
+}
+
+// Clear panics.
+func (*HistogramSnapshot) Clear() {
+	panic("Clear called on a HistogramSnapshot")
+}
+
+// Count returns the number of samples recorded at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample at the time the snapshot
+// was taken.
+func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample at the time the snapshot was
+// taken.
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) Percentile(p float64) float64 {
+	return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the sample
+// at the time the snapshot was taken.
+func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
+	return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *HistogramSnapshot) Sample() Sample { return h.sample }
+
+// Snapshot returns the snapshot.
+func (h *HistogramSnapshot) Snapshot() Histogram { return h }
+
+// StdDev returns the standard deviation of the values in the sample at the
+// time the snapshot was taken.
+func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample at the time the snapshot was taken.
+func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
+
+// Update panics.
+func (*HistogramSnapshot) Update(int64) {
+	panic("Update called on a HistogramSnapshot")
+}
+
+// Variance returns the variance of inputs at the time the snapshot was taken.
+func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
+
+// NilHistogram is a no-op Histogram.
+type NilHistogram struct{}
+
+// Clear is a no-op.
+func (NilHistogram) Clear() {}
+
+// Count is a no-op.
+func (NilHistogram) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilHistogram) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilHistogram) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilHistogram) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilHistogram) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Sample is a no-op.
+func (NilHistogram) Sample() Sample { return NilSample{} }
+
+// Snapshot is a no-op.
+func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
+
+// StdDev is a no-op.
+func (NilHistogram) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilHistogram) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilHistogram) Update(v int64) {}
+
+// Variance is a no-op.
+func (NilHistogram) Variance() float64 { return 0.0 }
+
+// StandardHistogram is the standard implementation of a Histogram and uses a
+// Sample to bound its memory use.
+type StandardHistogram struct {
+	sample Sample
+}
+
+// Clear clears the histogram and its sample.
+func (h *StandardHistogram) Clear() { h.sample.Clear() }
+
+// Count returns the number of samples recorded since the histogram was last
+// cleared.
+func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
+
+// Max returns the maximum value in the sample.
+func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
+
+// Mean returns the mean of the values in the sample.
+func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
+
+// Min returns the minimum value in the sample.
+func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (h *StandardHistogram) Percentile(p float64) float64 {
+	return h.sample.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
+	return h.sample.Percentiles(ps)
+}
+
+// Sample returns the Sample underlying the histogram.
+func (h *StandardHistogram) Sample() Sample { return h.sample }
+
+// Snapshot returns a read-only copy of the histogram.
+func (h *StandardHistogram) Snapshot() Histogram {
+	return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
+
+// Sum returns the sum in the sample.
+func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
+
+// Update samples a new value.
+func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
+
+// Variance returns the variance of the values in the sample.
+func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go
new file mode 100644
index 0000000..174b947
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/json.go
@@ -0,0 +1,31 @@
+package metrics
+
+import (
+	"encoding/json"
+	"io"
+	"time"
+)
+
+// MarshalJSON returns a byte slice containing a JSON representation of all
+// the metrics in the Registry.
+func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
+	return json.Marshal(r.GetAll())
+}
+
+// WriteJSON periodically writes metrics from the given registry to the
+// specified io.Writer as JSON.
+func WriteJSON(r Registry, d time.Duration, w io.Writer) {
+	for _ = range time.Tick(d) {
+		WriteJSONOnce(r, w)
+	}
+}
+
+// WriteJSONOnce writes metrics from the given registry to the specified
+// io.Writer as JSON.
+func WriteJSONOnce(r Registry, w io.Writer) {
+	json.NewEncoder(w).Encode(r)
+}
+
+func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
+	return json.Marshal(p.GetAll())
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go
new file mode 100644
index 0000000..f8074c0
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/log.go
@@ -0,0 +1,80 @@
+package metrics
+
+import (
+	"time"
+)
+
+type Logger interface {
+	Printf(format string, v ...interface{})
+}
+
+func Log(r Registry, freq time.Duration, l Logger) {
+	LogScaled(r, freq, time.Nanosecond, l)
+}
+
+// Output each metric in the given registry periodically using the given
+// logger. Print timings in `scale` units (e.g. time.Millisecond) rather than nanoseconds.
+func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
+	du := float64(scale)
+	duSuffix := scale.String()[1:]
+
+	for _ = range time.Tick(freq) {
+		r.Each(func(name string, i interface{}) {
+			switch metric := i.(type) {
+			case Counter:
+				l.Printf("counter %s\n", name)
+				l.Printf("  count:       %9d\n", metric.Count())
+			case Gauge:
+				l.Printf("gauge %s\n", name)
+				l.Printf("  value:       %9d\n", metric.Value())
+			case GaugeFloat64:
+				l.Printf("gauge %s\n", name)
+				l.Printf("  value:       %f\n", metric.Value())
+			case Healthcheck:
+				metric.Check()
+				l.Printf("healthcheck %s\n", name)
+				l.Printf("  error:       %v\n", metric.Error())
+			case Histogram:
+				h := metric.Snapshot()
+				ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				l.Printf("histogram %s\n", name)
+				l.Printf("  count:       %9d\n", h.Count())
+				l.Printf("  min:         %9d\n", h.Min())
+				l.Printf("  max:         %9d\n", h.Max())
+				l.Printf("  mean:        %12.2f\n", h.Mean())
+				l.Printf("  stddev:      %12.2f\n", h.StdDev())
+				l.Printf("  median:      %12.2f\n", ps[0])
+				l.Printf("  75%%:         %12.2f\n", ps[1])
+				l.Printf("  95%%:         %12.2f\n", ps[2])
+				l.Printf("  99%%:         %12.2f\n", ps[3])
+				l.Printf("  99.9%%:       %12.2f\n", ps[4])
+			case Meter:
+				m := metric.Snapshot()
+				l.Printf("meter %s\n", name)
+				l.Printf("  count:       %9d\n", m.Count())
+				l.Printf("  1-min rate:  %12.2f\n", m.Rate1())
+				l.Printf("  5-min rate:  %12.2f\n", m.Rate5())
+				l.Printf("  15-min rate: %12.2f\n", m.Rate15())
+				l.Printf("  mean rate:   %12.2f\n", m.RateMean())
+			case Timer:
+				t := metric.Snapshot()
+				ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				l.Printf("timer %s\n", name)
+				l.Printf("  count:       %9d\n", t.Count())
+				l.Printf("  min:         %12.2f%s\n", float64(t.Min())/du, duSuffix)
+				l.Printf("  max:         %12.2f%s\n", float64(t.Max())/du, duSuffix)
+				l.Printf("  mean:        %12.2f%s\n", t.Mean()/du, duSuffix)
+				l.Printf("  stddev:      %12.2f%s\n", t.StdDev()/du, duSuffix)
+				l.Printf("  median:      %12.2f%s\n", ps[0]/du, duSuffix)
+				l.Printf("  75%%:         %12.2f%s\n", ps[1]/du, duSuffix)
+				l.Printf("  95%%:         %12.2f%s\n", ps[2]/du, duSuffix)
+				l.Printf("  99%%:         %12.2f%s\n", ps[3]/du, duSuffix)
+				l.Printf("  99.9%%:       %12.2f%s\n", ps[4]/du, duSuffix)
+				l.Printf("  1-min rate:  %12.2f\n", t.Rate1())
+				l.Printf("  5-min rate:  %12.2f\n", t.Rate5())
+				l.Printf("  15-min rate: %12.2f\n", t.Rate15())
+				l.Printf("  mean rate:   %12.2f\n", t.RateMean())
+			}
+		})
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md
new file mode 100644
index 0000000..47454f5
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/memory.md
@@ -0,0 +1,285 @@
+Memory usage
+============
+
+(Highly unscientific.)
+
+Command used to gather static memory usage:
+
+```sh
+grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
+```
+
+Program used to gather baseline memory usage:
+
+```go
+package main
+
+import "time"
+
+func main() {
+	time.Sleep(600e9)
+}
+```
+
+Baseline
+--------
+
+```
+VmPeak:    42604 kB
+VmSize:    42604 kB
+VmLck:         0 kB
+VmHWM:      1120 kB
+VmRSS:      1120 kB
+VmData:    35460 kB
+VmStk:       136 kB
+VmExe:      1020 kB
+VmLib:      1848 kB
+VmPTE:        36 kB
+VmSwap:        0 kB
+```
+
+Program used to gather metric memory usage (with other metrics being similar):
+
+```go
+package main
+
+import (
+	"fmt"
+	"metrics"
+	"time"
+)
+
+func main() {
+	fmt.Sprintf("foo")
+	metrics.NewRegistry()
+	time.Sleep(600e9)
+}
+```
+
+1000 counters registered
+------------------------
+
+```
+VmPeak:    44016 kB
+VmSize:    44016 kB
+VmLck:         0 kB
+VmHWM:      1928 kB
+VmRSS:      1928 kB
+VmData:    36868 kB
+VmStk:       136 kB
+VmExe:      1024 kB
+VmLib:      1848 kB
+VmPTE:        40 kB
+VmSwap:        0 kB
+```
+
+**1.412 kB virtual, TODO 0.808 kB resident per counter.**
+
+100000 counters registered
+--------------------------
+
+```
+VmPeak:    55024 kB
+VmSize:    55024 kB
+VmLck:         0 kB
+VmHWM:     12440 kB
+VmRSS:     12440 kB
+VmData:    47876 kB
+VmStk:       136 kB
+VmExe:      1024 kB
+VmLib:      1848 kB
+VmPTE:        64 kB
+VmSwap:        0 kB
+```
+
+**0.1242 kB virtual, 0.1132 kB resident per counter.**
+
+1000 gauges registered
+----------------------
+
+```
+VmPeak:    44012 kB
+VmSize:    44012 kB
+VmLck:         0 kB
+VmHWM:      1928 kB
+VmRSS:      1928 kB
+VmData:    36868 kB
+VmStk:       136 kB
+VmExe:      1020 kB
+VmLib:      1848 kB
+VmPTE:        40 kB
+VmSwap:        0 kB
+```
+
+**1.408 kB virtual, 0.808 kB resident per gauge.**
+
+100000 gauges registered
+------------------------
+
+```
+VmPeak:    55020 kB
+VmSize:    55020 kB
+VmLck:         0 kB
+VmHWM:     12432 kB
+VmRSS:     12432 kB
+VmData:    47876 kB
+VmStk:       136 kB
+VmExe:      1020 kB
+VmLib:      1848 kB
+VmPTE:        60 kB
+VmSwap:        0 kB
+```
+
+**0.12416 kB virtual, 0.11312 kB resident per gauge.**
+
+1000 histograms with a uniform sample size of 1028
+--------------------------------------------------
+
+```
+VmPeak:    72272 kB
+VmSize:    72272 kB
+VmLck:         0 kB
+VmHWM:     16204 kB
+VmRSS:     16204 kB
+VmData:    65100 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:        80 kB
+VmSwap:        0 kB
+```
+
+**29.668 kB virtual, TODO 15.084 resident per histogram.**
+
+10000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak:   256912 kB
+VmSize:   256912 kB
+VmLck:         0 kB
+VmHWM:    146204 kB
+VmRSS:    146204 kB
+VmData:   249740 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:       448 kB
+VmSwap:        0 kB
+```
+
+**21.4308 kB virtual, 14.5084 kB resident per histogram.**
+
+50000 histograms with a uniform sample size of 1028
+---------------------------------------------------
+
+```
+VmPeak:   908112 kB
+VmSize:   908112 kB
+VmLck:         0 kB
+VmHWM:    645832 kB
+VmRSS:    645588 kB
+VmData:   900940 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:      1716 kB
+VmSwap:     1544 kB
+```
+
+**17.31016 kB virtual, 12.88936 kB resident per histogram.**
+
+1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+-------------------------------------------------------------------------------------
+
+```
+VmPeak:    62480 kB
+VmSize:    62480 kB
+VmLck:         0 kB
+VmHWM:     11572 kB
+VmRSS:     11572 kB
+VmData:    55308 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:        64 kB
+VmSwap:        0 kB
+```
+
+**19.876 kB virtual, 10.452 kB resident per histogram.**
+
+10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak:   153296 kB
+VmSize:   153296 kB
+VmLck:         0 kB
+VmHWM:    101176 kB
+VmRSS:    101176 kB
+VmData:   146124 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:       240 kB
+VmSwap:        0 kB
+```
+
+**11.0692 kB virtual, 10.0056 kB resident per histogram.**
+
+50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
+--------------------------------------------------------------------------------------
+
+```
+VmPeak:   557264 kB
+VmSize:   557264 kB
+VmLck:         0 kB
+VmHWM:    501056 kB
+VmRSS:    501056 kB
+VmData:   550092 kB
+VmStk:       136 kB
+VmExe:      1048 kB
+VmLib:      1848 kB
+VmPTE:      1032 kB
+VmSwap:        0 kB
+```
+
+**10.2932 kB virtual, 9.99872 kB resident per histogram.**
+
+1000 meters
+-----------
+
+```
+VmPeak:    74504 kB
+VmSize:    74504 kB
+VmLck:         0 kB
+VmHWM:     24124 kB
+VmRSS:     24124 kB
+VmData:    67340 kB
+VmStk:       136 kB
+VmExe:      1040 kB
+VmLib:      1848 kB
+VmPTE:        92 kB
+VmSwap:        0 kB
+```
+
+**31.9 kB virtual, 23.004 kB resident per meter.**
+
+10000 meters
+------------
+
+```
+VmPeak:   278920 kB
+VmSize:   278920 kB
+VmLck:         0 kB
+VmHWM:    227300 kB
+VmRSS:    227300 kB
+VmData:   271756 kB
+VmStk:       136 kB
+VmExe:      1040 kB
+VmLib:      1848 kB
+VmPTE:       488 kB
+VmSwap:        0 kB
+```
+
+**23.6316 kB virtual, 22.618 kB resident per meter.**
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
new file mode 100644
index 0000000..223669b
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/meter.go
@@ -0,0 +1,251 @@
+package metrics
+
+import (
+	"math"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// Meters count events to produce exponentially-weighted moving average rates
+// at one-, five-, and fifteen-minutes and a mean rate.
+type Meter interface {
+	Count() int64
+	Mark(int64)
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+	Snapshot() Meter
+	Stop()
+}
+
+// GetOrRegisterMeter returns an existing Meter or constructs and registers a
+// new StandardMeter.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterMeter(name string, r Registry) Meter {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewMeter).(Meter)
+}
+
+// NewMeter constructs a new StandardMeter and launches a goroutine.
+// Be sure to call Stop() once the meter is of no use to allow for garbage collection.
+func NewMeter() Meter {
+	if UseNilMetrics {
+		return NilMeter{}
+	}
+	m := newStandardMeter()
+	arbiter.Lock()
+	defer arbiter.Unlock()
+	arbiter.meters[m] = struct{}{}
+	if !arbiter.started {
+		arbiter.started = true
+		go arbiter.tick()
+	}
+	return m
+}
+
+// NewRegisteredMeter constructs and registers a new StandardMeter and launches a
+// goroutine.
+// Be sure to unregister the meter from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredMeter(name string, r Registry) Meter {
+	c := NewMeter()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// MeterSnapshot is a read-only copy of another Meter.
+type MeterSnapshot struct {
+	count                          int64
+	rate1, rate5, rate15, rateMean uint64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+	panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return math.Float64frombits(m.rate1) }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return math.Float64frombits(m.rate5) }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return math.Float64frombits(m.rate15) }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return math.Float64frombits(m.rateMean) }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// Stop is a no-op.
+func (m *MeterSnapshot) Stop() {}
+
+// NilMeter is a no-op Meter.
+type NilMeter struct{}
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
+
+// Stop is a no-op.
+func (NilMeter) Stop() {}
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+	snapshot    *MeterSnapshot
+	a1, a5, a15 EWMA
+	startTime   time.Time
+	stopped     uint32
+}
+
+func newStandardMeter() *StandardMeter {
+	return &StandardMeter{
+		snapshot:  &MeterSnapshot{},
+		a1:        NewEWMA1(),
+		a5:        NewEWMA5(),
+		a15:       NewEWMA15(),
+		startTime: time.Now(),
+	}
+}
+
+// Stop stops the meter; Mark() becomes a no-op once the meter has been stopped.
+func (m *StandardMeter) Stop() {
+	if atomic.CompareAndSwapUint32(&m.stopped, 0, 1) {
+		arbiter.Lock()
+		delete(arbiter.meters, m)
+		arbiter.Unlock()
+	}
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+	return atomic.LoadInt64(&m.snapshot.count)
+}
+
+// Mark records the occurrence of n events.
+func (m *StandardMeter) Mark(n int64) {
+	if atomic.LoadUint32(&m.stopped) == 1 {
+		return
+	}
+
+	atomic.AddInt64(&m.snapshot.count, n)
+
+	m.a1.Update(n)
+	m.a5.Update(n)
+	m.a15.Update(n)
+	m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate1))
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate5))
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rate15))
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (m *StandardMeter) RateMean() float64 {
+	return math.Float64frombits(atomic.LoadUint64(&m.snapshot.rateMean))
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter {
+	copiedSnapshot := MeterSnapshot{
+		count:    atomic.LoadInt64(&m.snapshot.count),
+		rate1:    atomic.LoadUint64(&m.snapshot.rate1),
+		rate5:    atomic.LoadUint64(&m.snapshot.rate5),
+		rate15:   atomic.LoadUint64(&m.snapshot.rate15),
+		rateMean: atomic.LoadUint64(&m.snapshot.rateMean),
+	}
+	return &copiedSnapshot
+}
+
+func (m *StandardMeter) updateSnapshot() {
+	rate1 := math.Float64bits(m.a1.Rate())
+	rate5 := math.Float64bits(m.a5.Rate())
+	rate15 := math.Float64bits(m.a15.Rate())
+	rateMean := math.Float64bits(float64(m.Count()) / time.Since(m.startTime).Seconds())
+
+	atomic.StoreUint64(&m.snapshot.rate1, rate1)
+	atomic.StoreUint64(&m.snapshot.rate5, rate5)
+	atomic.StoreUint64(&m.snapshot.rate15, rate15)
+	atomic.StoreUint64(&m.snapshot.rateMean, rateMean)
+}
+
+func (m *StandardMeter) tick() {
+	m.a1.Tick()
+	m.a5.Tick()
+	m.a15.Tick()
+	m.updateSnapshot()
+}
+
+// meterArbiter ticks meters every 5s from a single goroutine.
+// Meters are referenced in a set so they can be stopped later.
+type meterArbiter struct {
+	sync.RWMutex
+	started bool
+	meters  map[*StandardMeter]struct{}
+	ticker  *time.Ticker
+}
+
+var arbiter = meterArbiter{ticker: time.NewTicker(5e9), meters: make(map[*StandardMeter]struct{})}
+
+// Ticks meters on the scheduled interval
+func (ma *meterArbiter) tick() {
+	for {
+		select {
+		case <-ma.ticker.C:
+			ma.tickMeters()
+		}
+	}
+}
+
+func (ma *meterArbiter) tickMeters() {
+	ma.RLock()
+	defer ma.RUnlock()
+	for meter := range ma.meters {
+		meter.tick()
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
new file mode 100644
index 0000000..b97a49e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/metrics.go
@@ -0,0 +1,13 @@
+// Go port of Coda Hale's Metrics library
+//
+// <https://github.com/rcrowley/go-metrics>
+//
+// Coda Hale's original work: <https://github.com/codahale/metrics>
+package metrics
+
+// UseNilMetrics is checked by the constructor functions for all of the
+// standard metrics.  If it is true, the metric returned is a stub.
+//
+// This global kill-switch helps quantify the observer effect and makes
+// for less cluttered pprof profiles.
+var UseNilMetrics bool = false
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
new file mode 100644
index 0000000..266b6c9
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
@@ -0,0 +1,119 @@
+package metrics
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"os"
+	"strings"
+	"time"
+)
+
+var shortHostName string = ""
+
+// OpenTSDBConfig provides a container with configuration parameters for
+// the OpenTSDB exporter
+type OpenTSDBConfig struct {
+	Addr          *net.TCPAddr  // Network address to connect to
+	Registry      Registry      // Registry to be exported
+	FlushInterval time.Duration // Flush interval
+	DurationUnit  time.Duration // Time conversion unit for durations
+	Prefix        string        // Prefix to be prepended to metric names
+}
+
+// OpenTSDB is a blocking exporter function which reports the metrics in r
+// to an OpenTSDB server at addr, flushing them every d and prepending
+// metric names with prefix.
+func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+	OpenTSDBWithConfig(OpenTSDBConfig{
+		Addr:          addr,
+		Registry:      r,
+		FlushInterval: d,
+		DurationUnit:  time.Nanosecond,
+		Prefix:        prefix,
+	})
+}
+
+// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
+// but it takes an OpenTSDBConfig instead.
+func OpenTSDBWithConfig(c OpenTSDBConfig) {
+	for range time.Tick(c.FlushInterval) {
+		if err := openTSDB(&c); nil != err {
+			log.Println(err)
+		}
+	}
+}
+
+func getShortHostname() string {
+	if shortHostName == "" {
+		host, _ := os.Hostname()
+		if index := strings.Index(host, "."); index > 0 {
+			shortHostName = host[:index]
+		} else {
+			shortHostName = host
+		}
+	}
+	return shortHostName
+}
+
+func openTSDB(c *OpenTSDBConfig) error {
+	shortHostname := getShortHostname()
+	now := time.Now().Unix()
+	du := float64(c.DurationUnit)
+	conn, err := net.DialTCP("tcp", nil, c.Addr)
+	if nil != err {
+		return err
+	}
+	defer conn.Close()
+	w := bufio.NewWriter(conn)
+	c.Registry.Each(func(name string, i interface{}) {
+		switch metric := i.(type) {
+		case Counter:
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+		case Gauge:
+			fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+		case GaugeFloat64:
+			fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
+			fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
+		}
+		w.Flush()
+	})
+	return nil
+}
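A hedged usage sketch for the exporter above (not part of the vendored file; the host, port, prefix and flush interval are placeholders). OpenTSDB blocks on its flush ticker, so it is normally started in its own goroutine:

```go
package main

import (
	"log"
	"net"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	addr, err := net.ResolveTCPAddr("tcp", "tsdb.example.com:4242")
	if err != nil {
		log.Fatal(err)
	}
	// Flush everything in the default registry every 10s, prefixed "myapp".
	go metrics.OpenTSDB(metrics.DefaultRegistry, 10*time.Second, "myapp", addr)

	select {} // the exporter runs until the process exits
}
```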
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go
new file mode 100644
index 0000000..b3bab64
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/registry.go
@@ -0,0 +1,363 @@
+package metrics
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// DuplicateMetric is the error returned by Registry.Register when a metric
+// already exists.  If you mean to Register that metric you must first
+// Unregister the existing metric.
+type DuplicateMetric string
+
+func (err DuplicateMetric) Error() string {
+	return fmt.Sprintf("duplicate metric: %s", string(err))
+}
+
+// A Registry holds references to a set of metrics by name and can iterate
+// over them, calling callback functions provided by the user.
+//
+// This is an interface so as to encourage other structs to implement
+// the Registry API as appropriate.
+type Registry interface {
+
+	// Call the given function for each registered metric.
+	Each(func(string, interface{}))
+
+	// Get the metric by the given name or nil if none is registered.
+	Get(string) interface{}
+
+	// GetAll returns all registered metrics and their current values, keyed by metric name.
+	GetAll() map[string]map[string]interface{}
+
+	// Gets an existing metric or registers the given one.
+	// The interface can be the metric to register if not found in registry,
+	// or a function returning the metric for lazy instantiation.
+	GetOrRegister(string, interface{}) interface{}
+
+	// Register the given metric under the given name.
+	Register(string, interface{}) error
+
+	// Run all registered healthchecks.
+	RunHealthchecks()
+
+	// Unregister the metric with the given name.
+	Unregister(string)
+
+	// Unregister all metrics.  (Mostly for testing.)
+	UnregisterAll()
+}
+
+// The standard implementation of a Registry is a mutex-protected map
+// of names to metrics.
+type StandardRegistry struct {
+	metrics map[string]interface{}
+	mutex   sync.RWMutex
+}
+
+// Create a new registry.
+func NewRegistry() Registry {
+	return &StandardRegistry{metrics: make(map[string]interface{})}
+}
+
+// Call the given function for each registered metric.
+func (r *StandardRegistry) Each(f func(string, interface{})) {
+	for name, i := range r.registered() {
+		f(name, i)
+	}
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *StandardRegistry) Get(name string) interface{} {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	return r.metrics[name]
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
+	// take the read lock first; concurrent readers do not block one another
+	r.mutex.RLock()
+	metric, ok := r.metrics[name]
+	r.mutex.RUnlock()
+	if ok {
+		return metric
+	}
+
+	// only take the write lock if we'll be modifying the metrics map
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	if metric, ok := r.metrics[name]; ok {
+		return metric
+	}
+	if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
+		i = v.Call(nil)[0].Interface()
+	}
+	r.register(name, i)
+	return i
+}
+
+// Register the given metric under the given name.  Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func (r *StandardRegistry) Register(name string, i interface{}) error {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	return r.register(name, i)
+}
+
+// Run all registered healthchecks.
+func (r *StandardRegistry) RunHealthchecks() {
+	r.mutex.RLock()
+	defer r.mutex.RUnlock()
+	for _, i := range r.metrics {
+		if h, ok := i.(Healthcheck); ok {
+			h.Check()
+		}
+	}
+}
+
+// GetAll returns all registered metrics and their current values, keyed by metric name.
+func (r *StandardRegistry) GetAll() map[string]map[string]interface{} {
+	data := make(map[string]map[string]interface{})
+	r.Each(func(name string, i interface{}) {
+		values := make(map[string]interface{})
+		switch metric := i.(type) {
+		case Counter:
+			values["count"] = metric.Count()
+		case Gauge:
+			values["value"] = metric.Value()
+		case GaugeFloat64:
+			values["value"] = metric.Value()
+		case Healthcheck:
+			values["error"] = nil
+			metric.Check()
+			if err := metric.Error(); nil != err {
+				values["error"] = err.Error()
+			}
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = h.Count()
+			values["min"] = h.Min()
+			values["max"] = h.Max()
+			values["mean"] = h.Mean()
+			values["stddev"] = h.StdDev()
+			values["median"] = ps[0]
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
+		case Meter:
+			m := metric.Snapshot()
+			values["count"] = m.Count()
+			values["1m.rate"] = m.Rate1()
+			values["5m.rate"] = m.Rate5()
+			values["15m.rate"] = m.Rate15()
+			values["mean.rate"] = m.RateMean()
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			values["count"] = t.Count()
+			values["min"] = t.Min()
+			values["max"] = t.Max()
+			values["mean"] = t.Mean()
+			values["stddev"] = t.StdDev()
+			values["median"] = ps[0]
+			values["75%"] = ps[1]
+			values["95%"] = ps[2]
+			values["99%"] = ps[3]
+			values["99.9%"] = ps[4]
+			values["1m.rate"] = t.Rate1()
+			values["5m.rate"] = t.Rate5()
+			values["15m.rate"] = t.Rate15()
+			values["mean.rate"] = t.RateMean()
+		}
+		data[name] = values
+	})
+	return data
+}
+
+// Unregister the metric with the given name.
+func (r *StandardRegistry) Unregister(name string) {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	r.stop(name)
+	delete(r.metrics, name)
+}
+
+// Unregister all metrics.  (Mostly for testing.)
+func (r *StandardRegistry) UnregisterAll() {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	for name := range r.metrics {
+		r.stop(name)
+		delete(r.metrics, name)
+	}
+}
+
+func (r *StandardRegistry) register(name string, i interface{}) error {
+	if _, ok := r.metrics[name]; ok {
+		return DuplicateMetric(name)
+	}
+	switch i.(type) {
+	case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
+		r.metrics[name] = i
+	}
+	return nil
+}
+
+func (r *StandardRegistry) registered() map[string]interface{} {
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+	metrics := make(map[string]interface{}, len(r.metrics))
+	for name, i := range r.metrics {
+		metrics[name] = i
+	}
+	return metrics
+}
+
+func (r *StandardRegistry) stop(name string) {
+	if i, ok := r.metrics[name]; ok {
+		if s, ok := i.(Stoppable); ok {
+			s.Stop()
+		}
+	}
+}
+
+// Stoppable is implemented by metrics that need to be stopped when they are unregistered.
+type Stoppable interface {
+	Stop()
+}
+
+type PrefixedRegistry struct {
+	underlying Registry
+	prefix     string
+}
+
+func NewPrefixedRegistry(prefix string) Registry {
+	return &PrefixedRegistry{
+		underlying: NewRegistry(),
+		prefix:     prefix,
+	}
+}
+
+func NewPrefixedChildRegistry(parent Registry, prefix string) Registry {
+	return &PrefixedRegistry{
+		underlying: parent,
+		prefix:     prefix,
+	}
+}
+
+// Call the given function for each registered metric.
+func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
+	wrappedFn := func(prefix string) func(string, interface{}) {
+		return func(name string, iface interface{}) {
+			if strings.HasPrefix(name, prefix) {
+				fn(name, iface)
+			}
+		}
+	}
+
+	baseRegistry, prefix := findPrefix(r, "")
+	baseRegistry.Each(wrappedFn(prefix))
+}
+
+func findPrefix(registry Registry, prefix string) (Registry, string) {
+	switch r := registry.(type) {
+	case *PrefixedRegistry:
+		return findPrefix(r.underlying, r.prefix+prefix)
+	case *StandardRegistry:
+		return r, prefix
+	}
+	return nil, ""
+}
+
+// Get the metric by the given name or nil if none is registered.
+func (r *PrefixedRegistry) Get(name string) interface{} {
+	realName := r.prefix + name
+	return r.underlying.Get(realName)
+}
+
+// Gets an existing metric or registers the given one.
+// The interface can be the metric to register if not found in registry,
+// or a function returning the metric for lazy instantiation.
+func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} {
+	realName := r.prefix + name
+	return r.underlying.GetOrRegister(realName, metric)
+}
+
+// Register the given metric under the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Register(name string, metric interface{}) error {
+	realName := r.prefix + name
+	return r.underlying.Register(realName, metric)
+}
+
+// Run all registered healthchecks.
+func (r *PrefixedRegistry) RunHealthchecks() {
+	r.underlying.RunHealthchecks()
+}
+
+// GetAll returns all metrics in the underlying Registry, keyed by their full (prefixed) names.
+func (r *PrefixedRegistry) GetAll() map[string]map[string]interface{} {
+	return r.underlying.GetAll()
+}
+
+// Unregister the metric with the given name. The name will be prefixed.
+func (r *PrefixedRegistry) Unregister(name string) {
+	realName := r.prefix + name
+	r.underlying.Unregister(realName)
+}
+
+// Unregister all metrics.  (Mostly for testing.)
+func (r *PrefixedRegistry) UnregisterAll() {
+	r.underlying.UnregisterAll()
+}
+
+var DefaultRegistry Registry = NewRegistry()
+
+// Call the given function for each registered metric.
+func Each(f func(string, interface{})) {
+	DefaultRegistry.Each(f)
+}
+
+// Get the metric by the given name or nil if none is registered.
+func Get(name string) interface{} {
+	return DefaultRegistry.Get(name)
+}
+
+// Gets an existing metric or creates and registers a new one. Threadsafe
+// alternative to calling Get and Register on failure.
+func GetOrRegister(name string, i interface{}) interface{} {
+	return DefaultRegistry.GetOrRegister(name, i)
+}
+
+// Register the given metric under the given name.  Returns a DuplicateMetric
+// if a metric by the given name is already registered.
+func Register(name string, i interface{}) error {
+	return DefaultRegistry.Register(name, i)
+}
+
+// Register the given metric under the given name.  Panics if a metric by the
+// given name is already registered.
+func MustRegister(name string, i interface{}) {
+	if err := Register(name, i); err != nil {
+		panic(err)
+	}
+}
+
+// Run all registered healthchecks.
+func RunHealthchecks() {
+	DefaultRegistry.RunHealthchecks()
+}
+
+// Unregister the metric with the given name.
+func Unregister(name string) {
+	DefaultRegistry.Unregister(name)
+}
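A short sketch (not part of the vendored file) of the registry behaviour documented above: lazy instantiation through GetOrRegister and name prefixing through a child registry. The metric names are illustrative.

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// Child registry that stores its metrics on DefaultRegistry under "web.".
	web := metrics.NewPrefixedChildRegistry(metrics.DefaultRegistry, "web.")

	// Passing the constructor (a func) means it only runs if "requests" is not
	// registered yet -- the lazy-instantiation path in GetOrRegister.
	c := web.GetOrRegister("requests", metrics.NewCounter).(metrics.Counter)
	c.Inc(1)

	// The metric lives on the parent registry under the prefixed name.
	fmt.Println(metrics.DefaultRegistry.Get("web.requests") != nil) // true
}
```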
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go
new file mode 100644
index 0000000..11c6b78
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime.go
@@ -0,0 +1,212 @@
+package metrics
+
+import (
+	"runtime"
+	"runtime/pprof"
+	"time"
+)
+
+var (
+	memStats       runtime.MemStats
+	runtimeMetrics struct {
+		MemStats struct {
+			Alloc         Gauge
+			BuckHashSys   Gauge
+			DebugGC       Gauge
+			EnableGC      Gauge
+			Frees         Gauge
+			HeapAlloc     Gauge
+			HeapIdle      Gauge
+			HeapInuse     Gauge
+			HeapObjects   Gauge
+			HeapReleased  Gauge
+			HeapSys       Gauge
+			LastGC        Gauge
+			Lookups       Gauge
+			Mallocs       Gauge
+			MCacheInuse   Gauge
+			MCacheSys     Gauge
+			MSpanInuse    Gauge
+			MSpanSys      Gauge
+			NextGC        Gauge
+			NumGC         Gauge
+			GCCPUFraction GaugeFloat64
+			PauseNs       Histogram
+			PauseTotalNs  Gauge
+			StackInuse    Gauge
+			StackSys      Gauge
+			Sys           Gauge
+			TotalAlloc    Gauge
+		}
+		NumCgoCall   Gauge
+		NumGoroutine Gauge
+		NumThread    Gauge
+		ReadMemStats Timer
+	}
+	frees       uint64
+	lookups     uint64
+	mallocs     uint64
+	numGC       uint32
+	numCgoCalls int64
+
+	threadCreateProfile = pprof.Lookup("threadcreate")
+)
+
+// CaptureRuntimeMemStats captures new values for the Go runtime statistics
+// exported in runtime.MemStats on every tick of d.  This is designed to be
+// called in its own goroutine.
+func CaptureRuntimeMemStats(r Registry, d time.Duration) {
+	for range time.Tick(d) {
+		CaptureRuntimeMemStatsOnce(r)
+	}
+}
+
+// CaptureRuntimeMemStatsOnce captures new values for the Go runtime statistics
+// exported in runtime.MemStats.  This is designed to be called from a
+// background goroutine.  Passing a registry which has not been given to
+// RegisterRuntimeMemStats will panic.
+//
+// Be very careful with this because runtime.ReadMemStats calls the C
+// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
+// and that last one does what it says on the tin.
+func CaptureRuntimeMemStatsOnce(r Registry) {
+	t := time.Now()
+	runtime.ReadMemStats(&memStats) // This takes 50-200us.
+	runtimeMetrics.ReadMemStats.UpdateSince(t)
+
+	runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
+	runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
+	if memStats.DebugGC {
+		runtimeMetrics.MemStats.DebugGC.Update(1)
+	} else {
+		runtimeMetrics.MemStats.DebugGC.Update(0)
+	}
+	if memStats.EnableGC {
+		runtimeMetrics.MemStats.EnableGC.Update(1)
+	} else {
+		runtimeMetrics.MemStats.EnableGC.Update(0)
+	}
+
+	runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
+	runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
+	runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
+	runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
+	runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
+	runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
+	runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
+	runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
+	runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
+	runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
+	runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
+	runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
+	runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
+	runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
+	runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
+	runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
+	runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))
+
+	// <https://code.google.com/p/go/source/browse/src/pkg/runtime/mgc0.c>
+	i := numGC % uint32(len(memStats.PauseNs))
+	ii := memStats.NumGC % uint32(len(memStats.PauseNs))
+	if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
+		for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
+			runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+		}
+	} else {
+		if i > ii {
+			for ; i < uint32(len(memStats.PauseNs)); i++ {
+				runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+			}
+			i = 0
+		}
+		for ; i < ii; i++ {
+			runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
+		}
+	}
+	frees = memStats.Frees
+	lookups = memStats.Lookups
+	mallocs = memStats.Mallocs
+	numGC = memStats.NumGC
+
+	runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
+	runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
+	runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
+	runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
+	runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
+
+	currentNumCgoCalls := numCgoCall()
+	runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
+	numCgoCalls = currentNumCgoCalls
+
+	runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
+
+	runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
+}
+
+// RegisterRuntimeMemStats registers metrics for the Go runtime statistics
+// exported in runtime and specifically runtime.MemStats.  The metrics are
+// named by their fully-qualified Go symbols, e.g. runtime.MemStats.Alloc.
+func RegisterRuntimeMemStats(r Registry) {
+	runtimeMetrics.MemStats.Alloc = NewGauge()
+	runtimeMetrics.MemStats.BuckHashSys = NewGauge()
+	runtimeMetrics.MemStats.DebugGC = NewGauge()
+	runtimeMetrics.MemStats.EnableGC = NewGauge()
+	runtimeMetrics.MemStats.Frees = NewGauge()
+	runtimeMetrics.MemStats.HeapAlloc = NewGauge()
+	runtimeMetrics.MemStats.HeapIdle = NewGauge()
+	runtimeMetrics.MemStats.HeapInuse = NewGauge()
+	runtimeMetrics.MemStats.HeapObjects = NewGauge()
+	runtimeMetrics.MemStats.HeapReleased = NewGauge()
+	runtimeMetrics.MemStats.HeapSys = NewGauge()
+	runtimeMetrics.MemStats.LastGC = NewGauge()
+	runtimeMetrics.MemStats.Lookups = NewGauge()
+	runtimeMetrics.MemStats.Mallocs = NewGauge()
+	runtimeMetrics.MemStats.MCacheInuse = NewGauge()
+	runtimeMetrics.MemStats.MCacheSys = NewGauge()
+	runtimeMetrics.MemStats.MSpanInuse = NewGauge()
+	runtimeMetrics.MemStats.MSpanSys = NewGauge()
+	runtimeMetrics.MemStats.NextGC = NewGauge()
+	runtimeMetrics.MemStats.NumGC = NewGauge()
+	runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
+	runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
+	runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
+	runtimeMetrics.MemStats.StackInuse = NewGauge()
+	runtimeMetrics.MemStats.StackSys = NewGauge()
+	runtimeMetrics.MemStats.Sys = NewGauge()
+	runtimeMetrics.MemStats.TotalAlloc = NewGauge()
+	runtimeMetrics.NumCgoCall = NewGauge()
+	runtimeMetrics.NumGoroutine = NewGauge()
+	runtimeMetrics.NumThread = NewGauge()
+	runtimeMetrics.ReadMemStats = NewTimer()
+
+	r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
+	r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
+	r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
+	r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
+	r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
+	r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
+	r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
+	r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
+	r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
+	r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
+	r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
+	r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
+	r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
+	r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
+	r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
+	r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
+	r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
+	r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
+	r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
+	r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
+	r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
+	r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
+	r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
+	r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
+	r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
+	r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
+	r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
+	r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
+	r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
+	r.Register("runtime.NumThread", runtimeMetrics.NumThread)
+	r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
+}
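A minimal sketch (not part of the vendored file) tying the two functions together: register the runtime gauges once, then sample them periodically from a background goroutine. The 5s interval is an arbitrary example.

```go
package main

import (
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()

	metrics.RegisterRuntimeMemStats(r) // must happen before any capture
	go metrics.CaptureRuntimeMemStats(r, 5*time.Second)

	time.Sleep(12 * time.Second) // stand-in for real work; a couple of captures run
}
```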
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
new file mode 100644
index 0000000..e3391f4
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
@@ -0,0 +1,10 @@
+// +build cgo
+// +build !appengine
+
+package metrics
+
+import "runtime"
+
+func numCgoCall() int64 {
+	return runtime.NumCgoCall()
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
new file mode 100644
index 0000000..ca12c05
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+	return memStats.GCCPUFraction
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
new file mode 100644
index 0000000..616a3b4
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
@@ -0,0 +1,7 @@
+// +build !cgo appengine
+
+package metrics
+
+func numCgoCall() int64 {
+	return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
new file mode 100644
index 0000000..be96aa6
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
@@ -0,0 +1,9 @@
+// +build !go1.5
+
+package metrics
+
+import "runtime"
+
+func gcCPUFraction(memStats *runtime.MemStats) float64 {
+	return 0
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go
new file mode 100644
index 0000000..fecee5e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/sample.go
@@ -0,0 +1,616 @@
+package metrics
+
+import (
+	"math"
+	"math/rand"
+	"sort"
+	"sync"
+	"time"
+)
+
+const rescaleThreshold = time.Hour
+
+// Samples maintain a statistically-significant selection of values from
+// a stream.
+type Sample interface {
+	Clear()
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Size() int
+	Snapshot() Sample
+	StdDev() float64
+	Sum() int64
+	Update(int64)
+	Values() []int64
+	Variance() float64
+}
+
+// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
+// priority reservoir.  See Cormode et al's "Forward Decay: A Practical Time
+// Decay Model for Streaming Systems".
+//
+// <http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf>
+type ExpDecaySample struct {
+	alpha         float64
+	count         int64
+	mutex         sync.Mutex
+	reservoirSize int
+	t0, t1        time.Time
+	values        *expDecaySampleHeap
+}
+
+// NewExpDecaySample constructs a new exponentially-decaying sample with the
+// given reservoir size and alpha.
+func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
+	if UseNilMetrics {
+		return NilSample{}
+	}
+	s := &ExpDecaySample{
+		alpha:         alpha,
+		reservoirSize: reservoirSize,
+		t0:            time.Now(),
+		values:        newExpDecaySampleHeap(reservoirSize),
+	}
+	s.t1 = s.t0.Add(rescaleThreshold)
+	return s
+}
+
+// Clear clears all samples.
+func (s *ExpDecaySample) Clear() {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count = 0
+	s.t0 = time.Now()
+	s.t1 = s.t0.Add(rescaleThreshold)
+	s.values.Clear()
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *ExpDecaySample) Count() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Max() int64 {
+	return SampleMax(s.Values())
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *ExpDecaySample) Mean() float64 {
+	return SampleMean(s.Values())
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *ExpDecaySample) Min() int64 {
+	return SampleMin(s.Values())
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *ExpDecaySample) Percentile(p float64) float64 {
+	return SamplePercentile(s.Values(), p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
+	return SamplePercentiles(s.Values(), ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *ExpDecaySample) Size() int {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.values.Size()
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *ExpDecaySample) Snapshot() Sample {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	vals := s.values.Values()
+	values := make([]int64, len(vals))
+	for i, v := range vals {
+		values[i] = v.v
+	}
+	return &SampleSnapshot{
+		count:  s.count,
+		values: values,
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *ExpDecaySample) StdDev() float64 {
+	return SampleStdDev(s.Values())
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *ExpDecaySample) Sum() int64 {
+	return SampleSum(s.Values())
+}
+
+// Update samples a new value.
+func (s *ExpDecaySample) Update(v int64) {
+	s.update(time.Now(), v)
+}
+
+// Values returns a copy of the values in the sample.
+func (s *ExpDecaySample) Values() []int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	vals := s.values.Values()
+	values := make([]int64, len(vals))
+	for i, v := range vals {
+		values[i] = v.v
+	}
+	return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *ExpDecaySample) Variance() float64 {
+	return SampleVariance(s.Values())
+}
+
+// update samples a new value at a particular timestamp.  This is a method all
+// its own to facilitate testing.
+func (s *ExpDecaySample) update(t time.Time, v int64) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count++
+	if s.values.Size() == s.reservoirSize {
+		s.values.Pop()
+	}
+	s.values.Push(expDecaySample{
+		k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
+		v: v,
+	})
+	if t.After(s.t1) {
+		values := s.values.Values()
+		t0 := s.t0
+		s.values.Clear()
+		s.t0 = t
+		s.t1 = s.t0.Add(rescaleThreshold)
+		for _, v := range values {
+			v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
+			s.values.Push(v)
+		}
+	}
+}
+
+// NilSample is a no-op Sample.
+type NilSample struct{}
+
+// Clear is a no-op.
+func (NilSample) Clear() {}
+
+// Count is a no-op.
+func (NilSample) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilSample) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+func SampleMax(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var max int64 = math.MinInt64
+	for _, v := range values {
+		if max < v {
+			max = v
+		}
+	}
+	return max
+}
+
+// SampleMean returns the mean value of the slice of int64.
+func SampleMean(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+func SampleMin(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var min int64 = math.MaxInt64
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+	}
+	return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+	return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+	scores := make([]float64, len(ps))
+	size := len(values)
+	if size > 0 {
+		sort.Sort(values)
+		for i, p := range ps {
+			pos := p * float64(size+1)
+			if pos < 1.0 {
+				scores[i] = float64(values[0])
+			} else if pos >= float64(size) {
+				scores[i] = float64(values[size-1])
+			} else {
+				lower := float64(values[int(pos)-1])
+				upper := float64(values[int(pos)])
+				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+			}
+		}
+	}
+	return scores
+}
+
+// SampleSnapshot is a read-only copy of another Sample.
+type SampleSnapshot struct {
+	count  int64
+	values []int64
+}
+
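+// NewSampleSnapshot wraps the given count and values in a read-only
+// SampleSnapshot.  The values slice is retained as-is, not copied.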
+func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
+	return &SampleSnapshot{
+		count:  count,
+		values: values,
+	}
+}
+
+// Clear panics.
+func (*SampleSnapshot) Clear() {
+	panic("Clear called on a SampleSnapshot")
+}
+
+// Count returns the count of inputs at the time the snapshot was taken.
+func (s *SampleSnapshot) Count() int64 { return s.count }
+
+// Max returns the maximal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+
+// Min returns the minimal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+
+// Percentile returns an arbitrary percentile of values at the time the
+// snapshot was taken.
+func (s *SampleSnapshot) Percentile(p float64) float64 {
+	return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values at the time
+// the snapshot was taken.
+func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
+	return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample at the time the snapshot was taken.
+func (s *SampleSnapshot) Size() int { return len(s.values) }
+
+// Snapshot returns the snapshot.
+func (s *SampleSnapshot) Snapshot() Sample { return s }
+
+// StdDev returns the standard deviation of values at the time the snapshot was
+// taken.
+func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
+
+// Sum returns the sum of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
+
+// Update panics.
+func (*SampleSnapshot) Update(int64) {
+	panic("Update called on a SampleSnapshot")
+}
+
+// Values returns a copy of the values in the sample.
+func (s *SampleSnapshot) Values() []int64 {
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return values
+}
+
+// Variance returns the variance of values at the time the snapshot was taken.
+func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
+
+// SampleStdDev returns the standard deviation of the slice of int64.
+func SampleStdDev(values []int64) float64 {
+	return math.Sqrt(SampleVariance(values))
+}
+
+// SampleSum returns the sum of the slice of int64.
+func SampleSum(values []int64) int64 {
+	var sum int64
+	for _, v := range values {
+		sum += v
+	}
+	return sum
+}
+
+// SampleVariance returns the variance of the slice of int64.
+func SampleVariance(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	m := SampleMean(values)
+	var sum float64
+	for _, v := range values {
+		d := float64(v) - m
+		sum += d * d
+	}
+	return sum / float64(len(values))
+}
+
+// A uniform sample using Vitter's Algorithm R.
+//
+// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
+type UniformSample struct {
+	count         int64
+	mutex         sync.Mutex
+	reservoirSize int
+	values        []int64
+}
+
+// NewUniformSample constructs a new uniform sample with the given reservoir
+// size.
+func NewUniformSample(reservoirSize int) Sample {
+	if UseNilMetrics {
+		return NilSample{}
+	}
+	return &UniformSample{
+		reservoirSize: reservoirSize,
+		values:        make([]int64, 0, reservoirSize),
+	}
+}
+
+// Clear clears all samples.
+func (s *UniformSample) Clear() {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count = 0
+	s.values = make([]int64, 0, s.reservoirSize)
+}
+
+// Count returns the number of samples recorded, which may exceed the
+// reservoir size.
+func (s *UniformSample) Count() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return s.count
+}
+
+// Max returns the maximum value in the sample, which may not be the maximum
+// value ever to be part of the sample.
+func (s *UniformSample) Max() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMax(s.values)
+}
+
+// Mean returns the mean of the values in the sample.
+func (s *UniformSample) Mean() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMean(s.values)
+}
+
+// Min returns the minimum value in the sample, which may not be the minimum
+// value ever to be part of the sample.
+func (s *UniformSample) Min() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleMin(s.values)
+}
+
+// Percentile returns an arbitrary percentile of values in the sample.
+func (s *UniformSample) Percentile(p float64) float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SamplePercentile(s.values, p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of values in the
+// sample.
+func (s *UniformSample) Percentiles(ps []float64) []float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SamplePercentiles(s.values, ps)
+}
+
+// Size returns the size of the sample, which is at most the reservoir size.
+func (s *UniformSample) Size() int {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return len(s.values)
+}
+
+// Snapshot returns a read-only copy of the sample.
+func (s *UniformSample) Snapshot() Sample {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return &SampleSnapshot{
+		count:  s.count,
+		values: values,
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (s *UniformSample) StdDev() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleStdDev(s.values)
+}
+
+// Sum returns the sum of the values in the sample.
+func (s *UniformSample) Sum() int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleSum(s.values)
+}
+
+// Update samples a new value.
+func (s *UniformSample) Update(v int64) {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	s.count++
+	if len(s.values) < s.reservoirSize {
+		s.values = append(s.values, v)
+	} else {
+		r := rand.Int63n(s.count)
+		if r < int64(len(s.values)) {
+			s.values[int(r)] = v
+		}
+	}
+}
+
+// Values returns a copy of the values in the sample.
+func (s *UniformSample) Values() []int64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	values := make([]int64, len(s.values))
+	copy(values, s.values)
+	return values
+}
+
+// Variance returns the variance of the values in the sample.
+func (s *UniformSample) Variance() float64 {
+	s.mutex.Lock()
+	defer s.mutex.Unlock()
+	return SampleVariance(s.values)
+}
+
+// expDecaySample represents an individual sample in a heap.
+type expDecaySample struct {
+	k float64
+	v int64
+}
+
+func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
+	return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
+}
+
+// expDecaySampleHeap is a min-heap of expDecaySamples.
+// The internal implementation is copied from the standard library's container/heap.
+type expDecaySampleHeap struct {
+	s []expDecaySample
+}
+
+func (h *expDecaySampleHeap) Clear() {
+	h.s = h.s[:0]
+}
+
+func (h *expDecaySampleHeap) Push(s expDecaySample) {
+	n := len(h.s)
+	h.s = h.s[0 : n+1]
+	h.s[n] = s
+	h.up(n)
+}
+
+func (h *expDecaySampleHeap) Pop() expDecaySample {
+	n := len(h.s) - 1
+	h.s[0], h.s[n] = h.s[n], h.s[0]
+	h.down(0, n)
+
+	n = len(h.s)
+	s := h.s[n-1]
+	h.s = h.s[0 : n-1]
+	return s
+}
+
+func (h *expDecaySampleHeap) Size() int {
+	return len(h.s)
+}
+
+func (h *expDecaySampleHeap) Values() []expDecaySample {
+	return h.s
+}
+
+func (h *expDecaySampleHeap) up(j int) {
+	for {
+		i := (j - 1) / 2 // parent
+		if i == j || !(h.s[j].k < h.s[i].k) {
+			break
+		}
+		h.s[i], h.s[j] = h.s[j], h.s[i]
+		j = i
+	}
+}
+
+func (h *expDecaySampleHeap) down(i, n int) {
+	for {
+		j1 := 2*i + 1
+		if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
+			break
+		}
+		j := j1 // left child
+		if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
+			j = j2 // = 2*i + 2  // right child
+		}
+		if !(h.s[j].k < h.s[i].k) {
+			break
+		}
+		h.s[i], h.s[j] = h.s[j], h.s[i]
+		i = j
+	}
+}
+
+type int64Slice []int64
+
+func (p int64Slice) Len() int           { return len(p) }
+func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p int64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
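A brief sketch (not part of the vendored file) exercising the exponentially-decaying sample defined above; 1028/0.015 are the same reservoir size and alpha the package uses for its default histograms and timers.

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	s := metrics.NewExpDecaySample(1028, 0.015)
	for i := int64(1); i <= 10000; i++ {
		s.Update(i) // only a recency-biased reservoir of values is kept
	}

	snap := s.Snapshot() // read-only SampleSnapshot
	ps := snap.Percentiles([]float64{0.5, 0.95, 0.99})
	fmt.Printf("count=%d size=%d p50=%.0f p95=%.0f p99=%.0f\n",
		snap.Count(), snap.Size(), ps[0], ps[1], ps[2])
}
```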
diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go
new file mode 100644
index 0000000..693f190
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/syslog.go
@@ -0,0 +1,78 @@
+// +build !windows
+
+package metrics
+
+import (
+	"fmt"
+	"log/syslog"
+	"time"
+)
+
+// Syslog outputs each metric in the given registry to syslog periodically
+// using the given syslogger.
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
+	for range time.Tick(d) {
+		r.Each(func(name string, i interface{}) {
+			switch metric := i.(type) {
+			case Counter:
+				w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
+			case Gauge:
+				w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
+			case GaugeFloat64:
+				w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
+			case Healthcheck:
+				metric.Check()
+				w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
+			case Histogram:
+				h := metric.Snapshot()
+				ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				w.Info(fmt.Sprintf(
+					"histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
+					name,
+					h.Count(),
+					h.Min(),
+					h.Max(),
+					h.Mean(),
+					h.StdDev(),
+					ps[0],
+					ps[1],
+					ps[2],
+					ps[3],
+					ps[4],
+				))
+			case Meter:
+				m := metric.Snapshot()
+				w.Info(fmt.Sprintf(
+					"meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
+					name,
+					m.Count(),
+					m.Rate1(),
+					m.Rate5(),
+					m.Rate15(),
+					m.RateMean(),
+				))
+			case Timer:
+				t := metric.Snapshot()
+				ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+				w.Info(fmt.Sprintf(
+					"timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
+					name,
+					t.Count(),
+					t.Min(),
+					t.Max(),
+					t.Mean(),
+					t.StdDev(),
+					ps[0],
+					ps[1],
+					ps[2],
+					ps[3],
+					ps[4],
+					t.Rate1(),
+					t.Rate5(),
+					t.Rate15(),
+					t.RateMean(),
+				))
+			}
+		})
+	}
+}
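A hedged sketch (not part of the vendored file, and non-Windows only because of the build tag above) of starting this reporter; the syslog tag string and the interval are placeholders.

```go
package main

import (
	"log"
	"log/syslog"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	w, err := syslog.New(syslog.LOG_INFO, "myapp")
	if err != nil {
		log.Fatal(err)
	}
	// Report every metric in the default registry to syslog once a minute.
	go metrics.Syslog(metrics.DefaultRegistry, time.Minute, w)

	select {}
}
```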
diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go
new file mode 100644
index 0000000..d6ec4c6
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/timer.go
@@ -0,0 +1,329 @@
+package metrics
+
+import (
+	"sync"
+	"time"
+)
+
+// Timers capture the duration and rate of events.
+type Timer interface {
+	Count() int64
+	Max() int64
+	Mean() float64
+	Min() int64
+	Percentile(float64) float64
+	Percentiles([]float64) []float64
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+	Snapshot() Timer
+	StdDev() float64
+	Stop()
+	Sum() int64
+	Time(func())
+	Update(time.Duration)
+	UpdateSince(time.Time)
+	Variance() float64
+}
+
+// GetOrRegisterTimer returns an existing Timer or constructs and registers a
+// new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func GetOrRegisterTimer(name string, r Registry) Timer {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewTimer).(Timer)
+}
+
+// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
+func NewCustomTimer(h Histogram, m Meter) Timer {
+	if UseNilMetrics {
+		return NilTimer{}
+	}
+	return &StandardTimer{
+		histogram: h,
+		meter:     m,
+	}
+}
+
+// NewRegisteredTimer constructs and registers a new StandardTimer.
+// Be sure to unregister the timer from the registry once it is of no use to
+// allow for garbage collection.
+func NewRegisteredTimer(name string, r Registry) Timer {
+	c := NewTimer()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewTimer constructs a new StandardTimer using an exponentially-decaying
+// sample with the same reservoir size and alpha as UNIX load averages.
+// Be sure to call Stop() once the timer is of no use to allow for garbage collection.
+func NewTimer() Timer {
+	if UseNilMetrics {
+		return NilTimer{}
+	}
+	return &StandardTimer{
+		histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
+		meter:     NewMeter(),
+	}
+}
+
+// NilTimer is a no-op Timer.
+type NilTimer struct {
+	h Histogram
+	m Meter
+}
+
+// Count is a no-op.
+func (NilTimer) Count() int64 { return 0 }
+
+// Max is a no-op.
+func (NilTimer) Max() int64 { return 0 }
+
+// Mean is a no-op.
+func (NilTimer) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilTimer) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilTimer) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilTimer) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Rate1 is a no-op.
+func (NilTimer) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilTimer) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilTimer) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilTimer) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilTimer) Snapshot() Timer { return NilTimer{} }
+
+// StdDev is a no-op.
+func (NilTimer) StdDev() float64 { return 0.0 }
+
+// Stop is a no-op.
+func (NilTimer) Stop() {}
+
+// Sum is a no-op.
+func (NilTimer) Sum() int64 { return 0 }
+
+// Time is a no-op.
+func (NilTimer) Time(func()) {}
+
+// Update is a no-op.
+func (NilTimer) Update(time.Duration) {}
+
+// UpdateSince is a no-op.
+func (NilTimer) UpdateSince(time.Time) {}
+
+// Variance is a no-op.
+func (NilTimer) Variance() float64 { return 0.0 }
+
+// StandardTimer is the standard implementation of a Timer and uses a Histogram
+// and Meter.
+type StandardTimer struct {
+	histogram Histogram
+	meter     Meter
+	mutex     sync.Mutex
+}
+
+// Count returns the number of events recorded.
+func (t *StandardTimer) Count() int64 {
+	return t.histogram.Count()
+}
+
+// Max returns the maximum value in the sample.
+func (t *StandardTimer) Max() int64 {
+	return t.histogram.Max()
+}
+
+// Mean returns the mean of the values in the sample.
+func (t *StandardTimer) Mean() float64 {
+	return t.histogram.Mean()
+}
+
+// Min returns the minimum value in the sample.
+func (t *StandardTimer) Min() int64 {
+	return t.histogram.Min()
+}
+
+// Percentile returns an arbitrary percentile of the values in the sample.
+func (t *StandardTimer) Percentile(p float64) float64 {
+	return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of the values in the
+// sample.
+func (t *StandardTimer) Percentiles(ps []float64) []float64 {
+	return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (t *StandardTimer) Rate1() float64 {
+	return t.meter.Rate1()
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (t *StandardTimer) Rate5() float64 {
+	return t.meter.Rate5()
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (t *StandardTimer) Rate15() float64 {
+	return t.meter.Rate15()
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (t *StandardTimer) RateMean() float64 {
+	return t.meter.RateMean()
+}
+
+// Snapshot returns a read-only copy of the timer.
+func (t *StandardTimer) Snapshot() Timer {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	return &TimerSnapshot{
+		histogram: t.histogram.Snapshot().(*HistogramSnapshot),
+		meter:     t.meter.Snapshot().(*MeterSnapshot),
+	}
+}
+
+// StdDev returns the standard deviation of the values in the sample.
+func (t *StandardTimer) StdDev() float64 {
+	return t.histogram.StdDev()
+}
+
+// Stop stops the meter.
+func (t *StandardTimer) Stop() {
+	t.meter.Stop()
+}
+
+// Sum returns the sum in the sample.
+func (t *StandardTimer) Sum() int64 {
+	return t.histogram.Sum()
+}
+
+// Time records the duration of the execution of the given function.
+func (t *StandardTimer) Time(f func()) {
+	ts := time.Now()
+	f()
+	t.Update(time.Since(ts))
+}
+
+// Update records the duration of an event.
+func (t *StandardTimer) Update(d time.Duration) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.histogram.Update(int64(d))
+	t.meter.Mark(1)
+}
+
+// UpdateSince records the duration of an event that started at the given time and ends now.
+func (t *StandardTimer) UpdateSince(ts time.Time) {
+	t.mutex.Lock()
+	defer t.mutex.Unlock()
+	t.histogram.Update(int64(time.Since(ts)))
+	t.meter.Mark(1)
+}
+
+// Variance returns the variance of the values in the sample.
+func (t *StandardTimer) Variance() float64 {
+	return t.histogram.Variance()
+}
+
+// TimerSnapshot is a read-only copy of another Timer.
+type TimerSnapshot struct {
+	histogram *HistogramSnapshot
+	meter     *MeterSnapshot
+}
+
+// Count returns the number of events recorded at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
+
+// Max returns the maximum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
+
+// Min returns the minimum value at the time the snapshot was taken.
+func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
+
+// Percentile returns an arbitrary percentile of sampled values at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) Percentile(p float64) float64 {
+	return t.histogram.Percentile(p)
+}
+
+// Percentiles returns a slice of arbitrary percentiles of sampled values at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
+	return t.histogram.Percentiles(ps)
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
+
+// Snapshot returns the snapshot.
+func (t *TimerSnapshot) Snapshot() Timer { return t }
+
+// StdDev returns the standard deviation of the values at the time the snapshot
+// was taken.
+func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
+
+// Stop is a no-op.
+func (t *TimerSnapshot) Stop() {}
+
+// Sum returns the sum at the time the snapshot was taken.
+func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
+
+// Time panics.
+func (*TimerSnapshot) Time(func()) {
+	panic("Time called on a TimerSnapshot")
+}
+
+// Update panics.
+func (*TimerSnapshot) Update(time.Duration) {
+	panic("Update called on a TimerSnapshot")
+}
+
+// UpdateSince panics.
+func (*TimerSnapshot) UpdateSince(time.Time) {
+	panic("UpdateSince called on a TimerSnapshot")
+}
+
+// Variance returns the variance of the values at the time the snapshot was
+// taken.
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
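A small usage sketch (not part of the vendored file) of the Timer API above; the metric name is illustrative. Passing nil as the registry falls back to DefaultRegistry, as GetOrRegisterTimer documents.

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	t := metrics.GetOrRegisterTimer("db.query.latency", nil) // nil -> DefaultRegistry

	t.Time(func() { // times the function and marks one event on the meter
		time.Sleep(5 * time.Millisecond)
	})

	start := time.Now()
	time.Sleep(2 * time.Millisecond)
	t.UpdateSince(start) // same, for work not easily wrapped in a closure

	fmt.Printf("count=%d mean=%.0fns p95=%.0fns\n", t.Count(), t.Mean(), t.Percentile(0.95))
}
```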
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
new file mode 100644
index 0000000..c4ae91e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/validate.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+set -e
+
+# check there are no formatting issues
+GOFMT_LINES=`gofmt -l . | wc -l | xargs`
+test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
+
+# run the tests for the root package
+go test -race .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
new file mode 100644
index 0000000..091e971
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/writer.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"time"
+)
+
+// Write sorts and writes each metric in the given registry periodically to
+// the given io.Writer.
+func Write(r Registry, d time.Duration, w io.Writer) {
+	for range time.Tick(d) {
+		WriteOnce(r, w)
+	}
+}
+
+// WriteOnce sorts and writes metrics in the given registry to the given
+// io.Writer.
+func WriteOnce(r Registry, w io.Writer) {
+	var namedMetrics namedMetricSlice
+	r.Each(func(name string, i interface{}) {
+		namedMetrics = append(namedMetrics, namedMetric{name, i})
+	})
+
+	sort.Sort(namedMetrics)
+	for _, namedMetric := range namedMetrics {
+		switch metric := namedMetric.m.(type) {
+		case Counter:
+			fmt.Fprintf(w, "counter %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", metric.Count())
+		case Gauge:
+			fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  value:       %9d\n", metric.Value())
+		case GaugeFloat64:
+			fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  value:       %f\n", metric.Value())
+		case Healthcheck:
+			metric.Check()
+			fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  error:       %v\n", metric.Error())
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", h.Count())
+			fmt.Fprintf(w, "  min:         %9d\n", h.Min())
+			fmt.Fprintf(w, "  max:         %9d\n", h.Max())
+			fmt.Fprintf(w, "  mean:        %12.2f\n", h.Mean())
+			fmt.Fprintf(w, "  stddev:      %12.2f\n", h.StdDev())
+			fmt.Fprintf(w, "  median:      %12.2f\n", ps[0])
+			fmt.Fprintf(w, "  75%%:         %12.2f\n", ps[1])
+			fmt.Fprintf(w, "  95%%:         %12.2f\n", ps[2])
+			fmt.Fprintf(w, "  99%%:         %12.2f\n", ps[3])
+			fmt.Fprintf(w, "  99.9%%:       %12.2f\n", ps[4])
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "meter %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", m.Count())
+			fmt.Fprintf(w, "  1-min rate:  %12.2f\n", m.Rate1())
+			fmt.Fprintf(w, "  5-min rate:  %12.2f\n", m.Rate5())
+			fmt.Fprintf(w, "  15-min rate: %12.2f\n", m.Rate15())
+			fmt.Fprintf(w, "  mean rate:   %12.2f\n", m.RateMean())
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "timer %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", t.Count())
+			fmt.Fprintf(w, "  min:         %9d\n", t.Min())
+			fmt.Fprintf(w, "  max:         %9d\n", t.Max())
+			fmt.Fprintf(w, "  mean:        %12.2f\n", t.Mean())
+			fmt.Fprintf(w, "  stddev:      %12.2f\n", t.StdDev())
+			fmt.Fprintf(w, "  median:      %12.2f\n", ps[0])
+			fmt.Fprintf(w, "  75%%:         %12.2f\n", ps[1])
+			fmt.Fprintf(w, "  95%%:         %12.2f\n", ps[2])
+			fmt.Fprintf(w, "  99%%:         %12.2f\n", ps[3])
+			fmt.Fprintf(w, "  99.9%%:       %12.2f\n", ps[4])
+			fmt.Fprintf(w, "  1-min rate:  %12.2f\n", t.Rate1())
+			fmt.Fprintf(w, "  5-min rate:  %12.2f\n", t.Rate5())
+			fmt.Fprintf(w, "  15-min rate: %12.2f\n", t.Rate15())
+			fmt.Fprintf(w, "  mean rate:   %12.2f\n", t.RateMean())
+		}
+	}
+}
+
+type namedMetric struct {
+	name string
+	m    interface{}
+}
+
+// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
+type namedMetricSlice []namedMetric
+
+func (nms namedMetricSlice) Len() int { return len(nms) }
+
+func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
+
+func (nms namedMetricSlice) Less(i, j int) bool {
+	return nms[i].name < nms[j].name
+}
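To close, a sketch (not part of the vendored file; the counter name is illustrative) of the plain-text reporter above, dumping the default registry to stdout. Write behaves the same way but repeats on the given interval.

```go
package main

import (
	"os"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	c := metrics.NewCounter()
	metrics.Register("example.count", c)
	c.Inc(3)

	metrics.WriteOnce(metrics.DefaultRegistry, os.Stdout) // sorted, human-readable dump
}
```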